git.ipfire.org Git - people/ms/linux.git/commitdiff
Importing "grsecurity-3.1-3.19.2-201503201903.patch" grsecurity-3.1-3.19.x
author: Michael Tremer <michael.tremer@ipfire.org>
Wed, 25 Mar 2015 15:31:57 +0000 (11:31 -0400)
committer: Michael Tremer <michael.tremer@ipfire.org>
Wed, 25 Mar 2015 15:31:57 +0000 (11:31 -0400)
2051 files changed:
Documentation/dontdiff
Documentation/kbuild/makefiles.txt
Documentation/kernel-parameters.txt
Makefile
arch/alpha/include/asm/atomic.h
arch/alpha/include/asm/cache.h
arch/alpha/include/asm/elf.h
arch/alpha/include/asm/pgalloc.h
arch/alpha/include/asm/pgtable.h
arch/alpha/kernel/module.c
arch/alpha/kernel/osf_sys.c
arch/alpha/mm/fault.c
arch/arm/Kconfig
arch/arm/include/asm/atomic.h
arch/arm/include/asm/barrier.h
arch/arm/include/asm/cache.h
arch/arm/include/asm/cacheflush.h
arch/arm/include/asm/checksum.h
arch/arm/include/asm/cmpxchg.h
arch/arm/include/asm/domain.h
arch/arm/include/asm/elf.h
arch/arm/include/asm/fncpy.h
arch/arm/include/asm/futex.h
arch/arm/include/asm/kmap_types.h
arch/arm/include/asm/mach/dma.h
arch/arm/include/asm/mach/map.h
arch/arm/include/asm/outercache.h
arch/arm/include/asm/page.h
arch/arm/include/asm/pgalloc.h
arch/arm/include/asm/pgtable-2level-hwdef.h
arch/arm/include/asm/pgtable-2level.h
arch/arm/include/asm/pgtable-3level.h
arch/arm/include/asm/pgtable.h
arch/arm/include/asm/psci.h
arch/arm/include/asm/smp.h
arch/arm/include/asm/thread_info.h
arch/arm/include/asm/tls.h
arch/arm/include/asm/uaccess.h
arch/arm/include/uapi/asm/ptrace.h
arch/arm/kernel/armksyms.c
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/entry-header.S
arch/arm/kernel/fiq.c
arch/arm/kernel/head.S
arch/arm/kernel/module.c
arch/arm/kernel/patch.c
arch/arm/kernel/process.c
arch/arm/kernel/psci.c
arch/arm/kernel/ptrace.c
arch/arm/kernel/setup.c
arch/arm/kernel/signal.c
arch/arm/kernel/smp.c
arch/arm/kernel/tcm.c
arch/arm/kernel/traps.c
arch/arm/kernel/vmlinux.lds.S
arch/arm/kvm/arm.c
arch/arm/lib/clear_user.S
arch/arm/lib/copy_from_user.S
arch/arm/lib/copy_page.S
arch/arm/lib/copy_to_user.S
arch/arm/lib/csumpartialcopyuser.S
arch/arm/lib/delay.c
arch/arm/lib/uaccess_with_memcpy.c
arch/arm/mach-at91/setup.c
arch/arm/mach-exynos/suspend.c
arch/arm/mach-keystone/keystone.c
arch/arm/mach-mvebu/coherency.c
arch/arm/mach-omap2/board-n8x0.c
arch/arm/mach-omap2/omap-mpuss-lowpower.c
arch/arm/mach-omap2/omap-smp.c
arch/arm/mach-omap2/omap-wakeupgen.c
arch/arm/mach-omap2/omap_device.c
arch/arm/mach-omap2/omap_device.h
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/powerdomains43xx_data.c
arch/arm/mach-omap2/wd_timer.c
arch/arm/mach-tegra/cpuidle-tegra20.c
arch/arm/mach-tegra/irq.c
arch/arm/mach-ux500/pm.c
arch/arm/mach-ux500/setup.h
arch/arm/mach-zynq/platsmp.c
arch/arm/mm/Kconfig
arch/arm/mm/alignment.c
arch/arm/mm/cache-l2x0.c
arch/arm/mm/context.c
arch/arm/mm/fault.c
arch/arm/mm/fault.h
arch/arm/mm/init.c
arch/arm/mm/ioremap.c
arch/arm/mm/mmap.c
arch/arm/mm/mmu.c
arch/arm/net/bpf_jit_32.c
arch/arm/plat-iop/setup.c
arch/arm/plat-omap/sram.c
arch/arm/plat-samsung/include/plat/dma-ops.h
arch/arm64/include/asm/barrier.h
arch/arm64/include/asm/percpu.h
arch/arm64/include/asm/uaccess.h
arch/avr32/include/asm/cache.h
arch/avr32/include/asm/elf.h
arch/avr32/include/asm/kmap_types.h
arch/avr32/mm/fault.c
arch/blackfin/include/asm/cache.h
arch/cris/include/arch-v10/arch/cache.h
arch/cris/include/arch-v32/arch/cache.h
arch/frv/include/asm/atomic.h
arch/frv/include/asm/cache.h
arch/frv/include/asm/kmap_types.h
arch/frv/mm/elf-fdpic.c
arch/hexagon/include/asm/cache.h
arch/ia64/Kconfig
arch/ia64/Makefile
arch/ia64/include/asm/atomic.h
arch/ia64/include/asm/barrier.h
arch/ia64/include/asm/cache.h
arch/ia64/include/asm/elf.h
arch/ia64/include/asm/pgalloc.h
arch/ia64/include/asm/pgtable.h
arch/ia64/include/asm/spinlock.h
arch/ia64/include/asm/uaccess.h
arch/ia64/kernel/module.c
arch/ia64/kernel/palinfo.c
arch/ia64/kernel/sys_ia64.c
arch/ia64/kernel/vmlinux.lds.S
arch/ia64/mm/fault.c
arch/ia64/mm/hugetlbpage.c
arch/ia64/mm/init.c
arch/m32r/include/asm/cache.h
arch/m32r/lib/usercopy.c
arch/m68k/include/asm/cache.h
arch/metag/include/asm/barrier.h
arch/metag/mm/hugetlbpage.c
arch/microblaze/include/asm/cache.h
arch/mips/Kconfig
arch/mips/cavium-octeon/dma-octeon.c
arch/mips/include/asm/atomic.h
arch/mips/include/asm/barrier.h
arch/mips/include/asm/cache.h
arch/mips/include/asm/elf.h
arch/mips/include/asm/exec.h
arch/mips/include/asm/hw_irq.h
arch/mips/include/asm/local.h
arch/mips/include/asm/page.h
arch/mips/include/asm/pgalloc.h
arch/mips/include/asm/pgtable.h
arch/mips/include/asm/thread_info.h
arch/mips/include/asm/uaccess.h
arch/mips/kernel/binfmt_elfn32.c
arch/mips/kernel/binfmt_elfo32.c
arch/mips/kernel/i8259.c
arch/mips/kernel/irq-gt641xx.c
arch/mips/kernel/irq.c
arch/mips/kernel/pm-cps.c
arch/mips/kernel/process.c
arch/mips/kernel/ptrace.c
arch/mips/kernel/reset.c
arch/mips/kernel/sync-r4k.c
arch/mips/kernel/traps.c
arch/mips/kvm/mips.c
arch/mips/mm/fault.c
arch/mips/mm/mmap.c
arch/mips/pci/pci-octeon.c
arch/mips/pci/pcie-octeon.c
arch/mips/sgi-ip27/ip27-nmi.c
arch/mips/sni/rm200.c
arch/mips/vr41xx/common/icu.c
arch/mips/vr41xx/common/irq.c
arch/mn10300/proc-mn103e010/include/proc/cache.h
arch/mn10300/proc-mn2ws0050/include/proc/cache.h
arch/openrisc/include/asm/cache.h
arch/parisc/include/asm/atomic.h
arch/parisc/include/asm/cache.h
arch/parisc/include/asm/elf.h
arch/parisc/include/asm/pgalloc.h
arch/parisc/include/asm/pgtable.h
arch/parisc/include/asm/uaccess.h
arch/parisc/kernel/module.c
arch/parisc/kernel/sys_parisc.c
arch/parisc/kernel/traps.c
arch/parisc/mm/fault.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/atomic.h
arch/powerpc/include/asm/barrier.h
arch/powerpc/include/asm/cache.h
arch/powerpc/include/asm/elf.h
arch/powerpc/include/asm/exec.h
arch/powerpc/include/asm/kmap_types.h
arch/powerpc/include/asm/local.h
arch/powerpc/include/asm/mman.h
arch/powerpc/include/asm/page.h
arch/powerpc/include/asm/page_64.h
arch/powerpc/include/asm/pgalloc-64.h
arch/powerpc/include/asm/pgtable.h
arch/powerpc/include/asm/pte-hash32.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/smp.h
arch/powerpc/include/asm/spinlock.h
arch/powerpc/include/asm/thread_info.h
arch/powerpc/include/asm/uaccess.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/exceptions-64e.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/module_32.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/traps.c
arch/powerpc/kernel/vdso.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/lib/usercopy_64.c
arch/powerpc/mm/fault.c
arch/powerpc/mm/mmap.c
arch/powerpc/mm/slice.c
arch/powerpc/platforms/cell/celleb_scc_pciex.c
arch/powerpc/platforms/cell/spufs/file.c
arch/s390/include/asm/atomic.h
arch/s390/include/asm/barrier.h
arch/s390/include/asm/cache.h
arch/s390/include/asm/elf.h
arch/s390/include/asm/exec.h
arch/s390/include/asm/uaccess.h
arch/s390/kernel/module.c
arch/s390/kernel/process.c
arch/s390/mm/mmap.c
arch/score/include/asm/cache.h
arch/score/include/asm/exec.h
arch/score/kernel/process.c
arch/sh/include/asm/cache.h
arch/sh/mm/mmap.c
arch/sparc/include/asm/atomic_64.h
arch/sparc/include/asm/barrier_64.h
arch/sparc/include/asm/cache.h
arch/sparc/include/asm/elf_32.h
arch/sparc/include/asm/elf_64.h
arch/sparc/include/asm/pgalloc_32.h
arch/sparc/include/asm/pgalloc_64.h
arch/sparc/include/asm/pgtable.h
arch/sparc/include/asm/pgtable_32.h
arch/sparc/include/asm/pgtsrmmu.h
arch/sparc/include/asm/setup.h
arch/sparc/include/asm/spinlock_64.h
arch/sparc/include/asm/thread_info_32.h
arch/sparc/include/asm/thread_info_64.h
arch/sparc/include/asm/uaccess.h
arch/sparc/include/asm/uaccess_32.h
arch/sparc/include/asm/uaccess_64.h
arch/sparc/kernel/Makefile
arch/sparc/kernel/process_32.c
arch/sparc/kernel/process_64.c
arch/sparc/kernel/prom_common.c
arch/sparc/kernel/ptrace_64.c
arch/sparc/kernel/smp_64.c
arch/sparc/kernel/sys_sparc_32.c
arch/sparc/kernel/sys_sparc_64.c
arch/sparc/kernel/syscalls.S
arch/sparc/kernel/traps_32.c
arch/sparc/kernel/traps_64.c
arch/sparc/kernel/unaligned_64.c
arch/sparc/lib/Makefile
arch/sparc/lib/atomic_64.S
arch/sparc/lib/ksyms.c
arch/sparc/mm/Makefile
arch/sparc/mm/fault_32.c
arch/sparc/mm/fault_64.c
arch/sparc/mm/hugetlbpage.c
arch/sparc/mm/init_64.c
arch/tile/Kconfig
arch/tile/include/asm/atomic_64.h
arch/tile/include/asm/cache.h
arch/tile/include/asm/uaccess.h
arch/tile/mm/hugetlbpage.c
arch/um/Makefile
arch/um/include/asm/cache.h
arch/um/include/asm/kmap_types.h
arch/um/include/asm/page.h
arch/um/include/asm/pgtable-3level.h
arch/um/kernel/process.c
arch/unicore32/include/asm/cache.h
arch/x86/Kconfig
arch/x86/Kconfig.cpu
arch/x86/Kconfig.debug
arch/x86/Makefile
arch/x86/boot/Makefile
arch/x86/boot/bitops.h
arch/x86/boot/boot.h
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/efi_stub_32.S
arch/x86/boot/compressed/efi_thunk_64.S
arch/x86/boot/compressed/head_32.S
arch/x86/boot/compressed/head_64.S
arch/x86/boot/compressed/misc.c
arch/x86/boot/cpucheck.c
arch/x86/boot/header.S
arch/x86/boot/memory.c
arch/x86/boot/video-vesa.c
arch/x86/boot/video.c
arch/x86/crypto/aes-x86_64-asm_64.S
arch/x86/crypto/aesni-intel_asm.S
arch/x86/crypto/blowfish-x86_64-asm_64.S
arch/x86/crypto/camellia-aesni-avx-asm_64.S
arch/x86/crypto/camellia-aesni-avx2-asm_64.S
arch/x86/crypto/camellia-x86_64-asm_64.S
arch/x86/crypto/cast5-avx-x86_64-asm_64.S
arch/x86/crypto/cast6-avx-x86_64-asm_64.S
arch/x86/crypto/crc32c-pcl-intel-asm_64.S
arch/x86/crypto/ghash-clmulni-intel_asm.S
arch/x86/crypto/salsa20-x86_64-asm_64.S
arch/x86/crypto/serpent-avx-x86_64-asm_64.S
arch/x86/crypto/serpent-avx2-asm_64.S
arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
arch/x86/crypto/sha1_ssse3_asm.S
arch/x86/crypto/sha256-avx-asm.S
arch/x86/crypto/sha256-avx2-asm.S
arch/x86/crypto/sha256-ssse3-asm.S
arch/x86/crypto/sha512-avx-asm.S
arch/x86/crypto/sha512-avx2-asm.S
arch/x86/crypto/sha512-ssse3-asm.S
arch/x86/crypto/twofish-avx-x86_64-asm_64.S
arch/x86/crypto/twofish-x86_64-asm_64-3way.S
arch/x86/crypto/twofish-x86_64-asm_64.S
arch/x86/ia32/ia32_aout.c
arch/x86/ia32/ia32_signal.c
arch/x86/ia32/ia32entry.S
arch/x86/ia32/sys_ia32.c
arch/x86/include/asm/alternative-asm.h
arch/x86/include/asm/alternative.h
arch/x86/include/asm/apic.h
arch/x86/include/asm/apm.h
arch/x86/include/asm/atomic.h
arch/x86/include/asm/atomic64_32.h
arch/x86/include/asm/atomic64_64.h
arch/x86/include/asm/barrier.h
arch/x86/include/asm/bitops.h
arch/x86/include/asm/boot.h
arch/x86/include/asm/cache.h
arch/x86/include/asm/calling.h
arch/x86/include/asm/checksum_32.h
arch/x86/include/asm/cmpxchg.h
arch/x86/include/asm/compat.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/desc.h
arch/x86/include/asm/desc_defs.h
arch/x86/include/asm/div64.h
arch/x86/include/asm/elf.h
arch/x86/include/asm/emergency-restart.h
arch/x86/include/asm/floppy.h
arch/x86/include/asm/fpu-internal.h
arch/x86/include/asm/futex.h
arch/x86/include/asm/hw_irq.h
arch/x86/include/asm/i8259.h
arch/x86/include/asm/io.h
arch/x86/include/asm/irqflags.h
arch/x86/include/asm/kprobes.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/local.h
arch/x86/include/asm/mmu.h
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/module.h
arch/x86/include/asm/nmi.h
arch/x86/include/asm/page.h
arch/x86/include/asm/page_64.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/pgalloc.h
arch/x86/include/asm/pgtable-2level.h
arch/x86/include/asm/pgtable-3level.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_32.h
arch/x86/include/asm/pgtable_32_types.h
arch/x86/include/asm/pgtable_64.h
arch/x86/include/asm/pgtable_64_types.h
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/preempt.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/ptrace.h
arch/x86/include/asm/qrwlock.h
arch/x86/include/asm/realmode.h
arch/x86/include/asm/reboot.h
arch/x86/include/asm/rmwcc.h
arch/x86/include/asm/rwsem.h
arch/x86/include/asm/segment.h
arch/x86/include/asm/smap.h
arch/x86/include/asm/smp.h
arch/x86/include/asm/stackprotector.h
arch/x86/include/asm/stacktrace.h
arch/x86/include/asm/switch_to.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/tlbflush.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/uaccess_32.h
arch/x86/include/asm/uaccess_64.h
arch/x86/include/asm/word-at-a-time.h
arch/x86/include/asm/x86_init.h
arch/x86/include/asm/xen/page.h
arch/x86/include/asm/xsave.h
arch/x86/include/uapi/asm/e820.h
arch/x86/include/uapi/asm/ptrace-abi.h
arch/x86/kernel/Makefile
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/acpi/sleep.c
arch/x86/kernel/acpi/wakeup_32.S
arch/x86/kernel/alternative.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/apic_flat_64.c
arch/x86/kernel/apic/apic_noop.c
arch/x86/kernel/apic/bigsmp_32.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/probe_32.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/apic/x2apic_cluster.c
arch/x86/kernel/apic/x2apic_phys.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/apm_32.c
arch/x86/kernel/asm-offsets.c
arch/x86/kernel/asm-offsets_64.c
arch/x86/kernel/cpu/Makefile
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mcheck/p5.c
arch/x86/kernel/cpu/mcheck/winchip.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/cpu/microcode/intel_early.c
arch/x86/kernel/cpu/mtrr/main.c
arch/x86/kernel/cpu/mtrr/mtrr.h
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_amd_iommu.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_rapl.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/cpu/perf_event_intel_uncore.h
arch/x86/kernel/cpuid.c
arch/x86/kernel/crash.c
arch/x86/kernel/crash_dump_64.c
arch/x86/kernel/doublefault.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/e820.c
arch/x86/kernel/early_printk.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/espfix_64.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/head64.c
arch/x86/kernel/head_32.S
arch/x86/kernel/head_64.S
arch/x86/kernel/i386_ksyms_32.c
arch/x86/kernel/i387.c
arch/x86/kernel/i8259.c
arch/x86/kernel/io_delay.c
arch/x86/kernel/ioport.c
arch/x86/kernel/irq.c
arch/x86/kernel/irq_32.c
arch/x86/kernel/irq_64.c
arch/x86/kernel/jump_label.c
arch/x86/kernel/kgdb.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/kprobes/opt.c
arch/x86/kernel/ksysfs.c
arch/x86/kernel/ldt.c
arch/x86/kernel/machine_kexec_32.c
arch/x86/kernel/mcount_64.S
arch/x86/kernel/module.c
arch/x86/kernel/msr.c
arch/x86/kernel/nmi.c
arch/x86/kernel/nmi_selftest.c
arch/x86/kernel/paravirt-spinlocks.c
arch/x86/kernel/paravirt.c
arch/x86/kernel/paravirt_patch_64.c
arch/x86/kernel/pci-calgary_64.c
arch/x86/kernel/pci-iommu_table.c
arch/x86/kernel/pci-swiotlb.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/pvclock.c
arch/x86/kernel/reboot.c
arch/x86/kernel/reboot_fixups_32.c
arch/x86/kernel/relocate_kernel_64.S
arch/x86/kernel/setup.c
arch/x86/kernel/setup_percpu.c
arch/x86/kernel/signal.c
arch/x86/kernel/smp.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/step.c
arch/x86/kernel/sys_x86_64.c
arch/x86/kernel/tboot.c
arch/x86/kernel/time.c
arch/x86/kernel/tls.c
arch/x86/kernel/tracepoint.c
arch/x86/kernel/traps.c
arch/x86/kernel/tsc.c
arch/x86/kernel/uprobes.c
arch/x86/kernel/verify_cpu.S
arch/x86/kernel/vm86_32.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/kernel/vsyscall_64.c
arch/x86/kernel/x8664_ksyms_64.c
arch/x86/kernel/x86_init.c
arch/x86/kernel/xsave.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/emulate.c
arch/x86/kvm/lapic.c
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lguest/boot.c
arch/x86/lib/atomic64_386_32.S
arch/x86/lib/atomic64_cx8_32.S
arch/x86/lib/checksum_32.S
arch/x86/lib/clear_page_64.S
arch/x86/lib/cmpxchg16b_emu.S
arch/x86/lib/copy_page_64.S
arch/x86/lib/copy_user_64.S
arch/x86/lib/copy_user_nocache_64.S
arch/x86/lib/csum-copy_64.S
arch/x86/lib/csum-wrappers_64.c
arch/x86/lib/getuser.S
arch/x86/lib/insn.c
arch/x86/lib/iomap_copy_64.S
arch/x86/lib/memcpy_64.S
arch/x86/lib/memmove_64.S
arch/x86/lib/memset_64.S
arch/x86/lib/mmx_32.c
arch/x86/lib/msr-reg.S
arch/x86/lib/putuser.S
arch/x86/lib/rwsem.S
arch/x86/lib/thunk_64.S
arch/x86/lib/usercopy_32.c
arch/x86/lib/usercopy_64.c
arch/x86/mm/Makefile
arch/x86/mm/extable.c
arch/x86/mm/fault.c
arch/x86/mm/gup.c
arch/x86/mm/highmem_32.c
arch/x86/mm/hugetlbpage.c
arch/x86/mm/init.c
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/iomap_32.c
arch/x86/mm/ioremap.c
arch/x86/mm/kmemcheck/kmemcheck.c
arch/x86/mm/mmap.c
arch/x86/mm/mmio-mod.c
arch/x86/mm/numa.c
arch/x86/mm/pageattr.c
arch/x86/mm/pat.c
arch/x86/mm/pat_rbtree.c
arch/x86/mm/pf_in.c
arch/x86/mm/pgtable.c
arch/x86/mm/pgtable_32.c
arch/x86/mm/physaddr.c
arch/x86/mm/setup_nx.c
arch/x86/mm/tlb.c
arch/x86/net/bpf_jit.S
arch/x86/net/bpf_jit_comp.c
arch/x86/oprofile/backtrace.c
arch/x86/oprofile/nmi_int.c
arch/x86/oprofile/op_model_amd.c
arch/x86/oprofile/op_model_ppro.c
arch/x86/oprofile/op_x86_model.h
arch/x86/pci/intel_mid_pci.c
arch/x86/pci/irq.c
arch/x86/pci/pcbios.c
arch/x86/platform/efi/efi_32.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/efi/efi_stub_32.S
arch/x86/platform/efi/efi_stub_64.S
arch/x86/platform/intel-mid/intel-mid.c
arch/x86/platform/intel-mid/intel_mid_weak_decls.h
arch/x86/platform/intel-mid/mfld.c
arch/x86/platform/intel-mid/mrfl.c
arch/x86/platform/olpc/olpc_dt.c
arch/x86/power/cpu.c
arch/x86/realmode/init.c
arch/x86/realmode/rm/Makefile
arch/x86/realmode/rm/header.S
arch/x86/realmode/rm/trampoline_32.S
arch/x86/realmode/rm/trampoline_64.S
arch/x86/realmode/rm/wakeup_asm.S
arch/x86/tools/Makefile
arch/x86/tools/relocs.c
arch/x86/um/mem_32.c
arch/x86/um/tls_32.c
arch/x86/vdso/Makefile
arch/x86/vdso/vdso2c.h
arch/x86/vdso/vdso32-setup.c
arch/x86/vdso/vma.c
arch/x86/xen/Kconfig
arch/x86/xen/enlighten.c
arch/x86/xen/mmu.c
arch/x86/xen/smp.c
arch/x86/xen/xen-asm_32.S
arch/x86/xen/xen-head.S
arch/x86/xen/xen-ops.h
arch/xtensa/variants/dc232b/include/variant/core.h
arch/xtensa/variants/fsf/include/variant/core.h
block/bio.c
block/blk-iopoll.c
block/blk-map.c
block/blk-softirq.c
block/bsg.c
block/compat_ioctl.c
block/genhd.c
block/partitions/efi.c
block/scsi_ioctl.c
crypto/cryptd.c
crypto/pcrypt.c
drivers/acpi/acpica/hwxfsleep.c
drivers/acpi/apei/apei-internal.h
drivers/acpi/apei/ghes.c
drivers/acpi/bgrt.c
drivers/acpi/blacklist.c
drivers/acpi/custom_method.c
drivers/acpi/device_pm.c
drivers/acpi/processor_idle.c
drivers/acpi/sysfs.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-scsi.c
drivers/ata/libata.h
drivers/ata/pata_arasan_cf.c
drivers/atm/adummy.c
drivers/atm/ambassador.c
drivers/atm/atmtcp.c
drivers/atm/eni.c
drivers/atm/firestream.c
drivers/atm/fore200e.c
drivers/atm/he.c
drivers/atm/horizon.c
drivers/atm/idt77252.c
drivers/atm/iphase.c
drivers/atm/lanai.c
drivers/atm/nicstar.c
drivers/atm/solos-pci.c
drivers/atm/suni.c
drivers/atm/uPD98402.c
drivers/atm/zatm.c
drivers/base/bus.c
drivers/base/devtmpfs.c
drivers/base/node.c
drivers/base/power/domain.c
drivers/base/power/sysfs.c
drivers/base/power/wakeup.c
drivers/base/syscore.c
drivers/block/cciss.c
drivers/block/cciss.h
drivers/block/cpqarray.c
drivers/block/cpqarray.h
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_nl.c
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_worker.c
drivers/block/loop.c
drivers/block/nvme-core.c
drivers/block/pktcdvd.c
drivers/block/rbd.c
drivers/block/smart1,2.h
drivers/bluetooth/btwilink.c
drivers/cdrom/cdrom.c
drivers/cdrom/gdrom.c
drivers/char/Kconfig
drivers/char/agp/compat_ioctl.c
drivers/char/agp/frontend.c
drivers/char/genrtc.c
drivers/char/hpet.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/mem.c
drivers/char/nvram.c
drivers/char/pcmcia/synclink_cs.c
drivers/char/random.c
drivers/char/sonypi.c
drivers/char/tpm/tpm_acpi.c
drivers/char/tpm/tpm_eventlog.c
drivers/char/virtio_console.c
drivers/clk/clk-composite.c
drivers/clk/socfpga/clk-gate.c
drivers/clk/socfpga/clk-pll.c
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/cpufreq-dt.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/cpufreq_governor.h
drivers/cpufreq/cpufreq_ondemand.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/p4-clockmod.c
drivers/cpufreq/sparc-us3-cpufreq.c
drivers/cpufreq/speedstep-centrino.c
drivers/cpuidle/driver.c
drivers/cpuidle/governor.c
drivers/cpuidle/sysfs.c
drivers/crypto/hifn_795x.c
drivers/devfreq/devfreq.c
drivers/dma/sh/shdma-base.c
drivers/dma/sh/shdmac.c
drivers/edac/edac_device.c
drivers/edac/edac_mc_sysfs.c
drivers/edac/edac_pci.c
drivers/edac/edac_pci_sysfs.c
drivers/edac/mce_amd.h
drivers/firewire/core-card.c
drivers/firewire/core-device.c
drivers/firewire/core-transaction.c
drivers/firewire/core.h
drivers/firewire/ohci.c
drivers/firmware/dmi-id.c
drivers/firmware/dmi_scan.c
drivers/firmware/efi/cper.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/efivars.c
drivers/firmware/google/memconsole.c
drivers/gpio/gpio-em.c
drivers/gpio/gpio-ich.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpio-rcar.c
drivers/gpio/gpio-vr41xx.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_global.c
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_ioc32.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/i810/i810_drv.h
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_ioc32.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/mga/mga_drv.h
drivers/gpu/drm/mga/mga_ioc32.c
drivers/gpu/drm/mga/mga_irq.c
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_drm.h
drivers/gpu/drm/nouveau/nouveau_ioc32.c
drivers/gpu/drm/nouveau/nouveau_ttm.c
drivers/gpu/drm/nouveau/nouveau_vga.c
drivers/gpu/drm/qxl/qxl_cmd.c
drivers/gpu/drm/qxl/qxl_debugfs.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/qxl/qxl_ioctl.c
drivers/gpu/drm/qxl/qxl_irq.c
drivers/gpu/drm/qxl/qxl_ttm.c
drivers/gpu/drm/r128/r128_cce.c
drivers/gpu/drm/r128/r128_drv.h
drivers/gpu/drm/r128/r128_ioc32.c
drivers/gpu/drm/r128/r128_irq.c
drivers/gpu/drm/r128/r128_state.c
drivers/gpu/drm/radeon/mkregtable.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_drv.h
drivers/gpu/drm/radeon/radeon_ioc32.c
drivers/gpu/drm/radeon/radeon_irq.c
drivers/gpu/drm/radeon/radeon_state.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/dsi.c
drivers/gpu/drm/tegra/hdmi.c
drivers/gpu/drm/ttm/ttm_bo_manager.c
drivers/gpu/drm/ttm/ttm_memory.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
drivers/gpu/drm/udl/udl_fb.c
drivers/gpu/drm/via/via_drv.h
drivers/gpu/drm/via/via_irq.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
drivers/gpu/vga/vga_switcheroo.c
drivers/hid/hid-core.c
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-wiimote-debug.c
drivers/hv/channel.c
drivers/hv/hv.c
drivers/hv/hv_balloon.c
drivers/hv/hyperv_vmbus.h
drivers/hv/vmbus_drv.c
drivers/hwmon/acpi_power_meter.c
drivers/hwmon/applesmc.c
drivers/hwmon/asus_atk0110.c
drivers/hwmon/coretemp.c
drivers/hwmon/ibmaem.c
drivers/hwmon/iio_hwmon.c
drivers/hwmon/nct6683.c
drivers/hwmon/nct6775.c
drivers/hwmon/pmbus/pmbus_core.c
drivers/hwmon/sht15.c
drivers/hwmon/via-cputemp.c
drivers/i2c/busses/i2c-amd756-s4882.c
drivers/i2c/busses/i2c-diolan-u2c.c
drivers/i2c/busses/i2c-nforce2-s4985.c
drivers/i2c/i2c-dev.c
drivers/ide/ide-cd.c
drivers/iio/industrialio-core.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/fmr_pool.c
drivers/infiniband/core/umem.c
drivers/infiniband/hw/cxgb4/mem.c
drivers/infiniband/hw/ipath/ipath_rc.c
drivers/infiniband/hw/ipath/ipath_ruc.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/mcg.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mthca/mthca_cmd.c
drivers/infiniband/hw/mthca/mthca_main.c
drivers/infiniband/hw/mthca/mthca_mr.c
drivers/infiniband/hw/mthca/mthca_provider.c
drivers/infiniband/hw/nes/nes.c
drivers/infiniband/hw/nes/nes.h
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_mgt.c
drivers/infiniband/hw/nes/nes_nic.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/infiniband/hw/qib/qib.h
drivers/infiniband/ulp/ipoib/ipoib_netlink.c
drivers/input/gameport/gameport.c
drivers/input/input.c
drivers/input/joystick/sidewinder.c
drivers/input/joystick/xpad.c
drivers/input/misc/ims-pcu.c
drivers/input/mouse/psmouse.h
drivers/input/mousedev.c
drivers/input/serio/serio.c
drivers/input/serio/serio_raw.c
drivers/iommu/amd_iommu.c
drivers/iommu/arm-smmu.c
drivers/iommu/iommu.c
drivers/iommu/irq_remapping.c
drivers/irqchip/irq-gic.c
drivers/irqchip/irq-renesas-intc-irqpin.c
drivers/irqchip/irq-renesas-irqc.c
drivers/isdn/capi/capi.c
drivers/isdn/gigaset/bas-gigaset.c
drivers/isdn/gigaset/interface.c
drivers/isdn/gigaset/ser-gigaset.c
drivers/isdn/gigaset/usb-gigaset.c
drivers/isdn/hardware/avm/b1.c
drivers/isdn/i4l/isdn_common.c
drivers/isdn/i4l/isdn_concap.c
drivers/isdn/i4l/isdn_tty.c
drivers/isdn/i4l/isdn_x25iface.c
drivers/isdn/icn/icn.c
drivers/isdn/mISDN/dsp_cmx.c
drivers/leds/leds-clevo-mail.c
drivers/leds/leds-ss4200.c
drivers/lguest/core.c
drivers/lguest/page_tables.c
drivers/lguest/x86/core.c
drivers/lguest/x86/switcher_32.S
drivers/md/bcache/closure.h
drivers/md/bitmap.c
drivers/md/dm-ioctl.c
drivers/md/dm-raid1.c
drivers/md/dm-stats.c
drivers/md/dm-stripe.c
drivers/md/dm-table.c
drivers/md/dm-thin-metadata.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/md.h
drivers/md/persistent-data/dm-space-map-metadata.c
drivers/md/persistent-data/dm-space-map.h
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/dvb-core/dvbdev.c
drivers/media/dvb-frontends/af9033.h
drivers/media/dvb-frontends/dib3000.h
drivers/media/dvb-frontends/dib7000p.h
drivers/media/dvb-frontends/dib8000.h
drivers/media/pci/cx88/cx88-video.c
drivers/media/pci/ivtv/ivtv-driver.c
drivers/media/pci/solo6x10/solo6x10-core.c
drivers/media/pci/solo6x10/solo6x10-g723.c
drivers/media/pci/solo6x10/solo6x10-p2m.c
drivers/media/pci/solo6x10/solo6x10.h
drivers/media/pci/tw68/tw68-core.c
drivers/media/platform/omap/omap_vout.c
drivers/media/platform/s5p-tv/mixer.h
drivers/media/platform/s5p-tv/mixer_grp_layer.c
drivers/media/platform/s5p-tv/mixer_reg.c
drivers/media/platform/s5p-tv/mixer_video.c
drivers/media/platform/s5p-tv/mixer_vp_layer.c
drivers/media/radio/radio-cadet.c
drivers/media/radio/radio-maxiradio.c
drivers/media/radio/radio-shark.c
drivers/media/radio/radio-shark2.c
drivers/media/radio/radio-si476x.c
drivers/media/radio/wl128x/fmdrv_common.c
drivers/media/usb/dvb-usb/cinergyT2-core.c
drivers/media/usb/dvb-usb/cinergyT2-fe.c
drivers/media/usb/dvb-usb/dvb-usb-firmware.c
drivers/media/usb/dvb-usb/dw2102.c
drivers/media/usb/dvb-usb/technisat-usb2.c
drivers/media/v4l2-core/v4l2-compat-ioctl32.c
drivers/media/v4l2-core/v4l2-device.c
drivers/media/v4l2-core/v4l2-ioctl.c
drivers/memory/omap-gpmc.c
drivers/message/fusion/mptbase.c
drivers/message/fusion/mptsas.c
drivers/message/i2o/i2o_proc.c
drivers/message/i2o/iop.c
drivers/mfd/ab8500-debugfs.c
drivers/mfd/max8925-i2c.c
drivers/mfd/tps65910.c
drivers/mfd/twl4030-irq.c
drivers/misc/c2port/core.c
drivers/misc/eeprom/sunxi_sid.c
drivers/misc/kgdbts.c
drivers/misc/lis3lv02d/lis3lv02d.c
drivers/misc/lis3lv02d/lis3lv02d.h
drivers/misc/sgi-gru/gruhandles.c
drivers/misc/sgi-gru/gruprocfs.c
drivers/misc/sgi-gru/grutables.h
drivers/misc/sgi-xp/xp.h
drivers/misc/sgi-xp/xp_main.c
drivers/misc/sgi-xp/xpc.h
drivers/misc/sgi-xp/xpc_main.c
drivers/mmc/card/block.c
drivers/mmc/host/dw_mmc.h
drivers/mmc/host/mmci.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-s3c.c
drivers/mtd/chips/cfi_cmdset_0020.c
drivers/mtd/nand/denali.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/mtd/nftlmount.c
drivers/mtd/sm_ftl.c
drivers/net/bonding/bond_netlink.c
drivers/net/caif/caif_hsi.c
drivers/net/can/Kconfig
drivers/net/can/dev.c
drivers/net/can/vcan.c
drivers/net/dummy.c
drivers/net/ethernet/8390/ax88796.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/amd/xgbe/xgbe-common.h
drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
drivers/net/ethernet/amd/xgbe/xgbe-main.c
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/brocade/bna/bna_enet.c
drivers/net/ethernet/chelsio/cxgb3/l2t.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/dec/tulip/de4x5.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/faraday/ftmac100.c
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/neterion/vxge/vxge-config.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sfc/ptp.c
drivers/net/ethernet/stmicro/stmmac/mmc_core.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/rndis_filter.c
drivers/net/ifb.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/nlmon.c
drivers/net/phy/phy_device.c
drivers/net/ppp/ppp_generic.c
drivers/net/slip/slhc.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/hso.c
drivers/net/usb/r8152.c
drivers/net/usb/sierra_net.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wan/lmc/lmc_media.c
drivers/net/wan/z85230.c
drivers/net/wimax/i2400m/rx.c
drivers/net/wireless/airo.c
drivers/net/wireless/at76c50x-usb.c
drivers/net/wireless/ath/ath10k/htc.c
drivers/net/wireless/ath/ath10k/htc.h
drivers/net/wireless/ath/ath9k/ar9002_mac.c
drivers/net/wireless/ath/ath9k/ar9003_mac.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/b43/phy_lp.c
drivers/net/wireless/iwlegacy/3945-mac.c
drivers/net/wireless/iwlwifi/dvm/debugfs.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00queue.c
drivers/net/wireless/ti/wl1251/sdio.c
drivers/net/wireless/ti/wl12xx/main.c
drivers/net/wireless/ti/wl18xx/main.c
drivers/net/wireless/zd1211rw/zd_usb.c
drivers/nfc/nfcwilink.c
drivers/nfc/st21nfca/st21nfca.c
drivers/of/fdt.c
drivers/oprofile/buffer_sync.c
drivers/oprofile/event_buffer.c
drivers/oprofile/oprof.c
drivers/oprofile/oprofile_files.c
drivers/oprofile/oprofile_stats.c
drivers/oprofile/oprofile_stats.h
drivers/oprofile/oprofilefs.c
drivers/oprofile/timer_int.c
drivers/parport/procfs.c
drivers/pci/hotplug/acpiphp_ibm.c
drivers/pci/hotplug/cpcihp_generic.c
drivers/pci/hotplug/cpcihp_zt5550.c
drivers/pci/hotplug/cpqphp_nvram.c
drivers/pci/hotplug/pci_hotplug_core.c
drivers/pci/hotplug/pciehp_core.c
drivers/pci/msi.c
drivers/pci/pci-sysfs.c
drivers/pci/pci.h
drivers/pci/pcie/aspm.c
drivers/pci/probe.c
drivers/pci/proc.c
drivers/platform/chrome/chromeos_laptop.c
drivers/platform/x86/alienware-wmi.c
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/msi-laptop.c
drivers/platform/x86/msi-wmi.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/pnp/pnpbios/bioscalls.c
drivers/power/pda_power.c
drivers/power/power_supply.h
drivers/power/power_supply_core.c
drivers/power/power_supply_sysfs.c
drivers/powercap/powercap_sys.c
drivers/ptp/ptp_private.h
drivers/ptp/ptp_sysfs.c
drivers/regulator/core.c
drivers/regulator/max8660.c
drivers/regulator/max8973-regulator.c
drivers/regulator/mc13892-regulator.c
drivers/rtc/rtc-cmos.c
drivers/rtc/rtc-dev.c
drivers/rtc/rtc-ds1307.c
drivers/rtc/rtc-m48t59.c
drivers/scsi/bfa/bfa_fcpim.h
drivers/scsi/bfa/bfa_fcs.c
drivers/scsi/bfa/bfa_fcs_lport.c
drivers/scsi/bfa/bfa_ioc.h
drivers/scsi/bfa/bfa_modules.h
drivers/scsi/fcoe/fcoe_sysfs.c
drivers/scsi/hosts.c
drivers/scsi/hpsa.c
drivers/scsi/hpsa.h
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/pmcraid.c
drivers/scsi/pmcraid.h
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla4xxx/ql4_def.h
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_fc.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/scsi_transport_srp.c
drivers/scsi/sd.c
drivers/scsi/sg.c
drivers/soc/tegra/fuse/fuse-tegra.c
drivers/spi/spi.c
drivers/staging/android/timed_output.c
drivers/staging/comedi/comedi_fops.c
drivers/staging/gdm724x/gdm_tty.c
drivers/staging/line6/driver.c
drivers/staging/line6/toneport.c
drivers/staging/lustre/lnet/selftest/brw_test.c
drivers/staging/lustre/lnet/selftest/framework.c
drivers/staging/lustre/lnet/selftest/ping_test.c
drivers/staging/lustre/lustre/include/lustre_dlm.h
drivers/staging/lustre/lustre/include/obd.h
drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
drivers/staging/lustre/lustre/libcfs/module.c
drivers/staging/octeon/ethernet-rx.c
drivers/staging/octeon/ethernet.c
drivers/staging/rtl8188eu/include/hal_intf.h
drivers/staging/rtl8712/rtl871x_io.h
drivers/staging/unisys/visorchipset/visorchipset.h
drivers/target/sbp/sbp_target.c
drivers/target/target_core_device.c
drivers/target/target_core_transport.c
drivers/thermal/int340x_thermal/int3400_thermal.c
drivers/thermal/of-thermal.c
drivers/tty/cyclades.c
drivers/tty/hvc/hvc_console.c
drivers/tty/hvc/hvcs.c
drivers/tty/hvc/hvsi.c
drivers/tty/hvc/hvsi_lib.c
drivers/tty/ipwireless/tty.c
drivers/tty/moxa.c
drivers/tty/n_gsm.c
drivers/tty/n_tty.c
drivers/tty/pty.c
drivers/tty/rocket.c
drivers/tty/serial/ioc4_serial.c
drivers/tty/serial/kgdb_nmi.c
drivers/tty/serial/kgdboc.c
drivers/tty/serial/msm_serial.c
drivers/tty/serial/samsung.c
drivers/tty/serial/serial_core.c
drivers/tty/synclink.c
drivers/tty/synclink_gt.c
drivers/tty/synclinkmp.c
drivers/tty/sysrq.c
drivers/tty/tty_io.c
drivers/tty/tty_ldisc.c
drivers/tty/tty_port.c
drivers/tty/vt/keyboard.c
drivers/uio/uio.c
drivers/usb/atm/cxacru.c
drivers/usb/atm/usbatm.c
drivers/usb/core/devices.c
drivers/usb/core/devio.c
drivers/usb/core/hcd.c
drivers/usb/core/hub.c
drivers/usb/core/message.c
drivers/usb/core/sysfs.c
drivers/usb/core/usb.c
drivers/usb/early/ehci-dbgp.c
drivers/usb/gadget/function/f_uac1.c
drivers/usb/gadget/function/u_serial.c
drivers/usb/gadget/function/u_uac1.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/hwa-hc.c
drivers/usb/misc/appledisplay.c
drivers/usb/serial/console.c
drivers/usb/storage/usb.h
drivers/usb/usbip/vhci.h
drivers/usb/usbip/vhci_hcd.c
drivers/usb/usbip/vhci_rx.c
drivers/usb/wusbcore/wa-hc.h
drivers/usb/wusbcore/wa-xfer.c
drivers/vfio/vfio.c
drivers/vhost/net.c
drivers/vhost/vringh.c
drivers/video/backlight/kb3886_bl.c
drivers/video/fbdev/arcfb.c
drivers/video/fbdev/aty/aty128fb.c
drivers/video/fbdev/aty/atyfb_base.c
drivers/video/fbdev/aty/mach64_cursor.c
drivers/video/fbdev/core/fb_defio.c
drivers/video/fbdev/core/fbmem.c
drivers/video/fbdev/hyperv_fb.c
drivers/video/fbdev/i810/i810_accel.c
drivers/video/fbdev/matrox/matroxfb_DAC1064.c
drivers/video/fbdev/matrox/matroxfb_Ti3026.c
drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
drivers/video/fbdev/nvidia/nvidia.c
drivers/video/fbdev/omap2/dss/display.c
drivers/video/fbdev/s1d13xxxfb.c
drivers/video/fbdev/sh_mobile_lcdcfb.c
drivers/video/fbdev/smscufx.c
drivers/video/fbdev/udlfb.c
drivers/video/fbdev/uvesafb.c
drivers/video/fbdev/vesafb.c
drivers/video/fbdev/via/via_clock.h
drivers/video/logo/logo_linux_clut224.ppm
drivers/xen/xenfs/xenstored.c
fs/9p/vfs_addr.c
fs/9p/vfs_inode.c
fs/Kconfig.binfmt
fs/afs/inode.c
fs/aio.c
fs/attr.c
fs/autofs4/waitq.c
fs/befs/endian.h
fs/binfmt_aout.c
fs/binfmt_elf.c
fs/block_dev.c
fs/btrfs/ctree.c
fs/btrfs/delayed-inode.c
fs/btrfs/delayed-inode.h
fs/btrfs/ioctl.c
fs/btrfs/super.c
fs/btrfs/sysfs.c
fs/btrfs/tests/free-space-tests.c
fs/btrfs/tree-log.h
fs/buffer.c
fs/cachefiles/bind.c
fs/cachefiles/daemon.c
fs/cachefiles/internal.h
fs/cachefiles/namei.c
fs/cachefiles/proc.c
fs/ceph/dir.c
fs/ceph/super.c
fs/cifs/cifs_debug.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/file.c
fs/cifs/misc.c
fs/cifs/smb1ops.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/coda/cache.c
fs/compat.c
fs/compat_binfmt_elf.c
fs/compat_ioctl.c
fs/configfs/dir.c
fs/coredump.c
fs/dcache.c
fs/debugfs/inode.c
fs/ecryptfs/inode.c
fs/ecryptfs/miscdev.c
fs/exec.c
fs/ext2/balloc.c
fs/ext2/super.c
fs/ext2/xattr.c
fs/ext3/balloc.c
fs/ext3/super.c
fs/ext3/xattr.c
fs/ext4/balloc.c
fs/ext4/ext4.h
fs/ext4/mballoc.c
fs/ext4/mmp.c
fs/ext4/resize.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/fcntl.c
fs/fhandle.c
fs/file.c
fs/filesystems.c
fs/fs_struct.c
fs/fscache/cookie.c
fs/fscache/internal.h
fs/fscache/object.c
fs/fscache/operation.c
fs/fscache/page.c
fs/fscache/stats.c
fs/fuse/cuse.c
fs/fuse/dev.c
fs/fuse/dir.c
fs/hostfs/hostfs_kern.c
fs/hugetlbfs/inode.c
fs/inode.c
fs/jffs2/erase.c
fs/jffs2/wbuf.c
fs/jfs/super.c
fs/kernfs/dir.c
fs/kernfs/file.c
fs/kernfs/symlink.c
fs/libfs.c
fs/lockd/clntproc.c
fs/locks.c
fs/mount.h
fs/namei.c
fs/namespace.c
fs/nfs/callback_xdr.c
fs/nfs/inode.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4xdr.c
fs/nfsd/nfscache.c
fs/nfsd/vfs.c
fs/nls/nls_base.c
fs/nls/nls_euc-jp.c
fs/nls/nls_koi8-ru.c
fs/notify/fanotify/fanotify_user.c
fs/notify/notification.c
fs/ntfs/dir.c
fs/ntfs/file.c
fs/ntfs/super.c
fs/ocfs2/localalloc.c
fs/ocfs2/ocfs2.h
fs/ocfs2/suballoc.c
fs/ocfs2/super.c
fs/open.c
fs/pipe.c
fs/posix_acl.c
fs/proc/Kconfig
fs/proc/array.c
fs/proc/base.c
fs/proc/cmdline.c
fs/proc/devices.c
fs/proc/fd.c
fs/proc/generic.c
fs/proc/inode.c
fs/proc/internal.h
fs/proc/interrupts.c
fs/proc/kcore.c
fs/proc/meminfo.c
fs/proc/nommu.c
fs/proc/proc_net.c
fs/proc/proc_sysctl.c
fs/proc/root.c
fs/proc/stat.c
fs/proc/task_mmu.c
fs/proc/task_nommu.c
fs/proc/vmcore.c
fs/qnx6/qnx6.h
fs/quota/netlink.c
fs/read_write.c
fs/readdir.c
fs/reiserfs/do_balan.c
fs/reiserfs/item_ops.c
fs/reiserfs/procfs.c
fs/reiserfs/reiserfs.h
fs/reiserfs/super.c
fs/select.c
fs/seq_file.c
fs/splice.c
fs/stat.c
fs/sysfs/dir.c
fs/sysv/sysv.h
fs/ubifs/io.c
fs/udf/misc.c
fs/ufs/swab.h
fs/utimes.c
fs/xattr.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/xfs_dir2_readdir.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_linux.h
include/asm-generic/4level-fixup.h
include/asm-generic/atomic-long.h
include/asm-generic/atomic64.h
include/asm-generic/barrier.h
include/asm-generic/bitops/__fls.h
include/asm-generic/bitops/fls.h
include/asm-generic/bitops/fls64.h
include/asm-generic/cache.h
include/asm-generic/emergency-restart.h
include/asm-generic/kmap_types.h
include/asm-generic/local.h
include/asm-generic/pgtable-nopmd.h
include/asm-generic/pgtable-nopud.h
include/asm-generic/pgtable.h
include/asm-generic/uaccess.h
include/asm-generic/vmlinux.lds.h
include/crypto/algapi.h
include/drm/drmP.h
include/drm/drm_crtc_helper.h
include/drm/i915_pciids.h
include/drm/ttm/ttm_memory.h
include/drm/ttm/ttm_page_alloc.h
include/keys/asymmetric-subtype.h
include/linux/atmdev.h
include/linux/atomic.h
include/linux/audit.h
include/linux/binfmts.h
include/linux/bitmap.h
include/linux/bitops.h
include/linux/blkdev.h
include/linux/blktrace_api.h
include/linux/cache.h
include/linux/capability.h
include/linux/cdrom.h
include/linux/cleancache.h
include/linux/clk-provider.h
include/linux/compat.h
include/linux/compiler-gcc4.h
include/linux/compiler-gcc5.h
include/linux/compiler.h
include/linux/completion.h
include/linux/configfs.h
include/linux/cpufreq.h
include/linux/cpuidle.h
include/linux/cpumask.h
include/linux/cred.h
include/linux/crypto.h
include/linux/ctype.h
include/linux/dcache.h
include/linux/decompress/mm.h
include/linux/devfreq.h
include/linux/device.h
include/linux/dma-mapping.h
include/linux/dmaengine.h
include/linux/efi.h
include/linux/elf.h
include/linux/err.h
include/linux/extcon.h
include/linux/fb.h
include/linux/fdtable.h
include/linux/frontswap.h
include/linux/fs.h
include/linux/fs_struct.h
include/linux/fscache-cache.h
include/linux/fscache.h
include/linux/fsnotify.h
include/linux/genhd.h
include/linux/genl_magic_func.h
include/linux/gfp.h
include/linux/highmem.h
include/linux/hwmon-sysfs.h
include/linux/i2c.h
include/linux/i2o.h
include/linux/if_pppox.h
include/linux/init.h
include/linux/init_task.h
include/linux/interrupt.h
include/linux/iommu.h
include/linux/ioport.h
include/linux/ipc_namespace.h
include/linux/irq.h
include/linux/irqchip/arm-gic.h
include/linux/irqdesc.h
include/linux/jiffies.h
include/linux/kallsyms.h
include/linux/kernel.h
include/linux/key-type.h
include/linux/kgdb.h
include/linux/kmemleak.h
include/linux/kmod.h
include/linux/kobject.h
include/linux/kobject_ns.h
include/linux/kref.h
include/linux/kvm_host.h
include/linux/libata.h
include/linux/linkage.h
include/linux/list.h
include/linux/lockref.h
include/linux/math64.h
include/linux/mempolicy.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/mmiotrace.h
include/linux/mmzone.h
include/linux/mod_devicetable.h
include/linux/module.h
include/linux/moduleloader.h
include/linux/moduleparam.h
include/linux/mount.h
include/linux/namei.h
include/linux/net.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netfilter/nfnetlink.h
include/linux/nls.h
include/linux/notifier.h
include/linux/oprofile.h
include/linux/padata.h
include/linux/path.h
include/linux/pci_hotplug.h
include/linux/percpu.h
include/linux/perf_event.h
include/linux/pid_namespace.h
include/linux/pipe_fs_i.h
include/linux/pm.h
include/linux/pm_domain.h
include/linux/pm_runtime.h
include/linux/pnp.h
include/linux/poison.h
include/linux/power/smartreflex.h
include/linux/ppp-comp.h
include/linux/preempt.h
include/linux/printk.h
include/linux/proc_fs.h
include/linux/proc_ns.h
include/linux/quota.h
include/linux/random.h
include/linux/rbtree_augmented.h
include/linux/rculist.h
include/linux/rcupdate.h
include/linux/reboot.h
include/linux/regset.h
include/linux/relay.h
include/linux/rio.h
include/linux/rmap.h
include/linux/scatterlist.h
include/linux/sched.h
include/linux/sched/sysctl.h
include/linux/security.h
include/linux/semaphore.h
include/linux/seq_file.h
include/linux/shm.h
include/linux/signal.h
include/linux/skbuff.h
include/linux/slab.h
include/linux/slab_def.h
include/linux/slub_def.h
include/linux/smp.h
include/linux/sock_diag.h
include/linux/sonet.h
include/linux/sunrpc/addr.h
include/linux/sunrpc/clnt.h
include/linux/sunrpc/svc.h
include/linux/sunrpc/svc_rdma.h
include/linux/sunrpc/svcauth.h
include/linux/swiotlb.h
include/linux/syscalls.h
include/linux/syscore_ops.h
include/linux/sysctl.h
include/linux/sysfs.h
include/linux/sysrq.h
include/linux/thread_info.h
include/linux/tty.h
include/linux/tty_driver.h
include/linux/tty_ldisc.h
include/linux/types.h
include/linux/uaccess.h
include/linux/uidgid.h
include/linux/uio_driver.h
include/linux/unaligned/access_ok.h
include/linux/usb.h
include/linux/usb/renesas_usbhs.h
include/linux/user_namespace.h
include/linux/utsname.h
include/linux/vermagic.h
include/linux/vga_switcheroo.h
include/linux/vmalloc.h
include/linux/vmstat.h
include/linux/xattr.h
include/linux/zlib.h
include/media/v4l2-dev.h
include/media/v4l2-device.h
include/net/9p/transport.h
include/net/af_unix.h
include/net/bluetooth/l2cap.h
include/net/bonding.h
include/net/caif/cfctrl.h
include/net/flow.h
include/net/genetlink.h
include/net/gro_cells.h
include/net/inet_connection_sock.h
include/net/inetpeer.h
include/net/ip.h
include/net/ip_fib.h
include/net/ip_vs.h
include/net/irda/ircomm_tty.h
include/net/iucv/af_iucv.h
include/net/llc_c_ac.h
include/net/llc_c_ev.h
include/net/llc_c_st.h
include/net/llc_s_ac.h
include/net/llc_s_st.h
include/net/mac80211.h
include/net/neighbour.h
include/net/net_namespace.h
include/net/netlink.h
include/net/netns/conntrack.h
include/net/netns/ipv4.h
include/net/netns/ipv6.h
include/net/netns/xfrm.h
include/net/ping.h
include/net/protocol.h
include/net/rtnetlink.h
include/net/sctp/checksum.h
include/net/sctp/sm.h
include/net/sctp/structs.h
include/net/sock.h
include/net/tcp.h
include/net/xfrm.h
include/rdma/iw_cm.h
include/scsi/libfc.h
include/scsi/scsi_device.h
include/scsi/scsi_transport_fc.h
include/sound/compress_driver.h
include/sound/soc.h
include/target/target_core_base.h
include/trace/events/irq.h
include/uapi/linux/a.out.h
include/uapi/linux/bcache.h
include/uapi/linux/byteorder/little_endian.h
include/uapi/linux/elf.h
include/uapi/linux/personality.h
include/uapi/linux/screen_info.h
include/uapi/linux/swab.h
include/uapi/linux/xattr.h
include/video/udlfb.h
include/video/uvesafb.h
init/Kconfig
init/Makefile
init/do_mounts.c
init/do_mounts.h
init/do_mounts_initrd.c
init/do_mounts_md.c
init/init_task.c
init/initramfs.c
init/main.c
ipc/compat.c
ipc/ipc_sysctl.c
ipc/mq_sysctl.c
ipc/mqueue.c
ipc/shm.c
ipc/util.c
kernel/audit.c
kernel/auditsc.c
kernel/bpf/core.c
kernel/bpf/syscall.c
kernel/capability.c
kernel/cgroup.c
kernel/compat.c
kernel/configs.c
kernel/cred.c
kernel/debug/debug_core.c
kernel/debug/kdb/kdb_main.c
kernel/events/core.c
kernel/events/internal.h
kernel/events/uprobes.c
kernel/exit.c
kernel/fork.c
kernel/futex.c
kernel/futex_compat.c
kernel/gcov/base.c
kernel/irq/manage.c
kernel/irq/spurious.c
kernel/jump_label.c
kernel/kallsyms.c
kernel/kcmp.c
kernel/kexec.c
kernel/kmod.c
kernel/kprobes.c
kernel/ksysfs.c
kernel/locking/lockdep.c
kernel/locking/lockdep_proc.c
kernel/locking/mcs_spinlock.c
kernel/locking/mcs_spinlock.h
kernel/locking/mutex-debug.c
kernel/locking/mutex-debug.h
kernel/locking/mutex.c
kernel/locking/rtmutex-tester.c
kernel/module.c
kernel/notifier.c
kernel/padata.c
kernel/panic.c
kernel/pid.c
kernel/pid_namespace.c
kernel/power/Kconfig
kernel/power/process.c
kernel/printk/console_cmdline.h
kernel/printk/printk.c
kernel/profile.c
kernel/ptrace.c
kernel/rcu/rcutorture.c
kernel/rcu/tiny.c
kernel/rcu/tiny_plugin.h
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_plugin.h
kernel/rcu/tree_trace.c
kernel/rcu/update.c
kernel/resource.c
kernel/sched/auto_group.c
kernel/sched/completion.c
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/sched.h
kernel/seccomp.c
kernel/signal.c
kernel/smpboot.c
kernel/softirq.c
kernel/sys.c
kernel/sysctl.c
kernel/taskstats.c
kernel/time/alarmtimer.c
kernel/time/hrtimer.c
kernel/time/posix-cpu-timers.c
kernel/time/posix-timers.c
kernel/time/time.c
kernel/time/timekeeping.c
kernel/time/timer.c
kernel/time/timer_list.c
kernel/time/timer_stats.c
kernel/torture.c
kernel/trace/blktrace.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_clock.c
kernel/trace/trace_events.c
kernel/trace/trace_functions_graph.c
kernel/trace/trace_mmiotrace.c
kernel/trace/trace_output.c
kernel/trace/trace_seq.c
kernel/trace/trace_stack.c
kernel/trace/trace_syscalls.c
kernel/user_namespace.c
kernel/utsname_sysctl.c
kernel/watchdog.c
kernel/workqueue.c
lib/Kconfig.debug
lib/Makefile
lib/average.c
lib/bitmap.c
lib/bug.c
lib/debugobjects.c
lib/div64.c
lib/dma-debug.c
lib/inflate.c
lib/ioremap.c
lib/is_single_threaded.c
lib/kobject.c
lib/list_debug.c
lib/lockref.c
lib/percpu-refcount.c
lib/radix-tree.c
lib/random32.c
lib/rbtree.c
lib/show_mem.c
lib/strncpy_from_user.c
lib/strnlen_user.c
lib/swiotlb.c
lib/usercopy.c
lib/vsprintf.c
mm/Kconfig
mm/backing-dev.c
mm/filemap.c
mm/fremap.c
mm/gup.c
mm/highmem.c
mm/hugetlb.c
mm/internal.h
mm/kmemleak.c
mm/maccess.c
mm/madvise.c
mm/memory-failure.c
mm/memory.c
mm/mempolicy.c
mm/migrate.c
mm/mlock.c
mm/mmap.c
mm/mprotect.c
mm/mremap.c
mm/nommu.c
mm/page-writeback.c
mm/page_alloc.c
mm/percpu.c
mm/process_vm_access.c
mm/rmap.c
mm/shmem.c
mm/slab.c
mm/slab.h
mm/slab_common.c
mm/slob.c
mm/slub.c
mm/sparse-vmemmap.c
mm/sparse.c
mm/swap.c
mm/swapfile.c
mm/util.c
mm/vmalloc.c
mm/vmstat.c
net/8021q/vlan.c
net/8021q/vlan_netlink.c
net/9p/client.c
net/9p/mod.c
net/9p/trans_fd.c
net/appletalk/atalk_proc.c
net/atm/atm_misc.c
net/atm/lec.c
net/atm/lec.h
net/atm/mpoa_caches.c
net/atm/proc.c
net/atm/resources.c
net/ax25/sysctl_net_ax25.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/fragmentation.c
net/batman-adv/soft-interface.c
net/batman-adv/types.h
net/bluetooth/hci_sock.c
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/rfcomm/tty.c
net/bridge/br.c
net/bridge/br_netlink.c
net/bridge/netfilter/ebtables.c
net/caif/cfctrl.c
net/caif/chnl_net.c
net/can/af_can.c
net/can/bcm.c
net/can/gw.c
net/can/proc.c
net/ceph/messenger.c
net/compat.c
net/core/datagram.c
net/core/dev.c
net/core/dev_ioctl.c
net/core/filter.c
net/core/flow.c
net/core/neighbour.c
net/core/net-procfs.c
net/core/net-sysfs.c
net/core/net_namespace.c
net/core/netpoll.c
net/core/pktgen.c
net/core/rtnetlink.c
net/core/scm.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_diag.c
net/core/sysctl_net_core.c
net/decnet/af_decnet.c
net/decnet/dn_dev.c
net/decnet/sysctl_net_decnet.c
net/hsr/hsr_netlink.c
net/ieee802154/6lowpan_rtnl.c
net/ieee802154/reassembly.c
net/ipv4/af_inet.c
net/ipv4/devinet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/inet_diag.c
net/ipv4/inet_hashtables.c
net/ipv4/inetpeer.c
net/ipv4/ip_fragment.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_vti.c
net/ipv4/ipconfig.c
net/ipv4/ipip.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/ping.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_probe.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv4/xfrm4_policy.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/datagram.c
net/ipv6/icmp.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ipv6_sockglue.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/ping.c
net/ipv6/proc.c
net/ipv6/raw.c
net/ipv6/reassembly.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/sysctl_net_ipv6.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/xfrm6_policy.c
net/ipx/ipx_proc.c
net/irda/ircomm/ircomm_tty.c
net/irda/irproc.c
net/iucv/af_iucv.c
net/iucv/iucv.c
net/key/af_key.c
net/l2tp/l2tp_eth.c
net/llc/llc_proc.c
net/mac80211/cfg.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/main.c
net/mac80211/pm.c
net/mac80211/rate.c
net/mac80211/util.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_lblc.c
net/netfilter/ipvs/ip_vs_lblcr.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_acct.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_ecache.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_conntrack_timestamp.c
net/netfilter/nf_log.c
net/netfilter/nf_sockopt.c
net/netfilter/nfnetlink_log.c
net/netfilter/xt_hashlimit.c
net/netfilter/xt_recent.c
net/netfilter/xt_statistic.c
net/netlink/af_netlink.c
net/openvswitch/vport-internal_dev.c
net/openvswitch/vport.c
net/openvswitch/vport.h
net/packet/af_packet.c
net/phonet/pep.c
net/phonet/socket.c
net/phonet/sysctl.c
net/rds/cong.c
net/rds/ib.h
net/rds/ib_cm.c
net/rds/ib_recv.c
net/rds/iw.h
net/rds/iw_cm.c
net/rds/iw_rdma.c
net/rds/iw_recv.c
net/rds/rds.h
net/rds/tcp.c
net/rds/tcp_send.c
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-ack.c
net/rxrpc/ar-call.c
net/rxrpc/ar-connection.c
net/rxrpc/ar-connevent.c
net/rxrpc/ar-input.c
net/rxrpc/ar-internal.h
net/rxrpc/ar-local.c
net/rxrpc/ar-output.c
net/rxrpc/ar-peer.c
net/rxrpc/ar-proc.c
net/rxrpc/ar-transport.c
net/rxrpc/rxkad.c
net/sched/sch_generic.c
net/sctp/ipv6.c
net/sctp/protocol.c
net/sctp/sm_sideeffect.c
net/sctp/socket.c
net/sctp/sysctl.c
net/socket.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/cache.c
net/sunrpc/clnt.c
net/sunrpc/sched.c
net/sunrpc/stats.c
net/sunrpc/svc.c
net/sunrpc/svcauth_unix.c
net/sunrpc/xprtrdma/svc_rdma.c
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
net/sunrpc/xprtrdma/svc_rdma_sendto.c
net/sunrpc/xprtrdma/svc_rdma_transport.c
net/sysctl_net.c
net/tipc/subscr.c
net/unix/af_unix.c
net/unix/sysctl_net_unix.c
net/vmw_vsock/vmci_transport_notify.c
net/vmw_vsock/vmci_transport_notify_qstate.c
net/wireless/wext-core.c
net/x25/sysctl_net_x25.c
net/x25/x25_proc.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_sysctl.c
scripts/Kbuild.include
scripts/Makefile.build
scripts/Makefile.clean
scripts/Makefile.host
scripts/basic/fixdep.c
scripts/headers_install.sh
scripts/link-vmlinux.sh
scripts/mod/file2alias.c
scripts/mod/modpost.c
scripts/mod/modpost.h
scripts/mod/sumversion.c
scripts/module-common.lds
scripts/package/Makefile
scripts/package/builddeb
scripts/package/mkspec
scripts/pnmtologo.c
scripts/sortextable.h
scripts/tags.sh
security/Kconfig
security/apparmor/file.c
security/apparmor/lsm.c
security/commoncap.c
security/integrity/ima/ima.h
security/integrity/ima/ima_api.c
security/integrity/ima/ima_fs.c
security/integrity/ima/ima_queue.c
security/keys/compat.c
security/keys/internal.h
security/keys/key.c
security/keys/keyctl.c
security/keys/request_key.c
security/min_addr.c
security/security.c
security/selinux/avc.c
security/selinux/hooks.c
security/selinux/include/xfrm.h
security/smack/smack_lsm.c
security/tomoyo/file.c
security/tomoyo/mount.c
security/tomoyo/tomoyo.c
security/yama/Kconfig
security/yama/yama_lsm.c
sound/aoa/codecs/onyx.c
sound/aoa/codecs/onyx.h
sound/core/oss/pcm_oss.c
sound/core/pcm_compat.c
sound/core/pcm_native.c
sound/core/seq/oss/seq_oss.c
sound/core/seq/seq_device.c
sound/core/seq/seq_midi.c
sound/core/sound.c
sound/drivers/mts64.c
sound/drivers/opl3/opl3_seq.c
sound/drivers/opl4/opl4_lib.c
sound/drivers/opl4/opl4_seq.c
sound/drivers/portman2x4.c
sound/firewire/amdtp.c
sound/firewire/amdtp.h
sound/firewire/isight.c
sound/firewire/scs1x.c
sound/isa/sb/emu8000_synth.c
sound/oss/sb_audio.c
sound/oss/swarm_cs4297a.c
sound/pci/emu10k1/emu10k1_synth.c
sound/pci/hda/hda_codec.c
sound/pci/ymfpci/ymfpci.h
sound/pci/ymfpci/ymfpci_main.c
sound/soc/soc-ac97.c
sound/synth/emux/emux_seq.c
tools/include/linux/compiler.h
tools/lib/api/Makefile
tools/perf/util/include/asm/alternative-asm.h
tools/virtio/linux/uaccess.h
virt/kvm/kvm_main.c

index 9de9813d0ec5df101a48428d40cfc9b9d2df6142..1462492c128fdca5b149af5beb43da587d12c581 100644 (file)
@@ -3,9 +3,11 @@
 *.bc
 *.bin
 *.bz2
+*.c.[012]*.*
 *.cis
 *.cpio
 *.csp
+*.dbg
 *.dsp
 *.dvi
 *.elf
@@ -15,6 +17,7 @@
 *.gcov
 *.gen.S
 *.gif
+*.gmo
 *.grep
 *.grp
 *.gz
 *.tab.h
 *.tex
 *.ver
+*.vim
 *.xml
 *.xz
 *_MODULES
+*_reg_safe.h
 *_vga16.c
 *~
 \#*#
 *.9
-.*
+.[^g]*
+.gen*
 .*.d
 .mm
 53c700_d.h
@@ -72,9 +78,11 @@ Image
 Module.markers
 Module.symvers
 PENDING
+PERF*
 SCCS
 System.map*
 TAGS
+TRACEEVENT-CFLAGS
 aconf
 af_names.h
 aic7*reg.h*
@@ -83,6 +91,7 @@ aic7*seq.h*
 aicasm
 aicdb.h*
 altivec*.c
+ashldi3.S
 asm-offsets.h
 asm_offsets.h
 autoconf.h*
@@ -95,32 +104,40 @@ bounds.h
 bsetup
 btfixupprep
 build
+builtin-policy.h
 bvmlinux
 bzImage*
 capability_names.h
 capflags.c
 classlist.h*
+clut_vga16.c
+common-cmds.h
 comp*.log
 compile.h*
 conf
 config
 config-*
 config_data.h*
+config.c
 config.mak
 config.mak.autogen
+config.tmp
 conmakehash
 consolemap_deftbl.c*
 cpustr.h
 crc32table.h*
 cscope.*
 defkeymap.c
+devicetable-offsets.h
 devlist.h*
 dnotify_test
 docproc
 dslm
+dtc-lexer.lex.c
 elf2ecoff
 elfconfig.h*
 evergreen_reg_safe.h
+exception_policy.conf
 fixdep
 flask.h
 fore200e_mkfirm
@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
 gconf
 gconf.glade.h
 gen-devlist
+gen-kdb_cmds.c
 gen_crc32table
 gen_init_cpio
 generated
 genheaders
 genksyms
 *_gray256.c
+hash
+hid-example
 hpet_example
 hugepage-mmap
 hugepage-shm
@@ -148,14 +168,14 @@ int32.c
 int4.c
 int8.c
 kallsyms
-kconfig
+kern_constants.h
 keywords.c
 ksym.c*
 ksym.h*
 kxgettext
 lex.c
 lex.*.c
-linux
+lib1funcs.S
 logo_*.c
 logo_*_clut224.c
 logo_*_mono.c
@@ -165,14 +185,15 @@ mach-types.h
 machtypes.h
 map
 map_hugetlb
-media
 mconf
+mdp
 miboot*
 mk_elfconfig
 mkboot
 mkbugboot
 mkcpustr
 mkdep
+mkpiggy
 mkprep
 mkregtable
 mktables
@@ -188,6 +209,8 @@ oui.c*
 page-types
 parse.c
 parse.h
+parse-events*
+pasyms.h
 patches*
 pca200e.bin
 pca200e_ecd.bin2
@@ -197,6 +220,7 @@ perf-archive
 piggyback
 piggy.gzip
 piggy.S
+pmu-*
 pnmtologo
 ppc_defs.h*
 pss_boot.h
@@ -206,7 +230,12 @@ r200_reg_safe.h
 r300_reg_safe.h
 r420_reg_safe.h
 r600_reg_safe.h
+randomize_layout_hash.h
+randomize_layout_seed.h
+realmode.lds
+realmode.relocs
 recordmcount
+regdb.c
 relocs
 rlim_names.h
 rn50_reg_safe.h
@@ -216,8 +245,12 @@ series
 setup
 setup.bin
 setup.elf
+signing_key*
+size_overflow_hash.h
 sImage
+slabinfo
 sm_tbl*
+sortextable
 split-include
 syscalltab.h
 tables.c
@@ -227,6 +260,7 @@ tftpboot.img
 timeconst.h
 times.h*
 trix_boot.h
+user_constants.h
 utsrelease.h*
 vdso-syms.lds
 vdso.lds
@@ -238,13 +272,17 @@ vdso32.lds
 vdso32.so.dbg
 vdso64.lds
 vdso64.so.dbg
+vdsox32.lds
+vdsox32-syms.lds
 version.h*
 vmImage
 vmlinux
 vmlinux-*
 vmlinux.aout
 vmlinux.bin.all
+vmlinux.bin.bz2
 vmlinux.lds
+vmlinux.relocs
 vmlinuz
 voffset.h
 vsyscall.lds
@@ -252,9 +290,12 @@ vsyscall_32.lds
 wanxlfw.inc
 uImage
 unifdef
+utsrelease.h
 wakeup.bin
 wakeup.elf
 wakeup.lds
+x509*
 zImage*
 zconf.hash.c
+zconf.lex.c
 zoffset.h
index a311db829e9bb6a819bc293404d65d5c3584a741..415b28ce3dda15b2cfe29ca8ecf60e55c6d27a7b 100644 (file)
@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
        === 4 Host Program support
           --- 4.1 Simple Host Program
           --- 4.2 Composite Host Programs
-          --- 4.3 Using C++ for host programs
-          --- 4.4 Controlling compiler options for host programs
-          --- 4.5 When host programs are actually built
-          --- 4.6 Using hostprogs-$(CONFIG_FOO)
+          --- 4.3 Defining shared libraries
+          --- 4.4 Using C++ for host programs
+          --- 4.5 Controlling compiler options for host programs
+          --- 4.6 When host programs are actually built
+          --- 4.7 Using hostprogs-$(CONFIG_FOO)
 
        === 5 Kbuild clean infrastructure
 
@@ -642,7 +643,29 @@ Both possibilities are described in the following.
        Finally, the two .o files are linked to the executable, lxdialog.
        Note: The syntax <executable>-y is not permitted for host-programs.
 
---- 4.3 Using C++ for host programs
+--- 4.3 Defining shared libraries
+
+       Objects with extension .so are considered shared libraries, and
+       will be compiled as position independent objects.
+       Kbuild provides support for shared libraries, but the usage
+       shall be restricted.
+       In the following example the libkconfig.so shared library is used
+       to link the executable conf.
+
+       Example:
+               #scripts/kconfig/Makefile
+               hostprogs-y     := conf
+               conf-objs       := conf.o libkconfig.so
+               libkconfig-objs := expr.o type.o
+
+       Shared libraries always require a corresponding -objs line, and
+       in the example above the shared library libkconfig is composed by
+       the two objects expr.o and type.o.
+       expr.o and type.o will be built as position independent code and
+       linked as a shared library libkconfig.so. C++ is not supported for
+       shared libraries.
+
+--- 4.4 Using C++ for host programs
 
        kbuild offers support for host programs written in C++. This was
        introduced solely to support kconfig, and is not recommended
@@ -665,7 +688,7 @@ Both possibilities are described in the following.
                qconf-cxxobjs := qconf.o
                qconf-objs    := check.o
 
---- 4.4 Controlling compiler options for host programs
+--- 4.5 Controlling compiler options for host programs
 
        When compiling host programs, it is possible to set specific flags.
        The programs will always be compiled utilising $(HOSTCC) passed
@@ -693,7 +716,7 @@ Both possibilities are described in the following.
        When linking qconf, it will be passed the extra option
        "-L$(QTDIR)/lib".
 
---- 4.5 When host programs are actually built
+--- 4.6 When host programs are actually built
 
        Kbuild will only build host-programs when they are referenced
        as a prerequisite.
@@ -724,7 +747,7 @@ Both possibilities are described in the following.
        This will tell kbuild to build lxdialog even if not referenced in
        any rule.
 
---- 4.6 Using hostprogs-$(CONFIG_FOO)
+--- 4.7 Using hostprogs-$(CONFIG_FOO)
 
        A typical pattern in a Kbuild file looks like this:
 
index 176d4fe4f076be0c785de56ade5cbca399e591e2..17ceefa3db3670b3fdac18b338bc45ddc0c5cace 100644 (file)
@@ -1191,6 +1191,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
                        Default: 1024
 
+       grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
+                       ignore grsecurity's /proc restrictions
+
+
        hashdist=       [KNL,NUMA] Large hashes allocated during boot
                        are distributed across NUMA nodes.  Defaults on
                        for 64-bit NUMA, off otherwise.
@@ -2283,6 +2287,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        noexec=on: enable non-executable mappings (default)
                        noexec=off: disable non-executable mappings
 
+       nopcid          [X86-64]
+                       Disable PCID (Process-Context IDentifier) even if it
+                       is supported by the processor.
+
        nosmap          [X86]
                        Disable SMAP (Supervisor Mode Access Prevention)
                        even if it is supported by processor.
@@ -2584,6 +2592,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        the specified number of seconds.  This is to be used if
                        your oopses keep scrolling off the screen.
 
+       pax_nouderef    [X86] disables UDEREF.  Most likely needed under certain
+                       virtualization environments that don't cope well with the
+                       expand down segment used by UDEREF on X86-32 or the frequent
+                       page table updates on X86-64.
+
+       pax_sanitize_slab=
+                       Format: { 0 | 1 | off | fast | full }
+                       Options '0' and '1' are only provided for backward
+                       compatibility, 'off' or 'fast' should be used instead.
+                       0|off : disable slab object sanitization
+                       1|fast: enable slab object sanitization excluding
+                               whitelisted slabs (default)
+                       full  : sanitize all slabs, even the whitelisted ones
+
+       pax_softmode=   0/1 to disable/enable PaX softmode on boot already.
+
+       pax_extra_latent_entropy
+                       Enable a very simple form of latent entropy extraction
+                       from the first 4GB of memory as the bootmem allocator
+                       passes the memory pages to the buddy allocator.
+
+       pax_weakuderef  [X86-64] enables the weaker but faster form of UDEREF
+                       when the processor supports PCID.
+
        pcbit=          [HW,ISDN]
 
        pcd.            [PARIDE]
index e49665a2b5ac2e868cd0b00f57a9bef05470ac26..7c65470d729298e9e8fc30e7e77162e3bb4ebada 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
 HOSTCC       = gcc
 HOSTCXX      = g++
 HOSTCFLAGS   = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
-HOSTCXXFLAGS = -O2
+HOSTCFLAGS   = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -std=gnu89 -fno-delete-null-pointer-checks
+HOSTCFLAGS  += $(call cc-option, -Wno-empty-body)
+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
 
 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
 HOSTCFLAGS  += -Wno-unused-value -Wno-unused-parameter \
@@ -446,8 +448,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 # Rules shared between *config targets and build targets
 
 # Basic helpers built in scripts/
-PHONY += scripts_basic
-scripts_basic:
+PHONY += scripts_basic gcc-plugins
+scripts_basic: gcc-plugins
        $(Q)$(MAKE) $(build)=scripts/basic
        $(Q)rm -f .tmp_quiet_recordmcount
 
@@ -622,6 +624,72 @@ endif
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS  += $(call cc-option,--param=allow-store-data-races=0)
 
+ifndef DISABLE_PAX_PLUGINS
+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
+else
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
+endif
+ifneq ($(PLUGINCC),)
+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STACKLEAK
+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
+endif
+ifdef CONFIG_KALLOCSTAT_PLUGIN
+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
+endif
+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
+endif
+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
+endif
+endif
+ifdef CONFIG_CHECKER_PLUGIN
+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
+endif
+endif
+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
+ifdef CONFIG_PAX_SIZE_OVERFLOW
+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
+endif
+ifdef CONFIG_PAX_LATENT_ENTROPY
+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
+endif
+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
+ifeq ($(KBUILD_EXTMOD),)
+gcc-plugins:
+       $(Q)$(MAKE) $(build)=tools/gcc
+else
+gcc-plugins: ;
+endif
+else
+gcc-plugins:
+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
+       $(error Your gcc installation does not support plugins.  If the necessary headers for plugin support are missing, they should be installed.  On Debian, apt-get install gcc-<ver>-plugin-dev.  If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
+else
+       $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
+endif
+       $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure.  PAX_SIZE_OVERFLOW will not be active."
+endif
+endif
+
 ifdef CONFIG_READABLE_ASM
 # Disable optimizations that make assembler listings hard to read.
 # reorder blocks reorders the control in the function
@@ -714,7 +782,7 @@ KBUILD_CFLAGS   += $(call cc-option, -gsplit-dwarf, -g)
 else
 KBUILD_CFLAGS  += -g
 endif
-KBUILD_AFLAGS  += -Wa,-gdwarf-2
+KBUILD_AFLAGS  += -Wa,--gdwarf-2
 endif
 ifdef CONFIG_DEBUG_INFO_DWARF4
 KBUILD_CFLAGS  += $(call cc-option, -gdwarf-4,)
@@ -879,7 +947,7 @@ export mod_sign_cmd
 
 
 ifeq ($(KBUILD_EXTMOD),)
-core-y         += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y         += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
 
 vmlinux-dirs   := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
                     $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@@ -926,6 +994,8 @@ endif
 
 # The actual objects are generated when descending,
 # make sure no implicit rule kicks in
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
 
 # Handle descending into subdirectories listed in $(vmlinux-dirs)
@@ -935,7 +1005,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
 # Error messages still appears in the original language
 
 PHONY += $(vmlinux-dirs)
-$(vmlinux-dirs): prepare scripts
+$(vmlinux-dirs): gcc-plugins prepare scripts
        $(Q)$(MAKE) $(build)=$@
 
 define filechk_kernel.release
@@ -978,10 +1048,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
 
 archprepare: archheaders archscripts prepare1 scripts_basic
 
+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 prepare0: archprepare FORCE
        $(Q)$(MAKE) $(build)=.
 
 # All the preparing..
+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
 prepare: prepare0
 
 # Generate some files
@@ -1095,6 +1168,8 @@ all: modules
 # using awk while concatenating to the final file.
 
 PHONY += modules
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
        $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
        @$(kecho) '  Building modules, stage 2.';
@@ -1110,7 +1185,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
 
 # Target to prepare building external modules
 PHONY += modules_prepare
-modules_prepare: prepare scripts
+modules_prepare: gcc-plugins prepare scripts
 
 # Target to install modules
 PHONY += modules_install
@@ -1176,7 +1251,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
                  Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
                  signing_key.priv signing_key.x509 x509.genkey         \
                  extra_certificates signing_key.x509.keyid             \
-                 signing_key.x509.signer
+                 signing_key.x509.signer \
+                 tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
+                 tools/gcc/size_overflow_plugin/size_overflow_hash.h \
+                 tools/gcc/randomize_layout_seed.h
 
 # clean - Delete most, but leave enough to build external modules
 #
@@ -1215,7 +1293,7 @@ distclean: mrproper
        @find $(srctree) $(RCS_FIND_IGNORE) \
                \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
                -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
-               -o -name '.*.rej' -o -name '*%'  -o -name 'core' \) \
+               -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
                -type f -print | xargs rm -f
 
 
@@ -1381,6 +1459,8 @@ PHONY += $(module-dirs) modules
 $(module-dirs): crmodverdir $(objtree)/Module.symvers
        $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
 
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(module-dirs)
        @$(kecho) '  Building modules, stage 2.';
        $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
@@ -1521,17 +1601,21 @@ else
         target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
 endif
 
-%.s: %.c prepare scripts FORCE
+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.s: %.c gcc-plugins prepare scripts FORCE
        $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.i: %.c prepare scripts FORCE
        $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.c prepare scripts FORCE
+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.o: %.c gcc-plugins prepare scripts FORCE
        $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.lst: %.c prepare scripts FORCE
        $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.s: %.S prepare scripts FORCE
+%.s: %.S gcc-plugins prepare scripts FORCE
        $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.S prepare scripts FORCE
+%.o: %.S gcc-plugins prepare scripts FORCE
        $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.symtypes: %.c prepare scripts FORCE
        $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
@@ -1543,11 +1627,15 @@ endif
        $(build)=$(build-dir)
 # Make sure the latest headers are built for Documentation
 Documentation/: headers_install
-%/: prepare scripts FORCE
+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%/: gcc-plugins prepare scripts FORCE
        $(cmd_crmodverdir)
        $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
        $(build)=$(build-dir)
-%.ko: prepare scripts FORCE
+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.ko: gcc-plugins prepare scripts FORCE
        $(cmd_crmodverdir)
        $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1)   \
        $(build)=$(build-dir) $(@:.ko=.o)
index 8f8eafbedd7c2970ec1dbd403ec2e3d135079927..3405f46f50df28f3573cb3869cc0fa4fdfcb6eef 100644 (file)
@@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic_dec(v) atomic_sub(1,(v))
 #define atomic64_dec(v) atomic64_sub(1,(v))
 
+#define atomic64_read_unchecked(v)             atomic64_read(v)
+#define atomic64_set_unchecked(v, i)           atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v)           atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v)    atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v)           atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v)              atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v)       atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v)              atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n)    atomic64_cmpxchg((v), (o), (n))
+
 #endif /* _ALPHA_ATOMIC_H */
index ad368a93a46a6c5c7e1bf0d25ab17f08ebc19fd8..fbe0f2514a1ba07bc3992238d65defccefd99d7b 100644 (file)
@@ -4,19 +4,19 @@
 #ifndef __ARCH_ALPHA_CACHE_H
 #define __ARCH_ALPHA_CACHE_H
 
+#include <linux/const.h>
 
 /* Bytes per L1 (data) cache line. */
 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
-# define L1_CACHE_BYTES     64
 # define L1_CACHE_SHIFT     6
 #else
 /* Both EV4 and EV5 are write-through, read-allocate,
    direct-mapped, physical.
 */
-# define L1_CACHE_BYTES     32
 # define L1_CACHE_SHIFT     5
 #endif
 
+#define L1_CACHE_BYTES     (_AC(1,UL) << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES    L1_CACHE_BYTES
 
 #endif
index 968d9991f5ee2b06968c16359ce5618ce304e3c6..d36b2df546413088c4f4d5ddcb4cf746ea697f47 100644 (file)
@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 
 #define ELF_ET_DYN_BASE                (TASK_UNMAPPED_BASE + 0x1000000)
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE    (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
+
+#define PAX_DELTA_MMAP_LEN     (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
+#define PAX_DELTA_STACK_LEN    (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
+#endif
+
 /* $0 is set by ld.so to a pointer to a function which might be 
    registered using atexit.  This provides a mean for the dynamic
    linker to call DT_FINI functions for shared libraries that have
index aab14a019c20f82c7cc60084026b120727971214..b4fa3e77b739652ac917858f02c9e5740fc8151a 100644 (file)
@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
        pgd_set(pgd, pmd);
 }
 
+static inline void
+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+       pgd_populate(mm, pgd, pmd);
+}
+
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 
 static inline void
index d8f9b7e892348359837e34b558a7a28ae121530c..f6222fa8d7feb800b93573dabbf283ef0cb71dd5 100644 (file)
@@ -102,6 +102,17 @@ struct vm_area_struct;
 #define PAGE_SHARED    __pgprot(_PAGE_VALID | __ACCESS_BITS)
 #define PAGE_COPY      __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
 #define PAGE_READONLY  __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+# define PAGE_SHARED_NOEXEC    __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
+# define PAGE_COPY_NOEXEC      __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+# define PAGE_READONLY_NOEXEC  __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+#else
+# define PAGE_SHARED_NOEXEC    PAGE_SHARED
+# define PAGE_COPY_NOEXEC      PAGE_COPY
+# define PAGE_READONLY_NOEXEC  PAGE_READONLY
+#endif
+
 #define PAGE_KERNEL    __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
 
 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
index 2fd00b7077e41f17c654fac08c732f2ed62e1b1b..cfd50696e7de75a6a3c9a84be35cbdf81448b6fd 100644 (file)
@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
 
        /* The small sections were sorted to the end of the segment.
           The following should definitely cover them.  */
-       gp = (u64)me->module_core + me->core_size - 0x8000;
+       gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
        got = sechdrs[me->arch.gotsecindex].sh_addr;
 
        for (i = 0; i < n; i++) {
index e51f578636a5718d4f0e438b90b4b78a12b6b7da..16c64a3c8939ca3e9e0c7e6afd8a28d9085b6053 100644 (file)
@@ -1296,10 +1296,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
    generic version except that we know how to honor ADDR_LIMIT_32BIT.  */
 
 static unsigned long
-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
-                        unsigned long limit)
+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
+                        unsigned long limit, unsigned long flags)
 {
        struct vm_unmapped_area_info info;
+       unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
 
        info.flags = 0;
        info.length = len;
@@ -1307,6 +1308,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
        info.high_limit = limit;
        info.align_mask = 0;
        info.align_offset = 0;
+       info.threadstack_offset = offset;
        return vm_unmapped_area(&info);
 }
 
@@ -1339,20 +1341,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
           merely specific addresses, but regions of memory -- perhaps
           this feature should be incorporated into all ports?  */
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        if (addr) {
-               addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+               addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
                if (addr != (unsigned long) -ENOMEM)
                        return addr;
        }
 
        /* Next, try allocating at TASK_UNMAPPED_BASE.  */
-       addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
-                                        len, limit);
+       addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
+
        if (addr != (unsigned long) -ENOMEM)
                return addr;
 
        /* Finally, try allocating in low memory.  */
-       addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
+       addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
 
        return addr;
 }
index 9d0ac091a52a7d16cf1f78f402ab48c511924a24..479a962dafe32fc88f9cc005cec13cba87834118 100644 (file)
@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
        __reload_thread(pcb);
 }
 
+#ifdef CONFIG_PAX_PAGEEXEC
+/*
+ * PaX: decide what to do with offenders (regs->pc = fault address)
+ *
+ * returns 1 when task should be killed
+ *         2 when patched PLT trampoline was detected
+ *         3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+       int err;
+
+       do { /* PaX: patched PLT emulation #1 */
+               unsigned int ldah, ldq, jmp;
+
+               err = get_user(ldah, (unsigned int *)regs->pc);
+               err |= get_user(ldq, (unsigned int *)(regs->pc+4));
+               err |= get_user(jmp, (unsigned int *)(regs->pc+8));
+
+               if (err)
+                       break;
+
+               if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+                   (ldq & 0xFFFF0000U) == 0xA77B0000U &&
+                   jmp == 0x6BFB0000U)
+               {
+                       unsigned long r27, addr;
+                       unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+                       unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
+
+                       addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+                       err = get_user(r27, (unsigned long *)addr);
+                       if (err)
+                               break;
+
+                       regs->r27 = r27;
+                       regs->pc = r27;
+                       return 2;
+               }
+       } while (0);
+
+       do { /* PaX: patched PLT emulation #2 */
+               unsigned int ldah, lda, br;
+
+               err = get_user(ldah, (unsigned int *)regs->pc);
+               err |= get_user(lda, (unsigned int *)(regs->pc+4));
+               err |= get_user(br, (unsigned int *)(regs->pc+8));
+
+               if (err)
+                       break;
+
+               if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+                   (lda & 0xFFFF0000U) == 0xA77B0000U &&
+                   (br & 0xFFE00000U) == 0xC3E00000U)
+               {
+                       unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
+                       unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+                       unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
+
+                       regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+                       regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+                       return 2;
+               }
+       } while (0);
+
+       do { /* PaX: unpatched PLT emulation */
+               unsigned int br;
+
+               err = get_user(br, (unsigned int *)regs->pc);
+
+               if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
+                       unsigned int br2, ldq, nop, jmp;
+                       unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
+
+                       addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+                       err = get_user(br2, (unsigned int *)addr);
+                       err |= get_user(ldq, (unsigned int *)(addr+4));
+                       err |= get_user(nop, (unsigned int *)(addr+8));
+                       err |= get_user(jmp, (unsigned int *)(addr+12));
+                       err |= get_user(resolver, (unsigned long *)(addr+16));
+
+                       if (err)
+                               break;
+
+                       if (br2 == 0xC3600000U &&
+                           ldq == 0xA77B000CU &&
+                           nop == 0x47FF041FU &&
+                           jmp == 0x6B7B0000U)
+                       {
+                               regs->r28 = regs->pc+4;
+                               regs->r27 = addr+16;
+                               regs->pc = resolver;
+                               return 3;
+                       }
+               }
+       } while (0);
+#endif
+
+       return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+       unsigned long i;
+
+       printk(KERN_ERR "PAX: bytes at PC: ");
+       for (i = 0; i < 5; i++) {
+               unsigned int c;
+               if (get_user(c, (unsigned int *)pc+i))
+                       printk(KERN_CONT "???????? ");
+               else
+                       printk(KERN_CONT "%08x ", c);
+       }
+       printk("\n");
+}
+#endif
 
 /*
  * This routine handles page faults.  It determines the address,
@@ -133,8 +251,29 @@ retry:
  good_area:
        si_code = SEGV_ACCERR;
        if (cause < 0) {
-               if (!(vma->vm_flags & VM_EXEC))
+               if (!(vma->vm_flags & VM_EXEC)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+                       if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
+                               goto bad_area;
+
+                       up_read(&mm->mmap_sem);
+                       switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_PAX_EMUPLT
+                       case 2:
+                       case 3:
+                               return;
+#endif
+
+                       }
+                       pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
+                       do_group_exit(SIGKILL);
+#else
                        goto bad_area;
+#endif
+
+               }
        } else if (!cause) {
                /* Allow reads even for write-only mappings */
                if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
index 97d07ed60a0b7b01cad35891c7695a2d3d4a1640..2931f2bca6ebe83d95bc0be2b806af4e6772dca1 100644 (file)
@@ -1727,7 +1727,7 @@ config ALIGNMENT_TRAP
 
 config UACCESS_WITH_MEMCPY
        bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
-       depends on MMU
+       depends on MMU && !PAX_MEMORY_UDEREF
        default y if CPU_FEROCEON
        help
          Implement faster copy_to_user and clear_user methods for CPU
@@ -1991,6 +1991,7 @@ config XIP_PHYS_ADDR
 config KEXEC
        bool "Kexec system call (EXPERIMENTAL)"
        depends on (!SMP || PM_SLEEP_SMP)
+       depends on !GRKERNSEC_KMEM
        help
          kexec is a system call that implements the ability to shutdown your
          current kernel, and to start another kernel.  It is like a reboot
index e22c11970b7bd278e5a030a364c2b66ba3572e6f..eaa807d4e60fac7f2ce10c8b34d814a59cb185e1 100644 (file)
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
 #define ATOMIC_INIT(i) { (i) }
 
 #ifdef __KERNEL__
 
+#ifdef CONFIG_THUMB2_KERNEL
+#define REFCOUNT_TRAP_INSN "bkpt       0xf1"
+#else
+#define REFCOUNT_TRAP_INSN "bkpt       0xf103"
+#endif
+
+#define _ASM_EXTABLE(from, to)         \
+"      .pushsection __ex_table,\"a\"\n"\
+"      .align  3\n"                    \
+"      .long   " #from ", " #to"\n"    \
+"      .popsection"
+
 /*
  * On ARM, ordinary assignment (str instruction) doesn't clear the local
  * strex/ldrex monitor on some implementations. The reason we can use it for
  * atomic_set() is the clrex or dummy strex done on every exception return.
  */
 #define atomic_read(v) ACCESS_ONCE((v)->counter)
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+       return ACCESS_ONCE(v->counter);
+}
 #define atomic_set(v,i)        (((v)->counter) = (i))
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+       v->counter = i;
+}
 
 #if __LINUX_ARM_ARCH__ >= 6
 
  * to ensure that the update happens.
  */
 
-#define ATOMIC_OP(op, c_op, asm_op)                                    \
-static inline void atomic_##op(int i, atomic_t *v)                     \
+#ifdef CONFIG_PAX_REFCOUNT
+#define __OVERFLOW_POST                        \
+       "       bvc     3f\n"           \
+       "2:     " REFCOUNT_TRAP_INSN "\n"\
+       "3:\n"
+#define __OVERFLOW_POST_RETURN         \
+       "       bvc     3f\n"           \
+"      mov     %0, %1\n"               \
+       "2:     " REFCOUNT_TRAP_INSN "\n"\
+       "3:\n"
+#define __OVERFLOW_EXTABLE             \
+       "4:\n"                          \
+       _ASM_EXTABLE(2b, 4b)
+#else
+#define __OVERFLOW_POST
+#define __OVERFLOW_POST_RETURN
+#define __OVERFLOW_EXTABLE
+#endif
+
+#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable)                \
+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v)   \
 {                                                                      \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        prefetchw(&v->counter);                                         \
-       __asm__ __volatile__("@ atomic_" #op "\n"                       \
+       __asm__ __volatile__("@ atomic_" #op #suffix "\n"               \
 "1:    ldrex   %0, [%3]\n"                                             \
 "      " #asm_op "     %0, %0, %4\n"                                   \
+       post_op                                                         \
 "      strex   %1, %0, [%3]\n"                                         \
 "      teq     %1, #0\n"                                               \
-"      bne     1b"                                                     \
+"      bne     1b\n"                                                   \
+       extable                                                         \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
        : "r" (&v->counter), "Ir" (i)                                   \
        : "cc");                                                        \
 }                                                                      \
 
-#define ATOMIC_OP_RETURN(op, c_op, asm_op)                             \
-static inline int atomic_##op##_return(int i, atomic_t *v)             \
+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op, , )\
+                                   __ATOMIC_OP(op, _unchecked, c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
+
+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
 {                                                                      \
        unsigned long tmp;                                              \
        int result;                                                     \
@@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v)         \
        smp_mb();                                                       \
        prefetchw(&v->counter);                                         \
                                                                        \
-       __asm__ __volatile__("@ atomic_" #op "_return\n"                \
+       __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n"     \
 "1:    ldrex   %0, [%3]\n"                                             \
 "      " #asm_op "     %0, %0, %4\n"                                   \
+       post_op                                                         \
 "      strex   %1, %0, [%3]\n"                                         \
 "      teq     %1, #0\n"                                               \
-"      bne     1b"                                                     \
+"      bne     1b\n"                                                   \
+       extable                                                         \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
        : "r" (&v->counter), "Ir" (i)                                   \
        : "cc");                                                        \
@@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v)           \
        return result;                                                  \
 }
 
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op, , )\
+                                          __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
+
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
        int oldval;
@@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
        __asm__ __volatile__ ("@ atomic_add_unless\n"
 "1:    ldrex   %0, [%4]\n"
 "      teq     %0, %5\n"
-"      beq     2f\n"
-"      add     %1, %0, %6\n"
+"      beq     4f\n"
+"      adds    %1, %0, %6\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"      bvc     3f\n"
+"2:    " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
 "      strex   %2, %1, [%4]\n"
 "      teq     %2, #0\n"
 "      bne     1b\n"
-"2:"
+"4:"
+
+#ifdef CONFIG_PAX_REFCOUNT
+       _ASM_EXTABLE(2b, 4b)
+#endif
+
        : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");
@@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
        return oldval;
 }
 
+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
+{
+       unsigned long oldval, res;
+
+       smp_mb();
+
+       do {
+               __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
+               "ldrex  %1, [%3]\n"
+               "mov    %0, #0\n"
+               "teq    %1, %4\n"
+               "strexeq %0, %5, [%3]\n"
+                   : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+                   : "r" (&ptr->counter), "Ir" (old), "r" (new)
+                   : "cc");
+       } while (res);
+
+       smp_mb();
+
+       return oldval;
+}
+
 #else /* ARM_ARCH_6 */
 
 #ifdef CONFIG_SMP
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-#define ATOMIC_OP(op, c_op, asm_op)                                    \
-static inline void atomic_##op(int i, atomic_t *v)                     \
+#define __ATOMIC_OP(op, suffix, c_op, asm_op)                          \
+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v)   \
 {                                                                      \
        unsigned long flags;                                            \
                                                                        \
@@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v)                 \
        raw_local_irq_restore(flags);                                   \
 }                                                                      \
 
-#define ATOMIC_OP_RETURN(op, c_op, asm_op)                             \
-static inline int atomic_##op##_return(int i, atomic_t *v)             \
+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op)    \
+                                   __ATOMIC_OP(op, _unchecked, c_op, asm_op)
+
+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op)                   \
+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
 {                                                                      \
        unsigned long flags;                                            \
        int val;                                                        \
@@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v)          \
        return val;                                                     \
 }
 
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
+                                          __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
+
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
        int ret;
@@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
        return ret;
 }
 
+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
+{
+       return atomic_cmpxchg((atomic_t *)v, old, new);
+}
+
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
        int c, old;
@@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
+#undef __ATOMIC_OP_RETURN
 #undef ATOMIC_OP
+#undef __ATOMIC_OP
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
+       return xchg(&v->counter, new);
+}
 
 #define atomic_inc(v)          atomic_add(1, v)
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+       atomic_add_unchecked(1, v);
+}
 #define atomic_dec(v)          atomic_sub(1, v)
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+       atomic_sub_unchecked(1, v);
+}
 
 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+       return atomic_add_return_unchecked(1, v) == 0;
+}
 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
 #define atomic_inc_return(v)    (atomic_add_return(1, v))
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+       return atomic_add_return_unchecked(1, v);
+}
 #define atomic_dec_return(v)    (atomic_sub_return(1, v))
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
 
@@ -216,6 +336,14 @@ typedef struct {
        long long counter;
 } atomic64_t;
 
+#ifdef CONFIG_PAX_REFCOUNT
+typedef struct {
+       long long counter;
+} atomic64_unchecked_t;
+#else
+typedef atomic64_t atomic64_unchecked_t;
+#endif
+
 #define ATOMIC64_INIT(i) { (i) }
 
 #ifdef CONFIG_ARM_LPAE
@@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
        return result;
 }
 
+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+       long long result;
+
+       __asm__ __volatile__("@ atomic64_read_unchecked\n"
+"      ldrd    %0, %H0, [%1]"
+       : "=&r" (result)
+       : "r" (&v->counter), "Qo" (v->counter)
+       );
+
+       return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
        __asm__ __volatile__("@ atomic64_set\n"
@@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
        : "r" (&v->counter), "r" (i)
        );
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
+{
+       __asm__ __volatile__("@ atomic64_set_unchecked\n"
+"      strd    %2, %H2, [%1]"
+       : "=Qo" (v->counter)
+       : "r" (&v->counter), "r" (i)
+       );
+}
 #else
 static inline long long atomic64_read(const atomic64_t *v)
 {
@@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
        return result;
 }
 
+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+       long long result;
+
+       __asm__ __volatile__("@ atomic64_read_unchecked\n"
+"      ldrexd  %0, %H0, [%1]"
+       : "=&r" (result)
+       : "r" (&v->counter), "Qo" (v->counter)
+       );
+
+       return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
        long long tmp;
@@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
        : "r" (&v->counter), "r" (i)
        : "cc");
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
+{
+       long long tmp;
+
+       prefetchw(&v->counter);
+       __asm__ __volatile__("@ atomic64_set_unchecked\n"
+"1:    ldrexd  %0, %H0, [%2]\n"
+"      strexd  %0, %3, %H3, [%2]\n"
+"      teq     %0, #0\n"
+"      bne     1b"
+       : "=&r" (tmp), "=Qo" (v->counter)
+       : "r" (&v->counter), "r" (i)
+       : "cc");
+}
 #endif
 
-#define ATOMIC64_OP(op, op1, op2)                                      \
-static inline void atomic64_##op(long long i, atomic64_t *v)           \
+#undef __OVERFLOW_POST_RETURN
+#define __OVERFLOW_POST_RETURN         \
+       "       bvc     3f\n"           \
+"      mov     %0, %1\n"               \
+"      mov     %H0, %H1\n"             \
+       "2:     " REFCOUNT_TRAP_INSN "\n"\
+       "3:\n"
+
+#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable)          \
+static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
 {                                                                      \
        long long result;                                               \
        unsigned long tmp;                                              \
                                                                        \
        prefetchw(&v->counter);                                         \
-       __asm__ __volatile__("@ atomic64_" #op "\n"                     \
+       __asm__ __volatile__("@ atomic64_" #op #suffix "\n"             \
 "1:    ldrexd  %0, %H0, [%3]\n"                                        \
 "      " #op1 " %Q0, %Q0, %Q4\n"                                       \
 "      " #op2 " %R0, %R0, %R4\n"                                       \
+       post_op                                                         \
 "      strexd  %1, %0, %H0, [%3]\n"                                    \
 "      teq     %1, #0\n"                                               \
-"      bne     1b"                                                     \
+"      bne     1b\n"                                                   \
+       extable                                                         \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
        : "r" (&v->counter), "r" (i)                                    \
        : "cc");                                                        \
 }                                                                      \
 
-#define ATOMIC64_OP_RETURN(op, op1, op2)                               \
-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, , op1, op2, , ) \
+                                 __ATOMIC64_OP(op, _unchecked, op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
+
+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable)   \
+static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
 {                                                                      \
        long long result;                                               \
        unsigned long tmp;                                              \
@@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
        smp_mb();                                                       \
        prefetchw(&v->counter);                                         \
                                                                        \
-       __asm__ __volatile__("@ atomic64_" #op "_return\n"              \
+       __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n"   \
 "1:    ldrexd  %0, %H0, [%3]\n"                                        \
 "      " #op1 " %Q0, %Q0, %Q4\n"                                       \
 "      " #op2 " %R0, %R0, %R4\n"                                       \
+       post_op                                                         \
 "      strexd  %1, %0, %H0, [%3]\n"                                    \
 "      teq     %1, #0\n"                                               \
-"      bne     1b"                                                     \
+"      bne     1b\n"                                                   \
+       extable                                                         \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
        : "r" (&v->counter), "r" (i)                                    \
        : "cc");                                                        \
@@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
        return result;                                                  \
 }
 
+#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, , op1, op2, , ) \
+                                        __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
+
 #define ATOMIC64_OPS(op, op1, op2)                                     \
        ATOMIC64_OP(op, op1, op2)                                       \
        ATOMIC64_OP_RETURN(op, op1, op2)
@@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
+#undef __ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
+#undef __ATOMIC64_OP
+#undef __OVERFLOW_EXTABLE
+#undef __OVERFLOW_POST_RETURN
+#undef __OVERFLOW_POST
 
 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
                                        long long new)
@@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
        return oldval;
 }
 
+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
+                                       long long new)
+{
+       long long oldval;
+       unsigned long res;
+
+       smp_mb();
+
+       do {
+               __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
+               "ldrexd         %1, %H1, [%3]\n"
+               "mov            %0, #0\n"
+               "teq            %1, %4\n"
+               "teqeq          %H1, %H4\n"
+               "strexdeq       %0, %5, %H5, [%3]"
+               : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+               : "r" (&ptr->counter), "r" (old), "r" (new)
+               : "cc");
+       } while (res);
+
+       smp_mb();
+
+       return oldval;
+}
+
 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 {
        long long result;
@@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
        long long result;
-       unsigned long tmp;
+       u64 tmp;
 
        smp_mb();
        prefetchw(&v->counter);
 
        __asm__ __volatile__("@ atomic64_dec_if_positive\n"
-"1:    ldrexd  %0, %H0, [%3]\n"
-"      subs    %Q0, %Q0, #1\n"
-"      sbc     %R0, %R0, #0\n"
+"1:    ldrexd  %1, %H1, [%3]\n"
+"      subs    %Q0, %Q1, #1\n"
+"      sbcs    %R0, %R1, #0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"      bvc     3f\n"
+"      mov     %Q0, %Q1\n"
+"      mov     %R0, %R1\n"
+"2:    " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
 "      teq     %R0, #0\n"
-"      bmi     2f\n"
+"      bmi     4f\n"
 "      strexd  %1, %0, %H0, [%3]\n"
 "      teq     %1, #0\n"
 "      bne     1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+       _ASM_EXTABLE(2b, 4b)
+#endif
+
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter)
        : "cc");
@@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 "      teq     %0, %5\n"
 "      teqeq   %H0, %H5\n"
 "      moveq   %1, #0\n"
-"      beq     2f\n"
+"      beq     4f\n"
 "      adds    %Q0, %Q0, %Q6\n"
-"      adc     %R0, %R0, %R6\n"
+"      adcs    %R0, %R0, %R6\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"      bvc     3f\n"
+"2:    " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
 "      strexd  %2, %0, %H0, [%4]\n"
 "      teq     %2, #0\n"
 "      bne     1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+       _ASM_EXTABLE(2b, 4b)
+#endif
+
        : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");
@@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 
 #define atomic64_add_negative(a, v)    (atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v)                        atomic64_add(1LL, (v))
+#define atomic64_inc_unchecked(v)      atomic64_add_unchecked(1LL, (v))
 #define atomic64_inc_return(v)         atomic64_add_return(1LL, (v))
+#define atomic64_inc_return_unchecked(v)       atomic64_add_return_unchecked(1LL, (v))
 #define atomic64_inc_and_test(v)       (atomic64_inc_return(v) == 0)
 #define atomic64_sub_and_test(a, v)    (atomic64_sub_return((a), (v)) == 0)
 #define atomic64_dec(v)                        atomic64_sub(1LL, (v))
+#define atomic64_dec_unchecked(v)      atomic64_sub_unchecked(1LL, (v))
 #define atomic64_dec_return(v)         atomic64_sub_return(1LL, (v))
 #define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1LL, 0LL)
index d2f81e6b8c1cc5adb914ce38a7ab991b25801137..3c4dba54e2808b1326793b53af4df351bb93ba56 100644 (file)
@@ -67,7 +67,7 @@
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
        smp_mb();                                                       \
-       ACCESS_ONCE(*p) = (v);                                          \
+       ACCESS_ONCE_RW(*p) = (v);                                       \
 } while (0)
 
 #define smp_load_acquire(p)                                            \
index 75fe66bc02b420014f37459c1ad786d49b1529c0..ba3dee4cb993ccaf7a214eeb866f06d109e7490b 100644 (file)
@@ -4,8 +4,10 @@
 #ifndef __ASMARM_CACHE_H
 #define __ASMARM_CACHE_H
 
+#include <linux/const.h>
+
 #define L1_CACHE_SHIFT         CONFIG_ARM_L1_CACHE_SHIFT
-#define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES         (_AC(1,UL) << L1_CACHE_SHIFT)
 
 /*
  * Memory returned by kmalloc() may be used for DMA, so we must make
@@ -24,5 +26,6 @@
 #endif
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_only __attribute__ ((__section__(".data..read_only")))
 
 #endif
index 2d46862e7bef7735216f78856924456a73091973..a35415bb9b04ddf33c88ba0f443e4bdb955335ed 100644 (file)
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
        void (*dma_unmap_area)(const void *, size_t, int);
 
        void (*dma_flush_range)(const void *, const void *);
-};
+} __no_const;
 
 /*
  * Select the calling method
index 523315115478960a2e433a66276fea007b120cda..87a71fa1bfe7d34f80d91014b13df463fc7e13a8 100644 (file)
@@ -37,7 +37,19 @@ __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
 
 __wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+
+static inline __wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
+{
+       __wsum ret;
+       pax_open_userland();
+       ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
+       pax_close_userland();
+       return ret;
+}
+
+
 
 /*
  *     Fold a partial checksum without adding pseudo headers
index abb2c3769b014e33ad4a70f87a374b30911de4b3..96db950c9338d406b4065cb8685581c7c8c127bc 100644 (file)
@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 
 #define xchg(ptr,x) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg_unchecked(ptr,x) \
+       ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
 #include <asm-generic/cmpxchg-local.h>
 
index 6ddbe446425e11524d927b5cd8b479bcd2419238..b5e38b1a55ef5c062eaa58cee5e61a3a72e00836 100644 (file)
  * Domain types
  */
 #define DOMAIN_NOACCESS        0
-#define DOMAIN_CLIENT  1
 #ifdef CONFIG_CPU_USE_DOMAINS
+#define DOMAIN_USERCLIENT      1
+#define DOMAIN_KERNELCLIENT    1
 #define DOMAIN_MANAGER 3
+#define DOMAIN_VECTORS         DOMAIN_USER
+#else
+
+#ifdef CONFIG_PAX_KERNEXEC
+#define DOMAIN_MANAGER 1
+#define DOMAIN_KERNEXEC        3
 #else
 #define DOMAIN_MANAGER 1
 #endif
 
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+#define DOMAIN_USERCLIENT      0
+#define DOMAIN_UDEREF          1
+#define DOMAIN_VECTORS         DOMAIN_KERNEL
+#else
+#define DOMAIN_USERCLIENT      1
+#define DOMAIN_VECTORS         DOMAIN_USER
+#endif
+#define DOMAIN_KERNELCLIENT    1
+
+#endif
+
 #define domain_val(dom,type)   ((type) << (2*(dom)))
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_CPU_USE_DOMAINS
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
 static inline void set_domain(unsigned val)
 {
        asm volatile(
@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
        isb();
 }
 
-#define modify_domain(dom,type)                                        \
-       do {                                                    \
-       struct thread_info *thread = current_thread_info();     \
-       unsigned int domain = thread->cpu_domain;               \
-       domain &= ~domain_val(dom, DOMAIN_MANAGER);             \
-       thread->cpu_domain = domain | domain_val(dom, type);    \
-       set_domain(thread->cpu_domain);                         \
-       } while (0)
-
+extern void modify_domain(unsigned int dom, unsigned int type);
 #else
 static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type)  { }
index afb9cafd378618fef0d39a7bcae64e9950afdb1c..9a0bac022f6f69c84eb545984175996c21848fb6 100644 (file)
@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
    the loader.  We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk.  */
 
-#define ELF_ET_DYN_BASE        (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE                (TASK_SIZE / 3 * 2)
+
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE    0x00008000UL
+
+#define PAX_DELTA_MMAP_LEN     ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#define PAX_DELTA_STACK_LEN    ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#endif
 
 /* When the program starts, a1 contains a pointer to a function to be 
    registered with atexit, as per the SVR4 ABI.  A value of 0 means we 
@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 extern void elf_set_personality(const struct elf32_hdr *);
 #define SET_PERSONALITY(ex)    elf_set_personality(&(ex))
 
-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 #ifdef CONFIG_MMU
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 struct linux_binprm;
index de53547469246f063b5eaadc12ebc636041f2c28..52b9a289fc10b7d60f992348d1c749e73f46c295 100644 (file)
@@ -81,7 +81,9 @@
        BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) ||             \
                (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
                                                                        \
+       pax_open_kernel();                                              \
        memcpy(dest_buf, (void const *)(__funcp_address & ~1), size);   \
+       pax_close_kernel();                                             \
        flush_icache_range((unsigned long)(dest_buf),                   \
                (unsigned long)(dest_buf) + (size));                    \
                                                                        \
index 53e69dae796f32e495ab262953d4b52cce0cc303..3fdc89690441d661b2c7e03e6497929e4aa78b63 100644 (file)
@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
+       pax_open_userland();
+
        smp_mb();
        /* Prefetching cannot fault */
        prefetchw(uaddr);
@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        : "cc", "memory");
        smp_mb();
 
+       pax_close_userland();
+
        *uval = val;
        return ret;
 }
@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
+       pax_open_userland();
+
        __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
        "1:     " TUSER(ldr) "  %1, [%4]\n"
        "       teq     %1, %2\n"
@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
        : "cc", "memory");
 
+       pax_close_userland();
+
        *uval = val;
        return ret;
 }
@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
                return -EFAULT;
 
        pagefault_disable();    /* implies preempt_disable() */
+       pax_open_userland();
 
        switch (op) {
        case FUTEX_OP_SET:
@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
                ret = -ENOSYS;
        }
 
+       pax_close_userland();
        pagefault_enable();     /* subsumes preempt_enable() */
 
        if (!ret) {
index 83eb2f7729113f9a7d23caf67e80dc20f9150bea..ed77159d4c4e5f5c56ff4857991dd861b64cacad 100644 (file)
@@ -4,6 +4,6 @@
 /*
  * This is the "bare minimum".  AIO seems to require this.
  */
-#define KM_TYPE_NR 16
+#define KM_TYPE_NR 17
 
 #endif
index 9e614a18e680d964734ffbe257d595b129b3acad..3302cca0c7134b846e7229b839379f3cc8963025 100644 (file)
@@ -22,7 +22,7 @@ struct dma_ops {
        int     (*residue)(unsigned int, dma_t *);              /* optional */
        int     (*setspeed)(unsigned int, dma_t *, int);        /* optional */
        const char *type;
-};
+} __do_const;
 
 struct dma_struct {
        void            *addr;          /* single DMA address           */
index f98c7f32c9c8aefb256c7c7a287c6c2d69602011..e5c626d35f3be2beb4ef528445572f4279b56674 100644 (file)
@@ -23,17 +23,19 @@ struct map_desc {
 
 /* types 0-3 are defined in asm/io.h */
 enum {
-       MT_UNCACHED = 4,
-       MT_CACHECLEAN,
-       MT_MINICLEAN,
+       MT_UNCACHED_RW = 4,
+       MT_CACHECLEAN_RO,
+       MT_MINICLEAN_RO,
        MT_LOW_VECTORS,
        MT_HIGH_VECTORS,
-       MT_MEMORY_RWX,
+       __MT_MEMORY_RWX,
        MT_MEMORY_RW,
-       MT_ROM,
-       MT_MEMORY_RWX_NONCACHED,
+       MT_MEMORY_RX,
+       MT_ROM_RX,
+       MT_MEMORY_RW_NONCACHED,
+       MT_MEMORY_RX_NONCACHED,
        MT_MEMORY_RW_DTCM,
-       MT_MEMORY_RWX_ITCM,
+       MT_MEMORY_RX_ITCM,
        MT_MEMORY_RW_SO,
        MT_MEMORY_DMA_READY,
 };
index 891a56b35bcf0c3ad4b8af956fdfa8509d699ff9..48f337e882412b42785c3ab9b24777d6c260fb46 100644 (file)
@@ -36,7 +36,7 @@ struct outer_cache_fns {
 
        /* This is an ARM L2C thing */
        void (*write_sec)(unsigned long, unsigned);
-};
+} __no_const;
 
 extern struct outer_cache_fns outer_cache;
 
index 4355f0ec44d62e9b5d7c40132f28f0ff6710f387..cd9168e6bb1ab7d23b6aaa76e0e4a9ca59fb2d63 100644 (file)
@@ -23,6 +23,7 @@
 
 #else
 
+#include <linux/compiler.h>
 #include <asm/glue.h>
 
 /*
@@ -114,7 +115,7 @@ struct cpu_user_fns {
        void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
        void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma);
-};
+} __no_const;
 
 #ifdef MULTI_USER
 extern struct cpu_user_fns cpu_user;
index 19cfab526d13a37dd33d476f1270179fc4fe5d5c..3f5c7e9d2015d9c3aed4bc7c4e0174d957009786 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/processor.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#include <asm/system_info.h>
 
 #define check_pgt_cache()              do { } while (0)
 
@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
        set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
 }
 
+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+       pud_populate(mm, pud, pmd);
+}
+
 #else  /* !CONFIG_ARM_LPAE */
 
 /*
@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 #define pmd_alloc_one(mm,addr)         ({ BUG(); ((pmd_t *)2); })
 #define pmd_free(mm, pmd)              do { } while (0)
 #define pud_populate(mm,pmd,pte)       BUG()
+#define pud_populate_kernel(mm,pmd,pte)        BUG()
 
 #endif /* CONFIG_ARM_LPAE */
 
@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
        __free_page(pte);
 }
 
+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
+{
+#ifdef CONFIG_ARM_LPAE
+       pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
+#else
+       if (addr & SECTION_SIZE)
+               pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
+       else
+               pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
+#endif
+       flush_pmd_entry(pmdp);
+}
+
 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
                                  pmdval_t prot)
 {
index 5e68278e953e2513f904cc253d200705c999a673..1869baee88465ffe240eb54a35049083502c47bd 100644 (file)
@@ -27,7 +27,7 @@
 /*
  *   - section
  */
-#define PMD_SECT_PXN    (_AT(pmdval_t, 1) << 0)     /* v7 */
+#define PMD_SECT_PXN           (_AT(pmdval_t, 1) << 0)     /* v7 */
 #define PMD_SECT_BUFFERABLE    (_AT(pmdval_t, 1) << 2)
 #define PMD_SECT_CACHEABLE     (_AT(pmdval_t, 1) << 3)
 #define PMD_SECT_XN            (_AT(pmdval_t, 1) << 4)         /* v6 */
@@ -39,6 +39,7 @@
 #define PMD_SECT_nG            (_AT(pmdval_t, 1) << 17)        /* v6 */
 #define PMD_SECT_SUPER         (_AT(pmdval_t, 1) << 18)        /* v6 */
 #define PMD_SECT_AF            (_AT(pmdval_t, 0))
+#define PMD_SECT_RDONLY                (_AT(pmdval_t, 0))
 
 #define PMD_SECT_UNCACHED      (_AT(pmdval_t, 0))
 #define PMD_SECT_BUFFERED      (PMD_SECT_BUFFERABLE)
@@ -68,6 +69,7 @@
  *   - extended small page/tiny page
  */
 #define PTE_EXT_XN             (_AT(pteval_t, 1) << 0)         /* v6 */
+#define PTE_EXT_PXN            (_AT(pteval_t, 1) << 2)         /* v7 */
 #define PTE_EXT_AP_MASK                (_AT(pteval_t, 3) << 4)
 #define PTE_EXT_AP0            (_AT(pteval_t, 1) << 4)
 #define PTE_EXT_AP1            (_AT(pteval_t, 2) << 4)
index f0279411847d72e2f73ffd430a9a5d53d9968514..f36ce3081e23a66dded86b8efb9f3e798c5a8146 100644 (file)
 #define L_PTE_SHARED           (_AT(pteval_t, 1) << 10)        /* shared(v6), coherent(xsc3) */
 #define L_PTE_NONE             (_AT(pteval_t, 1) << 11)
 
+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
+#define L_PTE_PXN              (_AT(pteval_t, 0))
+
 /*
  * These are the memory types, defined to be compatible with
  * pre-ARMv6 CPUs cacheable and bufferable bits:   XXCB
index a31ecdad4b5966dbf15953054bcc0c9d260cd302..95e98d4fbf3dfdec72703f4fa901216471c970cc 100644 (file)
@@ -81,6 +81,7 @@
 #define L_PTE_USER             (_AT(pteval_t, 1) << 6)         /* AP[1] */
 #define L_PTE_SHARED           (_AT(pteval_t, 3) << 8)         /* SH[1:0], inner shareable */
 #define L_PTE_YOUNG            (_AT(pteval_t, 1) << 10)        /* AF */
+#define L_PTE_PXN              (_AT(pteval_t, 1) << 53)        /* PXN */
 #define L_PTE_XN               (_AT(pteval_t, 1) << 54)        /* XN */
 #define L_PTE_DIRTY            (_AT(pteval_t, 1) << 55)
 #define L_PTE_SPECIAL          (_AT(pteval_t, 1) << 56)
 #define L_PMD_SECT_SPLITTING   (_AT(pmdval_t, 1) << 56)
 #define L_PMD_SECT_NONE                (_AT(pmdval_t, 1) << 57)
 #define L_PMD_SECT_RDONLY      (_AT(pteval_t, 1) << 58)
+#define PMD_SECT_RDONLY                PMD_SECT_AP2
 
 /*
  * To be used in assembly code with the upper page attributes.
  */
+#define L_PTE_PXN_HIGH         (1 << (53 - 32))
 #define L_PTE_XN_HIGH          (1 << (54 - 32))
 #define L_PTE_DIRTY_HIGH       (1 << (55 - 32))
 
index d5cac545ba339808527ad0d244cd7eec6be7831a..906ea3e4ef0d49a35ff463e77051e0dc87caba56 100644 (file)
@@ -33,6 +33,9 @@
 #include <asm/pgtable-2level.h>
 #endif
 
+#define ktla_ktva(addr)                (addr)
+#define ktva_ktla(addr)                (addr)
+
 /*
  * Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 8MB value just means that there will be a 8MB "hole" after the
@@ -48,6 +51,9 @@
 #define LIBRARY_TEXT_START     0x0c000000
 
 #ifndef __ASSEMBLY__
+extern pteval_t __supported_pte_mask;
+extern pmdval_t __supported_pmd_mask;
+
 extern void __pte_error(const char *file, int line, pte_t);
 extern void __pmd_error(const char *file, int line, pmd_t);
 extern void __pgd_error(const char *file, int line, pgd_t);
@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
 #define pmd_ERROR(pmd)         __pmd_error(__FILE__, __LINE__, pmd)
 #define pgd_ERROR(pgd)         __pgd_error(__FILE__, __LINE__, pgd)
 
+#define  __HAVE_ARCH_PAX_OPEN_KERNEL
+#define  __HAVE_ARCH_PAX_CLOSE_KERNEL
+
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+#include <asm/domain.h>
+#include <linux/thread_info.h>
+#include <linux/preempt.h>
+
+static inline int test_domain(int domain, int domaintype)
+{
+       return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
+}
+#endif
+
+#ifdef CONFIG_PAX_KERNEXEC
+static inline unsigned long pax_open_kernel(void) {
+#ifdef CONFIG_ARM_LPAE
+       /* TODO */
+#else
+       preempt_disable();
+       BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
+       modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
+#endif
+       return 0;
+}
+
+static inline unsigned long pax_close_kernel(void) {
+#ifdef CONFIG_ARM_LPAE
+       /* TODO */
+#else
+       BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
+       /* DOMAIN_MANAGER = "client" under KERNEXEC */
+       modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
+       preempt_enable_no_resched();
+#endif
+       return 0;
+}
+#else
+static inline unsigned long pax_open_kernel(void) { return 0; }
+static inline unsigned long pax_close_kernel(void) { return 0; }
+#endif
+
 /*
  * This is the lowest virtual address we can permit any user space
  * mapping to be mapped at.  This is particularly important for
@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
 /*
  * The pgprot_* and protection_map entries will be fixed up in runtime
  * to include the cachable and bufferable bits based on memory policy,
- * as well as any architecture dependent bits like global/ASID and SMP
- * shared mapping bits.
+ * as well as any architecture dependent bits like global/ASID, PXN,
+ * and SMP shared mapping bits.
  */
 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
 
@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
        const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
-               L_PTE_NONE | L_PTE_VALID;
+               L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
        pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
        return pte;
 }
index c25ef3ec6d1f85dc1e259dd1f777ce546d33d97e..735f14b6dfe22cf722f47e768107ccbd7963dd84 100644 (file)
@@ -32,7 +32,7 @@ struct psci_operations {
        int (*affinity_info)(unsigned long target_affinity,
                        unsigned long lowest_affinity_level);
        int (*migrate_info_type)(void);
-};
+} __no_const;
 
 extern struct psci_operations psci_ops;
 extern struct smp_operations psci_smp_ops;
index 18f5a554134fe88174e99c0e5dd6de473e91e200..5072a40b93f657fd503e9f0e52bf38906e54058e 100644 (file)
@@ -107,7 +107,7 @@ struct smp_operations {
        int  (*cpu_disable)(unsigned int cpu);
 #endif
 #endif
-};
+} __no_const;
 
 struct of_cpu_method {
        const char *method;
index d890e41f55207b3b3df4d78f0c26354a85f94739..3921292cf3e3b64e3c51681bfb7f01cb0c7d0405 100644 (file)
@@ -78,9 +78,9 @@ struct thread_info {
        .flags          = 0,                                            \
        .preempt_count  = INIT_PREEMPT_COUNT,                           \
        .addr_limit     = KERNEL_DS,                                    \
-       .cpu_domain     = domain_val(DOMAIN_USER, DOMAIN_MANAGER) |     \
-                         domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |   \
-                         domain_val(DOMAIN_IO, DOMAIN_CLIENT),         \
+       .cpu_domain     = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) |  \
+                         domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) |      \
+                         domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT),   \
        .restart_block  = {                                             \
                .fn     = do_no_restart_syscall,                        \
        },                                                              \
@@ -159,7 +159,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_SYSCALL_AUDIT      9
 #define TIF_SYSCALL_TRACEPOINT 10
 #define TIF_SECCOMP            11      /* seccomp syscall filtering active */
-#define TIF_NOHZ               12      /* in adaptive nohz mode */
+/* within 8 bits of TIF_SYSCALL_TRACE
+ *  to meet flexible second operand requirements
+ */
+#define TIF_GRSEC_SETXID       12
+#define TIF_NOHZ               13      /* in adaptive nohz mode */
 #define TIF_USING_IWMMXT       17
 #define TIF_MEMDIE             18      /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK    20
@@ -173,10 +177,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define _TIF_SYSCALL_TRACEPOINT        (1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
 #define _TIF_USING_IWMMXT      (1 << TIF_USING_IWMMXT)
+#define _TIF_GRSEC_SETXID      (1 << TIF_GRSEC_SETXID)
 
 /* Checks for any syscall work in entry-common.S */
 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-                          _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
+                          _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
 
 /*
  * Change these and you break ASM code in entry-common.S
index 5f833f7adba1abdc8620a11020cb2c80dec24d26..76e66448666b5baf302c9a6af6427902d9c645a0 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/compiler.h>
 #include <asm/thread_info.h>
+#include <asm/pgtable.h>
 
 #ifdef __ASSEMBLY__
 #include <asm/asm-offsets.h>
@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
                         * at 0xffff0fe0 must be used instead.  (see
                         * entry-armv.S for details)
                         */
+                       pax_open_kernel();
                        *((unsigned int *)0xffff0ff0) = val;
+                       pax_close_kernel();
 #endif
                }
 
index 4767eb9caa78c89fe0c6b447b0d234fb71738285..bf0066819f8199d0eaa623814de19ba747d411a5 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/domain.h>
 #include <asm/unified.h>
 #include <asm/compiler.h>
+#include <asm/pgtable.h>
 
 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 #include <asm-generic/uaccess-unaligned.h>
@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
 static inline void set_fs(mm_segment_t fs)
 {
        current_thread_info()->addr_limit = fs;
-       modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
+       modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
 }
 
 #define segment_eq(a,b)        ((a) == (b))
 
+#define __HAVE_ARCH_PAX_OPEN_USERLAND
+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
+
+static inline void pax_open_userland(void)
+{
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       if (segment_eq(get_fs(), USER_DS)) {
+               BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
+               modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
+       }
+#endif
+
+}
+
+static inline void pax_close_userland(void)
+{
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       if (segment_eq(get_fs(), USER_DS)) {
+               BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
+               modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
+       }
+#endif
+
+}
+
 #define __addr_ok(addr) ({ \
        unsigned long flag; \
        __asm__("cmp %2, %0; movlo %0, #0" \
@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
 
 #define get_user(x,p)                                                  \
        ({                                                              \
+               int __e;                                                \
                might_fault();                                          \
-               __get_user_check(x,p);                                  \
+               pax_open_userland();                                    \
+               __e = __get_user_check(x,p);                            \
+               pax_close_userland();                                   \
+               __e;                                                    \
         })
 
 extern int __put_user_1(void *, unsigned int);
@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
 
 #define put_user(x,p)                                                  \
        ({                                                              \
+               int __e;                                                \
                might_fault();                                          \
-               __put_user_check(x,p);                                  \
+               pax_open_userland();                                    \
+               __e = __put_user_check(x,p);                            \
+               pax_close_userland();                                   \
+               __e;                                                    \
         })
 
 #else /* CONFIG_MMU */
@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
 
 #endif /* CONFIG_MMU */
 
+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
 #define access_ok(type,addr,size)      (__range_ok(addr,size) == 0)
 
 #define user_addr_max() \
@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
 #define __get_user(x,ptr)                                              \
 ({                                                                     \
        long __gu_err = 0;                                              \
+       pax_open_userland();                                            \
        __get_user_err((x),(ptr),__gu_err);                             \
+       pax_close_userland();                                           \
        __gu_err;                                                       \
 })
 
 #define __get_user_error(x,ptr,err)                                    \
 ({                                                                     \
+       pax_open_userland();                                            \
        __get_user_err((x),(ptr),err);                                  \
+       pax_close_userland();                                           \
        (void) 0;                                                       \
 })
 
@@ -368,13 +409,17 @@ do {                                                                      \
 #define __put_user(x,ptr)                                              \
 ({                                                                     \
        long __pu_err = 0;                                              \
+       pax_open_userland();                                            \
        __put_user_err((x),(ptr),__pu_err);                             \
+       pax_close_userland();                                           \
        __pu_err;                                                       \
 })
 
 #define __put_user_error(x,ptr,err)                                    \
 ({                                                                     \
+       pax_open_userland();                                            \
        __put_user_err((x),(ptr),err);                                  \
+       pax_close_userland();                                           \
        (void) 0;                                                       \
 })
 
@@ -474,11 +519,44 @@ do {                                                                      \
 
 
 #ifdef CONFIG_MMU
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
+
+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       unsigned long ret;
+
+       check_object_size(to, n, false);
+       pax_open_userland();
+       ret = ___copy_from_user(to, from, n);
+       pax_close_userland();
+       return ret;
+}
+
+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+       unsigned long ret;
+
+       check_object_size(from, n, true);
+       pax_open_userland();
+       ret = ___copy_to_user(to, from, n);
+       pax_close_userland();
+       return ret;
+}
+
 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
+{
+       unsigned long ret;
+       pax_open_userland();
+       ret = ___clear_user(addr, n);
+       pax_close_userland();
+       return ret;
+}
+
 #else
 #define __copy_from_user(to,from,n)    (memcpy(to, (void __force *)from, n), 0)
 #define __copy_to_user(to,from,n)      (memcpy((void __force *)to, from, n), 0)
@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
 
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+       if ((long)n < 0)
+               return n;
+
        if (access_ok(VERIFY_READ, from, n))
                n = __copy_from_user(to, from, n);
        else /* security hole - plug it */
@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
 
 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+       if ((long)n < 0)
+               return n;
+
        if (access_ok(VERIFY_WRITE, to, n))
                n = __copy_to_user(to, from, n);
        return n;
index 5af0ed1b825a2aa95dc0626c9e1a685924979e24..cea838833cf7cc97931a9655672691f3f34b380b 100644 (file)
@@ -92,7 +92,7 @@
  * ARMv7 groups of PSR bits
  */
 #define APSR_MASK      0xf80f0000      /* N, Z, C, V, Q and GE flags */
-#define PSR_ISET_MASK  0x01000010      /* ISA state (J, T) mask */
+#define PSR_ISET_MASK  0x01000020      /* ISA state (J, T) mask */
 #define PSR_IT_MASK    0x0600fc00      /* If-Then execution state mask */
 #define PSR_ENDIAN_MASK        0x00000200      /* Endianness state mask */
 
index a88671cfe1ffb1e1ee43b06a7c8c6fd704f38580..1cc895e968e56abd5475eb962fcec71eb900fdd4 100644 (file)
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
 
        /* networking */
 EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(csum_partial_copy_from_user);
+EXPORT_SYMBOL(__csum_partial_copy_from_user);
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
 EXPORT_SYMBOL(__csum_ipv6_magic);
 
@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
 #ifdef CONFIG_MMU
 EXPORT_SYMBOL(copy_page);
 
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
-EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(___copy_from_user);
+EXPORT_SYMBOL(___copy_to_user);
+EXPORT_SYMBOL(___clear_user);
 
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
index 2f5555d307b345b7e0191adb7d7f02fe49ab887b..d493c91e8b0fa99dd9ee38484546a31fe1fe35fe 100644 (file)
 9997:
        .endm
 
+       .macro  pax_enter_kernel
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+       @ make aligned space for saved DACR
+       sub     sp, sp, #8
+       @ save regs
+       stmdb   sp!, {r1, r2}
+       @ read DACR from cpu_domain into r1
+       mov     r2, sp
+       @ assume 8K pages, since we have to split the immediate in two
+       bic     r2, r2, #(0x1fc0)
+       bic     r2, r2, #(0x3f)
+       ldr     r1, [r2, #TI_CPU_DOMAIN]
+       @ store old DACR on stack
+       str     r1, [sp, #8]
+#ifdef CONFIG_PAX_KERNEXEC
+       @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
+       bic     r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
+       orr     r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
+#endif
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       @ set current DOMAIN_USER to DOMAIN_NOACCESS
+       bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
+#endif
+       @ write r1 to current_thread_info()->cpu_domain
+       str     r1, [r2, #TI_CPU_DOMAIN]
+       @ write r1 to DACR
+       mcr     p15, 0, r1, c3, c0, 0
+       @ instruction sync
+       instr_sync
+       @ restore regs
+       ldmia   sp!, {r1, r2}
+#endif
+       .endm
+
+       .macro  pax_open_userland
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       @ save regs
+       stmdb   sp!, {r0, r1}
+       @ read DACR from cpu_domain into r1
+       mov     r0, sp
+       @ assume 8K pages, since we have to split the immediate in two
+       bic     r0, r0, #(0x1fc0)
+       bic     r0, r0, #(0x3f)
+       ldr     r1, [r0, #TI_CPU_DOMAIN]
+       @ set current DOMAIN_USER to DOMAIN_CLIENT
+       bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
+       orr     r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
+       @ write r1 to current_thread_info()->cpu_domain
+       str     r1, [r0, #TI_CPU_DOMAIN]
+       @ write r1 to DACR
+       mcr     p15, 0, r1, c3, c0, 0
+       @ instruction sync
+       instr_sync
+       @ restore regs
+       ldmia   sp!, {r0, r1}
+#endif
+       .endm
+
+       .macro  pax_close_userland
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       @ save regs
+       stmdb   sp!, {r0, r1}
+       @ read DACR from cpu_domain into r1
+       mov     r0, sp
+       @ assume 8K pages, since we have to split the immediate in two
+       bic     r0, r0, #(0x1fc0)
+       bic     r0, r0, #(0x3f)
+       ldr     r1, [r0, #TI_CPU_DOMAIN]
+       @ set current DOMAIN_USER to DOMAIN_NOACCESS
+       bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
+       @ write r1 to current_thread_info()->cpu_domain
+       str     r1, [r0, #TI_CPU_DOMAIN]
+       @ write r1 to DACR
+       mcr     p15, 0, r1, c3, c0, 0
+       @ instruction sync
+       instr_sync
+       @ restore regs
+       ldmia   sp!, {r0, r1}
+#endif
+       .endm
+
        .macro  pabt_helper
        @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
 #ifdef MULTI_PABORT
  * Invalid mode handlers
  */
        .macro  inv_entry, reason
+
+       pax_enter_kernel
+
        sub     sp, sp, #S_FRAME_SIZE
  ARM(  stmib   sp, {r1 - lr}           )
  THUMB(        stmia   sp, {r0 - r12}          )
  THUMB(        str     sp, [sp, #S_SP]         )
  THUMB(        str     lr, [sp, #S_LR]         )
+
        mov     r1, #\reason
        .endm
 
@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
        .macro  svc_entry, stack_hole=0, trace=1
  UNWIND(.fnstart               )
  UNWIND(.save {r0 - pc}                )
+
+       pax_enter_kernel
+
        sub     sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+
 #ifdef CONFIG_THUMB2_KERNEL
  SPFIX(        str     r0, [sp]        )       @ temporarily saved
  SPFIX(        mov     r0, sp          )
@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
        ldmia   r0, {r3 - r5}
        add     r7, sp, #S_SP - 4       @ here for interlock avoidance
        mov     r6, #-1                 @  ""  ""      ""       ""
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+       @ offset sp by 8 as done in pax_enter_kernel
+       add     r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
+#else
        add     r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+#endif
  SPFIX(        addeq   r2, r2, #4      )
        str     r3, [sp, #-4]!          @ save the "real" r0 copied
                                        @ from the exception stack
@@ -368,6 +462,9 @@ ENDPROC(__fiq_abt)
        .macro  usr_entry, trace=1
  UNWIND(.fnstart       )
  UNWIND(.cantunwind    )       @ don't unwind the user space
+
+       pax_enter_kernel_user
+
        sub     sp, sp, #S_FRAME_SIZE
  ARM(  stmib   sp, {r1 - r12}  )
  THUMB(        stmia   sp, {r0 - r12}  )
@@ -478,7 +575,9 @@ __und_usr:
        tst     r3, #PSR_T_BIT                  @ Thumb mode?
        bne     __und_usr_thumb
        sub     r4, r2, #4                      @ ARM instr at LR - 4
+       pax_open_userland
 1:     ldrt    r0, [r4]
+       pax_close_userland
  ARM_BE8(rev   r0, r0)                         @ little endian instruction
 
        @ r0 = 32-bit ARM instruction which caused the exception
@@ -512,11 +611,15 @@ __und_usr_thumb:
  */
        .arch   armv6t2
 #endif
+       pax_open_userland
 2:     ldrht   r5, [r4]
+       pax_close_userland
 ARM_BE8(rev16  r5, r5)                         @ little endian instruction
        cmp     r5, #0xe800                     @ 32bit instruction if xx != 0
        blo     __und_usr_fault_16              @ 16bit undefined instruction
+       pax_open_userland
 3:     ldrht   r0, [r2]
+       pax_close_userland
 ARM_BE8(rev16  r0, r0)                         @ little endian instruction
        add     r2, r2, #2                      @ r2 is PC + 2, make it PC + 4
        str     r2, [sp, #S_PC]                 @ it's a 2x16bit instr, update
@@ -546,7 +649,8 @@ ENDPROC(__und_usr)
  */
        .pushsection .fixup, "ax"
        .align  2
-4:     str     r4, [sp, #S_PC]                 @ retry current instruction
+4:     pax_close_userland
+       str     r4, [sp, #S_PC]                 @ retry current instruction
        ret     r9
        .popsection
        .pushsection __ex_table,"a"
@@ -766,7 +870,7 @@ ENTRY(__switch_to)
  THUMB(        str     lr, [ip], #4               )
        ldr     r4, [r2, #TI_TP_VALUE]
        ldr     r5, [r2, #TI_TP_VALUE + 4]
-#ifdef CONFIG_CPU_USE_DOMAINS
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
        ldr     r6, [r2, #TI_CPU_DOMAIN]
 #endif
        switch_tls r1, r4, r5, r3, r7
@@ -775,7 +879,7 @@ ENTRY(__switch_to)
        ldr     r8, =__stack_chk_guard
        ldr     r7, [r7, #TSK_STACK_CANARY]
 #endif
-#ifdef CONFIG_CPU_USE_DOMAINS
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
        mcr     p15, 0, r6, c3, c0, 0           @ Set domain register
 #endif
        mov     r5, r0
index f8ccc21fa032354facead9735abf9f4eb0cb7eb2..83d192f85686cd0acd142ae75d9267e6d06d05cb 100644 (file)
 #include <asm/assembler.h>
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
+#include <asm/domain.h>
 #include <asm/unwind.h>
 
+#include "entry-header.S"
+
 #ifdef CONFIG_NEED_RET_TO_USER
 #include <mach/entry-macro.S>
 #else
        .macro  arch_ret_to_user, tmp1, tmp2
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+       @ save regs
+       stmdb   sp!, {r1, r2}
+        @ read DACR from cpu_domain into r1
+        mov     r2, sp
+        @ assume 8K pages, since we have to split the immediate in two
+        bic     r2, r2, #(0x1fc0)
+        bic     r2, r2, #(0x3f)
+        ldr     r1, [r2, #TI_CPU_DOMAIN]
+#ifdef CONFIG_PAX_KERNEXEC
+        @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
+        bic     r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
+        orr     r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
+#endif
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+        @ set current DOMAIN_USER to DOMAIN_UDEREF
+        bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
+        orr     r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
+#endif
+        @ write r1 to current_thread_info()->cpu_domain
+        str     r1, [r2, #TI_CPU_DOMAIN]
+        @ write r1 to DACR
+        mcr     p15, 0, r1, c3, c0, 0
+        @ instruction sync
+        instr_sync
+       @ restore regs
+       ldmia   sp!, {r1, r2}
+#endif
        .endm
 #endif
 
-#include "entry-header.S"
-
-
        .align  5
 /*
  * This is the fast syscall return path.  We do as little as
@@ -171,6 +199,12 @@ ENTRY(vector_swi)
  USER( ldr     scno, [lr, #-4]         )       @ get SWI instruction
 #endif
 
+       /*
+        * do this here to avoid a performance hit of wrapping the code above
+        * that directly dereferences userland to parse the SWI instruction
+        */
+       pax_enter_kernel_user
+
        adr     tbl, sys_call_table             @ load syscall table pointer
 
 #if defined(CONFIG_OABI_COMPAT)
index 1a0045abead7562be1e27163e0aee3c6afbe9b40..9b4f34d55fc22083f5138b87ba3038a9d9fae5a0 100644 (file)
        msr     cpsr_c, \rtemp                  @ switch back to the SVC mode
        .endm
 
+       .macro  pax_enter_kernel_user
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+       @ save regs
+       stmdb   sp!, {r0, r1}
+       @ read DACR from cpu_domain into r1
+       mov     r0, sp
+       @ assume 8K pages, since we have to split the immediate in two
+       bic     r0, r0, #(0x1fc0)
+       bic     r0, r0, #(0x3f)
+       ldr     r1, [r0, #TI_CPU_DOMAIN]
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       @ set current DOMAIN_USER to DOMAIN_NOACCESS
+       bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
+#endif
+#ifdef CONFIG_PAX_KERNEXEC
+       @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
+       bic     r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
+       orr     r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
+#endif
+       @ write r1 to current_thread_info()->cpu_domain
+       str     r1, [r0, #TI_CPU_DOMAIN]
+       @ write r1 to DACR
+       mcr     p15, 0, r1, c3, c0, 0
+       @ instruction sync
+       instr_sync
+       @ restore regs
+       ldmia   sp!, {r0, r1}
+#endif
+       .endm
+
+       .macro  pax_exit_kernel
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+       @ save regs
+       stmdb   sp!, {r0, r1}
+       @ read old DACR from stack into r1
+       ldr     r1, [sp, #(8 + S_SP)]
+       sub     r1, r1, #8
+       ldr     r1, [r1]
+
+       @ write r1 to current_thread_info()->cpu_domain
+       mov     r0, sp
+       @ assume 8K pages, since we have to split the immediate in two
+       bic     r0, r0, #(0x1fc0)
+       bic     r0, r0, #(0x3f)
+       str     r1, [r0, #TI_CPU_DOMAIN]
+       @ write r1 to DACR
+       mcr     p15, 0, r1, c3, c0, 0
+       @ instruction sync
+       instr_sync
+       @ restore regs
+       ldmia   sp!, {r0, r1}
+#endif
+       .endm
+
 #ifndef CONFIG_THUMB2_KERNEL
        .macro  svc_exit, rpsr, irq = 0
        .if     \irq != 0
        blne    trace_hardirqs_off
 #endif
        .endif
+
+       pax_exit_kernel
+
        msr     spsr_cxsf, \rpsr
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
        @ We must avoid clrex due to Cortex-A15 erratum #830321
        blne    trace_hardirqs_off
 #endif
        .endif
+
+       pax_exit_kernel
+
        ldr     lr, [sp, #S_SP]                 @ top of the stack
        ldrd    r0, r1, [sp, #S_LR]             @ calling lr and pc
 
index 059c3da0fee34fb815ffaeb3f9d4a59f2b79661f..8e45cfc0271d6211196d6cb937e297fc3f7fc99f 100644 (file)
@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
        void *base = vectors_page;
        unsigned offset = FIQ_OFFSET;
 
+       pax_open_kernel();
        memcpy(base + offset, start, length);
+       pax_close_kernel();
+
        if (!cache_is_vipt_nonaliasing())
                flush_icache_range((unsigned long)base + offset, offset +
                                   length);
index 664eee8c4a26ce5998e72a3b1e70ffe8bfa2d8f3..f47093858aaff987760e5c6131c012588149ebfa 100644 (file)
@@ -437,7 +437,7 @@ __enable_mmu:
        mov     r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
                      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
                      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
-                     domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+                     domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
        mcr     p15, 0, r5, c3, c0, 0           @ load domain access register
        mcr     p15, 0, r4, c2, c0, 0           @ load page table pointer
 #endif
index bea7db9e5b80935a2b164922ea62b5b55c77c01c..a210d10f365f09390f31b96f2ce27e77e0cd38af 100644 (file)
 #endif
 
 #ifdef CONFIG_MMU
-void *module_alloc(unsigned long size)
+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
 {
+       if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
+               return NULL;
        return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-                               GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+                               GFP_KERNEL, prot, NUMA_NO_NODE,
                                __builtin_return_address(0));
 }
+
+void *module_alloc(unsigned long size)
+{
+
+#ifdef CONFIG_PAX_KERNEXEC
+       return __module_alloc(size, PAGE_KERNEL);
+#else
+       return __module_alloc(size, PAGE_KERNEL_EXEC);
+#endif
+
+}
+
+#ifdef CONFIG_PAX_KERNEXEC
+void module_memfree_exec(void *module_region)
+{
+       module_memfree(module_region);
+}
+EXPORT_SYMBOL(module_memfree_exec);
+
+void *module_alloc_exec(unsigned long size)
+{
+       return __module_alloc(size, PAGE_KERNEL_EXEC);
+}
+EXPORT_SYMBOL(module_alloc_exec);
+#endif
 #endif
 
 int
index 5038960e3c55abc1dc9744808f7c8c9d44eca290..4aa71d8f64339db2e6b2aa15263b120714b7035f 100644 (file)
@@ -67,6 +67,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
        else
                __acquire(&patch_lock);
 
+       pax_open_kernel();
        if (thumb2 && __opcode_is_thumb16(insn)) {
                *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
                size = sizeof(u16);
@@ -98,6 +99,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
                *(u32 *)waddr = insn;
                size = sizeof(u32);
        }
+       pax_close_kernel();
 
        if (waddr != addr) {
                flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
index fdfa3a78ec8cb8d7c96098f58175eea28e8a6da5..5d208b8b8861e75284d68a3d3661b4050d3a864e 100644 (file)
@@ -207,6 +207,7 @@ void machine_power_off(void)
 
        if (pm_power_off)
                pm_power_off();
+       BUG();
 }
 
 /*
@@ -220,7 +221,7 @@ void machine_power_off(void)
  * executing pre-reset code, and using RAM that the primary CPU's code wishes
  * to use. Implementing such co-ordination would be essentially impossible.
  */
-void machine_restart(char *cmd)
+__noreturn void machine_restart(char *cmd)
 {
        local_irq_disable();
        smp_send_stop();
@@ -246,8 +247,8 @@ void __show_regs(struct pt_regs *regs)
 
        show_regs_print_info(KERN_DEFAULT);
 
-       print_symbol("PC is at %s\n", instruction_pointer(regs));
-       print_symbol("LR is at %s\n", regs->ARM_lr);
+       printk("PC is at %pA\n", (void *)instruction_pointer(regs));
+       printk("LR is at %pA\n", (void *)regs->ARM_lr);
        printk("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n"
               "sp : %08lx  ip : %08lx  fp : %08lx\n",
                regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
@@ -424,12 +425,6 @@ unsigned long get_wchan(struct task_struct *p)
        return 0;
 }
 
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-       unsigned long range_end = mm->brk + 0x02000000;
-       return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
-}
-
 #ifdef CONFIG_MMU
 #ifdef CONFIG_KUSER_HELPERS
 /*
@@ -445,7 +440,7 @@ static struct vm_area_struct gate_vma = {
 
 static int __init gate_vma_init(void)
 {
-       gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
+       gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
        return 0;
 }
 arch_initcall(gate_vma_init);
@@ -474,81 +469,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
        return is_gate_vma(vma) ? "[vectors]" : NULL;
 }
 
-/* If possible, provide a placement hint at a random offset from the
- * stack for the signal page.
- */
-static unsigned long sigpage_addr(const struct mm_struct *mm,
-                                 unsigned int npages)
-{
-       unsigned long offset;
-       unsigned long first;
-       unsigned long last;
-       unsigned long addr;
-       unsigned int slots;
-
-       first = PAGE_ALIGN(mm->start_stack);
-
-       last = TASK_SIZE - (npages << PAGE_SHIFT);
-
-       /* No room after stack? */
-       if (first > last)
-               return 0;
-
-       /* Just enough room? */
-       if (first == last)
-               return first;
-
-       slots = ((last - first) >> PAGE_SHIFT) + 1;
-
-       offset = get_random_int() % slots;
-
-       addr = first + (offset << PAGE_SHIFT);
-
-       return addr;
-}
-
-static struct page *signal_page;
-extern struct page *get_signal_page(void);
-
-static const struct vm_special_mapping sigpage_mapping = {
-       .name = "[sigpage]",
-       .pages = &signal_page,
-};
-
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
        struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
-       unsigned long addr;
-       unsigned long hint;
-       int ret = 0;
-
-       if (!signal_page)
-               signal_page = get_signal_page();
-       if (!signal_page)
-               return -ENOMEM;
 
        down_write(&mm->mmap_sem);
-       hint = sigpage_addr(mm, 1);
-       addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
-       if (IS_ERR_VALUE(addr)) {
-               ret = addr;
-               goto up_fail;
-       }
-
-       vma = _install_special_mapping(mm, addr, PAGE_SIZE,
-               VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
-               &sigpage_mapping);
-
-       if (IS_ERR(vma)) {
-               ret = PTR_ERR(vma);
-               goto up_fail;
-       }
-
-       mm->context.sigpage = addr;
-
- up_fail:
+       mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
        up_write(&mm->mmap_sem);
-       return ret;
+       return 0;
 }
 #endif
index f73891b6b7300dc67e353a8694a035e9a212c2d7..cf3004e82efaa0e3917ed1c7ea0a861f3e0ab6a2 100644 (file)
@@ -28,7 +28,7 @@
 #include <asm/psci.h>
 #include <asm/system_misc.h>
 
-struct psci_operations psci_ops;
+struct psci_operations psci_ops __read_only;
 
 static int (*invoke_psci_fn)(u32, u32, u32, u32);
 typedef int (*psci_initcall_t)(const struct device_node *);
index ef9119f7462ea11550fc0e6edaa756d9edc39905..31995a33bd6a15a3bb3e53808af483292fe2ff3f 100644 (file)
@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
        regs->ARM_ip = ip;
 }
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+extern void gr_delayed_cred_worker(void);
+#endif
+
 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
 {
        current_thread_info()->syscall = scno;
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+       if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
+               gr_delayed_cred_worker();
+#endif
+
        /* Do the secure computing check first; failures should be fast. */
 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
        if (secure_computing() == -1)
index e55408e965596964ff8c8708dcfec529a559b1bc..14d999882e6baf60edce36ff12576d5d4e502228 100644 (file)
@@ -105,21 +105,23 @@ EXPORT_SYMBOL(elf_hwcap);
 unsigned int elf_hwcap2 __read_mostly;
 EXPORT_SYMBOL(elf_hwcap2);
 
+pteval_t __supported_pte_mask __read_only;
+pmdval_t __supported_pmd_mask __read_only;
 
 #ifdef MULTI_CPU
-struct processor processor __read_mostly;
+struct processor processor __read_only;
 #endif
 #ifdef MULTI_TLB
-struct cpu_tlb_fns cpu_tlb __read_mostly;
+struct cpu_tlb_fns cpu_tlb __read_only;
 #endif
 #ifdef MULTI_USER
-struct cpu_user_fns cpu_user __read_mostly;
+struct cpu_user_fns cpu_user __read_only;
 #endif
 #ifdef MULTI_CACHE
-struct cpu_cache_fns cpu_cache __read_mostly;
+struct cpu_cache_fns cpu_cache __read_only;
 #endif
 #ifdef CONFIG_OUTER_CACHE
-struct outer_cache_fns outer_cache __read_mostly;
+struct outer_cache_fns outer_cache __read_only;
 EXPORT_SYMBOL(outer_cache);
 #endif
 
@@ -253,9 +255,13 @@ static int __get_cpu_architecture(void)
                asm("mrc        p15, 0, %0, c0, c1, 4"
                    : "=r" (mmfr0));
                if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
-                   (mmfr0 & 0x000000f0) >= 0x00000030)
+                   (mmfr0 & 0x000000f0) >= 0x00000030) {
                        cpu_arch = CPU_ARCH_ARMv7;
-               else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
+                       if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
+                               __supported_pte_mask |= L_PTE_PXN;
+                               __supported_pmd_mask |= PMD_PXNTABLE;
+                       }
+               } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
index 8aa6f1b87c9e2b1325c0ec8b8778f94f4106450b..0899e080105c10fc2af1c86f096b2d2dc82da573 100644 (file)
@@ -24,8 +24,6 @@
 
 extern const unsigned long sigreturn_codes[7];
 
-static unsigned long signal_return_offset;
-
 #ifdef CONFIG_CRUNCH
 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
 {
@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
                         * except when the MPU has protected the vectors
                         * page from PL0
                         */
-                       retcode = mm->context.sigpage + signal_return_offset +
-                                 (idx << 2) + thumb;
+                       retcode = mm->context.sigpage + (idx << 2) + thumb;
                } else
 #endif
                {
@@ -603,33 +600,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
        } while (thread_flags & _TIF_WORK_MASK);
        return 0;
 }
-
-struct page *get_signal_page(void)
-{
-       unsigned long ptr;
-       unsigned offset;
-       struct page *page;
-       void *addr;
-
-       page = alloc_pages(GFP_KERNEL, 0);
-
-       if (!page)
-               return NULL;
-
-       addr = page_address(page);
-
-       /* Give the signal return code some randomness */
-       offset = 0x200 + (get_random_int() & 0x7fc);
-       signal_return_offset = offset;
-
-       /*
-        * Copy signal return handlers into the vector page, and
-        * set sigreturn to be a pointer to these.
-        */
-       memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
-
-       ptr = (unsigned long)addr + offset;
-       flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
-
-       return page;
-}
index 86ef244c5a24b4fa80b20da26c5d522832a61d59..c518451a2fd76f54b78cf23b61a3ab3345c55d70 100644 (file)
@@ -76,7 +76,7 @@ enum ipi_msg_type {
 
 static DECLARE_COMPLETION(cpu_running);
 
-static struct smp_operations smp_ops;
+static struct smp_operations smp_ops __read_only;
 
 void __init smp_set_ops(struct smp_operations *ops)
 {
index 7a3be1d4d0b13b7ca984721403aafda47ecb574c..b00c7de5e2ef98dcbb85f0467f9177701d7a50c1 100644 (file)
@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
                .virtual        = ITCM_OFFSET,
                .pfn            = __phys_to_pfn(ITCM_OFFSET),
                .length         = 0,
-               .type           = MT_MEMORY_RWX_ITCM,
+               .type           = MT_MEMORY_RX_ITCM,
        }
 };
 
@@ -267,7 +267,9 @@ no_dtcm:
                start = &__sitcm_text;
                end   = &__eitcm_text;
                ram   = &__itcm_start;
+               pax_open_kernel();
                memcpy(start, ram, itcm_code_sz);
+               pax_close_kernel();
                pr_debug("CPU ITCM: copied code from %p - %p\n",
                         start, end);
                itcm_present = true;
index 788e23fe64d8e5502f553464c8718a2bf00bf2cb..6fa06a1d40c4bfe40be16e6b1ded092d441a22ba 100644 (file)
@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
 {
 #ifdef CONFIG_KALLSYMS
-       printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
+       printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
 #else
        printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
 #endif
@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 static int die_owner = -1;
 static unsigned int die_nest_count;
 
+extern void gr_handle_kernel_exploit(void);
+
 static unsigned long oops_begin(void)
 {
        int cpu;
@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");
+
+       gr_handle_kernel_exploit();
+
        if (signr)
                do_exit(signr);
 }
@@ -880,7 +885,11 @@ void __init early_trap_init(void *vectors_base)
        kuser_init(vectors_base);
 
        flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
-       modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
+
+#ifndef CONFIG_PAX_MEMORY_UDEREF
+       modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
+#endif
+
 #else /* ifndef CONFIG_CPU_V7M */
        /*
         * on V7-M there is no need to copy the vector table to a dedicated
index b31aa73e80765539ce14018a1ed2fdc0a1a81ddc..cc4b7a19e875c55f98229f3af3d51150493e7c4f 100644 (file)
@@ -37,7 +37,7 @@
 #endif
 
 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
-       defined(CONFIG_GENERIC_BUG)
+       defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
 #define ARM_EXIT_KEEP(x)       x
 #define ARM_EXIT_DISCARD(x)
 #else
@@ -123,6 +123,8 @@ SECTIONS
 #ifdef CONFIG_DEBUG_RODATA
        . = ALIGN(1<<SECTION_SHIFT);
 #endif
+       _etext = .;                     /* End of text section */
+
        RO_DATA(PAGE_SIZE)
 
        . = ALIGN(4);
@@ -153,8 +155,6 @@ SECTIONS
 
        NOTES
 
-       _etext = .;                     /* End of text and rodata section */
-
 #ifndef CONFIG_XIP_KERNEL
 # ifdef CONFIG_ARM_KERNMEM_PERMS
        . = ALIGN(1<<SECTION_SHIFT);
index 0b0d58a905c43ba05afc61ca06341bfab348b528..988cb45db0000be1c67e789a994eab6fac5b2aa8 100644 (file)
@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
 
 /* The VMID used in the VTTBR */
-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u8 kvm_next_vmid;
 static DEFINE_SPINLOCK(kvm_vmid_lock);
 
@@ -351,7 +351,7 @@ void force_vm_exit(const cpumask_t *mask)
  */
 static bool need_new_vmid_gen(struct kvm *kvm)
 {
-       return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
+       return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
 }
 
 /**
@@ -384,7 +384,7 @@ static void update_vttbr(struct kvm *kvm)
 
        /* First user of a new VMID generation? */
        if (unlikely(kvm_next_vmid == 0)) {
-               atomic64_inc(&kvm_vmid_gen);
+               atomic64_inc_unchecked(&kvm_vmid_gen);
                kvm_next_vmid = 1;
 
                /*
@@ -401,7 +401,7 @@ static void update_vttbr(struct kvm *kvm)
                kvm_call_hyp(__kvm_flush_vm_context);
        }
 
-       kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
+       kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
        kvm->arch.vmid = kvm_next_vmid;
        kvm_next_vmid++;
 
@@ -1038,7 +1038,7 @@ static void check_kvm_target_cpu(void *ret)
 /**
  * Initialize Hyp-mode and memory mappings on all CPUs.
  */
-int kvm_arch_init(void *opaque)
+int kvm_arch_init(const void *opaque)
 {
        int err;
        int ret, cpu;
index 14a0d988c82cb41ab88acd4d924bf75aa87efce3..7771a7d9e71eb46e571d3253e01c1e97c510cfd2 100644 (file)
 
                .text
 
-/* Prototype: int __clear_user(void *addr, size_t sz)
+/* Prototype: int ___clear_user(void *addr, size_t sz)
  * Purpose  : clear some user memory
  * Params   : addr - user memory address to clear
  *          : sz   - number of bytes to clear
  * Returns  : number of bytes NOT cleared
  */
 ENTRY(__clear_user_std)
-WEAK(__clear_user)
+WEAK(___clear_user)
                stmfd   sp!, {r1, lr}
                mov     r2, #0
                cmp     r1, #4
@@ -44,7 +44,7 @@ WEAK(__clear_user)
 USER(          strnebt r2, [r0])
                mov     r0, #0
                ldmfd   sp!, {r1, pc}
-ENDPROC(__clear_user)
+ENDPROC(___clear_user)
 ENDPROC(__clear_user_std)
 
                .pushsection .fixup,"ax"
index 7a235b9952be04e3ed8acd8892d5ca4d63ee27ff..73a0556648b97a0cf2ac097bd0ec057545a5e7df 100644 (file)
@@ -17,7 +17,7 @@
 /*
  * Prototype:
  *
- *     size_t __copy_from_user(void *to, const void *from, size_t n)
+ *     size_t ___copy_from_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
 
        .text
 
-ENTRY(__copy_from_user)
+ENTRY(___copy_from_user)
 
 #include "copy_template.S"
 
-ENDPROC(__copy_from_user)
+ENDPROC(___copy_from_user)
 
        .pushsection .fixup,"ax"
        .align 0
index 6ee2f6706f869b03c95f30d2b1a1cf7adf9086d9..d1cce7642d6f19773fe12a7377e62961c91f6c6f 100644 (file)
@@ -10,6 +10,7 @@
  *  ASM optimised string functions
  */
 #include <linux/linkage.h>
+#include <linux/const.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
index a9d3db16ecb5738234dc14defe8a4d2f21f60fef..164b08927a8046c8641190cba0e753126cdc45e7 100644 (file)
@@ -17,7 +17,7 @@
 /*
  * Prototype:
  *
- *     size_t __copy_to_user(void *to, const void *from, size_t n)
+ *     size_t ___copy_to_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
        .text
 
 ENTRY(__copy_to_user_std)
-WEAK(__copy_to_user)
+WEAK(___copy_to_user)
 
 #include "copy_template.S"
 
-ENDPROC(__copy_to_user)
+ENDPROC(___copy_to_user)
 ENDPROC(__copy_to_user_std)
 
        .pushsection .fixup,"ax"
index 7d08b43d2c0e496fc704378301b82d3b34d93f3a..f7ca7eaecb2f8a12bec36a4c0df1dce0aa438a6f 100644 (file)
@@ -57,8 +57,8 @@
  *  Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
  */
 
-#define FN_ENTRY       ENTRY(csum_partial_copy_from_user)
-#define FN_EXIT                ENDPROC(csum_partial_copy_from_user)
+#define FN_ENTRY       ENTRY(__csum_partial_copy_from_user)
+#define FN_EXIT                ENDPROC(__csum_partial_copy_from_user)
 
 #include "csumpartialcopygeneric.S"
 
index 312d43eb686a0254543258efdc3185c340bde82c..21d2322efc0b7a459b6d756d2ff6b639a3215c69 100644 (file)
@@ -29,7 +29,7 @@
 /*
  * Default to the loop-based delay implementation.
  */
-struct arm_delay_ops arm_delay_ops = {
+struct arm_delay_ops arm_delay_ops __read_only = {
        .delay          = __loop_delay,
        .const_udelay   = __loop_const_udelay,
        .udelay         = __loop_udelay,
index 3e58d710013c3ad9b377fc76e6dad58f377e88a7..029817c9dfad71da9f7c20f950a9f19c33e97b83 100644 (file)
@@ -136,7 +136,7 @@ out:
 }
 
 unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+___copy_to_user(void __user *to, const void *from, unsigned long n)
 {
        /*
         * This test is stubbed out of the main function above to keep
@@ -190,7 +190,7 @@ out:
        return n;
 }
 
-unsigned long __clear_user(void __user *addr, unsigned long n)
+unsigned long ___clear_user(void __user *addr, unsigned long n)
 {
        /* See rational for this in __copy_to_user() above. */
        if (n < 64)
index ce25e85720fb695eedef9650d097693e1a714dde..3dd78504e4ecdb8035f7c0d8edea6a785a2274da 100644 (file)
@@ -57,7 +57,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
 
        desc->pfn = __phys_to_pfn(base);
        desc->length = length;
-       desc->type = MT_MEMORY_RWX_NONCACHED;
+       desc->type = MT_MEMORY_RW_NONCACHED;
 
        pr_info("sram at 0x%lx of 0x%x mapped at 0x%lx\n",
                base, length, desc->virtual);
index f8e7dcd1705529d52cd3c4c4c3b9cf609ae92e34..17ee9210123ae372503650ba1048002b1b3c0158 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/cpu_pm.h>
 #include <linux/io.h>
+#include <linux/irq.h>
 #include <linux/irqchip/arm-gic.h>
 #include <linux/err.h>
 #include <linux/regulator/machine.h>
@@ -558,8 +559,10 @@ void __init exynos_pm_init(void)
        tmp |= pm_data->wake_disable_mask;
        pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
 
-       exynos_pm_syscore_ops.suspend   = pm_data->pm_suspend;
-       exynos_pm_syscore_ops.resume    = pm_data->pm_resume;
+       pax_open_kernel();
+       *(void **)&exynos_pm_syscore_ops.suspend        = pm_data->pm_suspend;
+       *(void **)&exynos_pm_syscore_ops.resume = pm_data->pm_resume;
+       pax_close_kernel();
 
        register_syscore_ops(&exynos_pm_syscore_ops);
        suspend_set_ops(&exynos_suspend_ops);
index 7f352de2609909ad6c5087aaa203f744876fd4b7..6dc0929d36a4e14cebad2026fd97d75a187e4686 100644 (file)
@@ -27,7 +27,7 @@
 
 #include "keystone.h"
 
-static struct notifier_block platform_nb;
+static notifier_block_no_const platform_nb;
 static unsigned long keystone_dma_pfn_offset __read_mostly;
 
 static int keystone_platform_notifier(struct notifier_block *nb,
index ccef8806bb58771b16a87dc80edc32e585597d29..5dfad8061b8beb1ea99c794343cac7f7d61a07dd 100644 (file)
@@ -164,7 +164,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
 
 /*
  * This ioremap hook is used on Armada 375/38x to ensure that PCIe
- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
  * is needed as a workaround for a deadlock issue between the PCIe
  * interface and the cache controller.
  */
@@ -177,7 +177,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
        mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
 
        if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
-               mtype = MT_UNCACHED;
+               mtype = MT_UNCACHED_RW;
 
        return __arm_ioremap_caller(phys_addr, size, mtype, caller);
 }
index b6443a4e0c78002d6665479a608e7915b1801015..20a0b7405c12b5c7b1f142f4356a4a175d436b4f 100644 (file)
@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
 }
 #endif
 
-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
+struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
        .late_init = n8x0_menelaus_late_init,
 };
 
index 79f49d904a06f208ae5567c7a2992ce5264f5cc0..70bf184e5cefbb40dbc57a0529816cd79d58f8fa 100644 (file)
@@ -86,7 +86,7 @@ struct cpu_pm_ops {
        void (*resume)(void);
        void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
        void (*hotplug_restart)(void);
-};
+} __no_const;
 
 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
 static struct powerdomain *mpuss_pd;
@@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
 {}
 
-struct cpu_pm_ops omap_pm_ops = {
+static struct cpu_pm_ops omap_pm_ops __read_only = {
        .finish_suspend         = default_finish_suspend,
        .resume                 = dummy_cpu_resume,
        .scu_prepare            = dummy_scu_prepare,
index 5305ec7341eca5579398a10b72f263a2fbbe8e0e..6d740456f0a1a4d4fa6f45bb0fd915ddddbb4652 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/device.h>
 #include <linux/smp.h>
 #include <linux/io.h>
+#include <linux/irq.h>
 #include <linux/irqchip/arm-gic.h>
 
 #include <asm/smp_scu.h>
index f961c46453b97c3ba7aa636af837e4cc66801260..4a453dc9527cf545eef919b2b963fe601af03a21 100644 (file)
@@ -344,7 +344,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
        return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata irq_hotplug_notifier = {
+static struct notifier_block irq_hotplug_notifier = {
        .notifier_call = irq_cpu_hotplug_notify,
 };
 
index be9541e1865096bb2c8b81659dfce952cbc98eeb..821805f87f6ef519dc86d7c153084dd68ce84484 100644 (file)
@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
 struct platform_device __init *omap_device_build(const char *pdev_name,
                                                 int pdev_id,
                                                 struct omap_hwmod *oh,
-                                                void *pdata, int pdata_len)
+                                                const void *pdata, int pdata_len)
 {
        struct omap_hwmod *ohs[] = { oh };
 
@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
                                                    int pdev_id,
                                                    struct omap_hwmod **ohs,
-                                                   int oh_cnt, void *pdata,
+                                                   int oh_cnt, const void *pdata,
                                                    int pdata_len)
 {
        int ret = -ENOMEM;
index 78c02b355179894126f5e2e75f835fc1d753060a..c94109a2f122a8360f70a123e777900c5885bb3e 100644 (file)
@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
 /* Core code interface */
 
 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
-                                         struct omap_hwmod *oh, void *pdata,
+                                         struct omap_hwmod *oh, const void *pdata,
                                          int pdata_len);
 
 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
                                         struct omap_hwmod **oh, int oh_cnt,
-                                        void *pdata, int pdata_len);
+                                        const void *pdata, int pdata_len);
 
 struct omap_device *omap_device_alloc(struct platform_device *pdev,
                                      struct omap_hwmod **ohs, int oh_cnt);
index 9025ffffd2dc1d066fcb54a2cf44f2bf9a73525c..35557020717e07183b341f7f78669a9d10fb6458 100644 (file)
@@ -193,10 +193,10 @@ struct omap_hwmod_soc_ops {
        int (*init_clkdm)(struct omap_hwmod *oh);
        void (*update_context_lost)(struct omap_hwmod *oh);
        int (*get_context_lost)(struct omap_hwmod *oh);
-};
+} __no_const;
 
 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
-static struct omap_hwmod_soc_ops soc_ops;
+static struct omap_hwmod_soc_ops soc_ops __read_only;
 
 /* omap_hwmod_list contains all registered struct omap_hwmods */
 static LIST_HEAD(omap_hwmod_list);
index 95fee54c38ab6c83a1c402339f54413b4a770793..cfa9cf1cb2e9c1f80a9fa1114ffccd34ddf8593a 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <asm/pgtable.h>
 
 #include "powerdomain.h"
 
@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
 
 void __init am43xx_powerdomains_init(void)
 {
-       omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
+       pax_open_kernel();
+       *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
+       pax_close_kernel();
        pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
        pwrdm_register_pwrdms(powerdomains_am43xx);
        pwrdm_complete_init();
index ff0a68cf7439c31ac8743939b3c503fe8786d066..b312aa00453e05f84a8cfbd4c3a1671c0662c55f 100644 (file)
@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
        struct omap_hwmod *oh;
        char *oh_name = "wd_timer2";
        char *dev_name = "omap_wdt";
-       struct omap_wd_timer_platform_data pdata;
+       static struct omap_wd_timer_platform_data pdata = {
+               .read_reset_sources = prm_read_reset_sources
+       };
 
        if (!cpu_class_is_omap2() || of_have_populated_dt())
                return 0;
@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
                return -EINVAL;
        }
 
-       pdata.read_reset_sources = prm_read_reset_sources;
-
        pdev = omap_device_build(dev_name, id, oh, &pdata,
                                 sizeof(struct omap_wd_timer_platform_data));
        WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
index 4f25a7c7ca0fed7b74c8aab8b0f4281021f9e175..a81be85a3cfaf48b32faba41c3f7f81f553a83da 100644 (file)
@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
        bool entered_lp2 = false;
 
        if (tegra_pending_sgi())
-               ACCESS_ONCE(abort_flag) = true;
+               ACCESS_ONCE_RW(abort_flag) = true;
 
        cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
 
index ab95f5391a2b631e5cace17bbb176766e7d410bf..4b977a70203cc482c05d557aa7c940605f8f4d10 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/cpu_pm.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/irq.h>
 #include <linux/irqchip/arm-gic.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
index 2cb587b50905af29edbfb914a61ad4e4c9615035..6ddfebf446ba6e5c2570188e0d88344cd6ab65c8 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/irq.h>
 #include <linux/irqchip/arm-gic.h>
 #include <linux/delay.h>
 #include <linux/io.h>
index 2dea8b59d2220e1bacf274bdf7c75c010bbf8728..6499da2d47908562c8b8c630b24cb968ad3d22f2 100644 (file)
@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
        .type           = MT_DEVICE,            \
 }
 
-#define __MEM_DEV_DESC(x, sz)  {               \
-       .virtual        = IO_ADDRESS(x),        \
-       .pfn            = __phys_to_pfn(x),     \
-       .length         = sz,                   \
-       .type           = MT_MEMORY_RWX,                \
-}
-
 extern struct smp_operations ux500_smp_ops;
 extern void ux500_cpu_die(unsigned int cpu);
 
index 52d768ff785711a1d9d2fc384400e754ae8ddbef..5f93180e74f5a418b37d289bfc0d953ae6ebb4e7 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/io.h>
 #include <asm/cacheflush.h>
 #include <asm/smp_scu.h>
+#include <linux/irq.h>
 #include <linux/irqchip/arm-gic.h>
 #include "common.h"
 
index c43c714555661337048b72a5a21a6b5357659567..4f8f7b96d6ccb7138c1d52e9c2e94e6589a1aab3 100644 (file)
@@ -446,6 +446,7 @@ config CPU_32v5
 
 config CPU_32v6
        bool
+       select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
        select TLS_REG_EMUL if !CPU_32v6K && !MMU
 
 config CPU_32v6K
@@ -600,6 +601,7 @@ config CPU_CP15_MPU
 
 config CPU_USE_DOMAINS
        bool
+       depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
        help
          This option enables or disables the use of domain switching
          via the set_fs() function.
@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
 
 config KUSER_HELPERS
        bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
-       depends on MMU
+       depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
        default y
        help
          Warning: disabling this option may break user programs.
@@ -812,7 +814,7 @@ config KUSER_HELPERS
          See Documentation/arm/kernel_user_helpers.txt for details.
 
          However, the fixed address nature of these helpers can be used
-         by ROP (return orientated programming) authors when creating
+         by ROP (Return Oriented Programming) authors when creating
          exploits.
 
          If all of the binaries and libraries which run on your platform
index 2c0c541c60caaafe76943ec3bb65a79da990f5c2..4585df9cf53839f0dface7e93958dcfc44f0212c 100644 (file)
@@ -216,10 +216,12 @@ union offset_union {
 #define __get16_unaligned_check(ins,val,addr)                  \
        do {                                                    \
                unsigned int err = 0, v, a = addr;              \
+               pax_open_userland();                            \
                __get8_unaligned_check(ins,v,a,err);            \
                val =  v << ((BE) ? 8 : 0);                     \
                __get8_unaligned_check(ins,v,a,err);            \
                val |= v << ((BE) ? 0 : 8);                     \
+               pax_close_userland();                           \
                if (err)                                        \
                        goto fault;                             \
        } while (0)
@@ -233,6 +235,7 @@ union offset_union {
 #define __get32_unaligned_check(ins,val,addr)                  \
        do {                                                    \
                unsigned int err = 0, v, a = addr;              \
+               pax_open_userland();                            \
                __get8_unaligned_check(ins,v,a,err);            \
                val =  v << ((BE) ? 24 :  0);                   \
                __get8_unaligned_check(ins,v,a,err);            \
@@ -241,6 +244,7 @@ union offset_union {
                val |= v << ((BE) ?  8 : 16);                   \
                __get8_unaligned_check(ins,v,a,err);            \
                val |= v << ((BE) ?  0 : 24);                   \
+               pax_close_userland();                           \
                if (err)                                        \
                        goto fault;                             \
        } while (0)
@@ -254,6 +258,7 @@ union offset_union {
 #define __put16_unaligned_check(ins,val,addr)                  \
        do {                                                    \
                unsigned int err = 0, v = val, a = addr;        \
+               pax_open_userland();                            \
                __asm__( FIRST_BYTE_16                          \
         ARM(   "1:     "ins"   %1, [%2], #1\n" )               \
         THUMB( "1:     "ins"   %1, [%2]\n"     )               \
@@ -273,6 +278,7 @@ union offset_union {
                "       .popsection\n"                          \
                : "=r" (err), "=&r" (v), "=&r" (a)              \
                : "0" (err), "1" (v), "2" (a));                 \
+               pax_close_userland();                           \
                if (err)                                        \
                        goto fault;                             \
        } while (0)
@@ -286,6 +292,7 @@ union offset_union {
 #define __put32_unaligned_check(ins,val,addr)                  \
        do {                                                    \
                unsigned int err = 0, v = val, a = addr;        \
+               pax_open_userland();                            \
                __asm__( FIRST_BYTE_32                          \
         ARM(   "1:     "ins"   %1, [%2], #1\n" )               \
         THUMB( "1:     "ins"   %1, [%2]\n"     )               \
@@ -315,6 +322,7 @@ union offset_union {
                "       .popsection\n"                          \
                : "=r" (err), "=&r" (v), "=&r" (a)              \
                : "0" (err), "1" (v), "2" (a));                 \
+               pax_close_userland();                           \
                if (err)                                        \
                        goto fault;                             \
        } while (0)
index 5e65ca8dea62cf3f38d48f2ad8a25425ab0ce6de..879e7b31fa1b53cedbadadc2319708dc3cb3bb8b 100644 (file)
@@ -42,7 +42,7 @@ struct l2c_init_data {
        void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
        void (*save)(void __iomem *);
        struct outer_cache_fns outer_cache;
-};
+} __do_const;
 
 #define CACHE_LINE_SIZE                32
 
index 845769e413323120b6d7b4afed7640746413bc98..4278fd76a2f13aede04bad44cf644511a590d872 100644 (file)
@@ -43,7 +43,7 @@
 #define NUM_USER_ASIDS         ASID_FIRST_VERSION
 
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
 static DEFINE_PER_CPU(atomic64_t, active_asids);
@@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 {
        static u32 cur_idx = 1;
        u64 asid = atomic64_read(&mm->context.id);
-       u64 generation = atomic64_read(&asid_generation);
+       u64 generation = atomic64_read_unchecked(&asid_generation);
 
        if (asid != 0) {
                /*
@@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
         */
        asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
        if (asid == NUM_USER_ASIDS) {
-               generation = atomic64_add_return(ASID_FIRST_VERSION,
+               generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
                                                 &asid_generation);
                flush_context(cpu);
                asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
@@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
        cpu_set_reserved_ttbr0();
 
        asid = atomic64_read(&mm->context.id);
-       if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+       if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
            && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
                goto switch_mm_fastpath;
 
        raw_spin_lock_irqsave(&cpu_asid_lock, flags);
        /* Check that our ASID belongs to the current generation. */
        asid = atomic64_read(&mm->context.id);
-       if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
+       if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
                asid = new_context(mm, cpu);
                atomic64_set(&mm->context.id, asid);
        }
index a982dc3190dfb3a841bbe196f76f58cdbb554f84..2d9f5f7548cdf4fe182b989adfb0ad61fc47e63d 100644 (file)
@@ -25,6 +25,7 @@
 #include <asm/system_misc.h>
 #include <asm/system_info.h>
 #include <asm/tlbflush.h>
+#include <asm/sections.h>
 
 #include "fault.h"
 
@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
        if (fixup_exception(regs))
                return;
 
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       if (addr < TASK_SIZE) {
+               if (current->signal->curr_ip)
+                       printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
+                                       from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
+               else
+                       printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
+                                       from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
+       }
+#endif
+
+#ifdef CONFIG_PAX_KERNEXEC
+       if ((fsr & FSR_WRITE) &&
+           (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
+            (MODULES_VADDR <= addr && addr < MODULES_END)))
+       {
+               if (current->signal->curr_ip)
+                       printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
+                                       from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
+               else
+                       printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
+                                       from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
+       }
+#endif
+
        /*
         * No handler, we'll have to terminate things with extreme prejudice.
         */
@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
        }
 #endif
 
+#ifdef CONFIG_PAX_PAGEEXEC
+       if (fsr & FSR_LNX_PF) {
+               pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
+               do_group_exit(SIGKILL);
+       }
+#endif
+
        tsk->thread.address = addr;
        tsk->thread.error_code = fsr;
        tsk->thread.trap_no = 14;
@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 }
 #endif                                 /* CONFIG_MMU */
 
+#ifdef CONFIG_PAX_PAGEEXEC
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+       long i;
+
+       printk(KERN_ERR "PAX: bytes at PC: ");
+       for (i = 0; i < 20; i++) {
+               unsigned char c;
+               if (get_user(c, (__force unsigned char __user *)pc+i))
+                       printk(KERN_CONT "?? ");
+               else
+                       printk(KERN_CONT "%02x ", c);
+       }
+       printk("\n");
+
+       printk(KERN_ERR "PAX: bytes at SP-4: ");
+       for (i = -1; i < 20; i++) {
+               unsigned long c;
+               if (get_user(c, (__force unsigned long __user *)sp+i))
+                       printk(KERN_CONT "???????? ");
+               else
+                       printk(KERN_CONT "%08lx ", c);
+       }
+       printk("\n");
+}
+#endif
+
 /*
  * First Level Translation Fault Handler
  *
@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
        struct siginfo info;
 
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       if (addr < TASK_SIZE && is_domain_fault(fsr)) {
+               if (current->signal->curr_ip)
+                       printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
+                                       from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
+               else
+                       printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
+                                       from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
+               goto die;
+       }
+#endif
+
        if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
                return;
 
+die:
        pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
                inf->name, fsr, addr);
 
@@ -573,15 +646,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
        ifsr_info[nr].name = name;
 }
 
+asmlinkage int sys_sigreturn(struct pt_regs *regs);
+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
+
 asmlinkage void __exception
 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
 {
        const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
        struct siginfo info;
+       unsigned long pc = instruction_pointer(regs);
+
+       if (user_mode(regs)) {
+               unsigned long sigpage = current->mm->context.sigpage;
+
+               if (sigpage <= pc && pc < sigpage + 7*4) {
+                       if (pc < sigpage + 3*4)
+                               sys_sigreturn(regs);
+                       else
+                               sys_rt_sigreturn(regs);
+                       return;
+               }
+               if (pc == 0xffff0f60UL) {
+                       /*
+                        * PaX: __kuser_cmpxchg64 emulation
+                        */
+                       // TODO
+                       //regs->ARM_pc = regs->ARM_lr;
+                       //return;
+               }
+               if (pc == 0xffff0fa0UL) {
+                       /*
+                        * PaX: __kuser_memory_barrier emulation
+                        */
+                       // dmb(); implied by the exception
+                       regs->ARM_pc = regs->ARM_lr;
+                       return;
+               }
+               if (pc == 0xffff0fc0UL) {
+                       /*
+                        * PaX: __kuser_cmpxchg emulation
+                        */
+                       // TODO
+                       //long new;
+                       //int op;
+
+                       //op = FUTEX_OP_SET << 28;
+                       //new = futex_atomic_op_inuser(op, regs->ARM_r2);
+                       //regs->ARM_r0 = old != new;
+                       //regs->ARM_pc = regs->ARM_lr;
+                       //return;
+               }
+               if (pc == 0xffff0fe0UL) {
+                       /*
+                        * PaX: __kuser_get_tls emulation
+                        */
+                       regs->ARM_r0 = current_thread_info()->tp_value[0];
+                       regs->ARM_pc = regs->ARM_lr;
+                       return;
+               }
+       }
+
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+       else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
+               if (current->signal->curr_ip)
+                       printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
+                                       from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
+                                       pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
+               else
+                       printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
+                                       from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
+                                       pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
+               goto die;
+       }
+#endif
+
+#ifdef CONFIG_PAX_REFCOUNT
+       if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
+#ifdef CONFIG_THUMB2_KERNEL
+               unsigned short bkpt;
+
+               if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
+#else
+               unsigned int bkpt;
+
+               if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
+#endif
+                       current->thread.error_code = ifsr;
+                       current->thread.trap_no = 0;
+                       pax_report_refcount_overflow(regs);
+                       fixup_exception(regs);
+                       return;
+               }
+       }
+#endif
 
        if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
                return;
 
+die:
        pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
                inf->name, ifsr, addr);
 
index cf08bdfbe0d6b6168970e7962341dd407c2823fd..772656c607d526dcfd3974950b7a30f94ffe5727 100644 (file)
@@ -3,6 +3,7 @@
 
 /*
  * Fault status register encodings.  We steal bit 31 for our own purposes.
+ * Set when the FSR value is from an instruction fault.
  */
 #define FSR_LNX_PF             (1 << 31)
 #define FSR_WRITE              (1 << 11)
@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
 }
 #endif
 
+/* valid for LPAE and !LPAE */
+static inline int is_xn_fault(unsigned int fsr)
+{
+       return ((fsr_fs(fsr) & 0x3c) == 0xc);
+}
+
+static inline int is_domain_fault(unsigned int fsr)
+{
+       return ((fsr_fs(fsr) & 0xD) == 0x9);
+}
+
 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
 unsigned long search_exception_table(unsigned long addr);
 
index 2495c8cb47baaddcdb15a209406a9e4b9b4f1f25..415b7fce270b58b5508b3230ec97c6ca1b659537 100644 (file)
@@ -758,7 +758,46 @@ void free_tcmmem(void)
 {
 #ifdef CONFIG_HAVE_TCM
        extern char __tcm_start, __tcm_end;
+#endif
+
+#ifdef CONFIG_PAX_KERNEXEC
+       unsigned long addr;
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       int cpu_arch = cpu_architecture();
+       unsigned int cr = get_cr();
+
+       if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
+               /* make pages tables, etc before .text NX */
+               for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
+                       pgd = pgd_offset_k(addr);
+                       pud = pud_offset(pgd, addr);
+                       pmd = pmd_offset(pud, addr);
+                       __section_update(pmd, addr, PMD_SECT_XN);
+               }
+               /* make init NX */
+               for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
+                       pgd = pgd_offset_k(addr);
+                       pud = pud_offset(pgd, addr);
+                       pmd = pmd_offset(pud, addr);
+                       __section_update(pmd, addr, PMD_SECT_XN);
+               }
+               /* make kernel code/rodata RX */
+               for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
+                       pgd = pgd_offset_k(addr);
+                       pud = pud_offset(pgd, addr);
+                       pmd = pmd_offset(pud, addr);
+#ifdef CONFIG_ARM_LPAE
+                       __section_update(pmd, addr, PMD_SECT_RDONLY);
+#else
+                       __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
+#endif
+               }
+       }
+#endif
 
+#ifdef CONFIG_HAVE_TCM
        poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
        free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
 #endif
index d1e5ad7ab3bc6ae9b3a538a3601bccf929b0e998..84dcbf2693d613ced690e374c43006ba131f12bd 100644 (file)
@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
        unsigned int mtype;
 
        if (cached)
-               mtype = MT_MEMORY_RWX;
+               mtype = MT_MEMORY_RX;
        else
-               mtype = MT_MEMORY_RWX_NONCACHED;
+               mtype = MT_MEMORY_RX_NONCACHED;
 
        return __arm_ioremap_caller(phys_addr, size, mtype,
                        __builtin_return_address(0));
index 5e85ed371364c17657be7d7155139a5e3f536437..b10a7eded538b40a3a42c351c8a72bb6f5c1a10e 100644 (file)
@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        struct vm_area_struct *vma;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();
+       unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
        struct vm_unmapped_area_info info;
 
        /*
@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        if (len > TASK_SIZE)
                return -ENOMEM;
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                        addr = PAGE_ALIGN(addr);
 
                vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+               if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
                        return addr;
        }
 
@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        info.high_limit = TASK_SIZE;
        info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
+       info.threadstack_offset = offset;
        return vm_unmapped_area(&info);
 }
 
@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        unsigned long addr = addr0;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();
+       unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
        struct vm_unmapped_area_info info;
 
        /*
@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                return addr;
        }
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        /* requesting a specific address */
        if (addr) {
                if (do_align)
@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                else
                        addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                               (!vma || addr + len <= vma->vm_start))
+               if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
                        return addr;
        }
 
@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        info.high_limit = mm->mmap_base;
        info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
+       info.threadstack_offset = offset;
        addr = vm_unmapped_area(&info);
 
        /*
@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 {
        unsigned long random_factor = 0UL;
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        /* 8 bits of randomness in 20 address space bits */
        if ((current->flags & PF_RANDOMIZE) &&
            !(current->personality & ADDR_NO_RANDOMIZE))
@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 
        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+
+#ifdef CONFIG_PAX_RANDMMAP
+               if (mm->pax_flags & MF_PAX_RANDMMAP)
+                       mm->mmap_base += mm->delta_mmap;
+#endif
+
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                mm->mmap_base = mmap_base(random_factor);
+
+#ifdef CONFIG_PAX_RANDMMAP
+               if (mm->pax_flags & MF_PAX_RANDMMAP)
+                       mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
+#endif
+
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
 }
index 4e6ef896c6195db73f770957e9df619a0be05e06..21c27f2ebdb5e6a4261dbbbcfbf74b8a3b64687e 100644 (file)
 #include "mm.h"
 #include "tcm.h"
 
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+void modify_domain(unsigned int dom, unsigned int type)
+{
+       struct thread_info *thread = current_thread_info();
+       unsigned int domain = thread->cpu_domain;
+       /*
+        * DOMAIN_MANAGER might be defined to some other value,
+        * use the arch-defined constant
+        */
+       domain &= ~domain_val(dom, 3);
+       thread->cpu_domain = domain | domain_val(dom, type);
+       set_domain(thread->cpu_domain);
+}
+EXPORT_SYMBOL(modify_domain);
+#endif
+
 /*
  * empty_zero_page is a special page that is used for
  * zero-initialized data and COW.
@@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
 #define PROT_PTE_S2_DEVICE     PROT_PTE_DEVICE
 #define PROT_SECT_DEVICE       PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
-static struct mem_type mem_types[] = {
+#ifdef CONFIG_PAX_KERNEXEC
+#define L_PTE_KERNEXEC         L_PTE_RDONLY
+#define PMD_SECT_KERNEXEC      PMD_SECT_RDONLY
+#else
+#define L_PTE_KERNEXEC         L_PTE_DIRTY
+#define PMD_SECT_KERNEXEC      PMD_SECT_AP_WRITE
+#endif
+
+static struct mem_type mem_types[] __read_only = {
        [MT_DEVICE] = {           /* Strongly ordered / ARMv6 shared device */
                .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
                                  L_PTE_SHARED,
@@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
                .prot_sect      = PROT_SECT_DEVICE,
                .domain         = DOMAIN_IO,
        },
-       [MT_UNCACHED] = {
+       [MT_UNCACHED_RW] = {
                .prot_pte       = PROT_PTE_DEVICE,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PMD_TYPE_SECT | PMD_SECT_XN,
                .domain         = DOMAIN_IO,
        },
-       [MT_CACHECLEAN] = {
-               .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+       [MT_CACHECLEAN_RO] = {
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
                .domain    = DOMAIN_KERNEL,
        },
 #ifndef CONFIG_ARM_LPAE
-       [MT_MINICLEAN] = {
-               .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
+       [MT_MINICLEAN_RO] = {
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
                .domain    = DOMAIN_KERNEL,
        },
 #endif
@@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_RDONLY,
                .prot_l1   = PMD_TYPE_TABLE,
-               .domain    = DOMAIN_USER,
+               .domain    = DOMAIN_VECTORS,
        },
        [MT_HIGH_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_USER | L_PTE_RDONLY,
                .prot_l1   = PMD_TYPE_TABLE,
-               .domain    = DOMAIN_USER,
+               .domain    = DOMAIN_VECTORS,
        },
-       [MT_MEMORY_RWX] = {
+       [__MT_MEMORY_RWX] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
@@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
-       [MT_ROM] = {
-               .prot_sect = PMD_TYPE_SECT,
+       [MT_MEMORY_RX] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
+               .prot_l1   = PMD_TYPE_TABLE,
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
+               .domain    = DOMAIN_KERNEL,
+       },
+       [MT_ROM_RX] = {
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
                .domain    = DOMAIN_KERNEL,
        },
-       [MT_MEMORY_RWX_NONCACHED] = {
+       [MT_MEMORY_RW_NONCACHED] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_MT_BUFFERABLE,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
+       [MT_MEMORY_RX_NONCACHED] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
+                               L_PTE_MT_BUFFERABLE,
+               .prot_l1   = PMD_TYPE_TABLE,
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
+               .domain    = DOMAIN_KERNEL,
+       },
        [MT_MEMORY_RW_DTCM] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_XN,
@@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
                .domain    = DOMAIN_KERNEL,
        },
-       [MT_MEMORY_RWX_ITCM] = {
-               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+       [MT_MEMORY_RX_ITCM] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
                .prot_l1   = PMD_TYPE_TABLE,
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MEMORY_RW_SO] = {
@@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
                 * Mark cache clean areas and XIP ROM read only
                 * from SVC mode and no access from userspace.
                 */
-               mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
-               mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
-               mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+               mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+#ifdef CONFIG_PAX_KERNEXEC
+               mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+               mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+               mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+#endif
+               mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+               mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 #endif
 
                /*
@@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
                        mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
                        mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
                        mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
-                       mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
-                       mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
+                       mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
+                       mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
                        mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
                        mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
+                       mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
+                       mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
                        mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
-                       mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
-                       mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
+                       mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
+                       mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
+                       mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
+                       mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
                }
        }
 
@@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
        if (cpu_arch >= CPU_ARCH_ARMv6) {
                if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
                        /* Non-cacheable Normal is XCB = 001 */
-                       mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
+                       mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
+                               PMD_SECT_BUFFERED;
+                       mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
                                PMD_SECT_BUFFERED;
                } else {
                        /* For both ARMv6 and non-TEX-remapping ARMv7 */
-                       mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
+                       mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
+                               PMD_SECT_TEX(1);
+                       mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
                                PMD_SECT_TEX(1);
                }
        } else {
-               mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+               mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+               mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
        }
 
 #ifdef CONFIG_ARM_LPAE
@@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
        user_pgprot |= PTE_EXT_PXN;
 #endif
 
+       user_pgprot |= __supported_pte_mask;
+
        for (i = 0; i < 16; i++) {
                pteval_t v = pgprot_val(protection_map[i]);
                protection_map[i] = __pgprot(v | user_pgprot);
@@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
 
        mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
-       mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
-       mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
+       mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
+       mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
        mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
        mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
+       mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
+       mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
        mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
-       mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
-       mem_types[MT_ROM].prot_sect |= cp->pmd;
+       mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
+       mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
+       mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
 
        switch (cp->pmd) {
        case PMD_SECT_WT:
-               mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
+               mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
                break;
        case PMD_SECT_WB:
        case PMD_SECT_WBWA:
-               mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
+               mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
                break;
        }
        pr_info("Memory policy: %sData cache %s\n",
@@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
                return;
        }
 
-       if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
+       if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
            md->virtual >= PAGE_OFFSET &&
            (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
                pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
@@ -1218,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
  * called function.  This means you can't use any function or debugging
  * method which may touch any device, otherwise the kernel _will_ crash.
  */
+
+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
+
 static void __init devicemaps_init(const struct machine_desc *mdesc)
 {
        struct map_desc map;
        unsigned long addr;
-       void *vectors;
 
-       /*
-        * Allocate the vector page early.
-        */
-       vectors = early_alloc(PAGE_SIZE * 2);
-
-       early_trap_init(vectors);
+       early_trap_init(&vectors);
 
        for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
                pmd_clear(pmd_off_k(addr));
@@ -1242,7 +1296,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
        map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
        map.virtual = MODULES_VADDR;
        map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
-       map.type = MT_ROM;
+       map.type = MT_ROM_RX;
        create_mapping(&map);
 #endif
 
@@ -1253,14 +1307,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
        map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
        map.virtual = FLUSH_BASE;
        map.length = SZ_1M;
-       map.type = MT_CACHECLEAN;
+       map.type = MT_CACHECLEAN_RO;
        create_mapping(&map);
 #endif
 #ifdef FLUSH_BASE_MINICACHE
        map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
        map.virtual = FLUSH_BASE_MINICACHE;
        map.length = SZ_1M;
-       map.type = MT_MINICLEAN;
+       map.type = MT_MINICLEAN_RO;
        create_mapping(&map);
 #endif
 
@@ -1269,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
         * location (0xffff0000).  If we aren't using high-vectors, also
         * create a mapping at the low-vectors virtual address.
         */
-       map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+       map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
        map.virtual = 0xffff0000;
        map.length = PAGE_SIZE;
 #ifdef CONFIG_KUSER_HELPERS
@@ -1329,8 +1383,10 @@ static void __init kmap_init(void)
 static void __init map_lowmem(void)
 {
        struct memblock_region *reg;
+#ifndef CONFIG_PAX_KERNEXEC
        phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
        phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+#endif
 
        /* Map all the lowmem memory banks. */
        for_each_memblock(memory, reg) {
@@ -1343,11 +1399,48 @@ static void __init map_lowmem(void)
                if (start >= end)
                        break;
 
+#ifdef CONFIG_PAX_KERNEXEC
+               map.pfn = __phys_to_pfn(start);
+               map.virtual = __phys_to_virt(start);
+               map.length = end - start;
+
+               if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
+                       struct map_desc kernel;
+                       struct map_desc initmap;
+
+                       /* when freeing initmem we will make this RW */
+                       initmap.pfn = __phys_to_pfn(__pa(__init_begin));
+                       initmap.virtual = (unsigned long)__init_begin;
+                       initmap.length = _sdata - __init_begin;
+                       initmap.type = __MT_MEMORY_RWX;
+                       create_mapping(&initmap);
+
+                       /* when freeing initmem we will make this RX */
+                       kernel.pfn = __phys_to_pfn(__pa(_stext));
+                       kernel.virtual = (unsigned long)_stext;
+                       kernel.length = __init_begin - _stext;
+                       kernel.type = __MT_MEMORY_RWX;
+                       create_mapping(&kernel);
+
+                       if (map.virtual < (unsigned long)_stext) {
+                               map.length = (unsigned long)_stext - map.virtual;
+                               map.type = __MT_MEMORY_RWX;
+                               create_mapping(&map);
+                       }
+
+                       map.pfn = __phys_to_pfn(__pa(_sdata));
+                       map.virtual = (unsigned long)_sdata;
+                       map.length = end - __pa(_sdata);
+               }
+
+               map.type = MT_MEMORY_RW;
+               create_mapping(&map);
+#else
                if (end < kernel_x_start) {
                        map.pfn = __phys_to_pfn(start);
                        map.virtual = __phys_to_virt(start);
                        map.length = end - start;
-                       map.type = MT_MEMORY_RWX;
+                       map.type = __MT_MEMORY_RWX;
 
                        create_mapping(&map);
                } else if (start >= kernel_x_end) {
@@ -1371,7 +1464,7 @@ static void __init map_lowmem(void)
                        map.pfn = __phys_to_pfn(kernel_x_start);
                        map.virtual = __phys_to_virt(kernel_x_start);
                        map.length = kernel_x_end - kernel_x_start;
-                       map.type = MT_MEMORY_RWX;
+                       map.type = __MT_MEMORY_RWX;
 
                        create_mapping(&map);
 
@@ -1384,6 +1477,7 @@ static void __init map_lowmem(void)
                                create_mapping(&map);
                        }
                }
+#endif
        }
 }
 
index e1268f90502682c75dfd7a98e6ed15272ea8d512..a9755a78576fff0a46da75c387b072103e8314d2 100644 (file)
@@ -20,6 +20,7 @@
 #include <asm/cacheflush.h>
 #include <asm/hwcap.h>
 #include <asm/opcodes.h>
+#include <asm/pgtable.h>
 
 #include "bpf_jit_32.h"
 
@@ -71,7 +72,11 @@ struct jit_ctx {
 #endif
 };
 
+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
+int bpf_jit_enable __read_only;
+#else
 int bpf_jit_enable __read_mostly;
+#endif
 
 static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
 {
@@ -178,8 +183,10 @@ static void jit_fill_hole(void *area, unsigned int size)
 {
        u32 *ptr;
        /* We are guaranteed to have aligned memory. */
+       pax_open_kernel();
        for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
                *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
+       pax_close_kernel();
 }
 
 static void build_prologue(struct jit_ctx *ctx)
index 5b217f460f18ce4d0cd5a2499f17eaa264a25fec..c23f40ecbc94ff628ea911ed9e829c91631c718c 100644 (file)
@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
                .virtual        = IOP3XX_PERIPHERAL_VIRT_BASE,
                .pfn            = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
                .length         = IOP3XX_PERIPHERAL_SIZE,
-               .type           = MT_UNCACHED,
+               .type           = MT_UNCACHED_RW,
         },
 };
 
index a5bc92d7e4765b81315379c41618fff7f84cbec2..0bb4730951dc3ff040c5503676da660b17157995 100644 (file)
@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
         * Looks like we need to preserve some bootloader code at the
         * beginning of SRAM for jumping to flash for reboot to work...
         */
+       pax_open_kernel();
        memset_io(omap_sram_base + omap_sram_skip, 0,
                  omap_sram_size - omap_sram_skip);
+       pax_close_kernel();
 }
index ce6d7634b6cb0d8f6be56046ca55638ca8bd0747..cfea9171bdbef9b2cfda33d51f1e1e32fc3471c3 100644 (file)
@@ -47,7 +47,7 @@ struct samsung_dma_ops {
        int (*started)(unsigned ch);
        int (*flush)(unsigned ch);
        int (*stop)(unsigned ch);
-};
+} __no_const;
 
 extern void *samsung_dmadev_get_ops(void);
 extern void *s3c_dma_get_ops(void);
index a5abb0062d6e943d67ca4ff5479e887194482367..9cbca9af914ed4c62b0b3791478d6c81702c1211 100644 (file)
@@ -44,7 +44,7 @@
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
-       ACCESS_ONCE(*p) = (v);                                          \
+       ACCESS_ONCE_RW(*p) = (v);                                       \
 } while (0)
 
 #define smp_load_acquire(p)                                            \
index 09da25bc596fd0bdccdf03e94b37a4c81e0cc633..3ea0d647a5731c4826bddceb08450eced47dd76c 100644 (file)
@@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
 {
        switch (size) {
        case 1:
-               ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
+               ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
                break;
        case 2:
-               ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
+               ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
                break;
        case 4:
-               ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
+               ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
                break;
        case 8:
-               ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
+               ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
                break;
        default:
                BUILD_BUG();
index 3bf8f4e99a511c67a3a2d9c4a739929cedd5889f..5dd54914eee1797ef0b93e3ab93e9ec4afe59482 100644 (file)
@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
        flag;                                                           \
 })
 
+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
 #define access_ok(type, addr, size)    __range_ok(addr, size)
 #define user_addr_max                  get_fs
 
index c3a58a189a914423bb5327ca0c6e487b564aad56..78fbf54a9aa342567beaec79fd562fed060092a7 100644 (file)
@@ -1,8 +1,10 @@
 #ifndef __ASM_AVR32_CACHE_H
 #define __ASM_AVR32_CACHE_H
 
+#include <linux/const.h>
+
 #define L1_CACHE_SHIFT 5
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 
 /*
  * Memory returned by kmalloc() may be used for DMA, so we must make
index d232888b99d5b1e210a70627413244d1e78b8ac7..87c8df102a13a4d8f73da84db57efde3ddbae213 100644 (file)
@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
    the loader.  We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk.  */
 
-#define ELF_ET_DYN_BASE         (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE                (TASK_SIZE / 3 * 2)
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE    0x00001000UL
+
+#define PAX_DELTA_MMAP_LEN     15
+#define PAX_DELTA_STACK_LEN    15
+#endif
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports.  This could be done in user space,
index 479330b89796fe1aeefc116da4f6727fab494e64..53717a870b032a9360ac4a37d24b67c4ef22f0ca 100644 (file)
@@ -2,9 +2,9 @@
 #define __ASM_AVR32_KMAP_TYPES_H
 
 #ifdef CONFIG_DEBUG_HIGHMEM
-# define KM_TYPE_NR 29
+# define KM_TYPE_NR 30
 #else
-# define KM_TYPE_NR 14
+# define KM_TYPE_NR 15
 #endif
 
 #endif /* __ASM_AVR32_KMAP_TYPES_H */
index d223a8b57c1eaad282289e75089654153ab598d6..69c5210f7215f0a33fa1cbb3aef3868e24b36715 100644 (file)
@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
 
 int exception_trace = 1;
 
+#ifdef CONFIG_PAX_PAGEEXEC
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+       unsigned long i;
+
+       printk(KERN_ERR "PAX: bytes at PC: ");
+       for (i = 0; i < 20; i++) {
+               unsigned char c;
+               if (get_user(c, (unsigned char *)pc+i))
+                       printk(KERN_CONT "???????? ");
+               else
+                       printk(KERN_CONT "%02x ", c);
+       }
+       printk("\n");
+}
+#endif
+
 /*
  * This routine handles page faults. It determines the address and the
  * problem, and then passes it off to one of the appropriate routines.
@@ -178,6 +195,16 @@ bad_area:
        up_read(&mm->mmap_sem);
 
        if (user_mode(regs)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+               if (mm->pax_flags & MF_PAX_PAGEEXEC) {
+                       if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
+                               pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
+                               do_group_exit(SIGKILL);
+                       }
+               }
+#endif
+
                if (exception_trace && printk_ratelimit())
                        printk("%s%s[%d]: segfault at %08lx pc %08lx "
                               "sp %08lx ecr %lu\n",
index 568885a2c2862bc12699207515650d5342ddb6fd..f8008df2eff8c6a410b6c17b27d88c744331745e 100644 (file)
@@ -7,6 +7,7 @@
 #ifndef __ARCH_BLACKFIN_CACHE_H
 #define __ARCH_BLACKFIN_CACHE_H
 
+#include <linux/const.h>
 #include <linux/linkage.h>     /* for asmlinkage */
 
 /*
@@ -14,7 +15,7 @@
  * Blackfin loads 32 bytes for cache
  */
 #define L1_CACHE_SHIFT 5
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES        L1_CACHE_BYTES
 
 #define ARCH_DMA_MINALIGN      L1_CACHE_BYTES
index aea27184d2d26645a14bda4e75e58bd2aeaaf7db..3639a60b765fa402cecd26a9e21e1ee334b92fa4 100644 (file)
@@ -1,8 +1,9 @@
 #ifndef _ASM_ARCH_CACHE_H
 #define _ASM_ARCH_CACHE_H
 
+#include <linux/const.h>
 /* Etrax 100LX have 32-byte cache-lines. */
-#define L1_CACHE_BYTES 32
 #define L1_CACHE_SHIFT 5
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #endif /* _ASM_ARCH_CACHE_H */
index 7caf25d58e6b6e3b76f75251e0831e1251ced43a..ee65ac530f62648ef60eaeb9163ad3e6e8b8d3d3 100644 (file)
@@ -1,11 +1,12 @@
 #ifndef _ASM_CRIS_ARCH_CACHE_H
 #define _ASM_CRIS_ARCH_CACHE_H
 
+#include <linux/const.h>
 #include <arch/hwregs/dma.h>
 
 /* A cache-line is 32 bytes. */
-#define L1_CACHE_BYTES 32
 #define L1_CACHE_SHIFT 5
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
index 102190a61d65a1fb28f4775309a167355cebcca7..5334cea7c408d70a13d6fbf8b905ceae0ab8804f 100644 (file)
@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
 #define atomic64_cmpxchg(v, old, new)  (__cmpxchg_64(old, new, &(v)->counter))
 #define atomic64_xchg(v, new)          (__xchg_64(new, &(v)->counter))
 
+#define atomic64_read_unchecked(v)             atomic64_read(v)
+#define atomic64_set_unchecked(v, i)           atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v)           atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v)    atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v)           atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v)              atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v)       atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v)              atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n)    atomic64_cmpxchg((v), (o), (n))
+
 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
        int c, old;
index 2797163b8f4f12cb543495664610de65376b1319..c2a401df9dc63d9734cad4d6fb9d2a19fe817030 100644 (file)
 #ifndef __ASM_CACHE_H
 #define __ASM_CACHE_H
 
+#include <linux/const.h>
 
 /* bytes per L1 cache line */
 #define L1_CACHE_SHIFT         (CONFIG_FRV_L1_CACHE_SHIFT)
-#define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES         (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define __cacheline_aligned    __attribute__((aligned(L1_CACHE_BYTES)))
 #define ____cacheline_aligned  __attribute__((aligned(L1_CACHE_BYTES)))
index 43901f2209637b2aeea32905389f62e2217237bc..0d8b8656663dd5f9fc0cec492b2a42535c7049bc 100644 (file)
@@ -2,6 +2,6 @@
 #ifndef _ASM_KMAP_TYPES_H
 #define _ASM_KMAP_TYPES_H
 
-#define KM_TYPE_NR 17
+#define KM_TYPE_NR 18
 
 #endif
index 836f14707a627f156343154f359ac1f7758e9fd4..4cf23f5973fbf53cdf62bf81a25dd530531d415f 100644 (file)
@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 {
        struct vm_area_struct *vma;
        struct vm_unmapped_area_info info;
+       unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
 
        if (len > TASK_SIZE)
                return -ENOMEM;
@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(current->mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+               if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
                        goto success;
        }
 
@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
        info.high_limit = (current->mm->start_stack - 0x00200000);
        info.align_mask = 0;
        info.align_offset = 0;
+       info.threadstack_offset = offset;
        addr = vm_unmapped_area(&info);
        if (!(addr & ~PAGE_MASK))
                goto success;
index 69952c18420708f4f5328021c7988aa1f285ed64..4fa2908f26242b22304c79cb8fbf965ccd145c0d 100644 (file)
 #ifndef __ASM_CACHE_H
 #define __ASM_CACHE_H
 
+#include <linux/const.h>
+
 /* Bytes per L1 cache line */
-#define L1_CACHE_SHIFT         (5)
-#define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_SHIFT         5
+#define L1_CACHE_BYTES         (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define ARCH_DMA_MINALIGN      L1_CACHE_BYTES
 
index 074e52bf815c36fdb813816d6ff3feaa0b3b332e..76afdac1ec65bfa7a275374cc602793593d75779 100644 (file)
@@ -548,6 +548,7 @@ source "drivers/sn/Kconfig"
 config KEXEC
        bool "kexec system call"
        depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
+       depends on !GRKERNSEC_KMEM
        help
          kexec is a system call that implements the ability to shutdown your
          current kernel, and to start another kernel.  It is like a reboot
index 970d0bd99621b32eef163debccd8c2d41a970250..e750b9bcf847f90b1af932b622cfafd914e8d470 100644 (file)
@@ -98,5 +98,6 @@ endef
 archprepare: make_nr_irqs_h FORCE
 PHONY += make_nr_irqs_h FORCE
 
+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
 make_nr_irqs_h: FORCE
        $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
index 0bf03501fe5ca6af6d63173743ad52164cb099b5..2ad19571a0ab67cbb7841cb51a6dd62fa40a0434 100644 (file)
@@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 #define atomic64_inc(v)                        atomic64_add(1, (v))
 #define atomic64_dec(v)                        atomic64_sub(1, (v))
 
+#define atomic64_read_unchecked(v)             atomic64_read(v)
+#define atomic64_set_unchecked(v, i)           atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v)           atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v)    atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v)           atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v)              atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v)       atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v)              atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n)    atomic64_cmpxchg((v), (o), (n))
+
 #endif /* _ASM_IA64_ATOMIC_H */
index f6769eb2bbf9b5ff8774017ac0eb332fcc7309d9..1cdb590924efcfd0f46016da366105e471a29350 100644 (file)
@@ -66,7 +66,7 @@
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
-       ACCESS_ONCE(*p) = (v);                                          \
+       ACCESS_ONCE_RW(*p) = (v);                                       \
 } while (0)
 
 #define smp_load_acquire(p)                                            \
index 988254a7d34944d5e0cc8f22bb66823af349c86f..e1ee8855a064d9054f5b8fbdbc11b857440c66b8 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _ASM_IA64_CACHE_H
 #define _ASM_IA64_CACHE_H
 
+#include <linux/const.h>
 
 /*
  * Copyright (C) 1998-2000 Hewlett-Packard Co
@@ -9,7 +10,7 @@
 
 /* Bytes per L1 (data) cache line.  */
 #define L1_CACHE_SHIFT         CONFIG_IA64_L1_CACHE_SHIFT
-#define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES         (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #ifdef CONFIG_SMP
 # define SMP_CACHE_SHIFT       L1_CACHE_SHIFT
index 5a83c5cc3dc8779e8411238eb651bbc62764b055..4d7f553aa9164ae3c64ec42b839254a0df00572a 100644 (file)
  */
 #define ELF_ET_DYN_BASE                (TASK_UNMAPPED_BASE + 0x800000000UL)
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE    (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
+
+#define PAX_DELTA_MMAP_LEN     (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
+#define PAX_DELTA_STACK_LEN    (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
+#endif
+
 #define PT_IA_64_UNWIND                0x70000001
 
 /* IA-64 relocations: */
index 5767cdfc08db8b671e2536bb093fbe4a921e47c7..7462574eabda1f3ba3de15a3617621220c4d1e0c 100644 (file)
@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
        pgd_val(*pgd_entry) = __pa(pud);
 }
 
+static inline void
+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
+{
+       pgd_populate(mm, pgd_entry, pud);
+}
+
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
        return quicklist_alloc(0, GFP_KERNEL, NULL);
@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
        pud_val(*pud_entry) = __pa(pmd);
 }
 
+static inline void
+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
+{
+       pud_populate(mm, pud_entry, pmd);
+}
+
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
        return quicklist_alloc(0, GFP_KERNEL, NULL);
index 7935115398a6679ec560a4f7ef08d2cc95724988..c0eca6ae2a52b6d8bc0bdeba60ec42ae4ac414a0 100644 (file)
@@ -12,7 +12,7 @@
  *     David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
-
+#include <linux/const.h>
 #include <asm/mman.h>
 #include <asm/page.h>
 #include <asm/processor.h>
 #define PAGE_READONLY  __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
 #define PAGE_COPY      __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+# define PAGE_SHARED_NOEXEC    __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
+# define PAGE_READONLY_NOEXEC  __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+# define PAGE_COPY_NOEXEC      __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+#else
+# define PAGE_SHARED_NOEXEC    PAGE_SHARED
+# define PAGE_READONLY_NOEXEC  PAGE_READONLY
+# define PAGE_COPY_NOEXEC      PAGE_COPY
+#endif
+
 #define PAGE_GATE      __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
 #define PAGE_KERNEL    __pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
 #define PAGE_KERNELRX  __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
index 45698cd15b7b85a19a59e26ea8c91f9255f6454e..e8e2dbc9e142a056ea4d7e056c0dcadc84f48408 100644 (file)
@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
        unsigned short  *p = (unsigned short *)&lock->lock + 1, tmp;
 
        asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
-       ACCESS_ONCE(*p) = (tmp + 2) & ~1;
+       ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
 }
 
 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
index 103bedc59644a65a90d76d51b85c586478ef0e01..02105979e8088bd4f4faa2221e72801b7339dabb 100644 (file)
@@ -70,6 +70,7 @@
         && ((segment).seg == KERNEL_DS.seg                                             \
             || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT)));        \
 })
+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
 #define access_ok(type, addr, size)    __access_ok((addr), (size), get_fs())
 
 /*
@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
 static inline unsigned long
 __copy_to_user (void __user *to, const void *from, unsigned long count)
 {
+       if (count > INT_MAX)
+               return count;
+
+       if (!__builtin_constant_p(count))
+               check_object_size(from, count, true);
+
        return __copy_user(to, (__force void __user *) from, count);
 }
 
 static inline unsigned long
 __copy_from_user (void *to, const void __user *from, unsigned long count)
 {
+       if (count > INT_MAX)
+               return count;
+
+       if (!__builtin_constant_p(count))
+               check_object_size(to, count, false);
+
        return __copy_user((__force void __user *) to, from, count);
 }
 
@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
 ({                                                                                     \
        void __user *__cu_to = (to);                                                    \
        const void *__cu_from = (from);                                                 \
-       long __cu_len = (n);                                                            \
+       unsigned long __cu_len = (n);                                                   \
                                                                                        \
-       if (__access_ok(__cu_to, __cu_len, get_fs()))                                   \
+       if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) {          \
+               if (!__builtin_constant_p(n))                                           \
+                       check_object_size(__cu_from, __cu_len, true);                   \
                __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len);   \
+       }                                                                               \
        __cu_len;                                                                       \
 })
 
@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
 ({                                                                                     \
        void *__cu_to = (to);                                                           \
        const void __user *__cu_from = (from);                                          \
-       long __cu_len = (n);                                                            \
+       unsigned long __cu_len = (n);                                                   \
                                                                                        \
        __chk_user_ptr(__cu_from);                                                      \
-       if (__access_ok(__cu_from, __cu_len, get_fs()))                                 \
+       if (__cu_len <= INT_MAX  && __access_ok(__cu_from, __cu_len, get_fs())) {       \
+               if (!__builtin_constant_p(n))                                           \
+                       check_object_size(__cu_to, __cu_len, false);                    \
                __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len);   \
+       }                                                                               \
        __cu_len;                                                                       \
 })
 
index 29754aae5177a94ec257021ab00c57f688a61de8..06d28383766d77a45ccbea7a3e3c02c5c38cf597 100644 (file)
@@ -491,16 +491,40 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
        return 0;
 }
 
+static inline int
+in_init_rx (const struct module *mod, uint64_t addr)
+{
+       return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
+}
+
+static inline int
+in_init_rw (const struct module *mod, uint64_t addr)
+{
+       return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
+}
+
 static inline int
 in_init (const struct module *mod, uint64_t addr)
 {
-       return addr - (uint64_t) mod->module_init < mod->init_size;
+       return in_init_rx(mod, addr) || in_init_rw(mod, addr);
+}
+
+static inline int
+in_core_rx (const struct module *mod, uint64_t addr)
+{
+       return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
+}
+
+static inline int
+in_core_rw (const struct module *mod, uint64_t addr)
+{
+       return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
 }
 
 static inline int
 in_core (const struct module *mod, uint64_t addr)
 {
-       return addr - (uint64_t) mod->module_core < mod->core_size;
+       return in_core_rx(mod, addr) || in_core_rw(mod, addr);
 }
 
 static inline int
@@ -683,7 +707,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
                break;
 
              case RV_BDREL:
-               val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
+               if (in_init_rx(mod, val))
+                       val -= (uint64_t) mod->module_init_rx;
+               else if (in_init_rw(mod, val))
+                       val -= (uint64_t) mod->module_init_rw;
+               else if (in_core_rx(mod, val))
+                       val -= (uint64_t) mod->module_core_rx;
+               else if (in_core_rw(mod, val))
+                       val -= (uint64_t) mod->module_core_rw;
                break;
 
              case RV_LTV:
@@ -818,15 +849,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
                 *     addresses have been selected...
                 */
                uint64_t gp;
-               if (mod->core_size > MAX_LTOFF)
+               if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
                        /*
                         * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
                         * at the end of the module.
                         */
-                       gp = mod->core_size - MAX_LTOFF / 2;
+                       gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
                else
-                       gp = mod->core_size / 2;
-               gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
+                       gp = (mod->core_size_rx + mod->core_size_rw) / 2;
+               gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
                mod->arch.gp = gp;
                DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
        }
index c39c3cd3ac348a414787d4db30ec6c6181f7f6d9..3c77738fc570ab91036d91834ff324c5696888e9 100644 (file)
@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
        return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata palinfo_cpu_notifier =
+static struct notifier_block palinfo_cpu_notifier =
 {
        .notifier_call = palinfo_cpu_callback,
        .priority = 0,
index 41e33f84c18511ffcf0164e7d18613369974a343..65180b2a292f373281527f50dae31c37ce13d7ad 100644 (file)
@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
        unsigned long align_mask = 0;
        struct mm_struct *mm = current->mm;
        struct vm_unmapped_area_info info;
+       unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 
        if (len > RGN_MAP_LIMIT)
                return -ENOMEM;
@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
        if (REGION_NUMBER(addr) == RGN_HPAGE)
                addr = 0;
 #endif
+
+#ifdef CONFIG_PAX_RANDMMAP
+       if (mm->pax_flags & MF_PAX_RANDMMAP)
+               addr = mm->free_area_cache;
+       else
+#endif
+
        if (!addr)
                addr = TASK_UNMAPPED_BASE;
 
@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
        info.high_limit = TASK_SIZE;
        info.align_mask = align_mask;
        info.align_offset = 0;
+       info.threadstack_offset = offset;
        return vm_unmapped_area(&info);
 }
 
index 84f8a52ac5ae2bdb65004691813aed4ed008ea9f..7c76178d39b23a0e624af01924fd35c02ba013d6 100644 (file)
@@ -192,7 +192,7 @@ SECTIONS {
        /* Per-cpu data: */
        . = ALIGN(PERCPU_PAGE_SIZE);
        PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
-       __phys_per_cpu_start = __per_cpu_load;
+       __phys_per_cpu_start = per_cpu_load;
        /*
         * ensure percpu data fits
         * into percpu page size
index ba5ba7accd0d6bb4dbab34f7fc307c4306347f4a..36e9d3a2d6e219002aad79516a6a5426be6583d6 100644 (file)
@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
        return pte_present(pte);
 }
 
+#ifdef CONFIG_PAX_PAGEEXEC
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+       unsigned long i;
+
+       printk(KERN_ERR "PAX: bytes at PC: ");
+       for (i = 0; i < 8; i++) {
+               unsigned int c;
+               if (get_user(c, (unsigned int *)pc+i))
+                       printk(KERN_CONT "???????? ");
+               else
+                       printk(KERN_CONT "%08x ", c);
+       }
+       printk("\n");
+}
+#endif
+
 #      define VM_READ_BIT      0
 #      define VM_WRITE_BIT     1
 #      define VM_EXEC_BIT      2
@@ -151,8 +168,21 @@ retry:
        if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
                goto bad_area;
 
-       if ((vma->vm_flags & mask) != mask)
+       if ((vma->vm_flags & mask) != mask) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+               if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
+                       if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
+                               goto bad_area;
+
+                       up_read(&mm->mmap_sem);
+                       pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
+                       do_group_exit(SIGKILL);
+               }
+#endif
+
                goto bad_area;
+       }
 
        /*
         * If for any reason at all we couldn't handle the fault, make
index 76069c18ee42c186edf37c680ee78249d553b4fc..c2aa816371eb3d926c6a74ecf6435701e94ad3ce 100644 (file)
@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
                unsigned long pgoff, unsigned long flags)
 {
        struct vm_unmapped_area_info info;
+       unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
 
        if (len > RGN_MAP_LIMIT)
                return -ENOMEM;
@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
        info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
        info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
        info.align_offset = 0;
+       info.threadstack_offset = offset;
        return vm_unmapped_area(&info);
 }
 
index 6b3345758d3e3298ed8ee23b592d21adda88034d..88b51248c4a5839c2c0a24450e95dd15ea61c5f8 100644 (file)
@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
+
+#ifdef CONFIG_PAX_PAGEEXEC
+               if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
+                       vma->vm_flags &= ~VM_EXEC;
+
+#ifdef CONFIG_PAX_MPROTECT
+                       if (current->mm->pax_flags & MF_PAX_MPROTECT)
+                               vma->vm_flags &= ~VM_MAYEXEC;
+#endif
+
+               }
+#endif
+
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
                down_write(&current->mm->mmap_sem);
                if (insert_vm_struct(current->mm, vma)) {
@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
-       gate_vma.vm_page_prot = __P101;
+       gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
 
        return 0;
 }
index 40b3ee98193dbf5177299ee1cfb82c673e749a7d..8c2c112b9d4f927695e6d26d1197edf8a7a784ae 100644 (file)
@@ -1,8 +1,10 @@
 #ifndef _ASM_M32R_CACHE_H
 #define _ASM_M32R_CACHE_H
 
+#include <linux/const.h>
+
 /* L1 cache line size */
 #define L1_CACHE_SHIFT         4
-#define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES         (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #endif  /* _ASM_M32R_CACHE_H */
index 82abd159dbef495253bb2989093f86809b085ca5..d95ae5db097468fb9928af2fa0cd2c8466106b43 100644 (file)
@@ -14,6 +14,9 @@
 unsigned long
 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+       if ((long)n < 0)
+               return n;
+
        prefetch(from);
        if (access_ok(VERIFY_WRITE, to, n))
                __copy_user(to,from,n);
@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
 unsigned long
 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+       if ((long)n < 0)
+               return n;
+
        prefetchw(to);
        if (access_ok(VERIFY_READ, from, n))
                __copy_user_zeroing(to,from,n);
index 0395c51e46a6cab11bb510271ca9413f671f9314..5f2603124864ac2e2edafaefb439289fafb49a01 100644 (file)
@@ -4,9 +4,11 @@
 #ifndef __ARCH_M68K_CACHE_H
 #define __ARCH_M68K_CACHE_H
 
+#include <linux/const.h>
+
 /* bytes per L1 cache line */
 #define        L1_CACHE_SHIFT  4
-#define        L1_CACHE_BYTES  (1<< L1_CACHE_SHIFT)
+#define        L1_CACHE_BYTES  (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define ARCH_DMA_MINALIGN      L1_CACHE_BYTES
 
index d703d8e26a656c1560202d3c1af6a39ed95771e5..a8e2d700e8d48a643c540da3f83fbef74cc9532d 100644 (file)
@@ -90,7 +90,7 @@ static inline void fence(void)
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
        smp_mb();                                                       \
-       ACCESS_ONCE(*p) = (v);                                          \
+       ACCESS_ONCE_RW(*p) = (v);                                       \
 } while (0)
 
 #define smp_load_acquire(p)                                            \
index 3c32075d294528df5d8c2493215f34b012ec57f4..ae0ae75abffece6a7a4fd5fce1d860321cbb38e0 100644 (file)
@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & HUGEPT_MASK;
        info.align_offset = 0;
+       info.threadstack_offset = 0;
        return vm_unmapped_area(&info);
 }
 
index 4efe96a036f74a7ce058f563d44c21f46c0cc374..60e869982417a5dc17b4770de6b070ca1e5a7284 100644 (file)
 #ifndef _ASM_MICROBLAZE_CACHE_H
 #define _ASM_MICROBLAZE_CACHE_H
 
+#include <linux/const.h>
 #include <asm/registers.h>
 
 #define L1_CACHE_SHIFT 5
 /* word-granular cache in microblaze */
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define SMP_CACHE_BYTES        L1_CACHE_BYTES
 
index 843713c05b79fe69f8bbc057a17a5e200dfc54da..b6a87b99784fa54a6a036f08d44e7baf31d121b5 100644 (file)
@@ -2439,6 +2439,7 @@ source "kernel/Kconfig.preempt"
 
 config KEXEC
        bool "Kexec system call"
+       depends on !GRKERNSEC_KMEM
        help
          kexec is a system call that implements the ability to shutdown your
          current kernel, and to start another kernel.  It is like a reboot
index 3778655c4a375215fddea8fd0969960d096d0d9c..1dff0a9a502e3f93ddb12f76e68d473ce119d810 100644 (file)
@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
        if (dma_release_from_coherent(dev, order, vaddr))
                return;
 
-       swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+       swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
 }
 
 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
index 857da84cfc92eb20bd7f29cb5d9b3c1e16b86203..3f4458b3e58ec93a58cb02915dc741818d03381b 100644 (file)
 #include <asm/cmpxchg.h>
 #include <asm/war.h>
 
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
 #define ATOMIC_INIT(i)   { (i) }
 
+#ifdef CONFIG_64BIT
+#define _ASM_EXTABLE(from, to)         \
+"      .section __ex_table,\"a\"\n"    \
+"      .dword  " #from ", " #to"\n"    \
+"      .previous\n"
+#else
+#define _ASM_EXTABLE(from, to)         \
+"      .section __ex_table,\"a\"\n"    \
+"      .word   " #from ", " #to"\n"    \
+"      .previous\n"
+#endif
+
 /*
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v)         ACCESS_ONCE((v)->counter)
+static inline int atomic_read(const atomic_t *v)
+{
+       return ACCESS_ONCE(v->counter);
+}
+
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+       return ACCESS_ONCE(v->counter);
+}
 
 /*
  * atomic_set - set atomic variable
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic_set(v, i)               ((v)->counter = (i))
+static inline void atomic_set(atomic_t *v, int i)
+{
+       v->counter = i;
+}
 
-#define ATOMIC_OP(op, c_op, asm_op)                                          \
-static __inline__ void atomic_##op(int i, atomic_t * v)                              \
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+       v->counter = i;
+}
+
+#ifdef CONFIG_PAX_REFCOUNT
+#define __OVERFLOW_POST                                \
+       "       b       4f              \n"     \
+       "       .set    noreorder       \n"     \
+       "3:     b       5f              \n"     \
+       "       move    %0, %1          \n"     \
+       "       .set    reorder         \n"
+#define __OVERFLOW_EXTABLE     \
+       "3:\n"                  \
+       _ASM_EXTABLE(2b, 3b)
+#else
+#define __OVERFLOW_POST
+#define __OVERFLOW_EXTABLE
+#endif
+
+#define __ATOMIC_OP(op, suffix, asm_op, extable)                             \
+static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v)        \
 {                                                                            \
        if (kernel_uses_llsc && R10000_LLSC_WAR) {                            \
                int temp;                                                     \
                                                                              \
                __asm__ __volatile__(                                         \
-               "       .set    arch=r4000                              \n"   \
-               "1:     ll      %0, %1          # atomic_" #op "        \n"   \
-               "       " #asm_op " %0, %2                              \n"   \
+               "       .set    mips3                                   \n"   \
+               "1:     ll      %0, %1          # atomic_" #op #suffix "\n"   \
+               "2:     " #asm_op " %0, %2                              \n"   \
                "       sc      %0, %1                                  \n"   \
                "       beqzl   %0, 1b                                  \n"   \
+               extable                                                       \
                "       .set    mips0                                   \n"   \
                : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)              \
                : "Ir" (i));                                                  \
        } else if (kernel_uses_llsc) {                                        \
                int temp;                                                     \
                                                                              \
-               do {                                                          \
-                       __asm__ __volatile__(                                 \
-                       "       .set    arch=r4000                      \n"   \
-                       "       ll      %0, %1          # atomic_" #op "\n"   \
-                       "       " #asm_op " %0, %2                      \n"   \
-                       "       sc      %0, %1                          \n"   \
-                       "       .set    mips0                           \n"   \
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)      \
-                       : "Ir" (i));                                          \
-               } while (unlikely(!temp));                                    \
+               __asm__ __volatile__(                                         \
+               "       .set    mips3                                   \n"   \
+               "1:     ll      %0, %1          # atomic_" #op #suffix "\n"   \
+               "2:     " #asm_op " %0, %2                              \n"   \
+               "       sc      %0, %1                                  \n"   \
+               "       beqz    %0, 1b                                  \n"   \
+                       extable                                               \
+               "       .set    mips0                                   \n"   \
+               : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)              \
+               : "Ir" (i));                                                  \
        } else {                                                              \
                unsigned long flags;                                          \
                                                                              \
                raw_local_irq_save(flags);                                    \
-               v->counter c_op i;                                            \
+               __asm__ __volatile__(                                         \
+               "2:     " #asm_op " %0, %1                              \n"   \
+               extable                                                       \
+               : "+r" (v->counter) : "Ir" (i));                              \
                raw_local_irq_restore(flags);                                 \
        }                                                                     \
 }
 
-#define ATOMIC_OP_RETURN(op, c_op, asm_op)                                   \
-static __inline__ int atomic_##op##_return(int i, atomic_t * v)                      \
+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, _unchecked, asm_op##u, )       \
+                             __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE)
+
+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable)             \
+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
 {                                                                            \
        int result;                                                           \
                                                                              \
@@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v)                  \
                int temp;                                                     \
                                                                              \
                __asm__ __volatile__(                                         \
-               "       .set    arch=r4000                              \n"   \
-               "1:     ll      %1, %2          # atomic_" #op "_return \n"   \
-               "       " #asm_op " %0, %1, %3                          \n"   \
+               "       .set    mips3                                   \n"   \
+               "1:     ll      %1, %2  # atomic_" #op "_return" #suffix"\n"  \
+               "2:     " #asm_op " %0, %1, %3                          \n"   \
                "       sc      %0, %2                                  \n"   \
                "       beqzl   %0, 1b                                  \n"   \
-               "       " #asm_op " %0, %1, %3                          \n"   \
+               post_op                                                       \
+               extable                                                       \
+               "4:     " #asm_op " %0, %1, %3                          \n"   \
+               "5:                                                     \n"   \
                "       .set    mips0                                   \n"   \
                : "=&r" (result), "=&r" (temp),                               \
                  "+" GCC_OFF12_ASM() (v->counter)                            \
@@ -102,26 +159,33 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v)                 \
        } else if (kernel_uses_llsc) {                                        \
                int temp;                                                     \
                                                                              \
-               do {                                                          \
-                       __asm__ __volatile__(                                 \
-                       "       .set    arch=r4000                      \n"   \
-                       "       ll      %1, %2  # atomic_" #op "_return \n"   \
-                       "       " #asm_op " %0, %1, %3                  \n"   \
-                       "       sc      %0, %2                          \n"   \
-                       "       .set    mips0                           \n"   \
-                       : "=&r" (result), "=&r" (temp),                       \
-                         "+" GCC_OFF12_ASM() (v->counter)                    \
-                       : "Ir" (i));                                          \
-               } while (unlikely(!result));                                  \
+               __asm__ __volatile__(                                         \
+               "       .set    mips3                                   \n"   \
+               "1:     ll      %1, %2  # atomic_" #op "_return" #suffix "\n" \
+               "2:     " #asm_op " %0, %1, %3                          \n"   \
+               "       sc      %0, %2                                  \n"   \
+               post_op                                                       \
+               extable                                                       \
+               "4:     " #asm_op " %0, %1, %3                          \n"   \
+               "5:                                                     \n"   \
+               "       .set    mips0                                   \n"   \
+               : "=&r" (result), "=&r" (temp),                               \
+                 "+" GCC_OFF12_ASM() (v->counter)                            \
+               : "Ir" (i));                                                  \
                                                                              \
                result = temp; result c_op i;                                 \
        } else {                                                              \
                unsigned long flags;                                          \
                                                                              \
                raw_local_irq_save(flags);                                    \
-               result = v->counter;                                          \
-               result c_op i;                                                \
-               v->counter = result;                                          \
+               __asm__ __volatile__(                                         \
+               "       lw      %0, %1                                  \n"   \
+               "2:     " #asm_op " %0, %1, %2                          \n"   \
+               "       sw      %0, %1                                  \n"   \
+               "3:                                                     \n"   \
+               extable                                                       \
+               : "=&r" (result), "+" GCC_OFF12_ASM() (v->counter)            \
+               : "Ir" (i));                                                  \
                raw_local_irq_restore(flags);                                 \
        }                                                                     \
                                                                              \
@@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v)                 \
        return result;                                                        \
 }
 
-#define ATOMIC_OPS(op, c_op, asm_op)                                         \
-       ATOMIC_OP(op, c_op, asm_op)                                           \
-       ATOMIC_OP_RETURN(op, c_op, asm_op)
+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , ) \
+                                    __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
+
+#define ATOMIC_OPS(op, asm_op)                                               \
+       ATOMIC_OP(op, asm_op)                                                 \
+       ATOMIC_OP_RETURN(op, asm_op)
 
-ATOMIC_OPS(add, +=, addu)
-ATOMIC_OPS(sub, -=, subu)
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
+#undef __ATOMIC_OP_RETURN
 #undef ATOMIC_OP
+#undef __ATOMIC_OP
 
 /*
  * atomic_sub_if_positive - conditionally subtract integer from atomic variable
@@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
  * Atomically test @v and subtract @i if @v is greater or equal than @i.
  * The function returns the old value of @v minus @i.
  */
-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
 {
        int result;
 
@@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
        return result;
 }
 
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+       return cmpxchg(&v->counter, old, new);
+}
+
+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
+                                          int new)
+{
+       return cmpxchg(&(v->counter), old, new);
+}
+
+static inline int atomic_xchg(atomic_t *v, int new)
+{
+       return xchg(&v->counter, new);
+}
+
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
+       return xchg(&(v->counter), new);
+}
 
 /**
  * __atomic_add_unless - add unless the number is a given value
@@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_dec_return(v) atomic_sub_return(1, (v))
 #define atomic_inc_return(v) atomic_add_return(1, (v))
+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+       return atomic_add_return_unchecked(1, v);
+}
 
 /*
  * atomic_sub_and_test - subtract value from variable and test result
@@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
  * other cases.
  */
 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+       return atomic_add_return_unchecked(1, v) == 0;
+}
 
 /*
  * atomic_dec_and_test - decrement by 1 and test
@@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
  * Atomically increments @v by 1.
  */
 #define atomic_inc(v) atomic_add(1, (v))
+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+       atomic_add_unchecked(1, v);
+}
 
 /*
  * atomic_dec - decrement and test
@@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
  * Atomically decrements @v by 1.
  */
 #define atomic_dec(v) atomic_sub(1, (v))
+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+       atomic_sub_unchecked(1, v);
+}
 
 /*
  * atomic_add_negative - add and test if negative
@@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
  * @v: pointer of type atomic64_t
  *
  */
-#define atomic64_read(v)       ACCESS_ONCE((v)->counter)
+static inline long atomic64_read(const atomic64_t *v)
+{
+       return ACCESS_ONCE(v->counter);
+}
+
+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+       return ACCESS_ONCE(v->counter);
+}
 
 /*
  * atomic64_set - set atomic variable
  * @v: pointer of type atomic64_t
  * @i: required value
  */
-#define atomic64_set(v, i)     ((v)->counter = (i))
+static inline void atomic64_set(atomic64_t *v, long i)
+{
+       v->counter = i;
+}
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
+{
+       v->counter = i;
+}
 
-#define ATOMIC64_OP(op, c_op, asm_op)                                        \
-static __inline__ void atomic64_##op(long i, atomic64_t * v)                 \
+#define __ATOMIC64_OP(op, suffix, asm_op, extable)                           \
+static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v)    \
 {                                                                            \
        if (kernel_uses_llsc && R10000_LLSC_WAR) {                            \
                long temp;                                                    \
                                                                              \
                __asm__ __volatile__(                                         \
-               "       .set    arch=r4000                              \n"   \
-               "1:     lld     %0, %1          # atomic64_" #op "      \n"   \
-               "       " #asm_op " %0, %2                              \n"   \
+               "       .set    mips3                                   \n"   \
+               "1:     lld     %0, %1          # atomic64_" #op #suffix "\n" \
+               "2:     " #asm_op " %0, %2                              \n"   \
                "       scd     %0, %1                                  \n"   \
                "       beqzl   %0, 1b                                  \n"   \
+               extable                                                       \
                "       .set    mips0                                   \n"   \
                : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)              \
                : "Ir" (i));                                                  \
        } else if (kernel_uses_llsc) {                                        \
                long temp;                                                    \
                                                                              \
-               do {                                                          \
-                       __asm__ __volatile__(                                 \
-                       "       .set    arch=r4000                      \n"   \
-                       "       lld     %0, %1          # atomic64_" #op "\n" \
-                       "       " #asm_op " %0, %2                      \n"   \
-                       "       scd     %0, %1                          \n"   \
-                       "       .set    mips0                           \n"   \
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)      \
-                       : "Ir" (i));                                          \
-               } while (unlikely(!temp));                                    \
+               __asm__ __volatile__(                                         \
+               "       .set    mips3                                   \n"   \
+               "1:     lld     %0, %1          # atomic64_" #op #suffix "\n" \
+               "2:     " #asm_op " %0, %2                              \n"   \
+               "       scd     %0, %1                                  \n"   \
+               "       beqz    %0, 1b                                  \n"   \
+                       extable                                               \
+               "       .set    mips0                                   \n"   \
+               : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)              \
+               : "Ir" (i));                                                  \
        } else {                                                              \
                unsigned long flags;                                          \
                                                                              \
                raw_local_irq_save(flags);                                    \
-               v->counter c_op i;                                            \
+               __asm__ __volatile__(                                         \
+               "2:     " #asm_op " %0, %1                              \n"   \
+               extable                                                       \
+               : "+" GCC_OFF12_ASM() (v->counter) : "Ir" (i));               \
                raw_local_irq_restore(flags);                                 \
        }                                                                     \
 }
 
-#define ATOMIC64_OP_RETURN(op, c_op, asm_op)                                 \
-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)        \
+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, _unchecked, asm_op##u, )    \
+                               __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE)
+
+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable)           \
+static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
 {                                                                            \
        long result;                                                          \
                                                                              \
@@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)           \
                long temp;                                                    \
                                                                              \
                __asm__ __volatile__(                                         \
-               "       .set    arch=r4000                              \n"   \
+               "       .set    mips3                                   \n"   \
                "1:     lld     %1, %2          # atomic64_" #op "_return\n"  \
-               "       " #asm_op " %0, %1, %3                          \n"   \
+               "2:     " #asm_op " %0, %1, %3                          \n"   \
                "       scd     %0, %2                                  \n"   \
                "       beqzl   %0, 1b                                  \n"   \
-               "       " #asm_op " %0, %1, %3                          \n"   \
+               post_op                                                       \
+               extable                                                       \
+               "4:     " #asm_op " %0, %1, %3                          \n"   \
+               "5:                                                     \n"   \
                "       .set    mips0                                   \n"   \
                : "=&r" (result), "=&r" (temp),                               \
                  "+" GCC_OFF12_ASM() (v->counter)                            \
@@ -381,27 +510,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)           \
        } else if (kernel_uses_llsc) {                                        \
                long temp;                                                    \
                                                                              \
-               do {                                                          \
-                       __asm__ __volatile__(                                 \
-                       "       .set    arch=r4000                      \n"   \
-                       "       lld     %1, %2  # atomic64_" #op "_return\n"  \
-                       "       " #asm_op " %0, %1, %3                  \n"   \
-                       "       scd     %0, %2                          \n"   \
-                       "       .set    mips0                           \n"   \
-                       : "=&r" (result), "=&r" (temp),                       \
-                         "=" GCC_OFF12_ASM() (v->counter)                    \
-                       : "Ir" (i), GCC_OFF12_ASM() (v->counter)              \
-                       : "memory");                                          \
-               } while (unlikely(!result));                                  \
+               __asm__ __volatile__(                                         \
+               "       .set    mips3                                   \n"   \
+               "1:     lld     %1, %2  # atomic64_" #op "_return" #suffix "\n"\
+               "2:     " #asm_op " %0, %1, %3                          \n"   \
+               "       scd     %0, %2                                  \n"   \
+               "       beqz    %0, 1b                                  \n"   \
+               post_op                                                       \
+               extable                                                       \
+               "4:     " #asm_op " %0, %1, %3                          \n"   \
+               "5:                                                     \n"   \
+               "       .set    mips0                                   \n"   \
+               : "=&r" (result), "=&r" (temp),                               \
+                 "=" GCC_OFF12_ASM() (v->counter)                            \
+               : "Ir" (i), GCC_OFF12_ASM() (v->counter)                      \
+               : "memory");                                                  \
                                                                              \
                result = temp; result c_op i;                                 \
        } else {                                                              \
                unsigned long flags;                                          \
                                                                              \
                raw_local_irq_save(flags);                                    \
-               result = v->counter;                                          \
-               result c_op i;                                                \
-               v->counter = result;                                          \
+               __asm__ __volatile__(                                         \
+               "       ld      %0, %1                                  \n"   \
+               "2:     " #asm_op " %0, %1, %2                          \n"   \
+               "       sd      %0, %1                                  \n"   \
+               "3:                                                     \n"   \
+               extable                                                       \
+               : "=&r" (result), "+" GCC_OFF12_ASM() (v->counter)            \
+               : "Ir" (i));                                                  \
                raw_local_irq_restore(flags);                                 \
        }                                                                     \
                                                                              \
@@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)           \
        return result;                                                        \
 }
 
-#define ATOMIC64_OPS(op, c_op, asm_op)                                       \
-       ATOMIC64_OP(op, c_op, asm_op)                                         \
-       ATOMIC64_OP_RETURN(op, c_op, asm_op)
+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , )     \
+                                      __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
 
-ATOMIC64_OPS(add, +=, daddu)
-ATOMIC64_OPS(sub, -=, dsubu)
+#define ATOMIC64_OPS(op, asm_op)                                               \
+       ATOMIC64_OP(op, asm_op)                                                 \
+       ATOMIC64_OP_RETURN(op, asm_op)
+
+ATOMIC64_OPS(add, dadd)
+ATOMIC64_OPS(sub, dsub)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
+#undef __ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
+#undef __ATOMIC64_OP
+#undef __OVERFLOW_EXTABLE
+#undef __OVERFLOW_POST
 
 /*
  * atomic64_sub_if_positive - conditionally subtract integer from atomic
@@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
  * Atomically test @v and subtract @i if @v is greater or equal than @i.
  * The function returns the old value of @v minus @i.
  */
-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
 {
        long result;
 
@@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
        return result;
 }
 
-#define atomic64_cmpxchg(v, o, n) \
-       ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+{
+       return cmpxchg(&v->counter, old, new);
+}
+
+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
+                                             long new)
+{
+       return cmpxchg(&(v->counter), old, new);
+}
+
+static inline long atomic64_xchg(atomic64_t *v, long new)
+{
+       return xchg(&v->counter, new);
+}
+
+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
+{
+       return xchg(&(v->counter), new);
+}
 
 /**
  * atomic64_add_unless - add unless the number is a given value
@@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
 
 /*
  * atomic64_sub_and_test - subtract value from variable and test result
@@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  * other cases.
  */
 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
 
 /*
  * atomic64_dec_and_test - decrement by 1 and test
@@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  * Atomically increments @v by 1.
  */
 #define atomic64_inc(v) atomic64_add(1, (v))
+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
 
 /*
  * atomic64_dec - decrement and test
@@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  * Atomically decrements @v by 1.
  */
 #define atomic64_dec(v) atomic64_sub(1, (v))
+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
 
 /*
  * atomic64_add_negative - add and test if negative
index 2b8bbbcb9be0e9f8c5c6f7dbc1a2c6f692185443..4556df66a4cddda6d81b88ef67697f882c01b92c 100644 (file)
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
        smp_mb();                                                       \
-       ACCESS_ONCE(*p) = (v);                                          \
+       ACCESS_ONCE_RW(*p) = (v);                                       \
 } while (0)
 
 #define smp_load_acquire(p)                                            \
index b4db69fbc40ce212a557b235679ec0ebf2208060..8f3b09336aea489ef55e7b9e059bae9aafc009dd 100644 (file)
@@ -9,10 +9,11 @@
 #ifndef _ASM_CACHE_H
 #define _ASM_CACHE_H
 
+#include <linux/const.h>
 #include <kmalloc.h>
 
 #define L1_CACHE_SHIFT         CONFIG_MIPS_L1_CACHE_SHIFT
-#define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES         (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define SMP_CACHE_SHIFT                L1_CACHE_SHIFT
 #define SMP_CACHE_BYTES                L1_CACHE_BYTES
index eb4d95de619c5dca7543ed551267e43799f4ca11..f2f7f93a0f7093a38e7075a841093b81bcb14323 100644 (file)
@@ -405,15 +405,18 @@ extern const char *__elf_platform;
 #define ELF_ET_DYN_BASE                (TASK_SIZE / 3 * 2)
 #endif
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE    (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
+
+#define PAX_DELTA_MMAP_LEN     (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
+#define PAX_DELTA_STACK_LEN    (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
+#endif
+
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 struct linux_binprm;
 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
                                       int uses_interp);
 
-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 struct arch_elf_state {
        int fp_abi;
        int interp_fp_abi;
index c1f6afa4bc4f9e7fdef4956044d454e80bdab27c..38cc6e9a3c970cfeecba54b736af1768281cbcfa 100644 (file)
@@ -12,6 +12,6 @@
 #ifndef _ASM_EXEC_H
 #define _ASM_EXEC_H
 
-extern unsigned long arch_align_stack(unsigned long sp);
+#define arch_align_stack(x) ((x) & ~0xfUL)
 
 #endif /* _ASM_EXEC_H */
index 9e8ef5994c9cf15b20068376e8e06d7194b9bf3c..1139d6bbd3975c0712f4d0eaa4c004b735012bcd 100644 (file)
@@ -10,7 +10,7 @@
 
 #include <linux/atomic.h>
 
-extern atomic_t irq_err_count;
+extern atomic_unchecked_t irq_err_count;
 
 /*
  * interrupt-retrigger: NOP for now. This may not be appropriate for all
index 46dfc3c1fd49777a41b3158c77b1fc5c49955087..a16b13aad1ba6353b60f335d3f8d2da1528c0ade 100644 (file)
@@ -12,15 +12,25 @@ typedef struct
        atomic_long_t a;
 } local_t;
 
+typedef struct {
+       atomic_long_unchecked_t a;
+} local_unchecked_t;
+
 #define LOCAL_INIT(i)  { ATOMIC_LONG_INIT(i) }
 
 #define local_read(l)  atomic_long_read(&(l)->a)
+#define local_read_unchecked(l)        atomic_long_read_unchecked(&(l)->a)
 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
+#define local_set_unchecked(l, i)      atomic_long_set_unchecked(&(l)->a, (i))
 
 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
 #define local_inc(l)   atomic_long_inc(&(l)->a)
+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
 #define local_dec(l)   atomic_long_dec(&(l)->a)
+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
 
 /*
  * Same as above, but return the result value
@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
        return result;
 }
 
+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
+{
+       unsigned long result;
+
+       if (kernel_uses_llsc && R10000_LLSC_WAR) {
+               unsigned long temp;
+
+               __asm__ __volatile__(
+               "       .set    mips3                                   \n"
+               "1:"    __LL    "%1, %2         # local_add_return      \n"
+               "       addu    %0, %1, %3                              \n"
+                       __SC    "%0, %2                                 \n"
+               "       beqzl   %0, 1b                                  \n"
+               "       addu    %0, %1, %3                              \n"
+               "       .set    mips0                                   \n"
+               : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+               : "Ir" (i), "m" (l->a.counter)
+               : "memory");
+       } else if (kernel_uses_llsc) {
+               unsigned long temp;
+
+               __asm__ __volatile__(
+               "       .set    mips3                                   \n"
+               "1:"    __LL    "%1, %2         # local_add_return      \n"
+               "       addu    %0, %1, %3                              \n"
+                       __SC    "%0, %2                                 \n"
+               "       beqz    %0, 1b                                  \n"
+               "       addu    %0, %1, %3                              \n"
+               "       .set    mips0                                   \n"
+               : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+               : "Ir" (i), "m" (l->a.counter)
+               : "memory");
+       } else {
+               unsigned long flags;
+
+               local_irq_save(flags);
+               result = l->a.counter;
+               result += i;
+               l->a.counter = result;
+               local_irq_restore(flags);
+       }
+
+       return result;
+}
+
 static __inline__ long local_sub_return(long i, local_t * l)
 {
        unsigned long result;
@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
 
 #define local_cmpxchg(l, o, n) \
        ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
+#define local_cmpxchg_unchecked(l, o, n) \
+       ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
 
 /**
index 154b70a104837ee7b63ffb817f413635962389d7..426ae3d1e9ed7c186d151630d7d619252d0f44f1 100644 (file)
@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
   #ifdef CONFIG_CPU_MIPS32
     typedef struct { unsigned long pte_low, pte_high; } pte_t;
     #define pte_val(x)   ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
-    #define __pte(x)     ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
+    #define __pte(x)     ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
   #else
      typedef struct { unsigned long long pte; } pte_t;
      #define pte_val(x) ((x).pte)
index b336037e8768cfd50e45ff877f759a0039bbeb5e..5b874ccce3faa01f3fe84aa2680ab762c7dfc881 100644 (file)
@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
        set_pud(pud, __pud((unsigned long)pmd));
 }
+
+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+       pud_populate(mm, pud, pmd);
+}
 #endif
 
 /*
index 845016d1cdbdca5ecf1916ec0b28c2644f297908..33032688c5a719b83e8b1505bb8c0a83217c3214 100644 (file)
@@ -20,6 +20,9 @@
 #include <asm/io.h>
 #include <asm/pgtable-bits.h>
 
+#define ktla_ktva(addr)                (addr)
+#define ktva_ktla(addr)                (addr)
+
 struct mm_struct;
 struct vm_area_struct;
 
index e4440f92b366f7a1d90e4af10dd2477acdf0223a..8fb00051ad48721dd2f5bc52b332105faf06263f 100644 (file)
@@ -106,6 +106,9 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SECCOMP            4       /* secure computing */
 #define TIF_NOTIFY_RESUME      5       /* callback before returning to user */
 #define TIF_RESTORE_SIGMASK    9       /* restore signal mask in do_signal() */
+/* li takes a 32bit immediate */
+#define TIF_GRSEC_SETXID       10      /* update credentials on syscall entry/exit */
+
 #define TIF_USEDFPU            16      /* FPU was used by this task this quantum (SMP) */
 #define TIF_MEMDIE             18      /* is terminating due to OOM killer */
 #define TIF_NOHZ               19      /* in adaptive nohz mode */
@@ -141,14 +144,16 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_USEDMSA           (1<<TIF_USEDMSA)
 #define _TIF_MSA_CTX_LIVE      (1<<TIF_MSA_CTX_LIVE)
 #define _TIF_SYSCALL_TRACEPOINT        (1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_GRSEC_SETXID      (1<<TIF_GRSEC_SETXID)
 
 #define _TIF_WORK_SYSCALL_ENTRY        (_TIF_NOHZ | _TIF_SYSCALL_TRACE |       \
                                 _TIF_SYSCALL_AUDIT | \
-                                _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
+                                _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
+                                _TIF_GRSEC_SETXID)
 
 /* work to do in syscall_trace_leave() */
 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE |       \
-                                _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
+                                _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK         \
@@ -156,7 +161,7 @@ static inline struct thread_info *current_thread_info(void)
 /* work to do on any return to u-space */
 #define _TIF_ALLWORK_MASK      (_TIF_NOHZ | _TIF_WORK_MASK |           \
                                 _TIF_WORK_SYSCALL_EXIT |               \
-                                _TIF_SYSCALL_TRACEPOINT)
+                                _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
 
 /*
  * We stash processor id into a COP0 register to retrieve it fast
index bf8b32450ef6b4b5c91993f52fa205e79d2e9226..cec570534431d1dfbee98e0952ae4356b7619e01 100644 (file)
@@ -130,6 +130,7 @@ extern u64 __ua_limit;
        __ok == 0;                                                      \
 })
 
+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
 #define access_ok(type, addr, size)                                    \
        likely(__access_ok((addr), (size), __access_mask))
 
index 1188e00bb120a2637c53f3916f28b1f990a9515e..41cf1445f36df3bab39cd53b70cf86a13a90aba7 100644 (file)
@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 #undef ELF_ET_DYN_BASE
 #define ELF_ET_DYN_BASE                (TASK32_SIZE / 3 * 2)
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE    (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
+
+#define PAX_DELTA_MMAP_LEN     (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
+#define PAX_DELTA_STACK_LEN    (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
+#endif
+
 #include <asm/processor.h>
 #include <linux/module.h>
 #include <linux/elfcore.h>
index 928767858b867b65cce4aa92f965f96064a1f429..f870e47845bf77ed79cebfab4d9ba4c8b65f6874 100644 (file)
@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 #undef ELF_ET_DYN_BASE
 #define ELF_ET_DYN_BASE                (TASK32_SIZE / 3 * 2)
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE    (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
+
+#define PAX_DELTA_MMAP_LEN     (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
+#define PAX_DELTA_STACK_LEN    (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
+#endif
+
 #include <asm/processor.h>
 
 #include <linux/module.h>
index a74ec3ae557c0dc77d63ab835dcf9e820db8c672..4f06f1832fbd8bda6a957c9bc4793f8492b7bfc0 100644 (file)
@@ -202,7 +202,7 @@ spurious_8259A_irq:
                        printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
                        spurious_irq_mask |= irqmask;
                }
-               atomic_inc(&irq_err_count);
+               atomic_inc_unchecked(&irq_err_count);
                /*
                 * Theoretically we do not have to handle this IRQ,
                 * but in Linux this does not cause problems and is
index 44a1f792e399a9efa6479ad9924cff46955dc758..2bd6aa3ad4e5d20242c67e5ff23a59136df015b5 100644 (file)
@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
                }
        }
 
-       atomic_inc(&irq_err_count);
+       atomic_inc_unchecked(&irq_err_count);
 }
 
 void __init gt641xx_irq_init(void)
index d2bfbc2e8995fba3b6da1ad7a190f0d872ca6fbf..a8eacd28ef80770cb16659f274be6c8c9a40cdc9 100644 (file)
@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
        printk("unexpected IRQ # %d\n", irq);
 }
 
-atomic_t irq_err_count;
+atomic_unchecked_t irq_err_count;
 
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
-       seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
+       seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
        return 0;
 }
 
 asmlinkage void spurious_interrupt(void)
 {
-       atomic_inc(&irq_err_count);
+       atomic_inc_unchecked(&irq_err_count);
 }
 
 void __init init_IRQ(void)
@@ -109,7 +109,10 @@ void __init init_IRQ(void)
 #endif
 }
 
+
 #ifdef DEBUG_STACKOVERFLOW
+extern void gr_handle_kernel_exploit(void);
+
 static inline void check_stack_overflow(void)
 {
        unsigned long sp;
@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
                printk("do_IRQ: stack overflow: %ld\n",
                       sp - sizeof(struct thread_info));
                dump_stack();
+               gr_handle_kernel_exploit();
        }
 }
 #else
index 06147179a175b7ea5d8c02a502e4b20653dd8b3d..002fa430ca358b9ee03ab3298f0f0527cbed6010 100644 (file)
@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
        nc_core_ready_count = nc_addr;
 
        /* Ensure ready_count is zero-initialised before the assembly runs */
-       ACCESS_ONCE(*nc_core_ready_count) = 0;
+       ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
        coupled_barrier(&per_cpu(pm_barrier, core), online);
 
        /* Run the generated entry code */
index 85bff5d513e5b42ae483e414c14a4a844793b9a1..39bc202f567e88319e462fdf0cb3367133e0f49c 100644 (file)
@@ -534,18 +534,6 @@ out:
        return pc;
 }
 
-/*
- * Don't forget that the stack pointer must be aligned on a 8 bytes
- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
- */
-unsigned long arch_align_stack(unsigned long sp)
-{
-       if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-               sp -= get_random_int() & ~PAGE_MASK;
-
-       return sp & ALMASK;
-}
-
 static void arch_dump_stack(void *info)
 {
        struct pt_regs *regs;
index 51045281259403c55fcefac09d510f874a3047bb..950bbdcfc3263dd419d1daaad05ebb832a3811ad 100644 (file)
@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
        return ret;
 }
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+extern void gr_delayed_cred_worker(void);
+#endif
+
 /*
  * Notification of system call entry/exit
  * - triggered by current->work.syscall_trace
@@ -779,6 +783,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
            tracehook_report_syscall_entry(regs))
                ret = -1;
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+       if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
+               gr_delayed_cred_worker();
+#endif
+
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->regs[2]);
 
index 07fc5244aed4917392542399e8547ed276aa3518..b9d7f28a1e41d7300d12a90fcb95fe48d85828eb 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/reboot.h>
 
 #include <asm/reboot.h>
+#include <asm/bug.h>
 
 /*
  * Urgs ...  Too many MIPS machines to handle this in a generic way.
@@ -29,16 +30,19 @@ void machine_restart(char *command)
 {
        if (_machine_restart)
                _machine_restart(command);
+       BUG();
 }
 
 void machine_halt(void)
 {
        if (_machine_halt)
                _machine_halt();
+       BUG();
 }
 
 void machine_power_off(void)
 {
        if (pm_power_off)
                pm_power_off();
+       BUG();
 }
index 2242bdd4370eb19851ff5a31540a02c65fa26498..b2840486aac27a69a58dab8958af2d76a2bd76bf 100644 (file)
@@ -18,8 +18,8 @@
 #include <asm/mipsregs.h>
 
 static atomic_t count_start_flag = ATOMIC_INIT(0);
-static atomic_t count_count_start = ATOMIC_INIT(0);
-static atomic_t count_count_stop = ATOMIC_INIT(0);
+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
 static atomic_t count_reference = ATOMIC_INIT(0);
 
 #define COUNTON 100
@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
 
        for (i = 0; i < NR_LOOPS; i++) {
                /* slaves loop on '!= 2' */
-               while (atomic_read(&count_count_start) != 1)
+               while (atomic_read_unchecked(&count_count_start) != 1)
                        mb();
-               atomic_set(&count_count_stop, 0);
+               atomic_set_unchecked(&count_count_stop, 0);
                smp_wmb();
 
                /* this lets the slaves write their count register */
-               atomic_inc(&count_count_start);
+               atomic_inc_unchecked(&count_count_start);
 
                /*
                 * Everyone initialises count in the last loop:
@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
                /*
                 * Wait for all slaves to leave the synchronization point:
                 */
-               while (atomic_read(&count_count_stop) != 1)
+               while (atomic_read_unchecked(&count_count_stop) != 1)
                        mb();
-               atomic_set(&count_count_start, 0);
+               atomic_set_unchecked(&count_count_start, 0);
                smp_wmb();
-               atomic_inc(&count_count_stop);
+               atomic_inc_unchecked(&count_count_stop);
        }
        /* Arrange for an interrupt in a short while */
        write_c0_compare(read_c0_count() + COUNTON);
@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
        initcount = atomic_read(&count_reference);
 
        for (i = 0; i < NR_LOOPS; i++) {
-               atomic_inc(&count_count_start);
-               while (atomic_read(&count_count_start) != 2)
+               atomic_inc_unchecked(&count_count_start);
+               while (atomic_read_unchecked(&count_count_start) != 2)
                        mb();
 
                /*
@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
                if (i == NR_LOOPS-1)
                        write_c0_count(initcount);
 
-               atomic_inc(&count_count_stop);
-               while (atomic_read(&count_count_stop) != 2)
+               atomic_inc_unchecked(&count_count_stop);
+               while (atomic_read_unchecked(&count_count_stop) != 2)
                        mb();
        }
        /* Arrange for an interrupt in a short while */
index c3b41e24c05a47337509b9579d5b1302ba6f6e80..46c32e949c8c883b4a37b04ae24d06afcaea5ca7 100644 (file)
@@ -688,7 +688,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
        siginfo_t info;
 
        prev_state = exception_enter();
-       die_if_kernel("Integer overflow", regs);
+       if (unlikely(!user_mode(regs))) {
+
+#ifdef CONFIG_PAX_REFCOUNT
+               if (fixup_exception(regs)) {
+                       pax_report_refcount_overflow(regs);
+                       exception_exit(prev_state);
+                       return;
+               }
+#endif
+
+               die("Integer overflow", regs);
+       }
 
        info.si_code = FPE_INTOVF;
        info.si_signo = SIGFPE;
index 270bbd41769e6a93bc031ac157981ad93bd7ad5a..c01932accea6c340922c69f16ba8060db1635bb9 100644 (file)
@@ -815,7 +815,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
        return r;
 }
 
-int kvm_arch_init(void *opaque)
+int kvm_arch_init(const void *opaque)
 {
        if (kvm_mips_callbacks) {
                kvm_err("kvm: module already exists\n");
index 70ab5d664332694e92305331f13ed15a35ab1956..62940fe5c75e379dbbd5ad059aa44d4f73036d67 100644 (file)
 #include <asm/highmem.h>               /* For VMALLOC_END */
 #include <linux/kdebug.h>
 
+#ifdef CONFIG_PAX_PAGEEXEC
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+       unsigned long i;
+
+       printk(KERN_ERR "PAX: bytes at PC: ");
+       for (i = 0; i < 5; i++) {
+               unsigned int c;
+               if (get_user(c, (unsigned int *)pc+i))
+                       printk(KERN_CONT "???????? ");
+               else
+                       printk(KERN_CONT "%08x ", c);
+       }
+       printk("\n");
+}
+#endif
+
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -201,6 +218,14 @@ bad_area:
 bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+               if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
+                       pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
+                       do_group_exit(SIGKILL);
+               }
+#endif
+
                tsk->thread.cp0_badvaddr = address;
                tsk->thread.error_code = write;
 #if 0
index f1baadd56e82e2b0b4f2b79bd82686ea61a112d0..5472dcadfe9e0a8d10878b8d41527fa389716fa3 100644 (file)
@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
        struct vm_area_struct *vma;
        unsigned long addr = addr0;
        int do_color_align;
+       unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
        struct vm_unmapped_area_info info;
 
        if (unlikely(len > TASK_SIZE))
@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
                do_color_align = 1;
 
        /* requesting a specific address */
+
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
                        addr = PAGE_ALIGN(addr);
 
                vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+               if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
                        return addr;
        }
 
        info.length = len;
        info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
+       info.threadstack_offset = offset;
 
        if (dir == DOWN) {
                info.flags = VM_UNMAPPED_AREA_TOPDOWN;
@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 {
        unsigned long random_factor = 0UL;
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        if (current->flags & PF_RANDOMIZE) {
                random_factor = get_random_int();
                random_factor = random_factor << PAGE_SHIFT;
@@ -157,38 +167,23 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 
        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+
+#ifdef CONFIG_PAX_RANDMMAP
+               if (mm->pax_flags & MF_PAX_RANDMMAP)
+                       mm->mmap_base += mm->delta_mmap;
+#endif
+
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                mm->mmap_base = mmap_base(random_factor);
-               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-       }
-}
-
-static inline unsigned long brk_rnd(void)
-{
-       unsigned long rnd = get_random_int();
-
-       rnd = rnd << PAGE_SHIFT;
-       /* 8MB for 32bit, 256MB for 64bit */
-       if (TASK_IS_32BIT_ADDR)
-               rnd = rnd & 0x7ffffful;
-       else
-               rnd = rnd & 0xffffffful;
 
-       return rnd;
-}
-
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-       unsigned long base = mm->brk;
-       unsigned long ret;
+#ifdef CONFIG_PAX_RANDMMAP
+               if (mm->pax_flags & MF_PAX_RANDMMAP)
+                       mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
+#endif
 
-       ret = PAGE_ALIGN(base + brk_rnd());
-
-       if (ret < mm->brk)
-               return mm->brk;
-
-       return ret;
+               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+       }
 }
 
 int __virt_addr_valid(const volatile void *kaddr)
index d07e04121cc6fe7221e5f710abf68b515db24d5a..bedb72bd3a27155fb4068cfb7b9ee69d589f0c22 100644 (file)
@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
 
 
 static struct pci_ops octeon_pci_ops = {
-       octeon_read_config,
-       octeon_write_config,
+       .read = octeon_read_config,
+       .write = octeon_write_config,
 };
 
 static struct resource octeon_pci_mem_resource = {
index 5e36c33e5543fbc2c8a3227ab7dbf6b287f8931a..eb4a17ba4a530a9a73702bfb6fd6db1890295bb4 100644 (file)
@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
 }
 
 static struct pci_ops octeon_pcie0_ops = {
-       octeon_pcie0_read_config,
-       octeon_pcie0_write_config,
+       .read = octeon_pcie0_read_config,
+       .write = octeon_pcie0_write_config,
 };
 
 static struct resource octeon_pcie0_mem_resource = {
@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
 };
 
 static struct pci_ops octeon_pcie1_ops = {
-       octeon_pcie1_read_config,
-       octeon_pcie1_write_config,
+       .read = octeon_pcie1_read_config,
+       .write = octeon_pcie1_write_config,
 };
 
 static struct resource octeon_pcie1_mem_resource = {
@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
 };
 
 static struct pci_ops octeon_dummy_ops = {
-       octeon_dummy_read_config,
-       octeon_dummy_write_config,
+       .read = octeon_dummy_read_config,
+       .write = octeon_dummy_write_config,
 };
 
 static struct resource octeon_dummy_mem_resource = {
index a2358b44420c5ae27b4254f4cba8248ab3c2c2c7..7cead4f0cd7646fa7b3ac47bc6f19f1bd4304427 100644 (file)
@@ -187,9 +187,9 @@ void
 cont_nmi_dump(void)
 {
 #ifndef REAL_NMI_SIGNAL
-       static atomic_t nmied_cpus = ATOMIC_INIT(0);
+       static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
 
-       atomic_inc(&nmied_cpus);
+       atomic_inc_unchecked(&nmied_cpus);
 #endif
        /*
         * Only allow 1 cpu to proceed
@@ -233,7 +233,7 @@ cont_nmi_dump(void)
                udelay(10000);
        }
 #else
-       while (atomic_read(&nmied_cpus) != num_online_cpus());
+       while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
 #endif
 
        /*
index a046b302623e0dd43d42fc425f58dec843c80db6..679952752ac7c2daeb7b6633dd21bdc2bd4b54e9 100644 (file)
@@ -270,7 +270,7 @@ spurious_8259A_irq:
                               "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
                        spurious_irq_mask |= irqmask;
                }
-               atomic_inc(&irq_err_count);
+               atomic_inc_unchecked(&irq_err_count);
                /*
                 * Theoretically we do not have to handle this IRQ,
                 * but in Linux this does not cause problems and is
index 41e873bc84747ecd05679585ea9cfdaaf2e9c3c0..34d33a769dbb6925345cbf351cb5f473e8c5f8f3 100644 (file)
@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
 
        printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
 
-       atomic_inc(&irq_err_count);
+       atomic_inc_unchecked(&irq_err_count);
 
        return -1;
 }
index ae0e4ee6c61728b7a609451023e28a1302ef7c37..e8f0692361e1d800e0cbc406cc2f244c74ba650a 100644 (file)
@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
        irq_cascade_t *cascade;
 
        if (irq >= NR_IRQS) {
-               atomic_inc(&irq_err_count);
+               atomic_inc_unchecked(&irq_err_count);
                return;
        }
 
@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
                ret = cascade->get_irq(irq);
                irq = ret;
                if (ret < 0)
-                       atomic_inc(&irq_err_count);
+                       atomic_inc_unchecked(&irq_err_count);
                else
                        irq_dispatch(irq);
                if (!irqd_irq_disabled(idata) && chip->irq_unmask)
index 967d144f307e5572d24b06e78ff50298366ce331..db12197e6185aa2159d05891c81b09092972765e 100644 (file)
 #ifndef _ASM_PROC_CACHE_H
 #define _ASM_PROC_CACHE_H
 
+#include <linux/const.h>
+
 /* L1 cache */
 
 #define L1_CACHE_NWAYS         4       /* number of ways in caches */
 #define L1_CACHE_NENTRIES      256     /* number of entries in each way */
-#define L1_CACHE_BYTES         16      /* bytes per entry */
 #define L1_CACHE_SHIFT         4       /* shift for bytes per entry */
+#define L1_CACHE_BYTES         (_AC(1,UL) << L1_CACHE_SHIFT)   /* bytes per entry */
 #define L1_CACHE_WAYDISP       0x1000  /* displacement of one way from the next */
 
 #define L1_CACHE_TAG_VALID     0x00000001      /* cache tag valid bit */
index bcb5df2d892f99c76012c491707a64223fa61784..84fabd26cbf807cd84d21c1a89cfb5c8beac6dec 100644 (file)
 #ifndef _ASM_PROC_CACHE_H
 #define _ASM_PROC_CACHE_H
 
+#include <linux/const.h>
+
 /*
  * L1 cache
  */
 #define L1_CACHE_NWAYS         4               /* number of ways in caches */
 #define L1_CACHE_NENTRIES      128             /* number of entries in each way */
-#define L1_CACHE_BYTES         32              /* bytes per entry */
 #define L1_CACHE_SHIFT         5               /* shift for bytes per entry */
+#define L1_CACHE_BYTES         (_AC(1,UL) << L1_CACHE_SHIFT)   /* bytes per entry */
 #define L1_CACHE_WAYDISP       0x1000          /* distance from one way to the next */
 
 #define L1_CACHE_TAG_VALID     0x00000001      /* cache tag valid bit */
index 4ce7a01a252dc6a2c191917e2516a5ffe27aa1d1..449202aa23d0ed73f89bf0428e9a26597d691b52 100644 (file)
 #ifndef __ASM_OPENRISC_CACHE_H
 #define __ASM_OPENRISC_CACHE_H
 
+#include <linux/const.h>
+
 /* FIXME: How can we replace these with values from the CPU...
  * they shouldn't be hard-coded!
  */
 
-#define L1_CACHE_BYTES 16
 #define L1_CACHE_SHIFT 4
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #endif /* __ASM_OPENRISC_CACHE_H */
index 226f8ca993f69372016e634e345cf1d3a139cdb5..9d9b87d45c2680be1095094932c5779a66bf06a8 100644 (file)
@@ -273,6 +273,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
        return dec;
 }
 
+#define atomic64_read_unchecked(v)             atomic64_read(v)
+#define atomic64_set_unchecked(v, i)           atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v)           atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v)    atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v)           atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v)              atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v)       atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v)              atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n)    atomic64_cmpxchg((v), (o), (n))
+
 #endif /* !CONFIG_64BIT */
 
 
index 47f11c707b655c6568fef6a57717ca5f4df34ae1..3420df2b61e03f098b761406d85c599d6634649b 100644 (file)
@@ -5,6 +5,7 @@
 #ifndef __ARCH_PARISC_CACHE_H
 #define __ARCH_PARISC_CACHE_H
 
+#include <linux/const.h>
 
 /*
  * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
  * just ruin performance.
  */
 #ifdef CONFIG_PA20
-#define L1_CACHE_BYTES 64
 #define L1_CACHE_SHIFT 6
 #else
-#define L1_CACHE_BYTES 32
 #define L1_CACHE_SHIFT 5
 #endif
 
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
 #ifndef __ASSEMBLY__
 
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
index 3391d061eccc02fb64ebff9211cb9146c9f50a1c..c23a2cc3570a8452a10c87b77e7af0d4c7a7dde2 100644 (file)
@@ -342,6 +342,13 @@ struct pt_regs;    /* forward declaration... */
 
 #define ELF_ET_DYN_BASE         (TASK_UNMAPPED_BASE + 0x01000000)
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE    0x10000UL
+
+#define PAX_DELTA_MMAP_LEN     16
+#define PAX_DELTA_STACK_LEN    16
+#endif
+
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports.  This could be done in user space,
    but it's not easy, and we've already done it here.  */
index f213f5b4c4239b961e260c659447886a8646f0bd..0af3e8ee95ebdbcca47171b21cdbe925b8735152 100644 (file)
@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
                        (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
 }
 
+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+       pgd_populate(mm, pgd, pmd);
+}
+
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
        pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 #define pmd_alloc_one(mm, addr)                ({ BUG(); ((pmd_t *)2); })
 #define pmd_free(mm, x)                        do { } while (0)
 #define pgd_populate(mm, pmd, pte)     BUG()
+#define pgd_populate_kernel(mm, pmd, pte)      BUG()
 
 #endif
 
index 22b89d1edba7a9d24939fa824a378e920dc61755..ce34230ad07a38c81960f664d2cbbe811676af97 100644 (file)
@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
 #define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
 #define PAGE_COPY       PAGE_EXECREAD
 #define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+# define PAGE_SHARED_NOEXEC    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
+# define PAGE_COPY_NOEXEC      __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
+# define PAGE_READONLY_NOEXEC  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
+#else
+# define PAGE_SHARED_NOEXEC    PAGE_SHARED
+# define PAGE_COPY_NOEXEC      PAGE_COPY
+# define PAGE_READONLY_NOEXEC  PAGE_READONLY
+#endif
+
 #define PAGE_KERNEL    __pgprot(_PAGE_KERNEL)
 #define PAGE_KERNEL_EXEC       __pgprot(_PAGE_KERNEL_EXEC)
 #define PAGE_KERNEL_RWX        __pgprot(_PAGE_KERNEL_RWX)
index a5cb070b54bf4ade7cce44e50f763a5223991e83..8604ddcf3427e2cab53a24470a0ccc59243b2787 100644 (file)
@@ -243,10 +243,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
                                           const void __user *from,
                                           unsigned long n)
 {
-        int sz = __compiletime_object_size(to);
+        size_t sz = __compiletime_object_size(to);
         int ret = -EFAULT;
 
-        if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
+        if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
                 ret = __copy_from_user(to, from, n);
         else
                 copy_from_user_overflow();
index 5822e8e200e6be1ab110b15baf94d4581624099c..bc5e63850c3dcb02216b8f8c56c5246f62df7f89 100644 (file)
 
 /* three functions to determine where in the module core
  * or init pieces the location is */
+static inline int in_init_rx(struct module *me, void *loc)
+{
+       return (loc >= me->module_init_rx &&
+               loc < (me->module_init_rx + me->init_size_rx));
+}
+
+static inline int in_init_rw(struct module *me, void *loc)
+{
+       return (loc >= me->module_init_rw &&
+               loc < (me->module_init_rw + me->init_size_rw));
+}
+
 static inline int in_init(struct module *me, void *loc)
 {
-       return (loc >= me->module_init &&
-               loc <= (me->module_init + me->init_size));
+       return in_init_rx(me, loc) || in_init_rw(me, loc);
+}
+
+static inline int in_core_rx(struct module *me, void *loc)
+{
+       return (loc >= me->module_core_rx &&
+               loc < (me->module_core_rx + me->core_size_rx));
+}
+
+static inline int in_core_rw(struct module *me, void *loc)
+{
+       return (loc >= me->module_core_rw &&
+               loc < (me->module_core_rw + me->core_size_rw));
 }
 
 static inline int in_core(struct module *me, void *loc)
 {
-       return (loc >= me->module_core &&
-               loc <= (me->module_core + me->core_size));
+       return in_core_rx(me, loc) || in_core_rw(me, loc);
 }
 
 static inline int in_local(struct module *me, void *loc)
@@ -367,13 +389,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
        }
 
        /* align things a bit */
-       me->core_size = ALIGN(me->core_size, 16);
-       me->arch.got_offset = me->core_size;
-       me->core_size += gots * sizeof(struct got_entry);
+       me->core_size_rw = ALIGN(me->core_size_rw, 16);
+       me->arch.got_offset = me->core_size_rw;
+       me->core_size_rw += gots * sizeof(struct got_entry);
 
-       me->core_size = ALIGN(me->core_size, 16);
-       me->arch.fdesc_offset = me->core_size;
-       me->core_size += fdescs * sizeof(Elf_Fdesc);
+       me->core_size_rw = ALIGN(me->core_size_rw, 16);
+       me->arch.fdesc_offset = me->core_size_rw;
+       me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
 
        me->arch.got_max = gots;
        me->arch.fdesc_max = fdescs;
@@ -391,7 +413,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
 
        BUG_ON(value == 0);
 
-       got = me->module_core + me->arch.got_offset;
+       got = me->module_core_rw + me->arch.got_offset;
        for (i = 0; got[i].addr; i++)
                if (got[i].addr == value)
                        goto out;
@@ -409,7 +431,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
 #ifdef CONFIG_64BIT
 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
 {
-       Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
+       Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
 
        if (!value) {
                printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
@@ -427,7 +449,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
 
        /* Create new one */
        fdesc->addr = value;
-       fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
+       fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
        return (Elf_Addr)fdesc;
 }
 #endif /* CONFIG_64BIT */
@@ -839,7 +861,7 @@ register_unwind_table(struct module *me,
 
        table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
        end = table + sechdrs[me->arch.unwind_section].sh_size;
-       gp = (Elf_Addr)me->module_core + me->arch.got_offset;
+       gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
 
        DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
               me->arch.unwind_section, table, end, gp);
index e1ffea2f9a0b05ccda844969dcb7c519ab17077a..46ed66e16de8897a29c9ee17c3d85fd96de5c3b5 100644 (file)
@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long task_size = TASK_SIZE;
        int do_color_align, last_mmap;
        struct vm_unmapped_area_info info;
+       unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
 
        if (len > task_size)
                return -ENOMEM;
@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                goto found_addr;
        }
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        if (addr) {
                if (do_color_align && last_mmap)
                        addr = COLOR_ALIGN(addr, last_mmap, pgoff);
@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        info.high_limit = mmap_upper_limit();
        info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
        info.align_offset = shared_align_offset(last_mmap, pgoff);
+       info.threadstack_offset = offset;
        addr = vm_unmapped_area(&info);
 
 found_addr:
@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        unsigned long addr = addr0;
        int do_color_align, last_mmap;
        struct vm_unmapped_area_info info;
+       unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
 
 #ifdef CONFIG_64BIT
        /* This should only ever run for 32-bit processes.  */
@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        }
 
        /* requesting a specific address */
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        if (addr) {
                if (do_color_align && last_mmap)
                        addr = COLOR_ALIGN(addr, last_mmap, pgoff);
@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        info.high_limit = mm->mmap_base;
        info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
        info.align_offset = shared_align_offset(last_mmap, pgoff);
+       info.threadstack_offset = offset;
        addr = vm_unmapped_area(&info);
        if (!(addr & ~PAGE_MASK))
                goto found_addr;
@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
        mm->mmap_legacy_base = mmap_legacy_base();
        mm->mmap_base = mmap_upper_limit();
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (mm->pax_flags & MF_PAX_RANDMMAP) {
+               mm->mmap_legacy_base += mm->delta_mmap;
+               mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
+       }
+#endif
+
        if (mmap_is_legacy()) {
                mm->mmap_base = mm->mmap_legacy_base;
                mm->get_unmapped_area = arch_get_unmapped_area;
index 47ee620d15d27850ab8ebac1f739dfd3215dae9b..110738769d080b54fe72517e65bf31a849155def 100644 (file)
@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 
                        down_read(&current->mm->mmap_sem);
                        vma = find_vma(current->mm,regs->iaoq[0]);
-                       if (vma && (regs->iaoq[0] >= vma->vm_start)
-                               && (vma->vm_flags & VM_EXEC)) {
-
+                       if (vma && (regs->iaoq[0] >= vma->vm_start)) {
                                fault_address = regs->iaoq[0];
                                fault_space = regs->iasq[0];
 
index e5120e653240c4fa52d4895c7d1d206d3d12e68c..8ddb5cc88f5e0f241ae89a1150e1d49ee84fc481 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/unistd.h>
 
 #include <asm/uaccess.h>
 #include <asm/traps.h>
@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
 static unsigned long
 parisc_acctyp(unsigned long code, unsigned int inst)
 {
-       if (code == 6 || code == 16)
+       if (code == 6 || code == 7 || code == 16)
            return VM_EXEC;
 
        switch (inst & 0xf0000000) {
@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
                        }
 #endif
 
+#ifdef CONFIG_PAX_PAGEEXEC
+/*
+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
+ *
+ * returns 1 when task should be killed
+ *         2 when rt_sigreturn trampoline was detected
+ *         3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+       int err;
+
+       do { /* PaX: unpatched PLT emulation */
+               unsigned int bl, depwi;
+
+               err = get_user(bl, (unsigned int *)instruction_pointer(regs));
+               err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
+
+               if (err)
+                       break;
+
+               if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
+                       unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
+
+                       err = get_user(ldw, (unsigned int *)addr);
+                       err |= get_user(bv, (unsigned int *)(addr+4));
+                       err |= get_user(ldw2, (unsigned int *)(addr+8));
+
+                       if (err)
+                               break;
+
+                       if (ldw == 0x0E801096U &&
+                           bv == 0xEAC0C000U &&
+                           ldw2 == 0x0E881095U)
+                       {
+                               unsigned int resolver, map;
+
+                               err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
+                               err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
+                               if (err)
+                                       break;
+
+                               regs->gr[20] = instruction_pointer(regs)+8;
+                               regs->gr[21] = map;
+                               regs->gr[22] = resolver;
+                               regs->iaoq[0] = resolver | 3UL;
+                               regs->iaoq[1] = regs->iaoq[0] + 4;
+                               return 3;
+                       }
+               }
+       } while (0);
+#endif
+
+#ifdef CONFIG_PAX_EMUTRAMP
+
+#ifndef CONFIG_PAX_EMUSIGRT
+       if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
+               return 1;
+#endif
+
+       do { /* PaX: rt_sigreturn emulation */
+               unsigned int ldi1, ldi2, bel, nop;
+
+               err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
+               err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
+               err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
+               err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
+
+               if (err)
+                       break;
+
+               if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
+                   ldi2 == 0x3414015AU &&
+                   bel == 0xE4008200U &&
+                   nop == 0x08000240U)
+               {
+                       regs->gr[25] = (ldi1 & 2) >> 1;
+                       regs->gr[20] = __NR_rt_sigreturn;
+                       regs->gr[31] = regs->iaoq[1] + 16;
+                       regs->sr[0] = regs->iasq[1];
+                       regs->iaoq[0] = 0x100UL;
+                       regs->iaoq[1] = regs->iaoq[0] + 4;
+                       regs->iasq[0] = regs->sr[2];
+                       regs->iasq[1] = regs->sr[2];
+                       return 2;
+               }
+       } while (0);
+#endif
+
+       return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+       unsigned long i;
+
+       printk(KERN_ERR "PAX: bytes at PC: ");
+       for (i = 0; i < 5; i++) {
+               unsigned int c;
+               if (get_user(c, (unsigned int *)pc+i))
+                       printk(KERN_CONT "???????? ");
+               else
+                       printk(KERN_CONT "%08x ", c);
+       }
+       printk("\n");
+}
+#endif
+
 int fixup_exception(struct pt_regs *regs)
 {
        const struct exception_table_entry *fix;
@@ -234,8 +345,33 @@ retry:
 
 good_area:
 
-       if ((vma->vm_flags & acc_type) != acc_type)
+       if ((vma->vm_flags & acc_type) != acc_type) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+               if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
+                   (address & ~3UL) == instruction_pointer(regs))
+               {
+                       up_read(&mm->mmap_sem);
+                       switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_PAX_EMUPLT
+                       case 3:
+                               return;
+#endif
+
+#ifdef CONFIG_PAX_EMUTRAMP
+                       case 2:
+                               return;
+#endif
+
+                       }
+                       pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
+                       do_group_exit(SIGKILL);
+               }
+#endif
+
                goto bad_area;
+       }
 
        /*
         * If for any reason at all we couldn't handle the fault, make
index a2a168e2dfe757c02a6935a4a58dfc83af7f86bd..e48468264c8def929c6cb2e21d3f7a5ba3fc99a1 100644 (file)
@@ -408,6 +408,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
 config KEXEC
        bool "kexec system call"
        depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
+       depends on !GRKERNSEC_KMEM
        help
          kexec is a system call that implements the ability to shutdown your
          current kernel, and to start another kernel.  It is like a reboot
index 512d2782b043ddc506c865028b1e7dc54a871b75..d31fadd9b4bb14bc0cb2b661a01234874e55dfb1 100644 (file)
 
 #define ATOMIC_INIT(i)         { (i) }
 
+#define _ASM_EXTABLE(from, to)                 \
+"      .section        __ex_table,\"a\"\n"     \
+       PPC_LONG"       " #from ", " #to"\n"    \
+"      .previous\n"
+
 static __inline__ int atomic_read(const atomic_t *v)
 {
        int t;
@@ -21,39 +26,80 @@ static __inline__ int atomic_read(const atomic_t *v)
        return t;
 }
 
+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+       int t;
+
+       __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
+
+       return t;
+}
+
 static __inline__ void atomic_set(atomic_t *v, int i)
 {
        __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
 }
 
-#define ATOMIC_OP(op, asm_op)                                          \
-static __inline__ void atomic_##op(int a, atomic_t *v)                 \
+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+       __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+}
+
+#ifdef CONFIG_PAX_REFCOUNT
+#define __REFCOUNT_OP(op) op##o.
+#define __OVERFLOW_PRE                 \
+       "       mcrxr   cr0\n"
+#define __OVERFLOW_POST                        \
+       "       bf 4*cr0+so, 3f\n"      \
+       "2:     .long 0x00c00b00\n"     \
+       "3:\n"
+#define __OVERFLOW_EXTABLE \
+       "\n4:\n"
+       _ASM_EXTABLE(2b, 4b)
+#else
+#define __REFCOUNT_OP(op) op
+#define __OVERFLOW_PRE
+#define __OVERFLOW_POST
+#define __OVERFLOW_EXTABLE
+#endif
+
+#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable)      \
+static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v)   \
 {                                                                      \
        int t;                                                          \
                                                                        \
        __asm__ __volatile__(                                           \
-"1:    lwarx   %0,0,%3         # atomic_" #op "\n"                     \
+"1:    lwarx   %0,0,%3         # atomic_" #op #suffix "\n"             \
+       pre_op                                                          \
        #asm_op " %0,%2,%0\n"                                           \
+       post_op                                                         \
        PPC405_ERR77(0,%3)                                              \
 "      stwcx.  %0,0,%3 \n"                                             \
 "      bne-    1b\n"                                                   \
+       extable                                                         \
        : "=&r" (t), "+m" (v->counter)                                  \
        : "r" (a), "r" (&v->counter)                                    \
        : "cc");                                                        \
 }                                                                      \
 
-#define ATOMIC_OP_RETURN(op, asm_op)                                   \
-static __inline__ int atomic_##op##_return(int a, atomic_t *v)         \
+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , )          \
+                             __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
+
+#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
+static inline int atomic_##op##_return##suffix(int a, atomic##suffix##_t *v)\
 {                                                                      \
        int t;                                                          \
                                                                        \
        __asm__ __volatile__(                                           \
        PPC_ATOMIC_ENTRY_BARRIER                                        \
-"1:    lwarx   %0,0,%2         # atomic_" #op "_return\n"              \
+"1:    lwarx   %0,0,%2         # atomic_" #op "_return" #suffix "\n"   \
+       pre_op                                                          \
        #asm_op " %0,%1,%0\n"                                           \
+       post_op                                                         \
        PPC405_ERR77(0,%2)                                              \
 "      stwcx.  %0,0,%2 \n"                                             \
 "      bne-    1b\n"                                                   \
+       extable                                                         \
        PPC_ATOMIC_EXIT_BARRIER                                         \
        : "=&r" (t)                                                     \
        : "r" (a), "r" (&v->counter)                                    \
@@ -62,6 +108,9 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v)               \
        return t;                                                       \
 }
 
+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
+                                    __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
+
 #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
 
 ATOMIC_OPS(add, add)
@@ -69,42 +118,29 @@ ATOMIC_OPS(sub, subf)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
+#undef __ATOMIC_OP_RETURN
 #undef ATOMIC_OP
+#undef __ATOMIC_OP
 
 #define atomic_add_negative(a, v)      (atomic_add_return((a), (v)) < 0)
 
-static __inline__ void atomic_inc(atomic_t *v)
-{
-       int t;
+/*
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Automatically increments @v by 1
+ */
+#define atomic_inc(v) atomic_add(1, (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
 
-       __asm__ __volatile__(
-"1:    lwarx   %0,0,%2         # atomic_inc\n\
-       addic   %0,%0,1\n"
-       PPC405_ERR77(0,%2)
-"      stwcx.  %0,0,%2 \n\
-       bne-    1b"
-       : "=&r" (t), "+m" (v->counter)
-       : "r" (&v->counter)
-       : "cc", "xer");
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+       atomic_add_unchecked(1, v);
 }
 
-static __inline__ int atomic_inc_return(atomic_t *v)
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
 {
-       int t;
-
-       __asm__ __volatile__(
-       PPC_ATOMIC_ENTRY_BARRIER
-"1:    lwarx   %0,0,%1         # atomic_inc_return\n\
-       addic   %0,%0,1\n"
-       PPC405_ERR77(0,%1)
-"      stwcx.  %0,0,%1 \n\
-       bne-    1b"
-       PPC_ATOMIC_EXIT_BARRIER
-       : "=&r" (t)
-       : "r" (&v->counter)
-       : "cc", "xer", "memory");
-
-       return t;
+       return atomic_add_return_unchecked(1, v);
 }
 
 /*
@@ -117,43 +153,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
  */
 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
 
-static __inline__ void atomic_dec(atomic_t *v)
+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
 {
-       int t;
-
-       __asm__ __volatile__(
-"1:    lwarx   %0,0,%2         # atomic_dec\n\
-       addic   %0,%0,-1\n"
-       PPC405_ERR77(0,%2)\
-"      stwcx.  %0,0,%2\n\
-       bne-    1b"
-       : "=&r" (t), "+m" (v->counter)
-       : "r" (&v->counter)
-       : "cc", "xer");
+       return atomic_add_return_unchecked(1, v) == 0;
 }
 
-static __inline__ int atomic_dec_return(atomic_t *v)
-{
-       int t;
-
-       __asm__ __volatile__(
-       PPC_ATOMIC_ENTRY_BARRIER
-"1:    lwarx   %0,0,%1         # atomic_dec_return\n\
-       addic   %0,%0,-1\n"
-       PPC405_ERR77(0,%1)
-"      stwcx.  %0,0,%1\n\
-       bne-    1b"
-       PPC_ATOMIC_EXIT_BARRIER
-       : "=&r" (t)
-       : "r" (&v->counter)
-       : "cc", "xer", "memory");
+/* 
+ * atomic_dec - decrement atomic variable
+ * @v: pointer of type atomic_t
+ * 
+ * Atomically decrements @v by 1
+ */
+#define atomic_dec(v) atomic_sub(1, (v))
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
 
-       return t;
+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+       atomic_sub_unchecked(1, v);
 }
 
 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
+{
+       return cmpxchg(&(v->counter), old, new);
+}
+
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) 
+{
+       return xchg(&(v->counter), new);
+}
+
 /**
  * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
@@ -171,11 +202,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
        PPC_ATOMIC_ENTRY_BARRIER
 "1:    lwarx   %0,0,%1         # __atomic_add_unless\n\
        cmpw    0,%0,%3 \n\
-       beq-    2f \n\
-       add     %0,%2,%0 \n"
+       beq-    2f \n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"      mcrxr   cr0\n"
+"      addo.   %0,%2,%0\n"
+"      bf 4*cr0+so, 4f\n"
+"3:.long " "0x00c00b00""\n"
+"4:\n"
+#else
+       "add    %0,%2,%0 \n"
+#endif
+
        PPC405_ERR77(0,%2)
 "      stwcx.  %0,0,%1 \n\
        bne-    1b \n"
+"5:"
+
+#ifdef CONFIG_PAX_REFCOUNT
+       _ASM_EXTABLE(3b, 5b)
+#endif
+
        PPC_ATOMIC_EXIT_BARRIER
 "      subf    %0,%2,%0 \n\
 2:"
@@ -248,6 +295,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 }
 #define atomic_dec_if_positive atomic_dec_if_positive
 
+#define smp_mb__before_atomic_dec()     smp_mb()
+#define smp_mb__after_atomic_dec()      smp_mb()
+#define smp_mb__before_atomic_inc()     smp_mb()
+#define smp_mb__after_atomic_inc()      smp_mb()
+
 #ifdef __powerpc64__
 
 #define ATOMIC64_INIT(i)       { (i) }
@@ -261,37 +313,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
        return t;
 }
 
+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+       long t;
+
+       __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
+
+       return t;
+}
+
 static __inline__ void atomic64_set(atomic64_t *v, long i)
 {
        __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
 }
 
-#define ATOMIC64_OP(op, asm_op)                                                \
-static __inline__ void atomic64_##op(long a, atomic64_t *v)            \
+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
+{
+       __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+}
+
+#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable)    \
+static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
 {                                                                      \
        long t;                                                         \
                                                                        \
        __asm__ __volatile__(                                           \
 "1:    ldarx   %0,0,%3         # atomic64_" #op "\n"                   \
+       pre_op                                                          \
        #asm_op " %0,%2,%0\n"                                           \
+       post_op                                                         \
 "      stdcx.  %0,0,%3 \n"                                             \
 "      bne-    1b\n"                                                   \
+       extable                                                         \
        : "=&r" (t), "+m" (v->counter)                                  \
        : "r" (a), "r" (&v->counter)                                    \
        : "cc");                                                        \
 }
 
-#define ATOMIC64_OP_RETURN(op, asm_op)                                 \
-static __inline__ long atomic64_##op##_return(long a, atomic64_t *v)   \
+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , )              \
+                               __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
+
+#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
+static inline long atomic64_##op##_return##suffix(long a, atomic64##suffix##_t *v)\
 {                                                                      \
        long t;                                                         \
                                                                        \
        __asm__ __volatile__(                                           \
        PPC_ATOMIC_ENTRY_BARRIER                                        \
 "1:    ldarx   %0,0,%2         # atomic64_" #op "_return\n"            \
+       pre_op                                                          \
        #asm_op " %0,%1,%0\n"                                           \
+       post_op                                                         \
 "      stdcx.  %0,0,%2 \n"                                             \
 "      bne-    1b\n"                                                   \
+       extable                                                         \
        PPC_ATOMIC_EXIT_BARRIER                                         \
        : "=&r" (t)                                                     \
        : "r" (a), "r" (&v->counter)                                    \
@@ -300,6 +375,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v)        \
        return t;                                                       \
 }
 
+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
+                                      __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
+
 #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
 
 ATOMIC64_OPS(add, add)
@@ -307,40 +385,33 @@ ATOMIC64_OPS(sub, subf)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
+#undef __ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
+#undef __ATOMIC64_OP
+#undef __OVERFLOW_EXTABLE
+#undef __OVERFLOW_POST
+#undef __OVERFLOW_PRE
+#undef __REFCOUNT_OP
 
 #define atomic64_add_negative(a, v)    (atomic64_add_return((a), (v)) < 0)
 
-static __inline__ void atomic64_inc(atomic64_t *v)
-{
-       long t;
+/*
+ * atomic64_inc - increment atomic variable
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ */
+#define atomic64_inc(v) atomic64_add(1, (v))
+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
 
-       __asm__ __volatile__(
-"1:    ldarx   %0,0,%2         # atomic64_inc\n\
-       addic   %0,%0,1\n\
-       stdcx.  %0,0,%2 \n\
-       bne-    1b"
-       : "=&r" (t), "+m" (v->counter)
-       : "r" (&v->counter)
-       : "cc", "xer");
+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
+{
+       atomic64_add_unchecked(1, v);
 }
 
-static __inline__ long atomic64_inc_return(atomic64_t *v)
+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
 {
-       long t;
-
-       __asm__ __volatile__(
-       PPC_ATOMIC_ENTRY_BARRIER
-"1:    ldarx   %0,0,%1         # atomic64_inc_return\n\
-       addic   %0,%0,1\n\
-       stdcx.  %0,0,%1 \n\
-       bne-    1b"
-       PPC_ATOMIC_EXIT_BARRIER
-       : "=&r" (t)
-       : "r" (&v->counter)
-       : "cc", "xer", "memory");
-
-       return t;
+       return atomic64_add_return_unchecked(1, v);
 }
 
 /*
@@ -353,36 +424,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
  */
 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 
-static __inline__ void atomic64_dec(atomic64_t *v)
-{
-       long t;
-
-       __asm__ __volatile__(
-"1:    ldarx   %0,0,%2         # atomic64_dec\n\
-       addic   %0,%0,-1\n\
-       stdcx.  %0,0,%2\n\
-       bne-    1b"
-       : "=&r" (t), "+m" (v->counter)
-       : "r" (&v->counter)
-       : "cc", "xer");
-}
+/* 
+ * atomic64_dec - decrement atomic variable
+ * @v: pointer of type atomic64_t
+ * 
+ * Atomically decrements @v by 1
+ */
+#define atomic64_dec(v) atomic64_sub(1, (v))
+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
 
-static __inline__ long atomic64_dec_return(atomic64_t *v)
+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
 {
-       long t;
-
-       __asm__ __volatile__(
-       PPC_ATOMIC_ENTRY_BARRIER
-"1:    ldarx   %0,0,%1         # atomic64_dec_return\n\
-       addic   %0,%0,-1\n\
-       stdcx.  %0,0,%1\n\
-       bne-    1b"
-       PPC_ATOMIC_EXIT_BARRIER
-       : "=&r" (t)
-       : "r" (&v->counter)
-       : "cc", "xer", "memory");
-
-       return t;
+       atomic64_sub_unchecked(1, v);
 }
 
 #define atomic64_sub_and_test(a, v)    (atomic64_sub_return((a), (v)) == 0)
@@ -415,6 +468,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
+{
+       return cmpxchg(&(v->counter), old, new);
+}
+
+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new) 
+{
+       return xchg(&(v->counter), new);
+}
+
 /**
  * atomic64_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
@@ -430,13 +493,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 
        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
-"1:    ldarx   %0,0,%1         # __atomic_add_unless\n\
+"1:    ldarx   %0,0,%1         # atomic64_add_unless\n\
        cmpd    0,%0,%3 \n\
-       beq-    2f \n\
-       add     %0,%2,%0 \n"
+       beq-    2f \n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"      mcrxr   cr0\n"
+"      addo.   %0,%2,%0\n"
+"      bf 4*cr0+so, 4f\n"
+"3:.long " "0x00c00b00""\n"
+"4:\n"
+#else
+       "add    %0,%2,%0 \n"
+#endif
+
 "      stdcx.  %0,0,%1 \n\
        bne-    1b \n"
        PPC_ATOMIC_EXIT_BARRIER
+"5:"
+
+#ifdef CONFIG_PAX_REFCOUNT
+       _ASM_EXTABLE(3b, 5b)
+#endif
+
 "      subf    %0,%2,%0 \n\
 2:"
        : "=&r" (t)
index a3bf5be111ff1d073eb329476ce773c07ce32ed5..e03ba81c07ef69863f8c9db287d63825c658a176 100644 (file)
@@ -76,7 +76,7 @@
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
        smp_lwsync();                                                   \
-       ACCESS_ONCE(*p) = (v);                                          \
+       ACCESS_ONCE_RW(*p) = (v);                                       \
 } while (0)
 
 #define smp_load_acquire(p)                                            \
index 34a05a1a990bb1337e36a1161d805dcc63efaef3..a1f2c673872e6ea28893550e547bc026339ad713 100644 (file)
@@ -4,6 +4,7 @@
 #ifdef __KERNEL__
 
 #include <asm/reg.h>
+#include <linux/const.h>
 
 /* bytes per L1 cache line */
 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
@@ -23,7 +24,7 @@
 #define L1_CACHE_SHIFT         7
 #endif
 
-#define        L1_CACHE_BYTES          (1 << L1_CACHE_SHIFT)
+#define        L1_CACHE_BYTES          (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define        SMP_CACHE_BYTES         L1_CACHE_BYTES
 
index 57d289acb80325a063539eb8aa2ab92a6edefab0..b36c98cf503f21149cc54c083929c8702e48c966 100644 (file)
 
 #define ELF_ET_DYN_BASE        0x20000000
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE    (0x10000000UL)
+
+#ifdef __powerpc64__
+#define PAX_DELTA_MMAP_LEN     (is_32bit_task() ? 16 : 28)
+#define PAX_DELTA_STACK_LEN    (is_32bit_task() ? 16 : 28)
+#else
+#define PAX_DELTA_MMAP_LEN     15
+#define PAX_DELTA_STACK_LEN    15
+#endif
+#endif
+
 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
 
 /*
@@ -128,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
        (0x7ff >> (PAGE_SHIFT - 12)) : \
        (0x3ffff >> (PAGE_SHIFT - 12)))
 
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
-
 #ifdef CONFIG_SPU_BASE
 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
 #define NT_SPU         1
index 8196e9c7d7e8b7ba2e3ebe8bb97cb06524ca88af..d83a9f35f722893db9bcaa7eef375dace82d3ab9 100644 (file)
@@ -4,6 +4,6 @@
 #ifndef _ASM_POWERPC_EXEC_H
 #define _ASM_POWERPC_EXEC_H
 
-extern unsigned long arch_align_stack(unsigned long sp);
+#define arch_align_stack(x) ((x) & ~0xfUL)
 
 #endif /* _ASM_POWERPC_EXEC_H */
index 5acabbd7ac6f7816f707382d1b2ce6b53b52022e..7ea14fa618917fa217bb34ce0f6ba9b9981fb568 100644 (file)
@@ -10,7 +10,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#define KM_TYPE_NR 16
+#define KM_TYPE_NR 17
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
index b8da913638646e3b7f432ad52b2d59495415f7ca..c02b593ccc3bcb9a6c9c4f3e2d90fd09183c757d 100644 (file)
@@ -9,20 +9,64 @@ typedef struct
        atomic_long_t a;
 } local_t;
 
+typedef struct
+{
+       atomic_long_unchecked_t a;
+} local_unchecked_t;
+
 #define LOCAL_INIT(i)  { ATOMIC_LONG_INIT(i) }
 
 #define local_read(l)  atomic_long_read(&(l)->a)
+#define local_read_unchecked(l)        atomic_long_read_unchecked(&(l)->a)
 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
+#define local_set_unchecked(l,i)       atomic_long_set_unchecked(&(l)->a, (i))
 
 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
+#define local_add_unchecked(i,l)       atomic_long_add_unchecked((i),(&(l)->a))
 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
+#define local_sub_unchecked(i,l)       atomic_long_sub_unchecked((i),(&(l)->a))
 #define local_inc(l)   atomic_long_inc(&(l)->a)
+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
 #define local_dec(l)   atomic_long_dec(&(l)->a)
+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
 
 static __inline__ long local_add_return(long a, local_t *l)
 {
        long t;
 
+       __asm__ __volatile__(
+"1:"   PPC_LLARX(%0,0,%2,0) "                  # local_add_return\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"      mcrxr   cr0\n"
+"      addo.   %0,%1,%0\n"
+"      bf 4*cr0+so, 3f\n"
+"2:.long " "0x00c00b00""\n"
+#else
+"      add     %0,%1,%0\n"
+#endif
+
+"3:\n"
+       PPC405_ERR77(0,%2)
+       PPC_STLCX       "%0,0,%2 \n\
+       bne-    1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+       _ASM_EXTABLE(2b, 4b)
+#endif
+
+       : "=&r" (t)
+       : "r" (a), "r" (&(l->a.counter))
+       : "cc", "memory");
+
+       return t;
+}
+
+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
+{
+       long t;
+
        __asm__ __volatile__(
 "1:"   PPC_LLARX(%0,0,%2,0) "                  # local_add_return\n\
        add     %0,%1,%0\n"
@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
 
 #define local_cmpxchg(l, o, n) \
        (cmpxchg_local(&((l)->a.counter), (o), (n)))
+#define local_cmpxchg_unchecked(l, o, n) \
+       (cmpxchg_local(&((l)->a.counter), (o), (n)))
 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
 
 /**
index 8565c254151abed6bd3b698551c86035201c28b4..28651902af1492ee87f57713cab7422d26ec058a 100644 (file)
@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
 }
 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
 
-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
 {
        return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
 }
index 69c059887a2c0def2a13f566c385c3eca97c0a60..2c56964d0f915a748ca0956e571217bb7d1873c6 100644 (file)
@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
  * and needs to be executable.  This means the whole heap ends
  * up being executable.
  */
-#define VM_DATA_DEFAULT_FLAGS32        (VM_READ | VM_WRITE | VM_EXEC | \
-                                VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_DEFAULT_FLAGS32 \
+       (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
+        VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 #define VM_DATA_DEFAULT_FLAGS64        (VM_READ | VM_WRITE | \
                                 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
 #define is_kernel_addr(x)      ((x) >= PAGE_OFFSET)
 #endif
 
+#define ktla_ktva(addr)                (addr)
+#define ktva_ktla(addr)                (addr)
+
 #ifndef CONFIG_PPC_BOOK3S_64
 /*
  * Use the top bit of the higher-level page table entries to indicate whether
index d908a46d05c0b1be8bbb5b35a90425465fd5d3aa..3753f7195a688e9731bf24fe69d3a853698633bb 100644 (file)
@@ -172,15 +172,18 @@ do {                                              \
  * stack by default, so in the absence of a PT_GNU_STACK program header
  * we turn execute permission off.
  */
-#define VM_STACK_DEFAULT_FLAGS32       (VM_READ | VM_WRITE | VM_EXEC | \
-                                        VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_STACK_DEFAULT_FLAGS32 \
+       (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
+        VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 #define VM_STACK_DEFAULT_FLAGS64       (VM_READ | VM_WRITE | \
                                         VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
+#ifndef CONFIG_PAX_PAGEEXEC
 #define VM_STACK_DEFAULT_FLAGS \
        (is_32bit_task() ? \
         VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
+#endif
 
 #include <asm-generic/getorder.h>
 
index 4b0be20fcbfdeee22498ea67f7a6b2adb3b55213..c15a27d68392f08e6a8996027f378df2f00ddf5f 100644 (file)
@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #ifndef CONFIG_PPC_64K_PAGES
 
 #define pgd_populate(MM, PGD, PUD)     pgd_set(PGD, PUD)
+#define pgd_populate_kernel(MM, PGD, PUD)      pgd_populate((MM), (PGD), (PUD))
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
        pud_set(pud, (unsigned long)pmd);
 }
 
+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+       pud_populate(mm, pud, pmd);
+}
+
 #define pmd_populate(mm, pmd, pte_page) \
        pmd_populate_kernel(mm, pmd, page_address(pte_page))
 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
 #endif
 
 #define pud_populate(mm, pud, pmd)     pud_set(pud, (unsigned long)pmd)
+#define pud_populate_kernel(mm, pud, pmd)      pud_populate((mm), (pud), (pmd))
 
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
                                       pte_t *pte)
index a8805fee0df91bffdcdc4560e0b259985df2f930..6d6961789b0d0611eea8e2a7d9ba12be14f48922 100644 (file)
@@ -2,6 +2,7 @@
 #define _ASM_POWERPC_PGTABLE_H
 #ifdef __KERNEL__
 
+#include <linux/const.h>
 #ifndef __ASSEMBLY__
 #include <linux/mmdebug.h>
 #include <linux/mmzone.h>
index 4aad4132d0a87fa3e677ebe00b05d0d317fe7292..85d86bfe0e1963f65cb8cb92629855182879206a 100644 (file)
@@ -21,6 +21,7 @@
 #define _PAGE_FILE     0x004   /* when !present: nonlinear file mapping */
 #define _PAGE_USER     0x004   /* usermode access allowed */
 #define _PAGE_GUARDED  0x008   /* G: prohibit speculative access */
+#define _PAGE_EXEC     _PAGE_GUARDED
 #define _PAGE_COHERENT 0x010   /* M: enforce memory coherence (SMP systems) */
 #define _PAGE_NO_CACHE 0x020   /* I: cache inhibit */
 #define _PAGE_WRITETHRU        0x040   /* W: cache write-through */
index 1c874fb533bbf22fe8ff019b328ae623214243bb..e8480a45b2018815cb11ae9a9d73d9de0775230b 100644 (file)
 #define SPRN_DBCR      0x136   /* e300 Data Breakpoint Control Reg */
 #define SPRN_DSISR     0x012   /* Data Storage Interrupt Status Register */
 #define   DSISR_NOHPTE         0x40000000      /* no translation found */
+#define   DSISR_GUARDED                0x10000000      /* fetch from guarded storage */
 #define   DSISR_PROTFAULT      0x08000000      /* protection fault */
 #define   DSISR_ISSTORE                0x02000000      /* access was a store */
 #define   DSISR_DABRMATCH      0x00400000      /* hit data breakpoint */
index 5a6614a7f0b23b45c65caaddf690d3c24b6effc0..d89995d1f05177e31e1b22a9fe8fcc057c78af12 100644 (file)
@@ -51,7 +51,7 @@ struct smp_ops_t {
        int   (*cpu_disable)(void);
        void  (*cpu_die)(unsigned int nr);
        int   (*cpu_bootable)(unsigned int nr);
-};
+} __no_const;
 
 extern void smp_send_debugger_break(void);
 extern void start_secondary_resume(void);
index 4dbe072eecbefea4482d65af5291d2cf8c74e255..b80327543bef34959d58003f9f6315ddd326aadf 100644 (file)
@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
        __asm__ __volatile__(
 "1:    " PPC_LWARX(%0,0,%1,1) "\n"
        __DO_SIGN_EXTEND
-"      addic.          %0,%0,1\n\
-       ble-            2f\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"      mcrxr   cr0\n"
+"      addico.         %0,%0,1\n"
+"      bf 4*cr0+so, 3f\n"
+"2:.long " "0x00c00b00""\n"
+#else
+"      addic.          %0,%0,1\n"
+#endif
+
+"3:\n"
+       "ble-           4f\n"
        PPC405_ERR77(0,%1)
 "      stwcx.          %0,0,%1\n\
        bne-            1b\n"
        PPC_ACQUIRE_BARRIER
-"2:"   : "=&r" (tmp)
+"4:"   
+
+#ifdef CONFIG_PAX_REFCOUNT
+       _ASM_EXTABLE(2b,4b)
+#endif
+
+       : "=&r" (tmp)
        : "r" (&rw->lock)
        : "cr0", "xer", "memory");
 
@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
        __asm__ __volatile__(
        "# read_unlock\n\t"
        PPC_RELEASE_BARRIER
-"1:    lwarx           %0,0,%1\n\
-       addic           %0,%0,-1\n"
+"1:    lwarx           %0,0,%1\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"      mcrxr   cr0\n"
+"      addico.         %0,%0,-1\n"
+"      bf 4*cr0+so, 3f\n"
+"2:.long " "0x00c00b00""\n"
+#else
+"      addic.          %0,%0,-1\n"
+#endif
+
+"3:\n"
        PPC405_ERR77(0,%1)
 "      stwcx.          %0,0,%1\n\
        bne-            1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+       _ASM_EXTABLE(2b, 4b)
+#endif
+
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "xer", "memory");
index 0be6c681cab1341061c02031464d5355ff8a4d7d..9c3c6eebe9eeecee8653aaa6f718fd51facaff8d 100644 (file)
@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
 #if defined(CONFIG_PPC64)
 #define TIF_ELF2ABI            18      /* function descriptors must die! */
 #endif
+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
+#define TIF_GRSEC_SETXID       6       /* update credentials on syscall entry/exit */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SYSCALL_TRACEPOINT        (1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_EMULATE_STACK_STORE       (1<<TIF_EMULATE_STACK_STORE)
 #define _TIF_NOHZ              (1<<TIF_NOHZ)
+#define _TIF_GRSEC_SETXID      (1<<TIF_GRSEC_SETXID)
 #define _TIF_SYSCALL_T_OR_A    (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
                                 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
-                                _TIF_NOHZ)
+                                _TIF_NOHZ | _TIF_GRSEC_SETXID)
 
 #define _TIF_USER_WORK_MASK    (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
                                 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
index a0c071d24e0e5e744969bc2eeea473b210bde785..49cdc7ff4530a48788e75be5d4d2c71e700980eb 100644 (file)
@@ -58,6 +58,7 @@
 
 #endif
 
+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
 #define access_ok(type, addr, size)            \
        (__chk_user_ptr(addr),                  \
         __access_ok((__force unsigned long)(addr), (size), get_fs()))
@@ -318,52 +319,6 @@ do {                                                               \
 extern unsigned long __copy_tofrom_user(void __user *to,
                const void __user *from, unsigned long size);
 
-#ifndef __powerpc64__
-
-static inline unsigned long copy_from_user(void *to,
-               const void __user *from, unsigned long n)
-{
-       unsigned long over;
-
-       if (access_ok(VERIFY_READ, from, n))
-               return __copy_tofrom_user((__force void __user *)to, from, n);
-       if ((unsigned long)from < TASK_SIZE) {
-               over = (unsigned long)from + n - TASK_SIZE;
-               return __copy_tofrom_user((__force void __user *)to, from,
-                               n - over) + over;
-       }
-       return n;
-}
-
-static inline unsigned long copy_to_user(void __user *to,
-               const void *from, unsigned long n)
-{
-       unsigned long over;
-
-       if (access_ok(VERIFY_WRITE, to, n))
-               return __copy_tofrom_user(to, (__force void __user *)from, n);
-       if ((unsigned long)to < TASK_SIZE) {
-               over = (unsigned long)to + n - TASK_SIZE;
-               return __copy_tofrom_user(to, (__force void __user *)from,
-                               n - over) + over;
-       }
-       return n;
-}
-
-#else /* __powerpc64__ */
-
-#define __copy_in_user(to, from, size) \
-       __copy_tofrom_user((to), (from), (size))
-
-extern unsigned long copy_from_user(void *to, const void __user *from,
-                                   unsigned long n);
-extern unsigned long copy_to_user(void __user *to, const void *from,
-                                 unsigned long n);
-extern unsigned long copy_in_user(void __user *to, const void __user *from,
-                                 unsigned long n);
-
-#endif /* __powerpc64__ */
-
 static inline unsigned long __copy_from_user_inatomic(void *to,
                const void __user *from, unsigned long n)
 {
@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
                if (ret == 0)
                        return 0;
        }
+
+       if (!__builtin_constant_p(n))
+               check_object_size(to, n, false);
+
        return __copy_tofrom_user((__force void __user *)to, from, n);
 }
 
@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
                if (ret == 0)
                        return 0;
        }
+
+       if (!__builtin_constant_p(n))
+               check_object_size(from, n, true);
+
        return __copy_tofrom_user(to, (__force const void __user *)from, n);
 }
 
@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
        return __copy_to_user_inatomic(to, from, size);
 }
 
+#ifndef __powerpc64__
+
+static inline unsigned long __must_check copy_from_user(void *to,
+               const void __user *from, unsigned long n)
+{
+       unsigned long over;
+
+       if ((long)n < 0)
+               return n;
+
+       if (access_ok(VERIFY_READ, from, n)) {
+               if (!__builtin_constant_p(n))
+                       check_object_size(to, n, false);
+               return __copy_tofrom_user((__force void __user *)to, from, n);
+       }
+       if ((unsigned long)from < TASK_SIZE) {
+               over = (unsigned long)from + n - TASK_SIZE;
+               if (!__builtin_constant_p(n - over))
+                       check_object_size(to, n - over, false);
+               return __copy_tofrom_user((__force void __user *)to, from,
+                               n - over) + over;
+       }
+       return n;
+}
+
+static inline unsigned long __must_check copy_to_user(void __user *to,
+               const void *from, unsigned long n)
+{
+       unsigned long over;
+
+       if ((long)n < 0)
+               return n;
+
+       if (access_ok(VERIFY_WRITE, to, n)) {
+               if (!__builtin_constant_p(n))
+                       check_object_size(from, n, true);
+               return __copy_tofrom_user(to, (__force void __user *)from, n);
+       }
+       if ((unsigned long)to < TASK_SIZE) {
+               over = (unsigned long)to + n - TASK_SIZE;
+               if (!__builtin_constant_p(n - over))
+                       check_object_size(from, n - over, true);
+               return __copy_tofrom_user(to, (__force void __user *)from,
+                               n - over) + over;
+       }
+       return n;
+}
+
+#else /* __powerpc64__ */
+
+#define __copy_in_user(to, from, size) \
+       __copy_tofrom_user((to), (from), (size))
+
+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       if ((long)n < 0 || n > INT_MAX)
+               return n;
+
+       if (!__builtin_constant_p(n))
+               check_object_size(to, n, false);
+
+       if (likely(access_ok(VERIFY_READ, from, n)))
+               n = __copy_from_user(to, from, n);
+       else
+               memset(to, 0, n);
+       return n;
+}
+
+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+       if ((long)n < 0 || n > INT_MAX)
+               return n;
+
+       if (likely(access_ok(VERIFY_WRITE, to, n))) {
+               if (!__builtin_constant_p(n))
+                       check_object_size(from, n, true);
+               n = __copy_to_user(to, from, n);
+       }
+       return n;
+}
+
+extern unsigned long copy_in_user(void __user *to, const void __user *from,
+                                 unsigned long n);
+
+#endif /* __powerpc64__ */
+
 extern unsigned long __clear_user(void __user *addr, unsigned long size);
 
 static inline unsigned long clear_user(void __user *addr, unsigned long size)
index 502cf69b6c89e30c75545ac34cc4092b4af80416..53936a18434b9b15bbc88a2b37982520a1e6eab0 100644 (file)
@@ -15,6 +15,11 @@ CFLAGS_prom_init.o      += -fPIC
 CFLAGS_btext.o         += -fPIC
 endif
 
+CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
+CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
+CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
+CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
+
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
@@ -27,6 +32,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
 endif
 
+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
+
 obj-y                          := cputable.o ptrace.o syscalls.o \
                                   irq.o align.o signal_32.o pmc.o vdso.o \
                                   process.o systbl.o idle.o \
index 3e68d1c69718541578a7b875551ed203b2d9d49d..72a5ee66b57abd2d75f0ba4a6b62389ee4205c9f 100644 (file)
@@ -1010,6 +1010,7 @@ storage_fault_common:
        std     r14,_DAR(r1)
        std     r15,_DSISR(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      save_nvgprs
        mr      r4,r14
        mr      r5,r15
        ld      r14,PACA_EXGEN+EX_R14(r13)
@@ -1018,8 +1019,7 @@ storage_fault_common:
        cmpdi   r3,0
        bne-    1f
        b       ret_from_except_lite
-1:     bl      save_nvgprs
-       mr      r5,r3
+1:     mr      r5,r3
        addi    r3,r1,STACK_FRAME_OVERHEAD
        ld      r4,_DAR(r1)
        bl      bad_page_fault
index c2df8150bd7a0425fc00ebcc78d784d6b280c146..bae3d12d4076accf48daac8baa21564582bc5fbd 100644 (file)
@@ -1599,10 +1599,10 @@ handle_page_fault:
 11:    ld      r4,_DAR(r1)
        ld      r5,_DSISR(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      save_nvgprs
        bl      do_page_fault
        cmpdi   r3,0
        beq+    12f
-       bl      save_nvgprs
        mr      r5,r3
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lwz     r4,_DAR(r1)
index 45096033d37bf7082bfe80dd7a25c782a4b1943a..cdb491f53787521ca9897e957a405156a97f5a99 100644 (file)
@@ -460,6 +460,8 @@ void migrate_irqs(void)
 }
 #endif
 
+extern void gr_handle_kernel_exploit(void);
+
 static inline void check_stack_overflow(void)
 {
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -472,6 +474,7 @@ static inline void check_stack_overflow(void)
                pr_err("do_IRQ: stack overflow: %ld\n",
                        sp - sizeof(struct thread_info));
                dump_stack();
+               gr_handle_kernel_exploit();
        }
 #endif
 }
index c94d2e018d843dc9d9054561252bef19f7672516..992a9ce7aeeb0c1fec4179838dc68d5100c78648 100644 (file)
@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
                        me->arch.core_plt_section = i;
        }
        if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
-               pr_err("Module doesn't contain .plt or .init.plt sections.\n");
+               pr_err("Module $s doesn't contain .plt or .init.plt sections.\n", me->name);
                return -ENOEXEC;
        }
 
@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
 
        pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
        /* Init, or core PLT? */
-       if (location >= mod->module_core
-           && location < mod->module_core + mod->core_size)
+       if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
+           (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
                entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
-       else
+       else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
+                (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
                entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
+       else {
+               printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
+               return ~0UL;
+       }
 
        /* Find this entry, or if that fails, the next avail. entry */
        while (entry->jump[0]) {
@@ -296,7 +301,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
        }
 #ifdef CONFIG_DYNAMIC_FTRACE
        module->arch.tramp =
-               do_plt_call(module->module_core,
+               do_plt_call(module->module_core_rx,
                            (unsigned long)ftrace_caller,
                            sechdrs, module);
 #endif
index b4cc7bef6b16d88d8ba5a27d60d7df13d2f7e74a..1fe8bb32009e609d728779a6c09042bbddd4777d 100644 (file)
@@ -1036,8 +1036,8 @@ void show_regs(struct pt_regs * regs)
         * Lookup NIP late so we have the best change of getting the
         * above info out without failing
         */
-       printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
-       printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
+       printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
+       printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
 #endif
        show_stack(current, (unsigned long *) regs->gpr[1]);
        if (!user_mode(regs))
@@ -1549,10 +1549,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
                newsp = stack[0];
                ip = stack[STACK_FRAME_LR_SAVE];
                if (!firstframe || ip != lr) {
-                       printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
+                       printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
                        if ((ip == rth) && curr_frame >= 0) {
-                               printk(" (%pS)",
+                               printk(" (%pA)",
                                       (void *)current->ret_stack[curr_frame].ret);
                                curr_frame--;
                        }
@@ -1572,7 +1572,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
                        struct pt_regs *regs = (struct pt_regs *)
                                (sp + STACK_FRAME_OVERHEAD);
                        lr = regs->link;
-                       printk("--- interrupt: %lx at %pS\n    LR = %pS\n",
+                       printk("--- interrupt: %lx at %pA\n    LR = %pA\n",
                               regs->trap, (void *)regs->nip, (void *)lr);
                        firstframe = 1;
                }
@@ -1608,49 +1608,3 @@ void notrace __ppc64_runlatch_off(void)
        mtspr(SPRN_CTRLT, ctrl);
 }
 #endif /* CONFIG_PPC64 */
-
-unsigned long arch_align_stack(unsigned long sp)
-{
-       if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-               sp -= get_random_int() & ~PAGE_MASK;
-       return sp & ~0xf;
-}
-
-static inline unsigned long brk_rnd(void)
-{
-        unsigned long rnd = 0;
-
-       /* 8MB for 32bit, 1GB for 64bit */
-       if (is_32bit_task())
-               rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
-       else
-               rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
-
-       return rnd << PAGE_SHIFT;
-}
-
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-       unsigned long base = mm->brk;
-       unsigned long ret;
-
-#ifdef CONFIG_PPC_STD_MMU_64
-       /*
-        * If we are using 1TB segments and we are allowed to randomise
-        * the heap, we can put it above 1TB so it is backed by a 1TB
-        * segment. Otherwise the heap will be in the bottom 1TB
-        * which always uses 256MB segments and this may result in a
-        * performance penalty.
-        */
-       if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
-               base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
-#endif
-
-       ret = PAGE_ALIGN(base + brk_rnd());
-
-       if (ret < mm->brk)
-               return mm->brk;
-
-       return ret;
-}
-
index f21897b420576c63330eb5c62982ba0729229a85..28c0428d86be620eaa919c2121da4a50279decda 100644 (file)
@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
        return ret;
 }
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+extern void gr_delayed_cred_worker(void);
+#endif
+
 /*
  * We must return the syscall number to actually look up in the table.
  * This can be -1L to skip running any syscall at all.
@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
 
        secure_computing_strict(regs->gpr[0]);
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+       if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
+               gr_delayed_cred_worker();
+#endif
+
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
            tracehook_report_syscall_entry(regs))
                /*
@@ -1805,6 +1814,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
 {
        int step;
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+       if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
+               gr_delayed_cred_worker();
+#endif
+
        audit_syscall_exit(regs);
 
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
index b171001698ff32f256e4298c9f7fdfda2ddeef39..4ac7ac57902b9d1448f1a06d71fea8667317d1fa 100644 (file)
@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
        /* Save user registers on the stack */
        frame = &rt_sf->uc.uc_mcontext;
        addr = frame;
-       if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
+       if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
                sigret = 0;
                tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
        } else {
index 2cb0c94cafa5ef9e883b33136db22dcad1a7728b..c0c0bc9be5d7dbc680e373356b791beecabaceea 100644 (file)
@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
        current->thread.fp_state.fpscr = 0;
 
        /* Set up to return from userspace. */
-       if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
+       if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
                regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
        } else {
                err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
index e6595b72269b5c09eb7582c57a3a672f25f8bdfc..24bde6ed3cb2cff8519a1440dc01f46841d86c29 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/debugfs.h>
 #include <linux/ratelimit.h>
 #include <linux/context_tracking.h>
+#include <linux/uaccess.h>
 
 #include <asm/emulated_ops.h>
 #include <asm/pgtable.h>
@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
        return flags;
 }
 
+extern void gr_handle_kernel_exploit(void);
+
 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
                               int signr)
 {
@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");
+
+       gr_handle_kernel_exploit();
+
        do_exit(signr);
 }
 
@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
        enum ctx_state prev_state = exception_enter();
        unsigned int reason = get_reason(regs);
 
+#ifdef CONFIG_PAX_REFCOUNT
+       unsigned int bkpt;
+       const struct exception_table_entry *entry;
+
+       if (reason & REASON_ILLEGAL) {
+               /* Check if PaX bad instruction */
+               if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
+                       current->thread.trap_nr = 0;
+                       pax_report_refcount_overflow(regs);
+                       /* fixup_exception() for PowerPC does not exist, simulate its job */
+                       if ((entry = search_exception_tables(regs->nip)) != NULL) {
+                               regs->nip = entry->fixup;
+                               return;
+                       }
+                       /* fixup_exception() could not handle */
+                       goto bail;
+               }
+       }
+#endif
+
        /* We can now get here via a FP Unavailable exception if the core
         * has no FPU, in that case the reason flags will be 0 */
 
index 305eb0d9b76882d44e9f72805c951e1fa8c68b05..accc5b40238ae2a8f3ec2cfdc1a7b3edbb49ca1a 100644 (file)
@@ -34,6 +34,7 @@
 #include <asm/vdso.h>
 #include <asm/vdso_datapage.h>
 #include <asm/setup.h>
+#include <asm/mman.h>
 
 #undef DEBUG
 
@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
        vdso_base = VDSO32_MBASE;
 #endif
 
-       current->mm->context.vdso_base = 0;
+       current->mm->context.vdso_base = ~0UL;
 
        /* vDSO has a problem and was disabled, just don't "enable" it for the
         * process
@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
        vdso_base = get_unmapped_area(NULL, vdso_base,
                                      (vdso_pages << PAGE_SHIFT) +
                                      ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
-                                     0, 0);
+                                     0, MAP_PRIVATE | MAP_EXECUTABLE);
        if (IS_ERR_VALUE(vdso_base)) {
                rc = vdso_base;
                goto fail_mmapsem;
index c45eaab752b0d074f3b60689af861ccb75110269..5f41b57c4352fd233105a6599eea86872a98f4d6 100644 (file)
@@ -1403,7 +1403,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
 }
 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
 
-int kvm_arch_init(void *opaque)
+int kvm_arch_init(const void *opaque)
 {
        return 0;
 }
index 5eea6f3c1e03be13c06fb25ea04f64e45ece5954..5d1039679f7b67ed168b29a9db99efd4695b41ee 100644 (file)
@@ -9,22 +9,6 @@
 #include <linux/module.h>
 #include <asm/uaccess.h>
 
-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-       if (likely(access_ok(VERIFY_READ, from, n)))
-               n = __copy_from_user(to, from, n);
-       else
-               memset(to, 0, n);
-       return n;
-}
-
-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-       if (likely(access_ok(VERIFY_WRITE, to, n)))
-               n = __copy_to_user(to, from, n);
-       return n;
-}
-
 unsigned long copy_in_user(void __user *to, const void __user *from,
                           unsigned long n)
 {
@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
        return n;
 }
 
-EXPORT_SYMBOL(copy_from_user);
-EXPORT_SYMBOL(copy_to_user);
 EXPORT_SYMBOL(copy_in_user);
 
index 6154b0a2b06331f0c29efe56b210baa6f90d43c7..4de2b1979587306dd2910e633390fda7778bba86 100644 (file)
 #include <linux/ratelimit.h>
 #include <linux/context_tracking.h>
 #include <linux/hugetlb.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/compiler.h>
+#include <linux/unistd.h>
 
 #include <asm/firmware.h>
 #include <asm/page.h>
@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
 }
 #endif
 
+#ifdef CONFIG_PAX_PAGEEXEC
+/*
+ * PaX: decide what to do with offenders (regs->nip = fault address)
+ *
+ * returns 1 when task should be killed
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+       return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+       unsigned long i;
+
+       printk(KERN_ERR "PAX: bytes at PC: ");
+       for (i = 0; i < 5; i++) {
+               unsigned int c;
+               if (get_user(c, (unsigned int __user *)pc+i))
+                       printk(KERN_CONT "???????? ");
+               else
+                       printk(KERN_CONT "%08x ", c);
+       }
+       printk("\n");
+}
+#endif
+
 /*
  * Check whether the instruction at regs->nip is a store using
  * an update addressing form which will update r1.
@@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
         * indicate errors in DSISR but can validly be set in SRR1.
         */
        if (trap == 0x400)
-               error_code &= 0x48200000;
+               error_code &= 0x58200000;
        else
                is_write = error_code & DSISR_ISSTORE;
 #else
@@ -383,7 +414,7 @@ good_area:
          * "undefined".  Of those that can be set, this is the only
          * one which seems bad.
          */
-       if (error_code & 0x10000000)
+       if (error_code & DSISR_GUARDED)
                 /* Guarded storage error. */
                goto bad_area;
 #endif /* CONFIG_8xx */
@@ -398,7 +429,7 @@ good_area:
                 * processors use the same I/D cache coherency mechanism
                 * as embedded.
                 */
-               if (error_code & DSISR_PROTFAULT)
+               if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
                        goto bad_area;
 #endif /* CONFIG_PPC_STD_MMU */
 
@@ -490,6 +521,23 @@ bad_area:
 bad_area_nosemaphore:
        /* User mode accesses cause a SIGSEGV */
        if (user_mode(regs)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+               if (mm->pax_flags & MF_PAX_PAGEEXEC) {
+#ifdef CONFIG_PPC_STD_MMU
+                       if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
+#else
+                       if (is_exec && regs->nip == address) {
+#endif
+                               switch (pax_handle_fetch_fault(regs)) {
+                               }
+
+                               pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
+                               do_group_exit(SIGKILL);
+                       }
+               }
+#endif
+
                _exception(SIGSEGV, regs, code, address);
                goto bail;
        }
index cb8bdbe4972fa8bbe4c63039537ab9cbd4ccb233..cde4bc7746dbdab4c7c014df67955074ad4bf7e8 100644 (file)
@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
        return sysctl_legacy_va_layout;
 }
 
-static unsigned long mmap_rnd(void)
+static unsigned long mmap_rnd(struct mm_struct *mm)
 {
        unsigned long rnd = 0;
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        if (current->flags & PF_RANDOMIZE) {
                /* 8MB for 32bit, 1GB for 64bit */
                if (is_32bit_task())
@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
        return rnd << PAGE_SHIFT;
 }
 
-static inline unsigned long mmap_base(void)
+static inline unsigned long mmap_base(struct mm_struct *mm)
 {
        unsigned long gap = rlimit(RLIMIT_STACK);
 
@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
        else if (gap > MAX_GAP)
                gap = MAX_GAP;
 
-       return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
+       return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
 }
 
 /*
@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
         */
        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_PAX_RANDMMAP
+               if (mm->pax_flags & MF_PAX_RANDMMAP)
+                       mm->mmap_base += mm->delta_mmap;
+#endif
+
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
-               mm->mmap_base = mmap_base();
+               mm->mmap_base = mmap_base(mm);
+
+#ifdef CONFIG_PAX_RANDMMAP
+               if (mm->pax_flags & MF_PAX_RANDMMAP)
+                       mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
+#endif
+
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
 }
index ded0ea1afde4021578d21d2e969318ea6cd046eb..f213a9bbb71a744e77a46c3834a53b93f6e2a0b2 100644 (file)
@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
        if ((mm->task_size - len) < addr)
                return 0;
        vma = find_vma(mm, addr);
-       return (!vma || (addr + len) <= vma->vm_start);
+       return check_heap_stack_gap(vma, addr, len, 0);
 }
 
 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
        info.align_offset = 0;
 
        addr = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_PAX_RANDMMAP
+       if (mm->pax_flags & MF_PAX_RANDMMAP)
+               addr += mm->delta_mmap;
+#endif
+
        while (addr < TASK_SIZE) {
                info.low_limit = addr;
                if (!slice_scan_available(addr, available, 1, &addr))
@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
        if (fixed && addr > (mm->task_size - len))
                return -ENOMEM;
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
+               addr = 0;
+#endif
+
        /* If hint, make sure it matches our alignment restrictions */
        if (!fixed && addr) {
                addr = _ALIGN_UP(addr, 1ul << pshift);
index f223875980405ef6547f6dd30b6645990c27e120..94170e4f2ce775d19c4255eddb339512bff13350 100644 (file)
@@ -399,8 +399,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
 }
 
 static struct pci_ops scc_pciex_pci_ops = {
-       scc_pciex_read_config,
-       scc_pciex_write_config,
+       .read = scc_pciex_read_config,
+       .write = scc_pciex_write_config,
 };
 
 static void pciex_clear_intr_all(unsigned int __iomem *base)
index d966bbe58b8f368979154edb3ead64f735c5bb2b..372124a0165680d40ad1c333cafb7430eae3c2c6 100644 (file)
@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        return VM_FAULT_NOPAGE;
 }
 
-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
                                unsigned long address,
-                               void *buf, int len, int write)
+                               void *buf, size_t len, int write)
 {
        struct spu_context *ctx = vma->vm_file->private_data;
        unsigned long offset = address - vma->vm_start;
index fa934fe080c17bf30e9ff3e0829186b65b7dfc7b..c2960564179744c2e28dc182a1d2c78098ff12e2 100644 (file)
@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic64_dec_and_test(_v)      (atomic64_sub_return(1, _v) == 0)
 #define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1, 0)
 
+#define atomic64_read_unchecked(v)             atomic64_read(v)
+#define atomic64_set_unchecked(v, i)           atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v)           atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v)    atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v)           atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v)              atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v)       atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v)              atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n)    atomic64_cmpxchg((v), (o), (n))
+
 #endif /* __ARCH_S390_ATOMIC__  */
index 8d724718ec21c8d0331e1b8fbc14c96cb5892649..532250003a3a9015f7c36e0068921c832dcefd27 100644 (file)
@@ -42,7 +42,7 @@
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
-       ACCESS_ONCE(*p) = (v);                                          \
+       ACCESS_ONCE_RW(*p) = (v);                                       \
 } while (0)
 
 #define smp_load_acquire(p)                                            \
index 4d7ccac5fd1d693e3875f2eac011d46ff3750a0e..d03d0ad0e18df9bf0210032590893942abb2dfd0 100644 (file)
@@ -9,8 +9,10 @@
 #ifndef __ARCH_S390_CACHE_H
 #define __ARCH_S390_CACHE_H
 
-#define L1_CACHE_BYTES     256
+#include <linux/const.h>
+
 #define L1_CACHE_SHIFT     8
+#define L1_CACHE_BYTES     (_AC(1,UL) << L1_CACHE_SHIFT)
 #define NET_SKB_PAD       32
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
index f6e43d39e3d82e7c86e771078b752a88f998c481..5f57681d3926ebc2e8602f3a9a82552859dd4256 100644 (file)
@@ -163,8 +163,14 @@ extern unsigned int vdso_enabled;
    the loader.  We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk.  */
 
-extern unsigned long randomize_et_dyn(unsigned long base);
-#define ELF_ET_DYN_BASE                (randomize_et_dyn(STACK_TOP / 3 * 2))
+#define ELF_ET_DYN_BASE                (STACK_TOP / 3 * 2)
+
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE    (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
+
+#define PAX_DELTA_MMAP_LEN     (test_thread_flag(TIF_31BIT) ? 15 : 26)
+#define PAX_DELTA_STACK_LEN    (test_thread_flag(TIF_31BIT) ? 15 : 26)
+#endif
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */
@@ -223,9 +229,6 @@ struct linux_binprm;
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 int arch_setup_additional_pages(struct linux_binprm *, int);
 
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
 
 #endif
index c4a93d6327fa7248bf2eb8404ba60b44758c1190..4d2a9b4162a0a520fedeee14fd797c93aaf6f4e8 100644 (file)
@@ -7,6 +7,6 @@
 #ifndef __ASM_EXEC_H
 #define __ASM_EXEC_H
 
-extern unsigned long arch_align_stack(unsigned long sp);
+#define arch_align_stack(x) ((x) & ~0xfUL)
 
 #endif /* __ASM_EXEC_H */
index cd4c68e0398dd9bc04b0ed76e80afe3c55a6fd81..6764641331980c2dd6414207e8321b63fd51fad2 100644 (file)
@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
        __range_ok((unsigned long)(addr), (size));      \
 })
 
+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
 #define access_ok(type, addr, size) __access_ok(addr, size)
 
 /*
@@ -275,6 +276,10 @@ static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
        might_fault();
+
+       if ((long)n < 0)
+               return n;
+
        return __copy_to_user(to, from, n);
 }
 
@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       unsigned int sz = __compiletime_object_size(to);
+       size_t sz = __compiletime_object_size(to);
 
        might_fault();
-       if (unlikely(sz != -1 && sz < n)) {
+
+       if ((long)n < 0)
+               return n;
+
+       if (unlikely(sz != (size_t)-1 && sz < n)) {
                copy_from_user_overflow();
                return n;
        }
index 409d152585bea67a6aca845bb0c3e4db130b1505..d90d368900c38fc1b6a815686a2da1cca605857c 100644 (file)
@@ -165,11 +165,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
 
        /* Increase core size by size of got & plt and set start
           offsets for got and plt. */
-       me->core_size = ALIGN(me->core_size, 4);
-       me->arch.got_offset = me->core_size;
-       me->core_size += me->arch.got_size;
-       me->arch.plt_offset = me->core_size;
-       me->core_size += me->arch.plt_size;
+       me->core_size_rw = ALIGN(me->core_size_rw, 4);
+       me->arch.got_offset = me->core_size_rw;
+       me->core_size_rw += me->arch.got_size;
+       me->arch.plt_offset = me->core_size_rx;
+       me->core_size_rx += me->arch.plt_size;
        return 0;
 }
 
@@ -285,7 +285,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
                if (info->got_initialized == 0) {
                        Elf_Addr *gotent;
 
-                       gotent = me->module_core + me->arch.got_offset +
+                       gotent = me->module_core_rw + me->arch.got_offset +
                                info->got_offset;
                        *gotent = val;
                        info->got_initialized = 1;
@@ -308,7 +308,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
                        rc = apply_rela_bits(loc, val, 0, 64, 0);
                else if (r_type == R_390_GOTENT ||
                         r_type == R_390_GOTPLTENT) {
-                       val += (Elf_Addr) me->module_core - loc;
+                       val += (Elf_Addr) me->module_core_rw - loc;
                        rc = apply_rela_bits(loc, val, 1, 32, 1);
                }
                break;
@@ -321,7 +321,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
        case R_390_PLTOFF64:    /* 16 bit offset from GOT to PLT. */
                if (info->plt_initialized == 0) {
                        unsigned int *ip;
-                       ip = me->module_core + me->arch.plt_offset +
+                       ip = me->module_core_rx + me->arch.plt_offset +
                                info->plt_offset;
 #ifndef CONFIG_64BIT
                        ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
@@ -346,7 +346,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
                               val - loc + 0xffffUL < 0x1ffffeUL) ||
                              (r_type == R_390_PLT32DBL &&
                               val - loc + 0xffffffffULL < 0x1fffffffeULL)))
-                               val = (Elf_Addr) me->module_core +
+                               val = (Elf_Addr) me->module_core_rx +
                                        me->arch.plt_offset +
                                        info->plt_offset;
                        val += rela->r_addend - loc;
@@ -368,7 +368,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
        case R_390_GOTOFF32:    /* 32 bit offset to GOT.  */
        case R_390_GOTOFF64:    /* 64 bit offset to GOT. */
                val = val + rela->r_addend -
-                       ((Elf_Addr) me->module_core + me->arch.got_offset);
+                       ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
                if (r_type == R_390_GOTOFF16)
                        rc = apply_rela_bits(loc, val, 0, 16, 0);
                else if (r_type == R_390_GOTOFF32)
@@ -378,7 +378,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
                break;
        case R_390_GOTPC:       /* 32 bit PC relative offset to GOT. */
        case R_390_GOTPCDBL:    /* 32 bit PC rel. off. to GOT shifted by 1. */
-               val = (Elf_Addr) me->module_core + me->arch.got_offset +
+               val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
                        rela->r_addend - loc;
                if (r_type == R_390_GOTPC)
                        rc = apply_rela_bits(loc, val, 1, 32, 0);
index aa7a83948c7b13fda4712ec1081808f849ef2b23..6c2a916d706b9d75bb411922edadc346898262de 100644 (file)
@@ -219,37 +219,3 @@ unsigned long get_wchan(struct task_struct *p)
        }
        return 0;
 }
-
-unsigned long arch_align_stack(unsigned long sp)
-{
-       if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-               sp -= get_random_int() & ~PAGE_MASK;
-       return sp & ~0xf;
-}
-
-static inline unsigned long brk_rnd(void)
-{
-       /* 8MB for 32bit, 1GB for 64bit */
-       if (is_32bit_task())
-               return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
-       else
-               return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
-}
-
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-       unsigned long ret;
-
-       ret = PAGE_ALIGN(mm->brk + brk_rnd());
-       return (ret > mm->brk) ? ret : mm->brk;
-}
-
-unsigned long randomize_et_dyn(unsigned long base)
-{
-       unsigned long ret;
-
-       if (!(current->flags & PF_RANDOMIZE))
-               return base;
-       ret = PAGE_ALIGN(base + brk_rnd());
-       return (ret > base) ? ret : base;
-}
index 9b436c21195ec6b16465bb6e1964e2ad343b8321..54fbf0a9886da4ff130bd3922434e22553164441 100644 (file)
@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
         */
        if (mmap_is_legacy()) {
                mm->mmap_base = mmap_base_legacy();
+
+#ifdef CONFIG_PAX_RANDMMAP
+               if (mm->pax_flags & MF_PAX_RANDMMAP)
+                       mm->mmap_base += mm->delta_mmap;
+#endif
+
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                mm->mmap_base = mmap_base();
+
+#ifdef CONFIG_PAX_RANDMMAP
+               if (mm->pax_flags & MF_PAX_RANDMMAP)
+                       mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
+#endif
+
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
 }
@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
         */
        if (mmap_is_legacy()) {
                mm->mmap_base = mmap_base_legacy();
+
+#ifdef CONFIG_PAX_RANDMMAP
+               if (mm->pax_flags & MF_PAX_RANDMMAP)
+                       mm->mmap_base += mm->delta_mmap;
+#endif
+
                mm->get_unmapped_area = s390_get_unmapped_area;
        } else {
                mm->mmap_base = mmap_base();
+
+#ifdef CONFIG_PAX_RANDMMAP
+               if (mm->pax_flags & MF_PAX_RANDMMAP)
+                       mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
+#endif
+
                mm->get_unmapped_area = s390_get_unmapped_area_topdown;
        }
 }
index ae3d59f2d2c43b672811f85411d41fc51cd89dca..f65f0751c98bf4246995a2f86c8cb38f250dc036 100644 (file)
@@ -1,7 +1,9 @@
 #ifndef _ASM_SCORE_CACHE_H
 #define _ASM_SCORE_CACHE_H
 
+#include <linux/const.h>
+
 #define L1_CACHE_SHIFT         4
-#define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES         (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #endif /* _ASM_SCORE_CACHE_H */
index f9f3cd59c86055a8a8c5611d78d5a046a1e8dd6c..58ff438bdac05c9f9ed4a4160b60cc47c10f2759 100644 (file)
@@ -1,6 +1,6 @@
 #ifndef _ASM_SCORE_EXEC_H
 #define _ASM_SCORE_EXEC_H
 
-extern unsigned long arch_align_stack(unsigned long sp);
+#define arch_align_stack(x) (x)
 
 #endif /* _ASM_SCORE_EXEC_H */
index a1519ad3d49d68e0e3202ecadb5b6e1b027a7df3..e8ac1ff9e6086d6c9568b616ac7c9c7e2ba0618f 100644 (file)
@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
 
        return task_pt_regs(task)->cp0_epc;
 }
-
-unsigned long arch_align_stack(unsigned long sp)
-{
-       return sp;
-}
index ef9e555aafba5bad5247d0baeae0d256064b1620..331bd29e75dc62e6b01e6622c15e1be5ea48e316 100644 (file)
@@ -9,10 +9,11 @@
 #define __ASM_SH_CACHE_H
 #ifdef __KERNEL__
 
+#include <linux/const.h>
 #include <linux/init.h>
 #include <cpu/cache.h>
 
-#define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES         (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
index 6777177807c26f5d6630ec48535bb4f7d5eba0e2..cb5e44fd2c72dac8299416f753e28d953d8fd01d 100644 (file)
@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int do_colour_align;
+       unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
        struct vm_unmapped_area_info info;
 
        if (flags & MAP_FIXED) {
@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        if (filp || (flags & MAP_SHARED))
                do_colour_align = 1;
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        if (addr) {
                if (do_colour_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                        addr = PAGE_ALIGN(addr);
 
                vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+               if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
                        return addr;
        }
 
        info.flags = 0;
        info.length = len;
-       info.low_limit = TASK_UNMAPPED_BASE;
+       info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        int do_colour_align;
+       unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
        struct vm_unmapped_area_info info;
 
        if (flags & MAP_FIXED) {
@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        if (filp || (flags & MAP_SHARED))
                do_colour_align = 1;
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        /* requesting a specific address */
        if (addr) {
                if (do_colour_align)
@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        addr = PAGE_ALIGN(addr);
 
                vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+               if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
                        return addr;
        }
 
@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_PAX_RANDMMAP
+               if (mm->pax_flags & MF_PAX_RANDMMAP)
+                       info.low_limit += mm->delta_mmap;
+#endif
+
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }
index 4082749913ce06109a3bd923e0bef4677d0376c9..fd97781847e6fefa4c10ed0574c9795643edba0b 100644 (file)
 #define ATOMIC64_INIT(i)       { (i) }
 
 #define atomic_read(v)         ACCESS_ONCE((v)->counter)
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+       return ACCESS_ONCE(v->counter);
+}
 #define atomic64_read(v)       ACCESS_ONCE((v)->counter)
+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+       return ACCESS_ONCE(v->counter);
+}
 
 #define atomic_set(v, i)       (((v)->counter) = i)
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+       v->counter = i;
+}
 #define atomic64_set(v, i)     (((v)->counter) = i)
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
+{
+       v->counter = i;
+}
+
+#define __ATOMIC_OP(op, suffix)                                                \
+void atomic_##op##suffix(int, atomic##suffix##_t *);                   \
+void atomic64_##op##suffix(long, atomic64##suffix##_t *);
+
+#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
 
-#define ATOMIC_OP(op)                                                  \
-void atomic_##op(int, atomic_t *);                                     \
-void atomic64_##op(long, atomic64_t *);
+#define __ATOMIC_OP_RETURN(op, suffix)                                 \
+int atomic_##op##_return##suffix(int, atomic##suffix##_t *);           \
+long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
 
-#define ATOMIC_OP_RETURN(op)                                           \
-int atomic_##op##_return(int, atomic_t *);                             \
-long atomic64_##op##_return(long, atomic64_t *);
+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
 
 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
 
@@ -35,13 +55,23 @@ ATOMIC_OPS(sub)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
+#undef __ATOMIC_OP_RETURN
 #undef ATOMIC_OP
+#undef __ATOMIC_OP
 
 #define atomic_dec_return(v)   atomic_sub_return(1, v)
 #define atomic64_dec_return(v) atomic64_sub_return(1, v)
 
 #define atomic_inc_return(v)   atomic_add_return(1, v)
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+       return atomic_add_return_unchecked(1, v);
+}
 #define atomic64_inc_return(v) atomic64_add_return(1, v)
+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
+{
+       return atomic64_add_return_unchecked(1, v);
+}
 
 /*
  * atomic_inc_and_test - increment and test
@@ -52,6 +82,10 @@ ATOMIC_OPS(sub)
  * other cases.
  */
 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+       return atomic_inc_return_unchecked(v) == 0;
+}
 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
@@ -61,25 +95,60 @@ ATOMIC_OPS(sub)
 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
 
 #define atomic_inc(v) atomic_add(1, v)
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+       atomic_add_unchecked(1, v);
+}
 #define atomic64_inc(v) atomic64_add(1, v)
+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
+{
+       atomic64_add_unchecked(1, v);
+}
 
 #define atomic_dec(v) atomic_sub(1, v)
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+       atomic_sub_unchecked(1, v);
+}
 #define atomic64_dec(v) atomic64_sub(1, v)
+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
+{
+       atomic64_sub_unchecked(1, v);
+}
 
 #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
 
 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
+{
+       return cmpxchg(&v->counter, old, new);
+}
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
+       return xchg(&v->counter, new);
+}
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
-       int c, old;
+       int c, old, new;
        c = atomic_read(v);
        for (;;) {
-               if (unlikely(c == (u)))
+               if (unlikely(c == u))
                        break;
-               old = atomic_cmpxchg((v), c, c + (a));
+
+               asm volatile("addcc %2, %0, %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                            "tvs %%icc, 6\n"
+#endif
+
+                            : "=r" (new)
+                            : "0" (c), "ir" (a)
+                            : "cc");
+
+               old = atomic_cmpxchg(v, c, new);
                if (likely(old == c))
                        break;
                c = old;
@@ -90,20 +159,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define atomic64_cmpxchg(v, o, n) \
        ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
+{
+       return xchg(&v->counter, new);
+}
 
 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 {
-       long c, old;
+       long c, old, new;
        c = atomic64_read(v);
        for (;;) {
-               if (unlikely(c == (u)))
+               if (unlikely(c == u))
                        break;
-               old = atomic64_cmpxchg((v), c, c + (a));
+
+               asm volatile("addcc %2, %0, %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                            "tvs %%xcc, 6\n"
+#endif
+
+                            : "=r" (new)
+                            : "0" (c), "ir" (a)
+                            : "cc");
+
+               old = atomic64_cmpxchg(v, c, new);
                if (likely(old == c))
                        break;
                c = old;
        }
-       return c != (u);
+       return c != u;
 }
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
index 76648941fea71b4327e058d6e995a3331587541c..45a974bcffc02b32364a6dc1f1d809a75bf18f68 100644 (file)
@@ -60,7 +60,7 @@ do {  __asm__ __volatile__("ba,pt     %%xcc, 1f\n\t" \
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
-       ACCESS_ONCE(*p) = (v);                                          \
+       ACCESS_ONCE_RW(*p) = (v);                                       \
 } while (0)
 
 #define smp_load_acquire(p)                                            \
index 5bb6991b48575026134498f743390ba598b20564..5c2132e9641e034352586d0f0baff887017c9d10 100644 (file)
@@ -7,10 +7,12 @@
 #ifndef _SPARC_CACHE_H
 #define _SPARC_CACHE_H
 
+#include <linux/const.h>
+
 #define ARCH_SLAB_MINALIGN     __alignof__(unsigned long long)
 
 #define L1_CACHE_SHIFT 5
-#define L1_CACHE_BYTES 32
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #ifdef CONFIG_SPARC32
 #define SMP_CACHE_BYTES_SHIFT 5
index a24e41fcdde160b594a10243e014d02b3c1cab91..47677ff75d307bd356f328d3322a47aaed005ccc 100644 (file)
@@ -114,6 +114,13 @@ typedef struct {
 
 #define ELF_ET_DYN_BASE         (TASK_UNMAPPED_BASE)
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE    0x10000UL
+
+#define PAX_DELTA_MMAP_LEN     16
+#define PAX_DELTA_STACK_LEN    16
+#endif
+
 /* This yields a mask that user programs can use to figure out what
    instruction set this cpu supports.  This can NOT be done in userspace
    on Sparc.  */
index 370ca1e71ffbc8dc2fb4cd9567b44f95cf61eee6..d4f4a9838c1002828ec0907bf6aeae6317e06ca3 100644 (file)
@@ -189,6 +189,13 @@ typedef struct {
 #define ELF_ET_DYN_BASE                0x0000010000000000UL
 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE    (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
+
+#define PAX_DELTA_MMAP_LEN     (test_thread_flag(TIF_32BIT) ? 14 : 28)
+#define PAX_DELTA_STACK_LEN    (test_thread_flag(TIF_32BIT) ? 15 : 29)
+#endif
+
 extern unsigned long sparc64_elf_hwcap;
 #define ELF_HWCAP      sparc64_elf_hwcap
 
index a3890da9442892e886f55f42ff9d223f75583262..f6a408eb71fe1d76c8d3e7601ca7062d49d01771 100644 (file)
@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 }
 
 #define pgd_populate(MM, PGD, PMD)      pgd_set(PGD, PMD)
+#define pgd_populate_kernel(MM, PGD, PMD)      pgd_populate((MM), (PGD), (PMD))
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
                                   unsigned long address)
index 5e3187185b4a8b9c3f22d4a67ea538a12fdcd9e7..13469c6a0f088758afd4f9404f4cd71fed2dbfcd 100644 (file)
@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
 }
 
 #define pgd_populate(MM, PGD, PUD)     __pgd_populate(PGD, PUD)
+#define pgd_populate_kernel(MM, PGD, PMD)      pgd_populate((MM), (PGD), (PMD))
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
 }
 
 #define pud_populate(MM, PUD, PMD)     __pud_populate(PUD, PMD)
+#define pud_populate_kernel(MM, PUD, PMD)      pud_populate((MM), (PUD), (PMD))
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
index 59ba6f6207322ef7533a73998f54d1b1b888fc94..451812819ef471ad27782136b3c00d879c57bc74 100644 (file)
@@ -5,4 +5,8 @@
 #else
 #include <asm/pgtable_32.h>
 #endif
+
+#define ktla_ktva(addr)                (addr)
+#define ktva_ktla(addr)                (addr)
+
 #endif
index b9b91ae19fe125116826b4960f55ec38876a8aaf..950b91ed9437119332f05cccaea4703c434cbcbb 100644 (file)
@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
 #define PAGE_SHARED    SRMMU_PAGE_SHARED
 #define PAGE_COPY      SRMMU_PAGE_COPY
 #define PAGE_READONLY  SRMMU_PAGE_RDONLY
+#define PAGE_SHARED_NOEXEC     SRMMU_PAGE_SHARED_NOEXEC
+#define PAGE_COPY_NOEXEC       SRMMU_PAGE_COPY_NOEXEC
+#define PAGE_READONLY_NOEXEC   SRMMU_PAGE_RDONLY_NOEXEC
 #define PAGE_KERNEL    SRMMU_PAGE_KERNEL
 
 /* Top-level page directory - dummy used by init-mm.
@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
 
 /*         xwr */
 #define __P000  PAGE_NONE
-#define __P001  PAGE_READONLY
-#define __P010  PAGE_COPY
-#define __P011  PAGE_COPY
+#define __P001  PAGE_READONLY_NOEXEC
+#define __P010  PAGE_COPY_NOEXEC
+#define __P011  PAGE_COPY_NOEXEC
 #define __P100  PAGE_READONLY
 #define __P101  PAGE_READONLY
 #define __P110  PAGE_COPY
 #define __P111  PAGE_COPY
 
 #define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
+#define __S001 PAGE_READONLY_NOEXEC
+#define __S010 PAGE_SHARED_NOEXEC
+#define __S011 PAGE_SHARED_NOEXEC
 #define __S100 PAGE_READONLY
 #define __S101 PAGE_READONLY
 #define __S110 PAGE_SHARED
index 79da17866fa8997ab3032f444b7598076dacaf36..c2eede82811e74f98ab8ee1bbb4d6b3e8930d172 100644 (file)
                                    SRMMU_EXEC | SRMMU_REF)
 #define SRMMU_PAGE_RDONLY  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
                                    SRMMU_EXEC | SRMMU_REF)
+
+#define SRMMU_PAGE_SHARED_NOEXEC       __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
+#define SRMMU_PAGE_COPY_NOEXEC         __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
+#define SRMMU_PAGE_RDONLY_NOEXEC       __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
+
 #define SRMMU_PAGE_KERNEL  __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
                                    SRMMU_DIRTY | SRMMU_REF)
 
index 29d64b1758ed2a0ceb795a07c10205e6f923e6c6..4272fe8d2d8b1d0a883c5fba0efdba9f32138a6e 100644 (file)
@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
 void handle_ld_nf(u32 insn, struct pt_regs *regs);
 
 /* init_64.c */
-extern atomic_t dcpage_flushes;
-extern atomic_t dcpage_flushes_xcall;
+extern atomic_unchecked_t dcpage_flushes;
+extern atomic_unchecked_t dcpage_flushes_xcall;
 
 extern int sysctl_tsb_ratio;
 #endif
index 9689176949781fc5ae8985a794def4521e6ba72b..63c18ea4ff76607a1416446d77a80c755a15df8c 100644 (file)
@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
 
 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
 
-static void inline arch_read_lock(arch_rwlock_t *lock)
+static inline void arch_read_lock(arch_rwlock_t *lock)
 {
        unsigned long tmp1, tmp2;
 
        __asm__ __volatile__ (
 "1:    ldsw            [%2], %0\n"
 "      brlz,pn         %0, 2f\n"
-"4:     add            %0, 1, %1\n"
+"4:     addcc          %0, 1, %1\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"      tvs             %%icc, 6\n"
+#endif
+
 "      cas             [%2], %0, %1\n"
 "      cmp             %0, %1\n"
 "      bne,pn          %%icc, 1b\n"
@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
 "      .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock)
-       : "memory");
+       : "memory", "cc");
 }
 
-static int inline arch_read_trylock(arch_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
 {
        int tmp1, tmp2;
 
@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
 "1:    ldsw            [%2], %0\n"
 "      brlz,a,pn       %0, 2f\n"
 "       mov            0, %0\n"
-"      add             %0, 1, %1\n"
+"      addcc           %0, 1, %1\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"      tvs             %%icc, 6\n"
+#endif
+
 "      cas             [%2], %0, %1\n"
 "      cmp             %0, %1\n"
 "      bne,pn          %%icc, 1b\n"
@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
        return tmp1;
 }
 
-static void inline arch_read_unlock(arch_rwlock_t *lock)
+static inline void arch_read_unlock(arch_rwlock_t *lock)
 {
        unsigned long tmp1, tmp2;
 
        __asm__ __volatile__(
 "1:    lduw    [%2], %0\n"
-"      sub     %0, 1, %1\n"
+"      subcc   %0, 1, %1\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"      tvs     %%icc, 6\n"
+#endif
+
 "      cas     [%2], %0, %1\n"
 "      cmp     %0, %1\n"
 "      bne,pn  %%xcc, 1b\n"
@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
        : "memory");
 }
 
-static void inline arch_write_lock(arch_rwlock_t *lock)
+static inline void arch_write_lock(arch_rwlock_t *lock)
 {
        unsigned long mask, tmp1, tmp2;
 
@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
        : "memory");
 }
 
-static void inline arch_write_unlock(arch_rwlock_t *lock)
+static inline void arch_write_unlock(arch_rwlock_t *lock)
 {
        __asm__ __volatile__(
 "      stw             %%g0, [%0]"
@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
        : "memory");
 }
 
-static int inline arch_write_trylock(arch_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
 {
        unsigned long mask, tmp1, tmp2, result;
 
index 025c98446b1ee1bce743d41ecdf4494eb62e7d50..a216504dad002f8f76e63e8734de1dfc2ae5468b 100644 (file)
@@ -49,6 +49,8 @@ struct thread_info {
        unsigned long           w_saved;
 
        struct restart_block    restart_block;
+
+       unsigned long           lowest_stack;
 };
 
 /*
index 798f0279a4b56a3ccd23b29c7b527bbd329b3061..b009941d12b307b1513709b4e4683be01c07246e 100644 (file)
@@ -63,6 +63,8 @@ struct thread_info {
        struct pt_regs          *kern_una_regs;
        unsigned int            kern_una_insn;
 
+       unsigned long           lowest_stack;
+
        unsigned long           fpregs[(7 * 256) / sizeof(unsigned long)]
                __attribute__ ((aligned(64)));
 };
@@ -190,12 +192,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define TIF_NEED_RESCHED       3       /* rescheduling necessary */
 /* flag bit 4 is available */
 #define TIF_UNALIGNED          5       /* allowed to do unaligned accesses */
-/* flag bit 6 is available */
+#define TIF_GRSEC_SETXID       6       /* update credentials on syscall entry/exit */
 #define TIF_32BIT              7       /* 32-bit binary */
 #define TIF_NOHZ               8       /* in adaptive nohz mode */
 #define TIF_SECCOMP            9       /* secure computing */
 #define TIF_SYSCALL_AUDIT      10      /* syscall auditing active */
 #define TIF_SYSCALL_TRACEPOINT 11      /* syscall tracepoint instrumentation */
+
 /* NOTE: Thread flags >= 12 should be ones we have no interest
  *       in using in assembly, else we can't use the mask as
  *       an immediate value in instructions such as andcc.
@@ -215,12 +218,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define _TIF_SYSCALL_AUDIT     (1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT        (1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG    (1<<TIF_POLLING_NRFLAG)
+#define _TIF_GRSEC_SETXID      (1<<TIF_GRSEC_SETXID)
 
 #define _TIF_USER_WORK_MASK    ((0xff << TI_FLAG_WSAVED_SHIFT) | \
                                 _TIF_DO_NOTIFY_RESUME_MASK | \
                                 _TIF_NEED_RESCHED)
 #define _TIF_DO_NOTIFY_RESUME_MASK     (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
 
+#define _TIF_WORK_SYSCALL              \
+       (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
+        _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
+
 #define is_32bit_task()        (test_thread_flag(TIF_32BIT))
 
 /*
index bd56c28fff9fea8cdec3e98fe7e155a61f1d55bb..4b63d83e19c99f7abd8c131376ff2f4c7a6fa5a4 100644 (file)
@@ -1,5 +1,6 @@
 #ifndef ___ASM_SPARC_UACCESS_H
 #define ___ASM_SPARC_UACCESS_H
+
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/uaccess_64.h>
 #else
index 9634d086fc562f1c128bddb08961d489526abdc1..f55fe4fc659a03f7ed10603ce7640d524b798ce0 100644 (file)
@@ -250,27 +250,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
 
 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-       if (n && __access_ok((unsigned long) to, n))
+       if ((long)n < 0)
+               return n;
+
+       if (n && __access_ok((unsigned long) to, n)) {
+               if (!__builtin_constant_p(n))
+                       check_object_size(from, n, true);
                return __copy_user(to, (__force void __user *) from, n);
-       else
+       } else
                return n;
 }
 
 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+       if ((long)n < 0)
+               return n;
+
+       if (!__builtin_constant_p(n))
+               check_object_size(from, n, true);
+
        return __copy_user(to, (__force void __user *) from, n);
 }
 
 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       if (n && __access_ok((unsigned long) from, n))
+       if ((long)n < 0)
+               return n;
+
+       if (n && __access_ok((unsigned long) from, n)) {
+               if (!__builtin_constant_p(n))
+                       check_object_size(to, n, false);
                return __copy_user((__force void __user *) to, from, n);
-       else
+       } else
                return n;
 }
 
 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+       if ((long)n < 0)
+               return n;
+
        return __copy_user((__force void __user *) to, from, n);
 }
 
index c990a5e577f02738f6a9dd119bace01f3e8e68b0..f17b9c1c4644d0b5c8470231daa2b7eddee9f591 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/compiler.h>
 #include <linux/string.h>
 #include <linux/thread_info.h>
+#include <linux/kernel.h>
 #include <asm/asi.h>
 #include <asm/spitfire.h>
 #include <asm-generic/uaccess-unaligned.h>
@@ -214,8 +215,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long size)
 {
-       unsigned long ret = ___copy_from_user(to, from, size);
+       unsigned long ret;
 
+       if ((long)size < 0 || size > INT_MAX)
+               return size;
+
+       if (!__builtin_constant_p(size))
+               check_object_size(to, size, false);
+
+       ret = ___copy_from_user(to, from, size);
        if (unlikely(ret))
                ret = copy_from_user_fixup(to, from, size);
 
@@ -231,8 +239,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long size)
 {
-       unsigned long ret = ___copy_to_user(to, from, size);
+       unsigned long ret;
+
+       if ((long)size < 0 || size > INT_MAX)
+               return size;
+
+       if (!__builtin_constant_p(size))
+               check_object_size(from, size, true);
 
+       ret = ___copy_to_user(to, from, size);
        if (unlikely(ret))
                ret = copy_to_user_fixup(to, from, size);
        return ret;
index 7cf9c6ea3f1f210c0856351e47d1a4252913667b..620664832bbf42b819c3a7e1b8837d8ffcfbefa6 100644 (file)
@@ -4,7 +4,7 @@
 #
 
 asflags-y := -ansi
-ccflags-y := -Werror
+#ccflags-y := -Werror
 
 extra-y     := head_$(BITS).o
 
index 50e7b626afe86493c54fc686d900ce8806bb2c77..79fae35a07587bb323ceddac9ff0f11821a6fe26 100644 (file)
@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
 
         printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx    %s\n",
               r->psr, r->pc, r->npc, r->y, print_tainted());
-       printk("PC: <%pS>\n", (void *) r->pc);
+       printk("PC: <%pA>\n", (void *) r->pc);
        printk("%%G: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
               r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
               r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
        printk("%%O: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
               r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
               r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
-       printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
+       printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
 
        printk("%%L: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
               rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
                rw = (struct reg_window32 *) fp;
                pc = rw->ins[7];
                printk("[%08lx : ", pc);
-               printk("%pS ] ", (void *) pc);
+               printk("%pA ] ", (void *) pc);
                fp = rw->ins[6];
        } while (++count < 16);
        printk("\n");
index 0be7bf978cb1da03b8d657958ec6d0ec3c4e7d8b..2b1cba81e998b34b6b14a5ce43b28248b7cf7836 100644 (file)
@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
        printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
               rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
        if (regs->tstate & TSTATE_PRIV)
-               printk("I7: <%pS>\n", (void *) rwk->ins[7]);
+               printk("I7: <%pA>\n", (void *) rwk->ins[7]);
 }
 
 void show_regs(struct pt_regs *regs)
@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
 
        printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x    %s\n", regs->tstate,
               regs->tpc, regs->tnpc, regs->y, print_tainted());
-       printk("TPC: <%pS>\n", (void *) regs->tpc);
+       printk("TPC: <%pA>\n", (void *) regs->tpc);
        printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
               regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
               regs->u_regs[3]);
@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
        printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
               regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
               regs->u_regs[15]);
-       printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
+       printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
        show_regwindow(regs);
        show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
 }
@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
                       ((tp && tp->task) ? tp->task->pid : -1));
 
                if (gp->tstate & TSTATE_PRIV) {
-                       printk("             TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
+                       printk("             TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
                               (void *) gp->tpc,
                               (void *) gp->o7,
                               (void *) gp->i7,
index 79cc0d1a477d0dbeb80178fb977c0d9e04bd24f5..ec627348251ea93d470ffe049e163075e03ef209 100644 (file)
@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
 
 unsigned int prom_early_allocated __initdata;
 
-static struct of_pdt_ops prom_sparc_ops __initdata = {
+static struct of_pdt_ops prom_sparc_ops __initconst = {
        .nextprop = prom_common_nextprop,
        .getproplen = prom_getproplen,
        .getproperty = prom_getproperty,
index 9ddc4928a089b599568331792097c2bc35ea0be8..27a56199595f30da42a42a4b0a5031c8fc22898f 100644 (file)
@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
        return ret;
 }
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+extern void gr_delayed_cred_worker(void);
+#endif
+
 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
 {
        int ret = 0;
@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
        if (test_thread_flag(TIF_NOHZ))
                user_exit();
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+       if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
+               gr_delayed_cred_worker();
+#endif
+
        if (test_thread_flag(TIF_SYSCALL_TRACE))
                ret = tracehook_report_syscall_entry(regs);
 
@@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
        if (test_thread_flag(TIF_NOHZ))
                user_exit();
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+       if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
+               gr_delayed_cred_worker();
+#endif
+
        audit_syscall_exit(regs);
 
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
index da6f1a7fc4db4713425d1927af185cb797b4c1fc..e5dea8f49892f85f1d2c3d448471368a977ca158 100644 (file)
@@ -887,7 +887,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
                return;
 
 #ifdef CONFIG_DEBUG_DCFLUSH
-       atomic_inc(&dcpage_flushes);
+       atomic_inc_unchecked(&dcpage_flushes);
 #endif
 
        this_cpu = get_cpu();
@@ -911,7 +911,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
                        xcall_deliver(data0, __pa(pg_addr),
                                      (u64) pg_addr, cpumask_of(cpu));
 #ifdef CONFIG_DEBUG_DCFLUSH
-                       atomic_inc(&dcpage_flushes_xcall);
+                       atomic_inc_unchecked(&dcpage_flushes_xcall);
 #endif
                }
        }
@@ -930,7 +930,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
        preempt_disable();
 
 #ifdef CONFIG_DEBUG_DCFLUSH
-       atomic_inc(&dcpage_flushes);
+       atomic_inc_unchecked(&dcpage_flushes);
 #endif
        data0 = 0;
        pg_addr = page_address(page);
@@ -947,7 +947,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
                xcall_deliver(data0, __pa(pg_addr),
                              (u64) pg_addr, cpu_online_mask);
 #ifdef CONFIG_DEBUG_DCFLUSH
-               atomic_inc(&dcpage_flushes_xcall);
+               atomic_inc_unchecked(&dcpage_flushes_xcall);
 #endif
        }
        __local_flush_dcache_page(page);
index 646988d4c1a35aca56c0a3f0c56abdf1ca183587..b88905f087b9b51de5b376e625e0af6ff8bd05ae 100644 (file)
@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
        if (len > TASK_SIZE - PAGE_SIZE)
                return -ENOMEM;
        if (!addr)
-               addr = TASK_UNMAPPED_BASE;
+               addr = current->mm->mmap_base;
 
        info.flags = 0;
        info.length = len;
index c85403d0496c24f7639a32fa06b7dc53c1559381..6af95c93031297b582f1ec3ea3fa11eae7abe47a 100644 (file)
@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
        struct vm_area_struct * vma;
        unsigned long task_size = TASK_SIZE;
        int do_color_align;
+       unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
        struct vm_unmapped_area_info info;
 
        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
-               if ((flags & MAP_SHARED) &&
+               if ((filp || (flags & MAP_SHARED)) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
                        return -EINVAL;
                return addr;
@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        if (addr) {
                if (do_color_align)
                        addr = COLOR_ALIGN(addr, pgoff);
@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
                        addr = PAGE_ALIGN(addr);
 
                vma = find_vma(mm, addr);
-               if (task_size - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+               if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
                        return addr;
        }
 
        info.flags = 0;
        info.length = len;
-       info.low_limit = TASK_UNMAPPED_BASE;
+       info.low_limit = mm->mmap_base;
        info.high_limit = min(task_size, VA_EXCLUDE_START);
        info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
+       info.threadstack_offset = offset;
        addr = vm_unmapped_area(&info);
 
        if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
                VM_BUG_ON(addr != -ENOMEM);
                info.low_limit = VA_EXCLUDE_END;
+
+#ifdef CONFIG_PAX_RANDMMAP
+               if (mm->pax_flags & MF_PAX_RANDMMAP)
+                       info.low_limit += mm->delta_mmap;
+#endif
+
                info.high_limit = task_size;
                addr = vm_unmapped_area(&info);
        }
@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        unsigned long task_size = STACK_TOP32;
        unsigned long addr = addr0;
        int do_color_align;
+       unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
        struct vm_unmapped_area_info info;
 
        /* This should only ever run for 32-bit processes.  */
@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
-               if ((flags & MAP_SHARED) &&
+               if ((filp || (flags & MAP_SHARED)) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
                        return -EINVAL;
                return addr;
@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        /* requesting a specific address */
        if (addr) {
                if (do_color_align)
@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        addr = PAGE_ALIGN(addr);
 
                vma = find_vma(mm, addr);
-               if (task_size - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+               if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
                        return addr;
        }
 
@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        info.high_limit = mm->mmap_base;
        info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
+       info.threadstack_offset = offset;
        addr = vm_unmapped_area(&info);
 
        /*
@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_PAX_RANDMMAP
+               if (mm->pax_flags & MF_PAX_RANDMMAP)
+                       info.low_limit += mm->delta_mmap;
+#endif
+
                info.high_limit = STACK_TOP32;
                addr = vm_unmapped_area(&info);
        }
@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
 EXPORT_SYMBOL(get_fb_unmapped_area);
 
 /* Essentially the same as PowerPC.  */
-static unsigned long mmap_rnd(void)
+static unsigned long mmap_rnd(struct mm_struct *mm)
 {
        unsigned long rnd = 0UL;
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        if (current->flags & PF_RANDOMIZE) {
                unsigned long val = get_random_int();
                if (test_thread_flag(TIF_32BIT))
@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
 
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
-       unsigned long random_factor = mmap_rnd();
+       unsigned long random_factor = mmap_rnd(mm);
        unsigned long gap;
 
        /*
@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
            gap == RLIM_INFINITY ||
            sysctl_legacy_va_layout) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+
+#ifdef CONFIG_PAX_RANDMMAP
+               if (mm->pax_flags & MF_PAX_RANDMMAP)
+                       mm->mmap_base += mm->delta_mmap;
+#endif
+
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                /* We know it's 32-bit */
@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
                        gap = (task_size / 6 * 5);
 
                mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
+
+#ifdef CONFIG_PAX_RANDMMAP
+               if (mm->pax_flags & MF_PAX_RANDMMAP)
+                       mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
+#endif
+
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
 }
index bb0008927598b1f7bbeeccdf30c6fa154219a4ce..e0ea580646f101ccc091786e18ce4164c6a557a7 100644 (file)
@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
 #endif
        .align  32
 1:     ldx     [%g6 + TI_FLAGS], %l5
-       andcc   %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
+       andcc   %l5, _TIF_WORK_SYSCALL, %g0
        be,pt   %icc, rtrap
         nop
        call    syscall_trace_leave
@@ -194,7 +194,7 @@ linux_sparc_syscall32:
 
        srl     %i3, 0, %o3                             ! IEU0
        srl     %i2, 0, %o2                             ! IEU0  Group
-       andcc   %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
+       andcc   %l0, _TIF_WORK_SYSCALL, %g0
        bne,pn  %icc, linux_syscall_trace32             ! CTI
         mov    %i0, %l5                                ! IEU1
 5:     call    %l7                                     ! CTI   Group brk forced
@@ -218,7 +218,7 @@ linux_sparc_syscall:
 
        mov     %i3, %o3                                ! IEU1
        mov     %i4, %o4                                ! IEU0  Group
-       andcc   %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
+       andcc   %l0, _TIF_WORK_SYSCALL, %g0
        bne,pn  %icc, linux_syscall_trace               ! CTI   Group
         mov    %i0, %l5                                ! IEU0
 2:     call    %l7                                     ! CTI   Group brk forced
@@ -233,7 +233,7 @@ ret_sys_call:
 
        cmp     %o0, -ERESTART_RESTARTBLOCK
        bgeu,pn %xcc, 1f
-        andcc  %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
+        andcc  %l0, _TIF_WORK_SYSCALL, %g0
        ldx     [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
 
 2:
index 6fd386c5232a65dc068d48e811f8508cc771cae6..6907d81b5794819d57bd8a9e86ec8ec8f8280523 100644 (file)
@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
 
+extern void gr_handle_kernel_exploit(void);
+
 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
 {
        static int die_counter;
@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
                      count++ < 30                              &&
                       (((unsigned long) rw) >= PAGE_OFFSET)    &&
                      !(((unsigned long) rw) & 0x7)) {
-                       printk("Caller[%08lx]: %pS\n", rw->ins[7],
+                       printk("Caller[%08lx]: %pA\n", rw->ins[7],
                               (void *) rw->ins[7]);
                        rw = (struct reg_window32 *)rw->ins[6];
                }
        }
        printk("Instruction DUMP:");
        instruction_dump ((unsigned long *) regs->pc);
-       if(regs->psr & PSR_PS)
+       if(regs->psr & PSR_PS) {
+               gr_handle_kernel_exploit();
                do_exit(SIGKILL);
+       }
        do_exit(SIGSEGV);
 }
 
index 981a769b955802573c00d184ed76581f68d88777..d906eda33f39d1e6735e205466f020dd92aa275f 100644 (file)
@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
                       i + 1,
                       p->trapstack[i].tstate, p->trapstack[i].tpc,
                       p->trapstack[i].tnpc, p->trapstack[i].tt);
-               printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
+               printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
        }
 }
 
@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
 
        lvl -= 0x100;
        if (regs->tstate & TSTATE_PRIV) {
+
+#ifdef CONFIG_PAX_REFCOUNT
+               if (lvl == 6)
+                       pax_report_refcount_overflow(regs);
+#endif
+
                sprintf(buffer, "Kernel bad sw trap %lx", lvl);
                die_if_kernel(buffer, regs);
        }
@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
 void bad_trap_tl1(struct pt_regs *regs, long lvl)
 {
        char buffer[32];
-       
+
        if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
                       0, lvl, SIGTRAP) == NOTIFY_STOP)
                return;
 
+#ifdef CONFIG_PAX_REFCOUNT
+       if (lvl == 6)
+               pax_report_refcount_overflow(regs);
+#endif
+
        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 
        sprintf (buffer, "Bad trap %lx at tl>0", lvl);
@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
               regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
        printk("%s" "ERROR(%d): ",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
-       printk("TPC<%pS>\n", (void *) regs->tpc);
+       printk("TPC<%pA>\n", (void *) regs->tpc);
        printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
                       smp_processor_id(),
                       (type & 0x1) ? 'I' : 'D',
                       regs->tpc);
-               printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
+               printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
                panic("Irrecoverable Cheetah+ parity error.");
        }
 
@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
               smp_processor_id(),
               (type & 0x1) ? 'I' : 'D',
               regs->tpc);
-       printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
+       printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
 }
 
 struct sun4v_error_entry {
@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
 /*0x38*/u64            reserved_5;
 };
 
-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
 
 static const char *sun4v_err_type_to_str(u8 type)
 {
@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
 }
 
 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
-                           int cpu, const char *pfx, atomic_t *ocnt)
+                           int cpu, const char *pfx, atomic_unchecked_t *ocnt)
 {
        u64 *raw_ptr = (u64 *) ent;
        u32 attrs;
@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
 
        show_regs(regs);
 
-       if ((cnt = atomic_read(ocnt)) != 0) {
-               atomic_set(ocnt, 0);
+       if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
+               atomic_set_unchecked(ocnt, 0);
                wmb();
                printk("%s: Queue overflowed %d times.\n",
                       pfx, cnt);
@@ -2048,7 +2059,7 @@ out:
  */
 void sun4v_resum_overflow(struct pt_regs *regs)
 {
-       atomic_inc(&sun4v_resum_oflow_cnt);
+       atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
 }
 
 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
        /* XXX Actually even this can make not that much sense.  Perhaps
         * XXX we should just pull the plug and panic directly from here?
         */
-       atomic_inc(&sun4v_nonresum_oflow_cnt);
+       atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
 }
 
 static void sun4v_tlb_error(struct pt_regs *regs)
@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
 
        printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
               regs->tpc, tl);
-       printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
+       printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
        printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
-       printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
+       printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
               (void *) regs->u_regs[UREG_I7]);
        printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
               "pte[%lx] error[%lx]\n",
@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
 
        printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
               regs->tpc, tl);
-       printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
+       printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
        printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
-       printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
+       printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
               (void *) regs->u_regs[UREG_I7]);
        printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
               "pte[%lx] error[%lx]\n",
@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
                        fp = (unsigned long)sf->fp + STACK_BIAS;
                }
 
-               printk(" [%016lx] %pS\n", pc, (void *) pc);
+               printk(" [%016lx] %pA\n", pc, (void *) pc);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
                if ((pc + 8UL) == (unsigned long) &return_to_handler) {
                        int index = tsk->curr_ret_stack;
                        if (tsk->ret_stack && index >= graph) {
                                pc = tsk->ret_stack[index - graph].ret;
-                               printk(" [%016lx] %pS\n", pc, (void *) pc);
+                               printk(" [%016lx] %pA\n", pc, (void *) pc);
                                graph++;
                        }
                }
@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
        return (struct reg_window *) (fp + STACK_BIAS);
 }
 
+extern void gr_handle_kernel_exploit(void);
+
 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
 {
        static int die_counter;
@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
                while (rw &&
                       count++ < 30 &&
                       kstack_valid(tp, (unsigned long) rw)) {
-                       printk("Caller[%016lx]: %pS\n", rw->ins[7],
+                       printk("Caller[%016lx]: %pA\n", rw->ins[7],
                               (void *) rw->ins[7]);
 
                        rw = kernel_stack_up(rw);
@@ -2427,8 +2440,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
                }
                user_instruction_dump ((unsigned int __user *) regs->tpc);
        }
-       if (regs->tstate & TSTATE_PRIV)
+       if (regs->tstate & TSTATE_PRIV) {
+               gr_handle_kernel_exploit();
                do_exit(SIGKILL);
+       }
        do_exit(SIGSEGV);
 }
 EXPORT_SYMBOL(die_if_kernel);
index 62098a89bbbf5b87fe1fc640e7699d380efb5028..547ab2c3d7f48e14338a539230df19f47dee4fbf 100644 (file)
@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
        static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
 
        if (__ratelimit(&ratelimit)) {
-               printk("Kernel unaligned access at TPC[%lx] %pS\n",
+               printk("Kernel unaligned access at TPC[%lx] %pA\n",
                       regs->tpc, (void *) regs->tpc);
        }
 }
index 3269b0234093bfdbd6b6dcd22388cdc65a627d0a..64f5231a1571f8db369ceb1b91b4b7a50b7aee4c 100644 (file)
@@ -2,7 +2,7 @@
 #
 
 asflags-y := -ansi -DST_DIV0=0x02
-ccflags-y := -Werror
+#ccflags-y := -Werror
 
 lib-$(CONFIG_SPARC32) += ashrdi3.o
 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
index 05dac43907d119ebb2f45037336e853f24068c2c..76f8ed4369c5cfd24a6a88e196e26992a31c7a39 100644 (file)
         * a value and does the barriers.
         */
 
-#define ATOMIC_OP(op)                                                  \
-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */             \
+#ifdef CONFIG_PAX_REFCOUNT
+#define __REFCOUNT_OP(op) op##cc
+#define __OVERFLOW_IOP tvs     %icc, 6;
+#define __OVERFLOW_XOP tvs     %xcc, 6;
+#else
+#define __REFCOUNT_OP(op) op
+#define __OVERFLOW_IOP
+#define __OVERFLOW_XOP
+#endif
+
+#define __ATOMIC_OP(op, suffix, asm_op, post_op)                       \
+ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */     \
        BACKOFF_SETUP(%o2);                                             \
 1:     lduw    [%o1], %g1;                                             \
-       op      %g1, %o0, %g7;                                          \
+       asm_op  %g1, %o0, %g7;                                          \
+       post_op                                                         \
        cas     [%o1], %g1, %g7;                                        \
        cmp     %g1, %g7;                                               \
        bne,pn  %icc, BACKOFF_LABEL(2f, 1b);                            \
@@ -29,11 +40,15 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */          \
 2:     BACKOFF_SPIN(%o2, %o3, 1b);                                     \
 ENDPROC(atomic_##op);                                                  \
 
-#define ATOMIC_OP_RETURN(op)                                           \
-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */    \
+#define ATOMIC_OP(op) __ATOMIC_OP(op, , op, ) \
+                     __ATOMIC_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
+
+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op)                        \
+ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
        BACKOFF_SETUP(%o2);                                             \
 1:     lduw    [%o1], %g1;                                             \
-       op      %g1, %o0, %g7;                                          \
+       asm_op  %g1, %o0, %g7;                                          \
+       post_op                                                         \
        cas     [%o1], %g1, %g7;                                        \
        cmp     %g1, %g7;                                               \
        bne,pn  %icc, BACKOFF_LABEL(2f, 1b);                            \
@@ -43,6 +58,9 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */   \
 2:     BACKOFF_SPIN(%o2, %o3, 1b);                                     \
 ENDPROC(atomic_##op##_return);
 
+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , op, ) \
+                            __ATOMIC_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
+
 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
 
 ATOMIC_OPS(add)
@@ -50,13 +68,16 @@ ATOMIC_OPS(sub)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
+#undef __ATOMIC_OP_RETURN
 #undef ATOMIC_OP
+#undef __ATOMIC_OP
 
-#define ATOMIC64_OP(op)                                                        \
-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */           \
+#define __ATOMIC64_OP(op, suffix, asm_op, post_op)                     \
+ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */   \
        BACKOFF_SETUP(%o2);                                             \
 1:     ldx     [%o1], %g1;                                             \
-       op      %g1, %o0, %g7;                                          \
+       asm_op  %g1, %o0, %g7;                                          \
+       post_op                                                         \
        casx    [%o1], %g1, %g7;                                        \
        cmp     %g1, %g7;                                               \
        bne,pn  %xcc, BACKOFF_LABEL(2f, 1b);                            \
@@ -66,11 +87,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */                \
 2:     BACKOFF_SPIN(%o2, %o3, 1b);                                     \
 ENDPROC(atomic64_##op);                                                        \
 
-#define ATOMIC64_OP_RETURN(op)                                         \
-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */  \
+#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , op, ) \
+                       __ATOMIC64_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
+
+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op)              \
+ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
        BACKOFF_SETUP(%o2);                                             \
 1:     ldx     [%o1], %g1;                                             \
-       op      %g1, %o0, %g7;                                          \
+       asm_op  %g1, %o0, %g7;                                          \
+       post_op                                                         \
        casx    [%o1], %g1, %g7;                                        \
        cmp     %g1, %g7;                                               \
        bne,pn  %xcc, BACKOFF_LABEL(2f, 1b);                            \
@@ -80,6 +105,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */        \
 2:     BACKOFF_SPIN(%o2, %o3, 1b);                                     \
 ENDPROC(atomic64_##op##_return);
 
+#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , op, ) \
+                             __ATOMIC64_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
+
 #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
 
 ATOMIC64_OPS(add)
@@ -87,7 +115,12 @@ ATOMIC64_OPS(sub)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
+#undef __ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
+#undef __ATOMIC64_OP
+#undef __OVERFLOW_XOP
+#undef __OVERFLOW_IOP
+#undef __REFCOUNT_OP
 
 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
        BACKOFF_SETUP(%o2)
index 1d649a95660c8cad57fbe90feadb7c43b9e8263f..fbc5bfc0192b23ac3eb2698fceee32c600b539f9 100644 (file)
@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
 /* Atomic counter implementation. */
 #define ATOMIC_OP(op)                                                  \
 EXPORT_SYMBOL(atomic_##op);                                            \
-EXPORT_SYMBOL(atomic64_##op);
+EXPORT_SYMBOL(atomic_##op##_unchecked);                                        \
+EXPORT_SYMBOL(atomic64_##op);                                          \
+EXPORT_SYMBOL(atomic64_##op##_unchecked);
 
 #define ATOMIC_OP_RETURN(op)                                           \
 EXPORT_SYMBOL(atomic_##op##_return);                                   \
@@ -110,6 +112,8 @@ EXPORT_SYMBOL(atomic64_##op##_return);
 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
 
 ATOMIC_OPS(add)
+EXPORT_SYMBOL(atomic_add_return_unchecked);
+EXPORT_SYMBOL(atomic64_add_return_unchecked);
 ATOMIC_OPS(sub)
 
 #undef ATOMIC_OPS
index 30c3eccfdf5a209f8b09458824aa9d706f3079dd..736f015d4ca78bbfa76e69e4dc710f91156b830d 100644 (file)
@@ -2,7 +2,7 @@
 #
 
 asflags-y := -ansi
-ccflags-y := -Werror
+#ccflags-y := -Werror
 
 obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o gup.o
 obj-y                   += fault_$(BITS).o
index 70d817154fe8bfd04aeaa71f45f15667f4962c23..274c6c013dc9e1312df4300a9846609eb69332e3 100644 (file)
@@ -21,6 +21,9 @@
 #include <linux/perf_event.h>
 #include <linux/interrupt.h>
 #include <linux/kdebug.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/compiler.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
        return safe_compute_effective_address(regs, insn);
 }
 
+#ifdef CONFIG_PAX_PAGEEXEC
+#ifdef CONFIG_PAX_DLRESOLVE
+static void pax_emuplt_close(struct vm_area_struct *vma)
+{
+       vma->vm_mm->call_dl_resolve = 0UL;
+}
+
+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       unsigned int *kaddr;
+
+       vmf->page = alloc_page(GFP_HIGHUSER);
+       if (!vmf->page)
+               return VM_FAULT_OOM;
+
+       kaddr = kmap(vmf->page);
+       memset(kaddr, 0, PAGE_SIZE);
+       kaddr[0] = 0x9DE3BFA8U; /* save */
+       flush_dcache_page(vmf->page);
+       kunmap(vmf->page);
+       return VM_FAULT_MAJOR;
+}
+
+static const struct vm_operations_struct pax_vm_ops = {
+       .close = pax_emuplt_close,
+       .fault = pax_emuplt_fault
+};
+
+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
+{
+       int ret;
+
+       INIT_LIST_HEAD(&vma->anon_vma_chain);
+       vma->vm_mm = current->mm;
+       vma->vm_start = addr;
+       vma->vm_end = addr + PAGE_SIZE;
+       vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
+       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+       vma->vm_ops = &pax_vm_ops;
+
+       ret = insert_vm_struct(current->mm, vma);
+       if (ret)
+               return ret;
+
+       ++current->mm->total_vm;
+       return 0;
+}
+#endif
+
+/*
+ * PaX: decide what to do with offenders (regs->pc = fault address)
+ *
+ * returns 1 when task should be killed
+ *         2 when patched PLT trampoline was detected
+ *         3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+       int err;
+
+       do { /* PaX: patched PLT emulation #1 */
+               unsigned int sethi1, sethi2, jmpl;
+
+               err = get_user(sethi1, (unsigned int *)regs->pc);
+               err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
+               err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
+
+               if (err)
+                       break;
+
+               if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
+                   (sethi2 & 0xFFC00000U) == 0x03000000U &&
+                   (jmpl & 0xFFFFE000U) == 0x81C06000U)
+               {
+                       unsigned int addr;
+
+                       regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
+                       addr = regs->u_regs[UREG_G1];
+                       addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
+                       regs->pc = addr;
+                       regs->npc = addr+4;
+                       return 2;
+               }
+       } while (0);
+
+       do { /* PaX: patched PLT emulation #2 */
+               unsigned int ba;
+
+               err = get_user(ba, (unsigned int *)regs->pc);
+
+               if (err)
+                       break;
+
+               if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
+                       unsigned int addr;
+
+                       if ((ba & 0xFFC00000U) == 0x30800000U)
+                               addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
+                       else
+                               addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
+                       regs->pc = addr;
+                       regs->npc = addr+4;
+                       return 2;
+               }
+       } while (0);
+
+       do { /* PaX: patched PLT emulation #3 */
+               unsigned int sethi, bajmpl, nop;
+
+               err = get_user(sethi, (unsigned int *)regs->pc);
+               err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
+               err |= get_user(nop, (unsigned int *)(regs->pc+8));
+
+               if (err)
+                       break;
+
+               if ((sethi & 0xFFC00000U) == 0x03000000U &&
+                   ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
+                   nop == 0x01000000U)
+               {
+                       unsigned int addr;
+
+                       addr = (sethi & 0x003FFFFFU) << 10;
+                       regs->u_regs[UREG_G1] = addr;
+                       if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
+                               addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
+                       else
+                               addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
+                       regs->pc = addr;
+                       regs->npc = addr+4;
+                       return 2;
+               }
+       } while (0);
+
+       do { /* PaX: unpatched PLT emulation step 1 */
+               unsigned int sethi, ba, nop;
+
+               err = get_user(sethi, (unsigned int *)regs->pc);
+               err |= get_user(ba, (unsigned int *)(regs->pc+4));
+               err |= get_user(nop, (unsigned int *)(regs->pc+8));
+
+               if (err)
+                       break;
+
+               if ((sethi & 0xFFC00000U) == 0x03000000U &&
+                   ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
+                   nop == 0x01000000U)
+               {
+                       unsigned int addr, save, call;
+
+                       if ((ba & 0xFFC00000U) == 0x30800000U)
+                               addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
+                       else
+                               addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
+
+                       err = get_user(save, (unsigned int *)addr);
+                       err |= get_user(call, (unsigned int *)(addr+4));
+                       err |= get_user(nop, (unsigned int *)(addr+8));
+                       if (err)
+                               break;
+
+#ifdef CONFIG_PAX_DLRESOLVE
+                       if (save == 0x9DE3BFA8U &&
+                           (call & 0xC0000000U) == 0x40000000U &&
+                           nop == 0x01000000U)
+                       {
+                               struct vm_area_struct *vma;
+                               unsigned long call_dl_resolve;
+
+                               down_read(&current->mm->mmap_sem);
+                               call_dl_resolve = current->mm->call_dl_resolve;
+                               up_read(&current->mm->mmap_sem);
+                               if (likely(call_dl_resolve))
+                                       goto emulate;
+
+                               vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+
+                               down_write(&current->mm->mmap_sem);
+                               if (current->mm->call_dl_resolve) {
+                                       call_dl_resolve = current->mm->call_dl_resolve;
+                                       up_write(&current->mm->mmap_sem);
+                                       if (vma)
+                                               kmem_cache_free(vm_area_cachep, vma);
+                                       goto emulate;
+                               }
+
+                               call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
+                               if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
+                                       up_write(&current->mm->mmap_sem);
+                                       if (vma)
+                                               kmem_cache_free(vm_area_cachep, vma);
+                                       return 1;
+                               }
+
+                               if (pax_insert_vma(vma, call_dl_resolve)) {
+                                       up_write(&current->mm->mmap_sem);
+                                       kmem_cache_free(vm_area_cachep, vma);
+                                       return 1;
+                               }
+
+                               current->mm->call_dl_resolve = call_dl_resolve;
+                               up_write(&current->mm->mmap_sem);
+
+emulate:
+                               regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
+                               regs->pc = call_dl_resolve;
+                               regs->npc = addr+4;
+                               return 3;
+                       }
+#endif
+
+                       /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
+                       if ((save & 0xFFC00000U) == 0x05000000U &&
+                           (call & 0xFFFFE000U) == 0x85C0A000U &&
+                           nop == 0x01000000U)
+                       {
+                               regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
+                               regs->u_regs[UREG_G2] = addr + 4;
+                               addr = (save & 0x003FFFFFU) << 10;
+                               addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
+                               regs->pc = addr;
+                               regs->npc = addr+4;
+                               return 3;
+                       }
+               }
+       } while (0);
+
+       do { /* PaX: unpatched PLT emulation step 2 */
+               unsigned int save, call, nop;
+
+               err = get_user(save, (unsigned int *)(regs->pc-4));
+               err |= get_user(call, (unsigned int *)regs->pc);
+               err |= get_user(nop, (unsigned int *)(regs->pc+4));
+               if (err)
+                       break;
+
+               if (save == 0x9DE3BFA8U &&
+                   (call & 0xC0000000U) == 0x40000000U &&
+                   nop == 0x01000000U)
+               {
+                       unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
+
+                       regs->u_regs[UREG_RETPC] = regs->pc;
+                       regs->pc = dl_resolve;
+                       regs->npc = dl_resolve+4;
+                       return 3;
+               }
+       } while (0);
+#endif
+
+       return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+       unsigned long i;
+
+       printk(KERN_ERR "PAX: bytes at PC: ");
+       for (i = 0; i < 8; i++) {
+               unsigned int c;
+               if (get_user(c, (unsigned int *)pc+i))
+                       printk(KERN_CONT "???????? ");
+               else
+                       printk(KERN_CONT "%08x ", c);
+       }
+       printk("\n");
+}
+#endif
+
 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
                                      int text_fault)
 {
@@ -226,6 +500,24 @@ good_area:
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+               if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
+                       up_read(&mm->mmap_sem);
+                       switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_PAX_EMUPLT
+                       case 2:
+                       case 3:
+                               return;
+#endif
+
+                       }
+                       pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
+                       do_group_exit(SIGKILL);
+               }
+#endif
+
                /* Allow reads even for write-only mappings */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
index 4798232494294a7ece0bef232216dd4a26408d88..f76e3aa5494f17ec9c8d9f3bc986d42baccc4875 100644 (file)
@@ -22,6 +22,9 @@
 #include <linux/kdebug.h>
 #include <linux/percpu.h>
 #include <linux/context_tracking.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/compiler.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
        printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
               regs->tpc);
        printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
-       printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
+       printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
        printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
        dump_stack();
        unhandled_fault(regs->tpc, current, regs);
@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
        show_regs(regs);
 }
 
+#ifdef CONFIG_PAX_PAGEEXEC
+#ifdef CONFIG_PAX_DLRESOLVE
+static void pax_emuplt_close(struct vm_area_struct *vma)
+{
+       vma->vm_mm->call_dl_resolve = 0UL;
+}
+
+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       unsigned int *kaddr;
+
+       vmf->page = alloc_page(GFP_HIGHUSER);
+       if (!vmf->page)
+               return VM_FAULT_OOM;
+
+       kaddr = kmap(vmf->page);
+       memset(kaddr, 0, PAGE_SIZE);
+       kaddr[0] = 0x9DE3BFA8U; /* save */
+       flush_dcache_page(vmf->page);
+       kunmap(vmf->page);
+       return VM_FAULT_MAJOR;
+}
+
+static const struct vm_operations_struct pax_vm_ops = {
+       .close = pax_emuplt_close,
+       .fault = pax_emuplt_fault
+};
+
+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
+{
+       int ret;
+
+       INIT_LIST_HEAD(&vma->anon_vma_chain);
+       vma->vm_mm = current->mm;
+       vma->vm_start = addr;
+       vma->vm_end = addr + PAGE_SIZE;
+       vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
+       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+       vma->vm_ops = &pax_vm_ops;
+
+       ret = insert_vm_struct(current->mm, vma);
+       if (ret)
+               return ret;
+
+       ++current->mm->total_vm;
+       return 0;
+}
+#endif
+
+/*
+ * PaX: decide what to do with offenders (regs->tpc = fault address)
+ *
+ * returns 1 when task should be killed
+ *         2 when patched PLT trampoline was detected
+ *         3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+       int err;
+
+       do { /* PaX: patched PLT emulation #1 */
+               unsigned int sethi1, sethi2, jmpl;
+
+               err = get_user(sethi1, (unsigned int *)regs->tpc);
+               err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
+               err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
+
+               if (err)
+                       break;
+
+               if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
+                   (sethi2 & 0xFFC00000U) == 0x03000000U &&
+                   (jmpl & 0xFFFFE000U) == 0x81C06000U)
+               {
+                       unsigned long addr;
+
+                       regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
+                       addr = regs->u_regs[UREG_G1];
+                       addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
+
+                       if (test_thread_flag(TIF_32BIT))
+                               addr &= 0xFFFFFFFFUL;
+
+                       regs->tpc = addr;
+                       regs->tnpc = addr+4;
+                       return 2;
+               }
+       } while (0);
+
+       do { /* PaX: patched PLT emulation #2 */
+               unsigned int ba;
+
+               err = get_user(ba, (unsigned int *)regs->tpc);
+
+               if (err)
+                       break;
+
+               if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
+                       unsigned long addr;
+
+                       if ((ba & 0xFFC00000U) == 0x30800000U)
+                               addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
+                       else
+                               addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
+
+                       if (test_thread_flag(TIF_32BIT))
+                               addr &= 0xFFFFFFFFUL;
+
+                       regs->tpc = addr;
+                       regs->tnpc = addr+4;
+                       return 2;
+               }
+       } while (0);
+
+       do { /* PaX: patched PLT emulation #3 */
+               unsigned int sethi, bajmpl, nop;
+
+               err = get_user(sethi, (unsigned int *)regs->tpc);
+               err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
+               err |= get_user(nop, (unsigned int *)(regs->tpc+8));
+
+               if (err)
+                       break;
+
+               if ((sethi & 0xFFC00000U) == 0x03000000U &&
+                   ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
+                   nop == 0x01000000U)
+               {
+                       unsigned long addr;
+
+                       addr = (sethi & 0x003FFFFFU) << 10;
+                       regs->u_regs[UREG_G1] = addr;
+                       if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
+                               addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
+                       else
+                               addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
+
+                       if (test_thread_flag(TIF_32BIT))
+                               addr &= 0xFFFFFFFFUL;
+
+                       regs->tpc = addr;
+                       regs->tnpc = addr+4;
+                       return 2;
+               }
+       } while (0);
+
+       do { /* PaX: patched PLT emulation #4 */
+               unsigned int sethi, mov1, call, mov2;
+
+               err = get_user(sethi, (unsigned int *)regs->tpc);
+               err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
+               err |= get_user(call, (unsigned int *)(regs->tpc+8));
+               err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
+
+               if (err)
+                       break;
+
+               if ((sethi & 0xFFC00000U) == 0x03000000U &&
+                   mov1 == 0x8210000FU &&
+                   (call & 0xC0000000U) == 0x40000000U &&
+                   mov2 == 0x9E100001U)
+               {
+                       unsigned long addr;
+
+                       regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
+                       addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
+
+                       if (test_thread_flag(TIF_32BIT))
+                               addr &= 0xFFFFFFFFUL;
+
+                       regs->tpc = addr;
+                       regs->tnpc = addr+4;
+                       return 2;
+               }
+       } while (0);
+
+       do { /* PaX: patched PLT emulation #5 */
+               unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
+
+               err = get_user(sethi, (unsigned int *)regs->tpc);
+               err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
+               err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
+               err |= get_user(or1, (unsigned int *)(regs->tpc+12));
+               err |= get_user(or2, (unsigned int *)(regs->tpc+16));
+               err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
+               err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
+               err |= get_user(nop, (unsigned int *)(regs->tpc+28));
+
+               if (err)
+                       break;
+
+               if ((sethi & 0xFFC00000U) == 0x03000000U &&
+                   (sethi1 & 0xFFC00000U) == 0x03000000U &&
+                   (sethi2 & 0xFFC00000U) == 0x0B000000U &&
+                   (or1 & 0xFFFFE000U) == 0x82106000U &&
+                   (or2 & 0xFFFFE000U) == 0x8A116000U &&
+                   sllx == 0x83287020U &&
+                   jmpl == 0x81C04005U &&
+                   nop == 0x01000000U)
+               {
+                       unsigned long addr;
+
+                       regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
+                       regs->u_regs[UREG_G1] <<= 32;
+                       regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
+                       addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
+                       regs->tpc = addr;
+                       regs->tnpc = addr+4;
+                       return 2;
+               }
+       } while (0);
+
+       do { /* PaX: patched PLT emulation #6 */
+               unsigned int sethi, sethi1, sethi2, sllx, or,  jmpl, nop;
+
+               err = get_user(sethi, (unsigned int *)regs->tpc);
+               err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
+               err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
+               err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
+               err |= get_user(or, (unsigned int *)(regs->tpc+16));
+               err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
+               err |= get_user(nop, (unsigned int *)(regs->tpc+24));
+
+               if (err)
+                       break;
+
+               if ((sethi & 0xFFC00000U) == 0x03000000U &&
+                   (sethi1 & 0xFFC00000U) == 0x03000000U &&
+                   (sethi2 & 0xFFC00000U) == 0x0B000000U &&
+                   sllx == 0x83287020U &&
+                   (or & 0xFFFFE000U) == 0x8A116000U &&
+                   jmpl == 0x81C04005U &&
+                   nop == 0x01000000U)
+               {
+                       unsigned long addr;
+
+                       regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
+                       regs->u_regs[UREG_G1] <<= 32;
+                       regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
+                       addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
+                       regs->tpc = addr;
+                       regs->tnpc = addr+4;
+                       return 2;
+               }
+       } while (0);
+
+       do { /* PaX: unpatched PLT emulation step 1 */
+               unsigned int sethi, ba, nop;
+
+               err = get_user(sethi, (unsigned int *)regs->tpc);
+               err |= get_user(ba, (unsigned int *)(regs->tpc+4));
+               err |= get_user(nop, (unsigned int *)(regs->tpc+8));
+
+               if (err)
+                       break;
+
+               if ((sethi & 0xFFC00000U) == 0x03000000U &&
+                   ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
+                   nop == 0x01000000U)
+               {
+                       unsigned long addr;
+                       unsigned int save, call;
+                       unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
+
+                       if ((ba & 0xFFC00000U) == 0x30800000U)
+                               addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
+                       else
+                               addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
+
+                       if (test_thread_flag(TIF_32BIT))
+                               addr &= 0xFFFFFFFFUL;
+
+                       err = get_user(save, (unsigned int *)addr);
+                       err |= get_user(call, (unsigned int *)(addr+4));
+                       err |= get_user(nop, (unsigned int *)(addr+8));
+                       if (err)
+                               break;
+
+#ifdef CONFIG_PAX_DLRESOLVE
+                       if (save == 0x9DE3BFA8U &&
+                           (call & 0xC0000000U) == 0x40000000U &&
+                           nop == 0x01000000U)
+                       {
+                               struct vm_area_struct *vma;
+                               unsigned long call_dl_resolve;
+
+                               down_read(&current->mm->mmap_sem);
+                               call_dl_resolve = current->mm->call_dl_resolve;
+                               up_read(&current->mm->mmap_sem);
+                               if (likely(call_dl_resolve))
+                                       goto emulate;
+
+                               vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+
+                               down_write(&current->mm->mmap_sem);
+                               if (current->mm->call_dl_resolve) {
+                                       call_dl_resolve = current->mm->call_dl_resolve;
+                                       up_write(&current->mm->mmap_sem);
+                                       if (vma)
+                                               kmem_cache_free(vm_area_cachep, vma);
+                                       goto emulate;
+                               }
+
+                               call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
+                               if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
+                                       up_write(&current->mm->mmap_sem);
+                                       if (vma)
+                                               kmem_cache_free(vm_area_cachep, vma);
+                                       return 1;
+                               }
+
+                               if (pax_insert_vma(vma, call_dl_resolve)) {
+                                       up_write(&current->mm->mmap_sem);
+                                       kmem_cache_free(vm_area_cachep, vma);
+                                       return 1;
+                               }
+
+                               current->mm->call_dl_resolve = call_dl_resolve;
+                               up_write(&current->mm->mmap_sem);
+
+emulate:
+                               regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
+                               regs->tpc = call_dl_resolve;
+                               regs->tnpc = addr+4;
+                               return 3;
+                       }
+#endif
+
+                       /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
+                       if ((save & 0xFFC00000U) == 0x05000000U &&
+                           (call & 0xFFFFE000U) == 0x85C0A000U &&
+                           nop == 0x01000000U)
+                       {
+                               regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
+                               regs->u_regs[UREG_G2] = addr + 4;
+                               addr = (save & 0x003FFFFFU) << 10;
+                               addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
+
+                               if (test_thread_flag(TIF_32BIT))
+                                       addr &= 0xFFFFFFFFUL;
+
+                               regs->tpc = addr;
+                               regs->tnpc = addr+4;
+                               return 3;
+                       }
+
+                       /* PaX: 64-bit PLT stub */
+                       err = get_user(sethi1, (unsigned int *)addr);
+                       err |= get_user(sethi2, (unsigned int *)(addr+4));
+                       err |= get_user(or1, (unsigned int *)(addr+8));
+                       err |= get_user(or2, (unsigned int *)(addr+12));
+                       err |= get_user(sllx, (unsigned int *)(addr+16));
+                       err |= get_user(add, (unsigned int *)(addr+20));
+                       err |= get_user(jmpl, (unsigned int *)(addr+24));
+                       err |= get_user(nop, (unsigned int *)(addr+28));
+                       if (err)
+                               break;
+
+                       if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
+                           (sethi2 & 0xFFC00000U) == 0x0B000000U &&
+                           (or1 & 0xFFFFE000U) == 0x88112000U &&
+                           (or2 & 0xFFFFE000U) == 0x8A116000U &&
+                           sllx == 0x89293020U &&
+                           add == 0x8A010005U &&
+                           jmpl == 0x89C14000U &&
+                           nop == 0x01000000U)
+                       {
+                               regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
+                               regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
+                               regs->u_regs[UREG_G4] <<= 32;
+                               regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
+                               regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
+                               regs->u_regs[UREG_G4] = addr + 24;
+                               addr = regs->u_regs[UREG_G5];
+                               regs->tpc = addr;
+                               regs->tnpc = addr+4;
+                               return 3;
+                       }
+               }
+       } while (0);
+
+#ifdef CONFIG_PAX_DLRESOLVE
+       do { /* PaX: unpatched PLT emulation step 2 */
+               unsigned int save, call, nop;
+
+               err = get_user(save, (unsigned int *)(regs->tpc-4));
+               err |= get_user(call, (unsigned int *)regs->tpc);
+               err |= get_user(nop, (unsigned int *)(regs->tpc+4));
+               if (err)
+                       break;
+
+               if (save == 0x9DE3BFA8U &&
+                   (call & 0xC0000000U) == 0x40000000U &&
+                   nop == 0x01000000U)
+               {
+                       unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
+
+                       if (test_thread_flag(TIF_32BIT))
+                               dl_resolve &= 0xFFFFFFFFUL;
+
+                       regs->u_regs[UREG_RETPC] = regs->tpc;
+                       regs->tpc = dl_resolve;
+                       regs->tnpc = dl_resolve+4;
+                       return 3;
+               }
+       } while (0);
+#endif
+
+       do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
+               unsigned int sethi, ba, nop;
+
+               err = get_user(sethi, (unsigned int *)regs->tpc);
+               err |= get_user(ba, (unsigned int *)(regs->tpc+4));
+               err |= get_user(nop, (unsigned int *)(regs->tpc+8));
+
+               if (err)
+                       break;
+
+               if ((sethi & 0xFFC00000U) == 0x03000000U &&
+                   (ba & 0xFFF00000U) == 0x30600000U &&
+                   nop == 0x01000000U)
+               {
+                       unsigned long addr;
+
+                       addr = (sethi & 0x003FFFFFU) << 10;
+                       regs->u_regs[UREG_G1] = addr;
+                       addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
+
+                       if (test_thread_flag(TIF_32BIT))
+                               addr &= 0xFFFFFFFFUL;
+
+                       regs->tpc = addr;
+                       regs->tnpc = addr+4;
+                       return 2;
+               }
+       } while (0);
+
+#endif
+
+       return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+       unsigned long i;
+
+       printk(KERN_ERR "PAX: bytes at PC: ");
+       for (i = 0; i < 8; i++) {
+               unsigned int c;
+               if (get_user(c, (unsigned int *)pc+i))
+                       printk(KERN_CONT "???????? ");
+               else
+                       printk(KERN_CONT "%08x ", c);
+       }
+       printk("\n");
+}
+#endif
+
 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 {
        enum ctx_state prev_state = exception_enter();
@@ -353,6 +816,29 @@ retry:
        if (!vma)
                goto bad_area;
 
+#ifdef CONFIG_PAX_PAGEEXEC
+       /* PaX: detect ITLB misses on non-exec pages */
+       if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
+           !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
+       {
+               if (address != regs->tpc)
+                       goto good_area;
+
+               up_read(&mm->mmap_sem);
+               switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_PAX_EMUPLT
+               case 2:
+               case 3:
+                       return;
+#endif
+
+               }
+               pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
+               do_group_exit(SIGKILL);
+       }
+#endif
+
        /* Pure DTLB misses do not tell us whether the fault causing
         * load/store/atomic was a write or not, it only says that there
         * was no match.  So in such a case we (carefully) read the
index d329537739c6f39bb1aa8fa0b7b21d3d4c7ed52c..2c3746a32f2280e726c2dcb613e5b527b778596b 100644 (file)
@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
                                                        unsigned long addr,
                                                        unsigned long len,
                                                        unsigned long pgoff,
-                                                       unsigned long flags)
+                                                       unsigned long flags,
+                                                       unsigned long offset)
 {
+       struct mm_struct *mm = current->mm;
        unsigned long task_size = TASK_SIZE;
        struct vm_unmapped_area_info info;
 
@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
 
        info.flags = 0;
        info.length = len;
-       info.low_limit = TASK_UNMAPPED_BASE;
+       info.low_limit = mm->mmap_base;
        info.high_limit = min(task_size, VA_EXCLUDE_START);
        info.align_mask = PAGE_MASK & ~HPAGE_MASK;
        info.align_offset = 0;
+       info.threadstack_offset = offset;
        addr = vm_unmapped_area(&info);
 
        if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
                VM_BUG_ON(addr != -ENOMEM);
                info.low_limit = VA_EXCLUDE_END;
+
+#ifdef CONFIG_PAX_RANDMMAP
+               if (mm->pax_flags & MF_PAX_RANDMMAP)
+                       info.low_limit += mm->delta_mmap;
+#endif
+
                info.high_limit = task_size;
                addr = vm_unmapped_area(&info);
        }
@@ -55,7 +64,8 @@ static unsigned long
 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                                  const unsigned long len,
                                  const unsigned long pgoff,
-                                 const unsigned long flags)
+                                 const unsigned long flags,
+                                 const unsigned long offset)
 {
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        info.high_limit = mm->mmap_base;
        info.align_mask = PAGE_MASK & ~HPAGE_MASK;
        info.align_offset = 0;
+       info.threadstack_offset = offset;
        addr = vm_unmapped_area(&info);
 
        /*
@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_PAX_RANDMMAP
+               if (mm->pax_flags & MF_PAX_RANDMMAP)
+                       info.low_limit += mm->delta_mmap;
+#endif
+
                info.high_limit = STACK_TOP32;
                addr = vm_unmapped_area(&info);
        }
@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long task_size = TASK_SIZE;
+       unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
 
        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;
@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                return addr;
        }
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        if (addr) {
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
-               if (task_size - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+               if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
-                               pgoff, flags);
+                               pgoff, flags, offset);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
-                               pgoff, flags);
+                               pgoff, flags, offset);
 }
 
 pte_t *huge_pte_alloc(struct mm_struct *mm,
index 3ea267c53320d49683ab39c9e0d95189e4adb56e..93f0659571092e8140fe93af76b47b691bf6a298 100644 (file)
@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
 int num_kernel_image_mappings;
 
 #ifdef CONFIG_DEBUG_DCFLUSH
-atomic_t dcpage_flushes = ATOMIC_INIT(0);
+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
 #ifdef CONFIG_SMP
-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
 #endif
 #endif
 
@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
 {
        BUG_ON(tlb_type == hypervisor);
 #ifdef CONFIG_DEBUG_DCFLUSH
-       atomic_inc(&dcpage_flushes);
+       atomic_inc_unchecked(&dcpage_flushes);
 #endif
 
 #ifdef DCACHE_ALIASING_POSSIBLE
@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
 
 #ifdef CONFIG_DEBUG_DCFLUSH
        seq_printf(m, "DCPageFlushes\t: %d\n",
-                  atomic_read(&dcpage_flushes));
+                  atomic_read_unchecked(&dcpage_flushes));
 #ifdef CONFIG_SMP
        seq_printf(m, "DCPageFlushesXC\t: %d\n",
-                  atomic_read(&dcpage_flushes_xcall));
+                  atomic_read_unchecked(&dcpage_flushes_xcall));
 #endif /* CONFIG_SMP */
 #endif /* CONFIG_DEBUG_DCFLUSH */
 }
index 7cca41842a9e5acc9382f73896278f3404772eb3..53fc030a61c3862eae0ac3f09bfb0b781ce2e1fd 100644 (file)
@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
 
 config KEXEC
        bool "kexec system call"
+       depends on !GRKERNSEC_KMEM
        ---help---
          kexec is a system call that implements the ability to shutdown your
          current kernel, and to start another kernel.  It is like a reboot
index 7b11c5fadd4220f5e8694b6e2a2f906ccf4a50d3..755a02652db8accf52e0f7313f4372f05f8e2ee5 100644 (file)
@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1, 0)
 
+#define atomic64_read_unchecked(v)             atomic64_read(v)
+#define atomic64_set_unchecked(v, i)           atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v)           atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v)    atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v)           atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v)              atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v)       atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v)              atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n)    atomic64_cmpxchg((v), (o), (n))
+
 /* Define this to indicate that cmpxchg is an efficient operation. */
 #define __HAVE_ARCH_CMPXCHG
 
index 6160761d5f611319ecd9f838d3f407c934e61ae6..00cac88db6ca0ffd4c967286905d80755cef6cd8 100644 (file)
 #ifndef _ASM_TILE_CACHE_H
 #define _ASM_TILE_CACHE_H
 
+#include <linux/const.h>
 #include <arch/chip.h>
 
 /* bytes per L1 data cache line */
 #define L1_CACHE_SHIFT         CHIP_L1D_LOG_LINE_SIZE()
-#define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES         (_AC(1,UL) << L1_CACHE_SHIFT)
 
 /* bytes per L2 cache line */
 #define L2_CACHE_SHIFT         CHIP_L2_LOG_LINE_SIZE()
index b6cde3209b963d76ba35815d45b003ef929691f2..c0cb736cea160b6b9234446777df30211a54641e 100644 (file)
@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
                                          const void __user *from,
                                          unsigned long n)
 {
-       int sz = __compiletime_object_size(to);
+       size_t sz = __compiletime_object_size(to);
 
-       if (likely(sz == -1 || sz >= n))
+       if (likely(sz == (size_t)-1 || sz >= n))
                n = _copy_from_user(to, from, n);
        else
                copy_from_user_overflow();
index 3270e0019266334f5094948436423f7dbc080f68..a77236ecdb1994e7afec1071d427340f77aeb6db 100644 (file)
@@ -207,6 +207,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
+       info.threadstack_offset = 0;
        return vm_unmapped_area(&info);
 }
 
@@ -224,6 +225,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
        info.high_limit = current->mm->mmap_base;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
+       info.threadstack_offset = 0;
        addr = vm_unmapped_area(&info);
 
        /*
index e4b1a9639c4ddfab7db3d91a080272b9c00fdba8..16162f8934708818ce70b0a7d20c2520790a9f07 100644 (file)
@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
        $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
        $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
 
+ifdef CONSTIFY_PLUGIN
+USER_CFLAGS    += -fplugin-arg-constify_plugin-no-constify
+endif
+
 #This will adjust *FLAGS accordingly to the platform.
 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
 
index 19e1bdd674165739b66dab05cb04e7527f174b79..3665b77a67b01bd5e9224b24ba6aa9b8f13c88ae 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef __UM_CACHE_H
 #define __UM_CACHE_H
 
+#include <linux/const.h>
 
 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
 # define L1_CACHE_SHIFT                (CONFIG_X86_L1_CACHE_SHIFT)
@@ -12,6 +13,6 @@
 # define L1_CACHE_SHIFT                5
 #endif
 
-#define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES         (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #endif
index 2e0a6b1d83000a15cf4695608e1733ed5f538dcc..a64d0f53aaebe1ca1da7bb438f83d9767e2250d6 100644 (file)
@@ -8,6 +8,6 @@
 
 /* No more #include "asm/arch/kmap_types.h" ! */
 
-#define KM_TYPE_NR 14
+#define KM_TYPE_NR 15
 
 #endif
index 71c5d132062aa32074b3bda4a30db2ca08b86c5b..4c7b9f13bb1fb44cb9717e9806406c3acbaa936c 100644 (file)
@@ -14,6 +14,9 @@
 #define PAGE_SIZE      (_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK      (~(PAGE_SIZE-1))
 
+#define ktla_ktva(addr)                        (addr)
+#define ktva_ktla(addr)                        (addr)
+
 #ifndef __ASSEMBLY__
 
 struct page;
index 0032f9212e74a2b8b4722427611a29aeb5cd62c7..cd151e0229c7c7c0d528ad8cb704c875b1927602 100644 (file)
@@ -58,6 +58,7 @@
 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
 #define pud_populate(mm, pud, pmd) \
        set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
 
 #ifdef CONFIG_64BIT
 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
index f17bca8ed2ce610873076aa768a5dc78a6e0c72a..48adb87fa6ece4f6bf924b547a975641fdf20e31 100644 (file)
@@ -356,22 +356,6 @@ int singlestepping(void * t)
        return 2;
 }
 
-/*
- * Only x86 and x86_64 have an arch_align_stack().
- * All other arches have "#define arch_align_stack(x) (x)"
- * in their asm/exec.h
- * As this is included in UML from asm-um/system-generic.h,
- * we can use it to behave as the subarch does.
- */
-#ifndef arch_align_stack
-unsigned long arch_align_stack(unsigned long sp)
-{
-       if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-               sp -= get_random_int() % 8192;
-       return sp & ~0xf;
-}
-#endif
-
 unsigned long get_wchan(struct task_struct *p)
 {
        unsigned long stack_page, sp, ip;
index ad8f795d86ca103857b06c425fa69b4bb7c21df3..2c7eec6355b6ec807c182ced52eae5eaf4c603d5 100644 (file)
 #ifndef __UNICORE_CACHE_H__
 #define __UNICORE_CACHE_H__
 
-#define L1_CACHE_SHIFT         (5)
-#define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
+#include <linux/const.h>
+
+#define L1_CACHE_SHIFT         5
+#define L1_CACHE_BYTES         (_AC(1,UL) << L1_CACHE_SHIFT)
 
 /*
  * Memory returned by kmalloc() may be used for DMA, so we must make
index 0dc9d0144a27957d2bd2cdadf3b141a3195ccab0..98df10303cbfec907b129849fedd4c748b9611bf 100644 (file)
@@ -130,7 +130,7 @@ config X86
        select RTC_LIB
        select HAVE_DEBUG_STACKOVERFLOW
        select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
-       select HAVE_CC_STACKPROTECTOR
+       select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
        select GENERIC_CPU_AUTOPROBE
        select HAVE_ARCH_AUDITSYSCALL
        select ARCH_SUPPORTS_ATOMIC_RMW
@@ -263,7 +263,7 @@ config X86_HT
 
 config X86_32_LAZY_GS
        def_bool y
-       depends on X86_32 && !CC_STACKPROTECTOR
+       depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
 
 config ARCH_HWEIGHT_CFLAGS
        string
@@ -601,6 +601,7 @@ config SCHED_OMIT_FRAME_POINTER
 
 menuconfig HYPERVISOR_GUEST
        bool "Linux guest support"
+       depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
        ---help---
          Say Y here to enable options for running Linux under various hyper-
          visors. This option enables basic hypervisor detection and platform
@@ -978,6 +979,7 @@ config VM86
 
 config X86_16BIT
        bool "Enable support for 16-bit segments" if EXPERT
+       depends on !GRKERNSEC
        default y
        ---help---
          This option is required by programs like Wine to run 16-bit
@@ -1151,6 +1153,7 @@ choice
 
 config NOHIGHMEM
        bool "off"
+       depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
        ---help---
          Linux can use up to 64 Gigabytes of physical memory on x86 systems.
          However, the address space of 32-bit x86 processors is only 4
@@ -1187,6 +1190,7 @@ config NOHIGHMEM
 
 config HIGHMEM4G
        bool "4GB"
+       depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
        ---help---
          Select this if you have a 32-bit processor and between 1 and 4
          gigabytes of physical RAM.
@@ -1239,7 +1243,7 @@ config PAGE_OFFSET
        hex
        default 0xB0000000 if VMSPLIT_3G_OPT
        default 0x80000000 if VMSPLIT_2G
-       default 0x78000000 if VMSPLIT_2G_OPT
+       default 0x70000000 if VMSPLIT_2G_OPT
        default 0x40000000 if VMSPLIT_1G
        default 0xC0000000
        depends on X86_32
@@ -1680,6 +1684,7 @@ source kernel/Kconfig.hz
 
 config KEXEC
        bool "kexec system call"
+       depends on !GRKERNSEC_KMEM
        ---help---
          kexec is a system call that implements the ability to shutdown your
          current kernel, and to start another kernel.  It is like a reboot
@@ -1865,7 +1870,9 @@ config X86_NEED_RELOCS
 
 config PHYSICAL_ALIGN
        hex "Alignment value to which kernel should be aligned"
-       default "0x200000"
+       default "0x1000000"
+       range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
+       range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
        range 0x2000 0x1000000 if X86_32
        range 0x200000 0x1000000 if X86_64
        ---help---
@@ -1948,6 +1955,7 @@ config COMPAT_VDSO
        def_bool n
        prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
        depends on X86_32 || IA32_EMULATION
+       depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
        ---help---
          Certain buggy versions of glibc will crash if they are
          presented with a 32-bit vDSO that is not mapped at the address
index 6983314c8b377cfdf8c4206d626766c7dd1fbcc2..54ad7e861a3f4fad95e3693b572483b11f079abd 100644 (file)
@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
 
 config X86_F00F_BUG
        def_bool y
-       depends on M586MMX || M586TSC || M586 || M486
+       depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
 
 config X86_INVD_BUG
        def_bool y
@@ -327,7 +327,7 @@ config X86_INVD_BUG
 
 config X86_ALIGNMENT_16
        def_bool y
-       depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
+       depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
 
 config X86_INTEL_USERCOPY
        def_bool y
@@ -369,7 +369,7 @@ config X86_CMPXCHG64
 # generates cmov.
 config X86_CMOV
        def_bool y
-       depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
+       depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
 
 config X86_MINIMUM_CPU_FAMILY
        int
index 61bd2ad94281884f13b70f3bb9e397fdcef9338a..50b625dd61e06c0d2e3366122233b30a22c570a4 100644 (file)
@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
 config DEBUG_RODATA
        bool "Write protect kernel read-only data structures"
        default y
-       depends on DEBUG_KERNEL
+       depends on DEBUG_KERNEL && BROKEN
        ---help---
          Mark the kernel read-only data as write-protected in the pagetables,
          in order to catch accidental (and incorrect) writes to such const
@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
 
 config DEBUG_SET_MODULE_RONX
        bool "Set loadable kernel module data as NX and text as RO"
-       depends on MODULES
+       depends on MODULES && BROKEN
        ---help---
          This option helps catch unintended modifications to loadable
          kernel module's text and read-only data. It also prevents execution
index 920e6160c5353cda685d3e9d0e22ceacbdceded1..ac3d4dfd3d5828c5b227251a30a93ceb6fbe6d18 100644 (file)
@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
         # CPU-specific tuning. Anything which can be shared with UML should go here.
         include $(srctree)/arch/x86/Makefile_32.cpu
         KBUILD_CFLAGS += $(cflags-y)
-
-        # temporary until string.h is fixed
-        KBUILD_CFLAGS += -ffreestanding
 else
         BITS := 64
         UTS_MACHINE := x86_64
@@ -107,6 +104,9 @@ else
         KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
 endif
 
+# temporary until string.h is fixed
+KBUILD_CFLAGS += -ffreestanding
+
 # Make sure compiler does not have buggy stack-protector support.
 ifdef CONFIG_CC_STACKPROTECTOR
        cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
@@ -180,6 +180,7 @@ archheaders:
        $(Q)$(MAKE) $(build)=arch/x86/syscalls all
 
 archprepare:
+       $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
 ifeq ($(CONFIG_KEXEC_FILE),y)
        $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
 endif
@@ -263,3 +264,9 @@ define archhelp
   echo  '                  FDARGS="..."  arguments for the booted kernel'
   echo  '                  FDINITRD=file initrd for the booted kernel'
 endef
+
+define OLD_LD
+
+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
+*** Please upgrade your binutils to 2.18 or newer
+endef
index 3db07f30636fe40c4cfea973abb08af8adc3c13c..9d81d0f3ecf52b921d339ec39bd5a4ebb6d42fe2 100644 (file)
@@ -56,6 +56,9 @@ clean-files += cpustr.h
 # ---------------------------------------------------------------------------
 
 KBUILD_CFLAGS  := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
+ifdef CONSTIFY_PLUGIN
+KBUILD_CFLAGS  += -fplugin-arg-constify_plugin-no-constify
+endif
 KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
 
index 878e4b9940d9212ce581c5bea0ac518ae6bbf85f..20537abb34141442d8e3cdf755c0bbcb716adc35 100644 (file)
@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
        u8 v;
        const u32 *p = (const u32 *)addr;
 
-       asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
+       asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
        return v;
 }
 
@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
 
 static inline void set_bit(int nr, void *addr)
 {
-       asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
+       asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
 }
 
 #endif /* BOOT_BITOPS_H */
index bd49ec61255c7f9d65a18153dfc50e0ad8ed5be1..94c7f58131f7aac2e705cd39e21ee59417e226d3 100644 (file)
@@ -84,7 +84,7 @@ static inline void io_delay(void)
 static inline u16 ds(void)
 {
        u16 seg;
-       asm("movw %%ds,%0" : "=rm" (seg));
+       asm volatile("movw %%ds,%0" : "=rm" (seg));
        return seg;
 }
 
index 8bd44e8ee6e2a52b20b3cc254e394d19b8cbd451..6b111e95805076ff8ac5100ecb6a12512f169252 100644 (file)
@@ -28,6 +28,9 @@ KBUILD_CFLAGS += $(cflags-y)
 KBUILD_CFLAGS += -mno-mmx -mno-sse
 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
+ifdef CONSTIFY_PLUGIN
+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
+endif
 
 KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
index a53440e81d5267a03830b2c86331e6d5686ff890..c3dbf1ed7c94cf4dd0499300344daa2f8cafdef1 100644 (file)
@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
         * parameter 2, ..., param n. To make things easy, we save the return
         * address of efi_call_phys in a global variable.
         */
-       popl    %ecx
-       movl    %ecx, saved_return_addr(%edx)
-       /* get the function pointer into ECX*/
-       popl    %ecx
-       movl    %ecx, efi_rt_function_ptr(%edx)
+       popl    saved_return_addr(%edx)
+       popl    efi_rt_function_ptr(%edx)
 
        /*
         * 3. Call the physical function.
         */
-       call    *%ecx
+       call    *efi_rt_function_ptr(%edx)
 
        /*
         * 4. Balance the stack. And because EAX contain the return value,
@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
 1:     popl    %edx
        subl    $1b, %edx
 
-       movl    efi_rt_function_ptr(%edx), %ecx
-       pushl   %ecx
+       pushl   efi_rt_function_ptr(%edx)
 
        /*
         * 10. Push the saved return address onto the stack and return.
         */
-       movl    saved_return_addr(%edx), %ecx
-       pushl   %ecx
-       ret
+       jmpl    *saved_return_addr(%edx)
 ENDPROC(efi_call_phys)
 .previous
 
index 630384a4c14a96ead4e97079349cbb33a29b1e26..278e7884da07c31c6fd28955ebde28c3a72d70d7 100644 (file)
@@ -189,8 +189,8 @@ efi_gdt64:
        .long   0                       /* Filled out by user */
        .word   0
        .quad   0x0000000000000000      /* NULL descriptor */
-       .quad   0x00af9a000000ffff      /* __KERNEL_CS */
-       .quad   0x00cf92000000ffff      /* __KERNEL_DS */
+       .quad   0x00af9b000000ffff      /* __KERNEL_CS */
+       .quad   0x00cf93000000ffff      /* __KERNEL_DS */
        .quad   0x0080890000000000      /* TS descriptor */
        .quad   0x0000000000000000      /* TS continued */
 efi_gdt64_end:
index 1d7fbbcc196d6f8b661545130972453f109e27d7..36ecd584f2188a44881f89292e5908a8e7af8014 100644 (file)
@@ -140,10 +140,10 @@ preferred_addr:
        addl    %eax, %ebx
        notl    %eax
        andl    %eax, %ebx
-       cmpl    $LOAD_PHYSICAL_ADDR, %ebx
+       cmpl    $____LOAD_PHYSICAL_ADDR, %ebx
        jge     1f
 #endif
-       movl    $LOAD_PHYSICAL_ADDR, %ebx
+       movl    $____LOAD_PHYSICAL_ADDR, %ebx
 1:
 
        /* Target address to relocate to for decompression */
index 6b1766c6c08205f3bda8a527dff88097b48e77e3..ad465c93715517b442c2b38c2082300a042f6138 100644 (file)
@@ -94,10 +94,10 @@ ENTRY(startup_32)
        addl    %eax, %ebx
        notl    %eax
        andl    %eax, %ebx
-       cmpl    $LOAD_PHYSICAL_ADDR, %ebx
+       cmpl    $____LOAD_PHYSICAL_ADDR, %ebx
        jge     1f
 #endif
-       movl    $LOAD_PHYSICAL_ADDR, %ebx
+       movl    $____LOAD_PHYSICAL_ADDR, %ebx
 1:
 
        /* Target address to relocate to for decompression */
@@ -322,10 +322,10 @@ preferred_addr:
        addq    %rax, %rbp
        notq    %rax
        andq    %rax, %rbp
-       cmpq    $LOAD_PHYSICAL_ADDR, %rbp
+       cmpq    $____LOAD_PHYSICAL_ADDR, %rbp
        jge     1f
 #endif
-       movq    $LOAD_PHYSICAL_ADDR, %rbp
+       movq    $____LOAD_PHYSICAL_ADDR, %rbp
 1:
 
        /* Target address to relocate to for decompression */
@@ -434,8 +434,8 @@ gdt:
        .long   gdt
        .word   0
        .quad   0x0000000000000000      /* NULL descriptor */
-       .quad   0x00af9a000000ffff      /* __KERNEL_CS */
-       .quad   0x00cf92000000ffff      /* __KERNEL_DS */
+       .quad   0x00af9b000000ffff      /* __KERNEL_CS */
+       .quad   0x00cf93000000ffff      /* __KERNEL_DS */
        .quad   0x0080890000000000      /* TS descriptor */
        .quad   0x0000000000000000      /* TS continued */
 gdt_end:
index a950864a64dab3d558197c77bef3c56a07961494..c710239844e5c4c4dbf7ab613017fefef0ed6c50 100644 (file)
@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
         * Calculate the delta between where vmlinux was linked to load
         * and where it was actually loaded.
         */
-       delta = min_addr - LOAD_PHYSICAL_ADDR;
+       delta = min_addr - ____LOAD_PHYSICAL_ADDR;
        if (!delta) {
                debug_putstr("No relocation needed... ");
                return;
@@ -324,7 +324,7 @@ static void parse_elf(void *output)
        Elf32_Ehdr ehdr;
        Elf32_Phdr *phdrs, *phdr;
 #endif
-       void *dest;
+       void *dest, *prev;
        int i;
 
        memcpy(&ehdr, output, sizeof(ehdr));
@@ -351,13 +351,16 @@ static void parse_elf(void *output)
                case PT_LOAD:
 #ifdef CONFIG_RELOCATABLE
                        dest = output;
-                       dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
+                       dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
 #else
                        dest = (void *)(phdr->p_paddr);
 #endif
                        memcpy(dest,
                               output + phdr->p_offset,
                               phdr->p_filesz);
+                       if (i)
+                               memset(prev, 0xff, dest - prev);
+                       prev = dest + phdr->p_filesz;
                        break;
                default: /* Ignore other PT_* */ break;
                }
@@ -416,7 +419,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
                error("Destination address too large");
 #endif
 #ifndef CONFIG_RELOCATABLE
-       if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
+       if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
                error("Wrong destination address");
 #endif
 
index 1fd7d575092e9e51834b7b4750c48f5b567e1bce..0f7d0966329a2e4507298505b8c6a17f8adcbc47 100644 (file)
@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
                u32 ecx = MSR_K7_HWCR;
                u32 eax, edx;
 
-               asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+               asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
                eax &= ~(1 << 15);
-               asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+               asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
 
                get_cpuflags(); /* Make sure it really did something */
                err = check_cpuflags();
@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
                u32 ecx = MSR_VIA_FCR;
                u32 eax, edx;
 
-               asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+               asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
                eax |= (1<<1)|(1<<7);
-               asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+               asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
 
                set_bit(X86_FEATURE_CX8, cpu.flags);
                err = check_cpuflags();
@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
                u32 eax, edx;
                u32 level = 1;
 
-               asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
-               asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
-               asm("cpuid"
+               asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+               asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
+               asm volatile("cpuid"
                    : "+a" (level), "=d" (cpu.flags[0])
                    : : "ecx", "ebx");
-               asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+               asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
 
                err = check_cpuflags();
        } else if (err == 0x01 &&
index 16ef02596db2daf1fa8eadd9c17fd994ec3c21b3..91e033bbbf0e84692f51946533e83638ffb0dc99 100644 (file)
@@ -438,10 +438,14 @@ setup_data:               .quad 0                 # 64-bit physical pointer to
                                                # single linked list of
                                                # struct setup_data
 
-pref_address:          .quad LOAD_PHYSICAL_ADDR        # preferred load addr
+pref_address:          .quad ____LOAD_PHYSICAL_ADDR    # preferred load addr
 
 #define ZO_INIT_SIZE   (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+#define VO_INIT_SIZE   (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
+#else
 #define VO_INIT_SIZE   (VO__end - VO__text)
+#endif
 #if ZO_INIT_SIZE > VO_INIT_SIZE
 #define INIT_SIZE ZO_INIT_SIZE
 #else
index db75d07c3645c0375090596ae29d1085f0f8a38a..8e6d0afade9d3d0628c1fedab4c743755ebf0cd8 100644 (file)
@@ -19,7 +19,7 @@
 
 static int detect_memory_e820(void)
 {
-       int count = 0;
+       unsigned int count = 0;
        struct biosregs ireg, oreg;
        struct e820entry *desc = boot_params.e820_map;
        static struct e820entry buf; /* static so it is zeroed */
index ba3e100654db0239622a3f23f5d9d64855ebffd0..6501b8fa6c7f2c00954ce0919387ad8d67d6bee8 100644 (file)
@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
 
        boot_params.screen_info.vesapm_seg = oreg.es;
        boot_params.screen_info.vesapm_off = oreg.di;
+       boot_params.screen_info.vesapm_size = oreg.cx;
 }
 
 /*
index 43eda284d27fe96c2a4d407273f4b8d61bae87e2..5ab5fdb72416fd15d5b87809dc4663b8a9d71d3a 100644 (file)
@@ -96,7 +96,7 @@ static void store_mode_params(void)
 static unsigned int get_entry(void)
 {
        char entry_buf[4];
-       int i, len = 0;
+       unsigned int i, len = 0;
        int key;
        unsigned int v;
 
index 91056554716355c82aaa51407b62eeca2490bf3c..41779c152ac48d4519f22ded37938f8dde90d96f 100644 (file)
@@ -8,6 +8,8 @@
  * including this sentence is retained in full.
  */
 
+#include <asm/alternative-asm.h>
+
 .extern crypto_ft_tab
 .extern crypto_it_tab
 .extern crypto_fl_tab
@@ -70,6 +72,8 @@
        je      B192;                   \
        leaq    32(r9),r9;
 
+#define ret    pax_force_retaddr; ret
+
 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
        movq    r1,r2;                  \
        movq    r3,r4;                  \
index 477e9d75149b8c62a0c971e37108b6b03ffa7668..c92c7d80168ad5f235c04eda3a4e4649f777277a 100644 (file)
@@ -31,6 +31,7 @@
 
 #include <linux/linkage.h>
 #include <asm/inst.h>
+#include <asm/alternative-asm.h>
 
 #ifdef __x86_64__
 .data
@@ -205,7 +206,7 @@ enc:        .octa 0x2
 * num_initial_blocks = b mod 4
 * encrypt the initial num_initial_blocks blocks and apply ghash on
 * the ciphertext
-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
 * are clobbered
 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
 */
@@ -214,8 +215,8 @@ enc:        .octa 0x2
 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
        mov        arg7, %r10           # %r10 = AAD
-       mov        arg8, %r12           # %r12 = aadLen
-       mov        %r12, %r11
+       mov        arg8, %r15           # %r15 = aadLen
+       mov        %r15, %r11
        pxor       %xmm\i, %xmm\i
 _get_AAD_loop\num_initial_blocks\operation:
        movd       (%r10), \TMP1
@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
        psrldq     $4, %xmm\i
        pxor       \TMP1, %xmm\i
        add        $4, %r10
-       sub        $4, %r12
+       sub        $4, %r15
        jne        _get_AAD_loop\num_initial_blocks\operation
        cmp        $16, %r11
        je         _get_AAD_loop2_done\num_initial_blocks\operation
-       mov        $16, %r12
+       mov        $16, %r15
 _get_AAD_loop2\num_initial_blocks\operation:
        psrldq     $4, %xmm\i
-       sub        $4, %r12
-       cmp        %r11, %r12
+       sub        $4, %r15
+       cmp        %r11, %r15
        jne        _get_AAD_loop2\num_initial_blocks\operation
 _get_AAD_loop2_done\num_initial_blocks\operation:
         movdqa     SHUF_MASK(%rip), %xmm14
@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
 * num_initial_blocks = b mod 4
 * encrypt the initial num_initial_blocks blocks and apply ghash on
 * the ciphertext
-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
 * are clobbered
 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
 */
@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
        mov        arg7, %r10           # %r10 = AAD
-       mov        arg8, %r12           # %r12 = aadLen
-       mov        %r12, %r11
+       mov        arg8, %r15           # %r15 = aadLen
+       mov        %r15, %r11
        pxor       %xmm\i, %xmm\i
 _get_AAD_loop\num_initial_blocks\operation:
        movd       (%r10), \TMP1
@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
        psrldq     $4, %xmm\i
        pxor       \TMP1, %xmm\i
        add        $4, %r10
-       sub        $4, %r12
+       sub        $4, %r15
        jne        _get_AAD_loop\num_initial_blocks\operation
        cmp        $16, %r11
        je         _get_AAD_loop2_done\num_initial_blocks\operation
-       mov        $16, %r12
+       mov        $16, %r15
 _get_AAD_loop2\num_initial_blocks\operation:
        psrldq     $4, %xmm\i
-       sub        $4, %r12
-       cmp        %r11, %r12
+       sub        $4, %r15
+       cmp        %r11, %r15
        jne        _get_AAD_loop2\num_initial_blocks\operation
 _get_AAD_loop2_done\num_initial_blocks\operation:
         movdqa     SHUF_MASK(%rip), %xmm14
@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
 *
 *****************************************************************************/
 ENTRY(aesni_gcm_dec)
-       push    %r12
+       push    %r15
        push    %r13
        push    %r14
        mov     %rsp, %r14
@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
 */
        sub     $VARIABLE_OFFSET, %rsp
        and     $~63, %rsp                        # align rsp to 64 bytes
-       mov     %arg6, %r12
-       movdqu  (%r12), %xmm13                    # %xmm13 = HashKey
+       mov     %arg6, %r15
+       movdqu  (%r15), %xmm13                    # %xmm13 = HashKey
         movdqa  SHUF_MASK(%rip), %xmm2
        PSHUFB_XMM %xmm2, %xmm13
 
@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
        movdqa %xmm13, HashKey(%rsp)           # store HashKey<<1 (mod poly)
        mov %arg4, %r13    # save the number of bytes of plaintext/ciphertext
        and $-16, %r13                      # %r13 = %r13 - (%r13 mod 16)
-       mov %r13, %r12
-       and $(3<<4), %r12
+       mov %r13, %r15
+       and $(3<<4), %r15
        jz _initial_num_blocks_is_0_decrypt
-       cmp $(2<<4), %r12
+       cmp $(2<<4), %r15
        jb _initial_num_blocks_is_1_decrypt
        je _initial_num_blocks_is_2_decrypt
 _initial_num_blocks_is_3_decrypt:
@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
        sub $16, %r11
        add %r13, %r11
        movdqu (%arg3,%r11,1), %xmm1   # receive the last <16 byte block
-       lea SHIFT_MASK+16(%rip), %r12
-       sub %r13, %r12
+       lea SHIFT_MASK+16(%rip), %r15
+       sub %r13, %r15
 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
 # (%r13 is the number of bytes in plaintext mod 16)
-       movdqu (%r12), %xmm2           # get the appropriate shuffle mask
+       movdqu (%r15), %xmm2           # get the appropriate shuffle mask
        PSHUFB_XMM %xmm2, %xmm1            # right shift 16-%r13 butes
 
        movdqa  %xmm1, %xmm2
        pxor %xmm1, %xmm0            # Ciphertext XOR E(K, Yn)
-       movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+       movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
        # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
        pand %xmm1, %xmm0            # mask out top 16-%r13 bytes of %xmm0
        pand    %xmm1, %xmm2
@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
        sub     $1, %r13
        jne     _less_than_8_bytes_left_decrypt
 _multiple_of_16_bytes_decrypt:
-       mov     arg8, %r12                # %r13 = aadLen (number of bytes)
-       shl     $3, %r12                  # convert into number of bits
-       movd    %r12d, %xmm15             # len(A) in %xmm15
+       mov     arg8, %r15                # %r13 = aadLen (number of bytes)
+       shl     $3, %r15                  # convert into number of bits
+       movd    %r15d, %xmm15             # len(A) in %xmm15
        shl     $3, %arg4                 # len(C) in bits (*128)
        MOVQ_R64_XMM    %arg4, %xmm1
        pslldq  $8, %xmm15                # %xmm15 = len(A)||0x0000000000000000
@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
        mov     %r14, %rsp
        pop     %r14
        pop     %r13
-       pop     %r12
+       pop     %r15
+       pax_force_retaddr
        ret
 ENDPROC(aesni_gcm_dec)
 
@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
 * poly = x^128 + x^127 + x^126 + x^121 + 1
 ***************************************************************************/
 ENTRY(aesni_gcm_enc)
-       push    %r12
+       push    %r15
        push    %r13
        push    %r14
        mov     %rsp, %r14
@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
 #
        sub     $VARIABLE_OFFSET, %rsp
        and     $~63, %rsp
-       mov     %arg6, %r12
-       movdqu  (%r12), %xmm13
+       mov     %arg6, %r15
+       movdqu  (%r15), %xmm13
         movdqa  SHUF_MASK(%rip), %xmm2
        PSHUFB_XMM %xmm2, %xmm13
 
@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
        movdqa  %xmm13, HashKey(%rsp)
        mov     %arg4, %r13            # %xmm13 holds HashKey<<1 (mod poly)
        and     $-16, %r13
-       mov     %r13, %r12
+       mov     %r13, %r15
 
         # Encrypt first few blocks
 
-       and     $(3<<4), %r12
+       and     $(3<<4), %r15
        jz      _initial_num_blocks_is_0_encrypt
-       cmp     $(2<<4), %r12
+       cmp     $(2<<4), %r15
        jb      _initial_num_blocks_is_1_encrypt
        je      _initial_num_blocks_is_2_encrypt
 _initial_num_blocks_is_3_encrypt:
@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
        sub $16, %r11
        add %r13, %r11
        movdqu (%arg3,%r11,1), %xmm1     # receive the last <16 byte blocks
-       lea SHIFT_MASK+16(%rip), %r12
-       sub %r13, %r12
+       lea SHIFT_MASK+16(%rip), %r15
+       sub %r13, %r15
        # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
        # (%r13 is the number of bytes in plaintext mod 16)
-       movdqu  (%r12), %xmm2           # get the appropriate shuffle mask
+       movdqu  (%r15), %xmm2           # get the appropriate shuffle mask
        PSHUFB_XMM      %xmm2, %xmm1            # shift right 16-r13 byte
        pxor    %xmm1, %xmm0            # Plaintext XOR Encrypt(K, Yn)
-       movdqu  ALL_F-SHIFT_MASK(%r12), %xmm1
+       movdqu  ALL_F-SHIFT_MASK(%r15), %xmm1
        # get the appropriate mask to mask out top 16-r13 bytes of xmm0
        pand    %xmm1, %xmm0            # mask out top 16-r13 bytes of xmm0
         movdqa SHUF_MASK(%rip), %xmm10
@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
        sub $1, %r13
        jne _less_than_8_bytes_left_encrypt
 _multiple_of_16_bytes_encrypt:
-       mov     arg8, %r12    # %r12 = addLen (number of bytes)
-       shl     $3, %r12
-       movd    %r12d, %xmm15       # len(A) in %xmm15
+       mov     arg8, %r15    # %r15 = addLen (number of bytes)
+       shl     $3, %r15
+       movd    %r15d, %xmm15       # len(A) in %xmm15
        shl     $3, %arg4               # len(C) in bits (*128)
        MOVQ_R64_XMM    %arg4, %xmm1
        pslldq  $8, %xmm15          # %xmm15 = len(A)||0x0000000000000000
@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
        mov     %r14, %rsp
        pop     %r14
        pop     %r13
-       pop     %r12
+       pop     %r15
+       pax_force_retaddr
        ret
 ENDPROC(aesni_gcm_enc)
 
@@ -1722,6 +1725,7 @@ _key_expansion_256a:
        pxor %xmm1, %xmm0
        movaps %xmm0, (TKEYP)
        add $0x10, TKEYP
+       pax_force_retaddr
        ret
 ENDPROC(_key_expansion_128)
 ENDPROC(_key_expansion_256a)
@@ -1748,6 +1752,7 @@ _key_expansion_192a:
        shufps $0b01001110, %xmm2, %xmm1
        movaps %xmm1, 0x10(TKEYP)
        add $0x20, TKEYP
+       pax_force_retaddr
        ret
 ENDPROC(_key_expansion_192a)
 
@@ -1768,6 +1773,7 @@ _key_expansion_192b:
 
        movaps %xmm0, (TKEYP)
        add $0x10, TKEYP
+       pax_force_retaddr
        ret
 ENDPROC(_key_expansion_192b)
 
@@ -1781,6 +1787,7 @@ _key_expansion_256b:
        pxor %xmm1, %xmm2
        movaps %xmm2, (TKEYP)
        add $0x10, TKEYP
+       pax_force_retaddr
        ret
 ENDPROC(_key_expansion_256b)
 
@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
 #ifndef __x86_64__
        popl KEYP
 #endif
+       pax_force_retaddr
        ret
 ENDPROC(aesni_set_key)
 
@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
        popl KLEN
        popl KEYP
 #endif
+       pax_force_retaddr
        ret
 ENDPROC(aesni_enc)
 
@@ -1974,6 +1983,7 @@ _aesni_enc1:
        AESENC KEY STATE
        movaps 0x70(TKEYP), KEY
        AESENCLAST KEY STATE
+       pax_force_retaddr
        ret
 ENDPROC(_aesni_enc1)
 
@@ -2083,6 +2093,7 @@ _aesni_enc4:
        AESENCLAST KEY STATE2
        AESENCLAST KEY STATE3
        AESENCLAST KEY STATE4
+       pax_force_retaddr
        ret
 ENDPROC(_aesni_enc4)
 
@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
        popl KLEN
        popl KEYP
 #endif
+       pax_force_retaddr
        ret
 ENDPROC(aesni_dec)
 
@@ -2164,6 +2176,7 @@ _aesni_dec1:
        AESDEC KEY STATE
        movaps 0x70(TKEYP), KEY
        AESDECLAST KEY STATE
+       pax_force_retaddr
        ret
 ENDPROC(_aesni_dec1)
 
@@ -2273,6 +2286,7 @@ _aesni_dec4:
        AESDECLAST KEY STATE2
        AESDECLAST KEY STATE3
        AESDECLAST KEY STATE4
+       pax_force_retaddr
        ret
 ENDPROC(_aesni_dec4)
 
@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
        popl KEYP
        popl LEN
 #endif
+       pax_force_retaddr
        ret
 ENDPROC(aesni_ecb_enc)
 
@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
        popl KEYP
        popl LEN
 #endif
+       pax_force_retaddr
        ret
 ENDPROC(aesni_ecb_dec)
 
@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
        popl LEN
        popl IVP
 #endif
+       pax_force_retaddr
        ret
 ENDPROC(aesni_cbc_enc)
 
@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
        popl LEN
        popl IVP
 #endif
+       pax_force_retaddr
        ret
 ENDPROC(aesni_cbc_dec)
 
@@ -2550,6 +2568,7 @@ _aesni_inc_init:
        mov $1, TCTR_LOW
        MOVQ_R64_XMM TCTR_LOW INC
        MOVQ_R64_XMM CTR TCTR_LOW
+       pax_force_retaddr
        ret
 ENDPROC(_aesni_inc_init)
 
@@ -2579,6 +2598,7 @@ _aesni_inc:
 .Linc_low:
        movaps CTR, IV
        PSHUFB_XMM BSWAP_MASK IV
+       pax_force_retaddr
        ret
 ENDPROC(_aesni_inc)
 
@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
 .Lctr_enc_ret:
        movups IV, (IVP)
 .Lctr_enc_just_ret:
+       pax_force_retaddr
        ret
 ENDPROC(aesni_ctr_enc)
 
@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
        pxor INC, STATE4
        movdqu STATE4, 0x70(OUTP)
 
+       pax_force_retaddr
        ret
 ENDPROC(aesni_xts_crypt8)
 
index 246c67006ed06ad84516b3a56ceac06e05734a1c..466e2d61e1be09ee59e73b437af3bed7bc0de249 100644 (file)
@@ -21,6 +21,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 .file "blowfish-x86_64-asm.S"
 .text
@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
        jnz .L__enc_xor;
 
        write_block();
+       pax_force_retaddr
        ret;
 .L__enc_xor:
        xor_block();
+       pax_force_retaddr
        ret;
 ENDPROC(__blowfish_enc_blk)
 
@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
 
        movq %r11, %rbp;
 
+       pax_force_retaddr
        ret;
 ENDPROC(blowfish_dec_blk)
 
@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
 
        popq %rbx;
        popq %rbp;
+       pax_force_retaddr
        ret;
 
 .L__enc_xor4:
@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
 
        popq %rbx;
        popq %rbp;
+       pax_force_retaddr
        ret;
 ENDPROC(__blowfish_enc_blk_4way)
 
@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
        popq %rbx;
        popq %rbp;
 
+       pax_force_retaddr
        ret;
 ENDPROC(blowfish_dec_blk_4way)
index ce71f9212409f16326ffc92efec812c2be7d731c..1dce7ecbad250daf019e6988c4dac1e40eea6a0e 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 #define CAMELLIA_TABLE_BYTE_LEN 272
 
@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
        roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
                  %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
                  %rcx, (%r9));
+       pax_force_retaddr
        ret;
 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
 
@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
        roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
                  %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
                  %rax, (%r9));
+       pax_force_retaddr
        ret;
 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
 
@@ -780,6 +783,7 @@ __camellia_enc_blk16:
                    %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
                    %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
 
+       pax_force_retaddr
        ret;
 
 .align 8
@@ -865,6 +869,7 @@ __camellia_dec_blk16:
                    %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
                    %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
 
+       pax_force_retaddr
        ret;
 
 .align 8
@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
                     %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
                     %xmm8, %rsi);
 
+       pax_force_retaddr
        ret;
 ENDPROC(camellia_ecb_enc_16way)
 
@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
                     %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
                     %xmm8, %rsi);
 
+       pax_force_retaddr
        ret;
 ENDPROC(camellia_ecb_dec_16way)
 
@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
                     %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
                     %xmm8, %rsi);
 
+       pax_force_retaddr
        ret;
 ENDPROC(camellia_cbc_dec_16way)
 
@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
                     %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
                     %xmm8, %rsi);
 
+       pax_force_retaddr
        ret;
 ENDPROC(camellia_ctr_16way)
 
@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
                     %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
                     %xmm8, %rsi);
 
+       pax_force_retaddr
        ret;
 ENDPROC(camellia_xts_crypt_16way)
 
index 0e0b8863a34bd168c618941407b2890c385a442d..5a3123c98f12891b6cc4af6fafd148c944778401 100644 (file)
@@ -11,6 +11,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 #define CAMELLIA_TABLE_BYTE_LEN 272
 
@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
        roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
                  %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
                  %rcx, (%r9));
+       pax_force_retaddr
        ret;
 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
 
@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
        roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
                  %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
                  %rax, (%r9));
+       pax_force_retaddr
        ret;
 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
 
@@ -820,6 +823,7 @@ __camellia_enc_blk32:
                    %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
                    %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
 
+       pax_force_retaddr
        ret;
 
 .align 8
@@ -905,6 +909,7 @@ __camellia_dec_blk32:
                    %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
                    %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
 
+       pax_force_retaddr
        ret;
 
 .align 8
@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
 
        vzeroupper;
 
+       pax_force_retaddr
        ret;
 ENDPROC(camellia_ecb_enc_32way)
 
@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
 
        vzeroupper;
 
+       pax_force_retaddr
        ret;
 ENDPROC(camellia_ecb_dec_32way)
 
@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
 
        vzeroupper;
 
+       pax_force_retaddr
        ret;
 ENDPROC(camellia_cbc_dec_32way)
 
@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
 
        vzeroupper;
 
+       pax_force_retaddr
        ret;
 ENDPROC(camellia_ctr_32way)
 
@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
 
        vzeroupper;
 
+       pax_force_retaddr
        ret;
 ENDPROC(camellia_xts_crypt_32way)
 
index 310319c601ede2884b2d031ca4603593f304e4b3..db3d7b5ec8e81353c25d01ab00e21bbf0930967a 100644 (file)
@@ -21,6 +21,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 .file "camellia-x86_64-asm_64.S"
 .text
@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
        enc_outunpack(mov, RT1);
 
        movq RRBP, %rbp;
+       pax_force_retaddr
        ret;
 
 .L__enc_xor:
        enc_outunpack(xor, RT1);
 
        movq RRBP, %rbp;
+       pax_force_retaddr
        ret;
 ENDPROC(__camellia_enc_blk)
 
@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
        dec_outunpack();
 
        movq RRBP, %rbp;
+       pax_force_retaddr
        ret;
 ENDPROC(camellia_dec_blk)
 
@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
 
        movq RRBP, %rbp;
        popq %rbx;
+       pax_force_retaddr
        ret;
 
 .L__enc2_xor:
@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
 
        movq RRBP, %rbp;
        popq %rbx;
+       pax_force_retaddr
        ret;
 ENDPROC(__camellia_enc_blk_2way)
 
@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
 
        movq RRBP, %rbp;
        movq RXOR, %rbx;
+       pax_force_retaddr
        ret;
 ENDPROC(camellia_dec_blk_2way)
index c35fd5d6ecd26f4b01180484c520c19a10637389..2d8c7db8ec15b43326e54650be7b25302ca48c83 100644 (file)
@@ -24,6 +24,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 .file "cast5-avx-x86_64-asm_64.S"
 
@@ -281,6 +282,7 @@ __cast5_enc_blk16:
        outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
        outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
 
+       pax_force_retaddr
        ret;
 ENDPROC(__cast5_enc_blk16)
 
@@ -352,6 +354,7 @@ __cast5_dec_blk16:
        outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
        outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
 
+       pax_force_retaddr
        ret;
 
 .L__skip_dec:
@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
        vmovdqu RR4, (6*4*4)(%r11);
        vmovdqu RL4, (7*4*4)(%r11);
 
+       pax_force_retaddr
        ret;
 ENDPROC(cast5_ecb_enc_16way)
 
@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
        vmovdqu RR4, (6*4*4)(%r11);
        vmovdqu RL4, (7*4*4)(%r11);
 
+       pax_force_retaddr
        ret;
 ENDPROC(cast5_ecb_dec_16way)
 
@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
         *      %rdx: src
         */
 
-       pushq %r12;
+       pushq %r14;
 
        movq %rsi, %r11;
-       movq %rdx, %r12;
+       movq %rdx, %r14;
 
        vmovdqu (0*16)(%rdx), RL1;
        vmovdqu (1*16)(%rdx), RR1;
@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
        call __cast5_dec_blk16;
 
        /* xor with src */
-       vmovq (%r12), RX;
+       vmovq (%r14), RX;
        vpshufd $0x4f, RX, RX;
        vpxor RX, RR1, RR1;
-       vpxor 0*16+8(%r12), RL1, RL1;
-       vpxor 1*16+8(%r12), RR2, RR2;
-       vpxor 2*16+8(%r12), RL2, RL2;
-       vpxor 3*16+8(%r12), RR3, RR3;
-       vpxor 4*16+8(%r12), RL3, RL3;
-       vpxor 5*16+8(%r12), RR4, RR4;
-       vpxor 6*16+8(%r12), RL4, RL4;
+       vpxor 0*16+8(%r14), RL1, RL1;
+       vpxor 1*16+8(%r14), RR2, RR2;
+       vpxor 2*16+8(%r14), RL2, RL2;
+       vpxor 3*16+8(%r14), RR3, RR3;
+       vpxor 4*16+8(%r14), RL3, RL3;
+       vpxor 5*16+8(%r14), RR4, RR4;
+       vpxor 6*16+8(%r14), RL4, RL4;
 
        vmovdqu RR1, (0*16)(%r11);
        vmovdqu RL1, (1*16)(%r11);
@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
        vmovdqu RR4, (6*16)(%r11);
        vmovdqu RL4, (7*16)(%r11);
 
-       popq %r12;
+       popq %r14;
 
+       pax_force_retaddr
        ret;
 ENDPROC(cast5_cbc_dec_16way)
 
@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
         *      %rcx: iv (big endian, 64bit)
         */
 
-       pushq %r12;
+       pushq %r14;
 
        movq %rsi, %r11;
-       movq %rdx, %r12;
+       movq %rdx, %r14;
 
        vpcmpeqd RTMP, RTMP, RTMP;
        vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
        call __cast5_enc_blk16;
 
        /* dst = src ^ iv */
-       vpxor (0*16)(%r12), RR1, RR1;
-       vpxor (1*16)(%r12), RL1, RL1;
-       vpxor (2*16)(%r12), RR2, RR2;
-       vpxor (3*16)(%r12), RL2, RL2;
-       vpxor (4*16)(%r12), RR3, RR3;
-       vpxor (5*16)(%r12), RL3, RL3;
-       vpxor (6*16)(%r12), RR4, RR4;
-       vpxor (7*16)(%r12), RL4, RL4;
+       vpxor (0*16)(%r14), RR1, RR1;
+       vpxor (1*16)(%r14), RL1, RL1;
+       vpxor (2*16)(%r14), RR2, RR2;
+       vpxor (3*16)(%r14), RL2, RL2;
+       vpxor (4*16)(%r14), RR3, RR3;
+       vpxor (5*16)(%r14), RL3, RL3;
+       vpxor (6*16)(%r14), RR4, RR4;
+       vpxor (7*16)(%r14), RL4, RL4;
        vmovdqu RR1, (0*16)(%r11);
        vmovdqu RL1, (1*16)(%r11);
        vmovdqu RR2, (2*16)(%r11);
@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
        vmovdqu RR4, (6*16)(%r11);
        vmovdqu RL4, (7*16)(%r11);
 
-       popq %r12;
+       popq %r14;
 
+       pax_force_retaddr
        ret;
 ENDPROC(cast5_ctr_16way)
index e3531f833951bbc2535d1f6c51fe58d428c3b637..e123f35b9a9ccefaa9f55654bb6dd335e18c75b2 100644 (file)
@@ -24,6 +24,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 #include "glue_helper-asm-avx.S"
 
 .file "cast6-avx-x86_64-asm_64.S"
@@ -295,6 +296,7 @@ __cast6_enc_blk8:
        outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
 
+       pax_force_retaddr
        ret;
 ENDPROC(__cast6_enc_blk8)
 
@@ -340,6 +342,7 @@ __cast6_dec_blk8:
        outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
 
+       pax_force_retaddr
        ret;
 ENDPROC(__cast6_dec_blk8)
 
@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
 
        store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(cast6_ecb_enc_8way)
 
@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
 
        store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(cast6_ecb_dec_8way)
 
@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
         *      %rdx: src
         */
 
-       pushq %r12;
+       pushq %r14;
 
        movq %rsi, %r11;
-       movq %rdx, %r12;
+       movq %rdx, %r14;
 
        load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
        call __cast6_dec_blk8;
 
-       store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+       store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
-       popq %r12;
+       popq %r14;
 
+       pax_force_retaddr
        ret;
 ENDPROC(cast6_cbc_dec_8way)
 
@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
         *      %rcx: iv (little endian, 128bit)
         */
 
-       pushq %r12;
+       pushq %r14;
 
        movq %rsi, %r11;
-       movq %rdx, %r12;
+       movq %rdx, %r14;
 
        load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
                      RD2, RX, RKR, RKM);
 
        call __cast6_enc_blk8;
 
-       store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+       store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
-       popq %r12;
+       popq %r14;
 
+       pax_force_retaddr
        ret;
 ENDPROC(cast6_ctr_8way)
 
@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
        /* dst <= regs xor IVs(in dst) */
        store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(cast6_xts_enc_8way)
 
@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
        /* dst <= regs xor IVs(in dst) */
        store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(cast6_xts_dec_8way)
index 26d49ebae0404ee74fea7a5e7f6bd7d22b9bad80..c0a8c845fa73c808364d3081c7e1c2a5833c1a30 100644 (file)
@@ -45,6 +45,7 @@
 
 #include <asm/inst.h>
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
 
@@ -309,6 +310,7 @@ do_return:
        popq    %rsi
        popq    %rdi
        popq    %rbx
+       pax_force_retaddr
         ret
 
         ################################################################
index 5d1e0075ac24fa5eae975930647f6fc14c5b256d..098cb4f3a82f75d68fb4c39fdac0fe275f575642 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <linux/linkage.h>
 #include <asm/inst.h>
+#include <asm/alternative-asm.h>
 
 .data
 
@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
        psrlq $1, T2
        pxor T2, T1
        pxor T1, DATA
+       pax_force_retaddr
        ret
 ENDPROC(__clmul_gf128mul_ble)
 
@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
        call __clmul_gf128mul_ble
        PSHUFB_XMM BSWAP DATA
        movups DATA, (%rdi)
+       pax_force_retaddr
        ret
 ENDPROC(clmul_ghash_mul)
 
@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
        PSHUFB_XMM BSWAP DATA
        movups DATA, (%rdi)
 .Lupdate_just_ret:
+       pax_force_retaddr
        ret
 ENDPROC(clmul_ghash_update)
index 9279e0b2d60ec85fc89f1c23cb7287f64531b4a0..c4b3d2c41bb2d2347c499f24e4d5b9a0d59b294e 100644 (file)
@@ -1,4 +1,5 @@
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 # enter salsa20_encrypt_bytes
 ENTRY(salsa20_encrypt_bytes)
@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
        add     %r11,%rsp
        mov     %rdi,%rax
        mov     %rsi,%rdx
+       pax_force_retaddr
        ret
 #   bytesatleast65:
 ._bytesatleast65:
@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
        add     %r11,%rsp
        mov     %rdi,%rax
        mov     %rsi,%rdx
+       pax_force_retaddr
        ret
 ENDPROC(salsa20_keysetup)
 
@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
        add     %r11,%rsp
        mov     %rdi,%rax
        mov     %rsi,%rdx
+       pax_force_retaddr
        ret
 ENDPROC(salsa20_ivsetup)
index 2f202f49872b2f6a6ed4ed5a6c6d01a8f0958660..d9164d6e913528c104c330b113cb5b03c6611b03 100644 (file)
@@ -24,6 +24,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 #include "glue_helper-asm-avx.S"
 
 .file "serpent-avx-x86_64-asm_64.S"
@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
        write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
        write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(__serpent_enc_blk8_avx)
 
@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
        write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
        write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(__serpent_dec_blk8_avx)
 
@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
 
        store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(serpent_ecb_enc_8way_avx)
 
@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
 
        store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(serpent_ecb_dec_8way_avx)
 
@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
 
        store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(serpent_cbc_dec_8way_avx)
 
@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
 
        store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(serpent_ctr_8way_avx)
 
@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
        /* dst <= regs xor IVs(in dst) */
        store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(serpent_xts_enc_8way_avx)
 
@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
        /* dst <= regs xor IVs(in dst) */
        store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(serpent_xts_dec_8way_avx)
index b222085cccac756efc94be3e6780a67c9dd61779..abd483cc0051fc62e87a124aa8724f4a8067b95d 100644 (file)
@@ -15,6 +15,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 #include "glue_helper-asm-avx2.S"
 
 .file "serpent-avx2-asm_64.S"
@@ -610,6 +611,7 @@ __serpent_enc_blk16:
        write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
        write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(__serpent_enc_blk16)
 
@@ -664,6 +666,7 @@ __serpent_dec_blk16:
        write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
        write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(__serpent_dec_blk16)
 
@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
 
        vzeroupper;
 
+       pax_force_retaddr
        ret;
 ENDPROC(serpent_ecb_enc_16way)
 
@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
 
        vzeroupper;
 
+       pax_force_retaddr
        ret;
 ENDPROC(serpent_ecb_dec_16way)
 
@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
 
        vzeroupper;
 
+       pax_force_retaddr
        ret;
 ENDPROC(serpent_cbc_dec_16way)
 
@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
 
        vzeroupper;
 
+       pax_force_retaddr
        ret;
 ENDPROC(serpent_ctr_16way)
 
@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
 
        vzeroupper;
 
+       pax_force_retaddr
        ret;
 ENDPROC(serpent_xts_enc_16way)
 
@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
 
        vzeroupper;
 
+       pax_force_retaddr
        ret;
 ENDPROC(serpent_xts_dec_16way)
index acc066c7c6b21c0c09f3391b36200b6fc35e79d4..1559cc4ff71c6ea32af41b53b9c477976fdec1c4 100644 (file)
@@ -25,6 +25,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 .file "serpent-sse2-x86_64-asm_64.S"
 .text
@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
        write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
        write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
 
+       pax_force_retaddr
        ret;
 
 .L__enc_xor8:
        xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
        xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(__serpent_enc_blk_8way)
 
@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
        write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
        write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(serpent_dec_blk_8way)
index a4109506a5e8884ba54562aed09bc36e5e800d12..9dfe7adaa5914e15c6ae2d344dfafd7ee341b174 100644 (file)
@@ -29,6 +29,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 #define CTX    %rdi    // arg1
 #define BUF    %rsi    // arg2
@@ -75,9 +76,9 @@
 
        push    %rbx
        push    %rbp
-       push    %r12
+       push    %r14
 
-       mov     %rsp, %r12
+       mov     %rsp, %r14
        sub     $64, %rsp               # allocate workspace
        and     $~15, %rsp              # align stack
 
        xor     %rax, %rax
        rep stosq
 
-       mov     %r12, %rsp              # deallocate workspace
+       mov     %r14, %rsp              # deallocate workspace
 
-       pop     %r12
+       pop     %r14
        pop     %rbp
        pop     %rbx
+       pax_force_retaddr
        ret
 
        ENDPROC(\name)
index 642f15687a0ac4205f59850cacab030768eaf6d4..51a513c9fcae998c28d956aa9b4bd0295e4c20a1 100644 (file)
@@ -49,6 +49,7 @@
 
 #ifdef CONFIG_AS_AVX
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 ## assume buffers not aligned
 #define    VMOVDQ vmovdqu
@@ -460,6 +461,7 @@ done_hash:
        popq    %r13
        popq    %rbp
        popq    %rbx
+       pax_force_retaddr
        ret
 ENDPROC(sha256_transform_avx)
 
index 9e86944c539dc8e41f6b125d941f453a5a1c1c2e..3795e6a2013fb644e5bce464af4b29611c9307bf 100644 (file)
@@ -50,6 +50,7 @@
 
 #ifdef CONFIG_AS_AVX2
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 ## assume buffers not aligned
 #define        VMOVDQ vmovdqu
@@ -720,6 +721,7 @@ done_hash:
        popq    %r12
        popq    %rbp
        popq    %rbx
+       pax_force_retaddr
        ret
 ENDPROC(sha256_transform_rorx)
 
index f833b74d902ba87919184ba7828b226ee067c096..8c62a9e55a2df47f9730cf0238d8b7365392bee1 100644 (file)
@@ -47,6 +47,7 @@
 ########################################################################
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 ## assume buffers not aligned
 #define    MOVDQ movdqu
@@ -471,6 +472,7 @@ done_hash:
        popq    %rbp
        popq    %rbx
 
+       pax_force_retaddr
        ret
 ENDPROC(sha256_transform_ssse3)
 
index 974dde9bc6cd220960b9d053a82e81c78596449e..a823ff99c1276cb8a2d9b9d419d5b056968b5ca3 100644 (file)
@@ -49,6 +49,7 @@
 
 #ifdef CONFIG_AS_AVX
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 .text
 
@@ -364,6 +365,7 @@ updateblock:
        mov     frame_RSPSAVE(%rsp), %rsp
 
 nowork:
+       pax_force_retaddr
        ret
 ENDPROC(sha512_transform_avx)
 
index 568b96105f5cdb20d625c15564ff81d2df992301..ed20c37bebe680d2c05e1d7888187b9b349b390b 100644 (file)
@@ -51,6 +51,7 @@
 
 #ifdef CONFIG_AS_AVX2
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 .text
 
@@ -678,6 +679,7 @@ done_hash:
 
        # Restore Stack Pointer
        mov     frame_RSPSAVE(%rsp), %rsp
+       pax_force_retaddr
        ret
 ENDPROC(sha512_transform_rorx)
 
index fb56855d51f553f2705d537128169bc963610658..6edd7685dd9ba1749ac9a52ca40a4bbfecc92667 100644 (file)
@@ -48,6 +48,7 @@
 ########################################################################
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 .text
 
@@ -363,6 +364,7 @@ updateblock:
        mov     frame_RSPSAVE(%rsp), %rsp
 
 nowork:
+       pax_force_retaddr
        ret
 ENDPROC(sha512_transform_ssse3)
 
index 05058134c443176ec717cbdec6eb50235530fb03..b067311c6a417ce8a7854a7b9eeb002f7a65f631 100644 (file)
@@ -24,6 +24,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 #include "glue_helper-asm-avx.S"
 
 .file "twofish-avx-x86_64-asm_64.S"
@@ -284,6 +285,7 @@ __twofish_enc_blk8:
        outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
        outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(__twofish_enc_blk8)
 
@@ -324,6 +326,7 @@ __twofish_dec_blk8:
        outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
        outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(__twofish_dec_blk8)
 
@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
 
        store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(twofish_ecb_enc_8way)
 
@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
 
        store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(twofish_ecb_dec_8way)
 
@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
         *      %rdx: src
         */
 
-       pushq %r12;
+       pushq %r14;
 
        movq %rsi, %r11;
-       movq %rdx, %r12;
+       movq %rdx, %r14;
 
        load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
 
        call __twofish_dec_blk8;
 
-       store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+       store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
-       popq %r12;
+       popq %r14;
 
+       pax_force_retaddr
        ret;
 ENDPROC(twofish_cbc_dec_8way)
 
@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
         *      %rcx: iv (little endian, 128bit)
         */
 
-       pushq %r12;
+       pushq %r14;
 
        movq %rsi, %r11;
-       movq %rdx, %r12;
+       movq %rdx, %r14;
 
        load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
                      RD2, RX0, RX1, RY0);
 
        call __twofish_enc_blk8;
 
-       store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+       store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
 
-       popq %r12;
+       popq %r14;
 
+       pax_force_retaddr
        ret;
 ENDPROC(twofish_ctr_8way)
 
@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
        /* dst <= regs xor IVs(in dst) */
        store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(twofish_xts_enc_8way)
 
@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
        /* dst <= regs xor IVs(in dst) */
        store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+       pax_force_retaddr
        ret;
 ENDPROC(twofish_xts_dec_8way)
index 1c3b7ceb36d24c5f301f0709f0082c47a4af98c1..02f578dd40d378a37fc3118e8033288f2c619b21 100644 (file)
@@ -21,6 +21,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 .file "twofish-x86_64-asm-3way.S"
 .text
@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
        popq %r13;
        popq %r14;
        popq %r15;
+       pax_force_retaddr
        ret;
 
 .L__enc_xor3:
@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
        popq %r13;
        popq %r14;
        popq %r15;
+       pax_force_retaddr
        ret;
 ENDPROC(__twofish_enc_blk_3way)
 
@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
        popq %r13;
        popq %r14;
        popq %r15;
+       pax_force_retaddr
        ret;
 ENDPROC(twofish_dec_blk_3way)
index a039d21986a21c87e9bdf9a41117a726ea3bf77f..524b8b2318d8fc900c6f5278ff49c5920ae36a28 100644 (file)
@@ -22,6 +22,7 @@
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/alternative-asm.h>
 
 #define a_offset       0
 #define b_offset       4
@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
 
        popq    R1
        movq    $1,%rax
+       pax_force_retaddr
        ret
 ENDPROC(twofish_enc_blk)
 
@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
 
        popq    R1
        movq    $1,%rax
+       pax_force_retaddr
        ret
 ENDPROC(twofish_dec_blk)
index ae6aad1d24f79e37a16a022cdb4cf72597f67b71..719d6d98ffe902b095666601fdcf9c3b8e072863 100644 (file)
@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
        unsigned long dump_start, dump_size;
        struct user32 dump;
 
+       memset(&dump, 0, sizeof(dump));
+
        fs = get_fs();
        set_fs(KERNEL_DS);
        has_dumped = 1;
index f9e181aaba979190be8d3d18ebf437c2e3a22e69..300544cc8ef087576934fd3c93a8aaefcb4f9f91 100644 (file)
@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
        if (__get_user(set.sig[0], &frame->sc.oldmask)
            || (_COMPAT_NSIG_WORDS > 1
                && __copy_from_user((((char *) &set.sig) + 4),
-                                   &frame->extramask,
+                                   frame->extramask,
                                    sizeof(frame->extramask))))
                goto badframe;
 
@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
        sp -= frame_size;
        /* Align the stack pointer according to the i386 ABI,
         * i.e. so that on function entry ((sp + 4) & 15) == 0. */
-       sp = ((sp + 4) & -16ul) - 4;
+       sp = ((sp - 12) & -16ul) - 4;
        return (void __user *) sp;
 }
 
@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
        } else {
                /* Return stub is in 32bit vsyscall page */
                if (current->mm->context.vdso)
-                       restorer = current->mm->context.vdso +
-                               selected_vdso32->sym___kernel_sigreturn;
+                       restorer = (void __force_user *)(current->mm->context.vdso +
+                               selected_vdso32->sym___kernel_sigreturn);
                else
-                       restorer = &frame->retcode;
+                       restorer = frame->retcode;
        }
 
        put_user_try {
@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
                 * These are actually not used anymore, but left because some
                 * gdb versions depend on them as a marker.
                 */
-               put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
+               put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
        } put_user_catch(err);
 
        if (err)
@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
                0xb8,
                __NR_ia32_rt_sigreturn,
                0x80cd,
-               0,
+               0
        };
 
        frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
 
                if (ksig->ka.sa.sa_flags & SA_RESTORER)
                        restorer = ksig->ka.sa.sa_restorer;
+               else if (current->mm->context.vdso)
+                       /* Return stub is in 32bit vsyscall page */
+                       restorer = (void __force_user *)(current->mm->context.vdso +
+                               selected_vdso32->sym___kernel_rt_sigreturn);
                else
-                       restorer = current->mm->context.vdso +
-                               selected_vdso32->sym___kernel_rt_sigreturn;
+                       restorer = frame->retcode;
                put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
 
                /*
                 * Not actually used anymore, but left because some gdb
                 * versions need it.
                 */
-               put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
+               put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
        } put_user_catch(err);
 
        err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
index 82e8a1d446583efaa5a3c0426887d3a47abc5ddd..4e998d50966538fbe03707929a933bcf3001f8f3 100644 (file)
 #include <asm/irqflags.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/pgtable.h>
 #include <linux/linkage.h>
 #include <linux/err.h>
+#include <asm/alternative-asm.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
         */
        .macro LOAD_ARGS32 offset, _r9=0
        .if \_r9
-       movl \offset+16(%rsp),%r9d
+       movl \offset+R9(%rsp),%r9d
        .endif
-       movl \offset+40(%rsp),%ecx
-       movl \offset+48(%rsp),%edx
-       movl \offset+56(%rsp),%esi
-       movl \offset+64(%rsp),%edi
+       movl \offset+RCX(%rsp),%ecx
+       movl \offset+RDX(%rsp),%edx
+       movl \offset+RSI(%rsp),%esi
+       movl \offset+RDI(%rsp),%edi
        movl %eax,%eax                  /* zero extension */
        .endm
        
@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
 ENDPROC(native_irq_enable_sysexit)
 #endif
 
+       .macro pax_enter_kernel_user
+       pax_set_fptr_mask
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       call pax_enter_kernel_user
+#endif
+       .endm
+
+       .macro pax_exit_kernel_user
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       call pax_exit_kernel_user
+#endif
+#ifdef CONFIG_PAX_RANDKSTACK
+       pushq %rax
+       pushq %r11
+       call pax_randomize_kstack
+       popq %r11
+       popq %rax
+#endif
+       .endm
+
+       .macro pax_erase_kstack
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+       call pax_erase_kstack
+#endif
+       .endm
+
 /*
  * 32bit SYSENTER instruction entry.
  *
@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
        CFI_REGISTER    rsp,rbp
        SWAPGS_UNSAFE_STACK
        movq    PER_CPU_VAR(kernel_stack), %rsp
-       addq    $(KERNEL_STACK_OFFSET),%rsp
-       /*
-        * No need to follow this irqs on/off section: the syscall
-        * disabled irqs, here we enable it straight after entry:
-        */
-       ENABLE_INTERRUPTS(CLBR_NONE)
        movl    %ebp,%ebp               /* zero extension */
        pushq_cfi $__USER32_DS
        /*CFI_REL_OFFSET ss,0*/
@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
        CFI_REL_OFFSET rsp,0
        pushfq_cfi
        /*CFI_REL_OFFSET rflags,0*/
-       movl    TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
-       CFI_REGISTER rip,r10
+       orl     $X86_EFLAGS_IF,(%rsp)
+       GET_THREAD_INFO(%r11)
+       movl    TI_sysenter_return(%r11), %r11d
+       CFI_REGISTER rip,r11
        pushq_cfi $__USER32_CS
        /*CFI_REL_OFFSET cs,0*/
        movl    %eax, %eax
-       pushq_cfi %r10
+       pushq_cfi %r11
        CFI_REL_OFFSET rip,0
        pushq_cfi %rax
        cld
        SAVE_ARGS 0,1,0
+       pax_enter_kernel_user
+
+#ifdef CONFIG_PAX_RANDKSTACK
+       pax_erase_kstack
+#endif
+
+       /*
+        * No need to follow this irqs on/off section: the syscall
+        * disabled irqs, here we enable it straight after entry:
+        */
+       ENABLE_INTERRUPTS(CLBR_NONE)
        /* no need to do an access_ok check here because rbp has been
           32bit zero extended */ 
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       addq    pax_user_shadow_base,%rbp
+       ASM_PAX_OPEN_USERLAND
+#endif
+
        ASM_STAC
 1:     movl    (%rbp),%ebp
        _ASM_EXTABLE(1b,ia32_badarg)
        ASM_CLAC
 
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       ASM_PAX_CLOSE_USERLAND
+#endif
+
        /*
         * Sysenter doesn't filter flags, so we need to clear NT
         * ourselves.  To save a few cycles, we can check whether
@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
        jnz sysenter_fix_flags
 sysenter_flags_fixed:
 
-       orl     $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-       testl   $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+       GET_THREAD_INFO(%r11)
+       orl    $TS_COMPAT,TI_status(%r11)
+       testl  $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
        CFI_REMEMBER_STATE
        jnz  sysenter_tracesys
        cmpq    $(IA32_NR_syscalls-1),%rax
@@ -172,15 +218,18 @@ sysenter_do_call:
 sysenter_dispatch:
        call    *ia32_sys_call_table(,%rax,8)
        movq    %rax,RAX-ARGOFFSET(%rsp)
+       GET_THREAD_INFO(%r11)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
-       testl   $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+       testl   $_TIF_ALLWORK_MASK,TI_flags(%r11)
        jnz     sysexit_audit
 sysexit_from_sys_call:
-       andl    $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+       pax_exit_kernel_user
+       pax_erase_kstack
+       andl    $~TS_COMPAT,TI_status(%r11)
        /* clear IF, that popfq doesn't enable interrupts early */
-       andl  $~0x200,EFLAGS-R11(%rsp) 
-       movl    RIP-R11(%rsp),%edx              /* User %eip */
+       andl  $~X86_EFLAGS_IF,EFLAGS(%rsp)
+       movl    RIP(%rsp),%edx          /* User %eip */
        CFI_REGISTER rip,rdx
        RESTORE_ARGS 0,24,0,0,0,0
        xorq    %r8,%r8
@@ -205,6 +254,9 @@ sysexit_from_sys_call:
        movl %ebx,%esi                  /* 2nd arg: 1st syscall arg */
        movl %eax,%edi                  /* 1st arg: syscall number */
        call __audit_syscall_entry
+
+       pax_erase_kstack
+
        movl RAX-ARGOFFSET(%rsp),%eax   /* reload syscall number */
        cmpq $(IA32_NR_syscalls-1),%rax
        ja ia32_badsys
@@ -216,7 +268,7 @@ sysexit_from_sys_call:
        .endm
 
        .macro auditsys_exit exit
-       testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+       testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
        jnz ia32_ret_from_sys_call
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
@@ -227,11 +279,12 @@ sysexit_from_sys_call:
 1:     setbe %al               /* 1 if error, 0 if not */
        movzbl %al,%edi         /* zero-extend that into %edi */
        call __audit_syscall_exit
+       GET_THREAD_INFO(%r11)
        movq RAX-ARGOFFSET(%rsp),%rax   /* reload syscall return value */
        movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
-       testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+       testl %edi,TI_flags(%r11)
        jz \exit
        CLEAR_RREGS -ARGOFFSET
        jmp int_with_check
@@ -253,7 +306,7 @@ sysenter_fix_flags:
 
 sysenter_tracesys:
 #ifdef CONFIG_AUDITSYSCALL
-       testl   $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+       testl   $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
        jz      sysenter_auditsys
 #endif
        SAVE_REST
@@ -265,6 +318,9 @@ sysenter_tracesys:
        RESTORE_REST
        cmpq    $(IA32_NR_syscalls-1),%rax
        ja      int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
+
+       pax_erase_kstack
+
        jmp     sysenter_do_call
        CFI_ENDPROC
 ENDPROC(ia32_sysenter_target)
@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
 ENTRY(ia32_cstar_target)
        CFI_STARTPROC32 simple
        CFI_SIGNAL_FRAME
-       CFI_DEF_CFA     rsp,KERNEL_STACK_OFFSET
+       CFI_DEF_CFA     rsp,0
        CFI_REGISTER    rip,rcx
        /*CFI_REGISTER  rflags,r11*/
        SWAPGS_UNSAFE_STACK
        movl    %esp,%r8d
        CFI_REGISTER    rsp,r8
        movq    PER_CPU_VAR(kernel_stack),%rsp
+       SAVE_ARGS 8*6,0,0
+       pax_enter_kernel_user
+
+#ifdef CONFIG_PAX_RANDKSTACK
+       pax_erase_kstack
+#endif
+
        /*
         * No need to follow this irqs on/off section: the syscall
         * disabled irqs and here we enable it straight after entry:
         */
        ENABLE_INTERRUPTS(CLBR_NONE)
-       SAVE_ARGS 8,0,0
        movl    %eax,%eax       /* zero extension */
        movq    %rax,ORIG_RAX-ARGOFFSET(%rsp)
        movq    %rcx,RIP-ARGOFFSET(%rsp)
@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
        /* no need to do an access_ok check here because r8 has been
           32bit zero extended */ 
        /* hardware stack frame is complete now */      
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       ASM_PAX_OPEN_USERLAND
+       movq    pax_user_shadow_base,%r8
+       addq    RSP-ARGOFFSET(%rsp),%r8
+#endif
+
        ASM_STAC
 1:     movl    (%r8),%r9d
        _ASM_EXTABLE(1b,ia32_badarg)
        ASM_CLAC
-       orl     $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-       testl   $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       ASM_PAX_CLOSE_USERLAND
+#endif
+
+       GET_THREAD_INFO(%r11)
+       orl   $TS_COMPAT,TI_status(%r11)
+       testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
        CFI_REMEMBER_STATE
        jnz   cstar_tracesys
        cmpq $IA32_NR_syscalls-1,%rax
@@ -335,13 +410,16 @@ cstar_do_call:
 cstar_dispatch:
        call *ia32_sys_call_table(,%rax,8)
        movq %rax,RAX-ARGOFFSET(%rsp)
+       GET_THREAD_INFO(%r11)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
-       testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+       testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
        jnz sysretl_audit
 sysretl_from_sys_call:
-       andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-       RESTORE_ARGS 0,-ARG_SKIP,0,0,0
+       pax_exit_kernel_user
+       pax_erase_kstack
+       andl $~TS_COMPAT,TI_status(%r11)
+       RESTORE_ARGS 0,-ORIG_RAX,0,0,0
        movl RIP-ARGOFFSET(%rsp),%ecx
        CFI_REGISTER rip,rcx
        movl EFLAGS-ARGOFFSET(%rsp),%r11d       
@@ -368,7 +446,7 @@ sysretl_audit:
 
 cstar_tracesys:
 #ifdef CONFIG_AUDITSYSCALL
-       testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+       testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
        jz cstar_auditsys
 #endif
        xchgl %r9d,%ebp
@@ -382,11 +460,19 @@ cstar_tracesys:
        xchgl %ebp,%r9d
        cmpq $(IA32_NR_syscalls-1),%rax
        ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
+
+       pax_erase_kstack
+
        jmp cstar_do_call
 END(ia32_cstar_target)
                                
 ia32_badarg:
        ASM_CLAC
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       ASM_PAX_CLOSE_USERLAND
+#endif
+
        movq $-EFAULT,%rax
        jmp ia32_sysret
        CFI_ENDPROC
@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
        CFI_REL_OFFSET  rip,RIP-RIP
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        SWAPGS
-       /*
-        * No need to follow this irqs on/off section: the syscall
-        * disabled irqs and here we enable it straight after entry:
-        */
-       ENABLE_INTERRUPTS(CLBR_NONE)
        movl %eax,%eax
        pushq_cfi %rax
        cld
        /* note the registers are not zero extended to the sf.
           this could be a problem. */
        SAVE_ARGS 0,1,0
-       orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-       testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+       pax_enter_kernel_user
+
+#ifdef CONFIG_PAX_RANDKSTACK
+       pax_erase_kstack
+#endif
+
+       /*
+        * No need to follow this irqs on/off section: the syscall
+        * disabled irqs and here we enable it straight after entry:
+        */
+       ENABLE_INTERRUPTS(CLBR_NONE)
+       GET_THREAD_INFO(%r11)
+       orl   $TS_COMPAT,TI_status(%r11)
+       testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
        jnz ia32_tracesys
        cmpq $(IA32_NR_syscalls-1),%rax
        ja ia32_badsys
@@ -458,6 +551,9 @@ ia32_tracesys:
        RESTORE_REST
        cmpq $(IA32_NR_syscalls-1),%rax
        ja  int_ret_from_sys_call       /* ia32_tracesys has set RAX(%rsp) */
+
+       pax_erase_kstack
+
        jmp ia32_do_call
 END(ia32_syscall)
 
index 8e0ceecdc95790d7a53eb3fe5bc1c3867bcb9e7f..af135047ff11782f8afd7f82c480e9b43bf6d9d0 100644 (file)
@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
  */
 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
 {
-       typeof(ubuf->st_uid) uid = 0;
-       typeof(ubuf->st_gid) gid = 0;
+       typeof(((struct stat64 *)0)->st_uid) uid = 0;
+       typeof(((struct stat64 *)0)->st_gid) gid = 0;
        SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
        SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
        if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
index 372231c22a47a46b1417e5c6739d88eb927f89fd..51b537db50a2e0792f4d3372c76e2dde92138107 100644 (file)
        .endm
 #endif
 
+#ifdef KERNEXEC_PLUGIN
+       .macro pax_force_retaddr_bts rip=0
+       btsq $63,\rip(%rsp)
+       .endm
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
+       .macro pax_force_retaddr rip=0, reload=0
+       btsq $63,\rip(%rsp)
+       .endm
+       .macro pax_force_fptr ptr
+       btsq $63,\ptr
+       .endm
+       .macro pax_set_fptr_mask
+       .endm
+#endif
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+       .macro pax_force_retaddr rip=0, reload=0
+       .if \reload
+       pax_set_fptr_mask
+       .endif
+       orq %r12,\rip(%rsp)
+       .endm
+       .macro pax_force_fptr ptr
+       orq %r12,\ptr
+       .endm
+       .macro pax_set_fptr_mask
+       movabs $0x8000000000000000,%r12
+       .endm
+#endif
+#else
+       .macro pax_force_retaddr rip=0, reload=0
+       .endm
+       .macro pax_force_fptr ptr
+       .endm
+       .macro pax_force_retaddr_bts rip=0
+       .endm
+       .macro pax_set_fptr_mask
+       .endm
+#endif
+
 .macro altinstruction_entry orig alt feature orig_len alt_len
        .long \orig - .
        .long \alt - .
index 473bdbee378a10ac2030b586dc33d3be74de5a11..b1e33776c0f155325ba0184ccc3162c69551b1c4 100644 (file)
@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
        ".pushsection .discard,\"aw\",@progbits\n"                      \
        DISCARD_ENTRY(1)                                                \
        ".popsection\n"                                                 \
-       ".pushsection .altinstr_replacement, \"ax\"\n"                  \
+       ".pushsection .altinstr_replacement, \"a\"\n"                   \
        ALTINSTR_REPLACEMENT(newinstr, feature, 1)                      \
        ".popsection"
 
@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
        DISCARD_ENTRY(1)                                                \
        DISCARD_ENTRY(2)                                                \
        ".popsection\n"                                                 \
-       ".pushsection .altinstr_replacement, \"ax\"\n"                  \
+       ".pushsection .altinstr_replacement, \"a\"\n"                   \
        ALTINSTR_REPLACEMENT(newinstr1, feature1, 1)                    \
        ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)                    \
        ".popsection"
index 465b309af25425dce160848ab8c32df14058ef35..ab7e51fc296f4538b046111a2f3150a79f871e64 100644 (file)
@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
 
 #ifdef CONFIG_X86_LOCAL_APIC
 
-extern unsigned int apic_verbosity;
+extern int apic_verbosity;
 extern int local_apic_timer_c2_ok;
 
 extern int disable_apic;
index 20370c6db74bb6dd09bbb78d4ded0b7adfd27d3e..a2eb9b01accdbd02ac4c5511ddb3dcaef1d870b8 100644 (file)
@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
        __asm__ __volatile__(APM_DO_ZERO_SEGS
                "pushl %%edi\n\t"
                "pushl %%ebp\n\t"
-               "lcall *%%cs:apm_bios_entry\n\t"
+               "lcall *%%ss:apm_bios_entry\n\t"
                "setc %%al\n\t"
                "popl %%ebp\n\t"
                "popl %%edi\n\t"
@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
        __asm__ __volatile__(APM_DO_ZERO_SEGS
                "pushl %%edi\n\t"
                "pushl %%ebp\n\t"
-               "lcall *%%cs:apm_bios_entry\n\t"
+               "lcall *%%ss:apm_bios_entry\n\t"
                "setc %%bl\n\t"
                "popl %%ebp\n\t"
                "popl %%edi\n\t"
index 5e5cd123fdfbc2b0fe90cabc5d27948d3ded267a..51cdc93831da5ef802d13b055819ba822ff02068 100644 (file)
@@ -27,6 +27,17 @@ static inline int atomic_read(const atomic_t *v)
        return ACCESS_ONCE((v)->counter);
 }
 
+/**
+ * atomic_read_unchecked - read atomic variable
+ * @v: pointer of type atomic_unchecked_t
+ *
+ * Atomically reads the value of @v.
+ */
+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+       return ACCESS_ONCE((v)->counter);
+}
+
 /**
  * atomic_set - set atomic variable
  * @v: pointer of type atomic_t
@@ -39,6 +50,18 @@ static inline void atomic_set(atomic_t *v, int i)
        v->counter = i;
 }
 
+/**
+ * atomic_set_unchecked - set atomic variable
+ * @v: pointer of type atomic_unchecked_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+       v->counter = i;
+}
+
 /**
  * atomic_add - add integer to atomic variable
  * @i: integer value to add
@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
  */
 static inline void atomic_add(int i, atomic_t *v)
 {
-       asm volatile(LOCK_PREFIX "addl %1,%0"
+       asm volatile(LOCK_PREFIX "addl %1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                    "jno 0f\n"
+                    LOCK_PREFIX "subl %1,%0\n"
+                    "int $4\n0:\n"
+                    _ASM_EXTABLE(0b, 0b)
+#endif
+
+                    : "+m" (v->counter)
+                    : "ir" (i));
+}
+
+/**
+ * atomic_add_unchecked - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_unchecked_t
+ *
+ * Atomically adds @i to @v.
+ */
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+       asm volatile(LOCK_PREFIX "addl %1,%0\n"
                     : "+m" (v->counter)
                     : "ir" (i));
 }
@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
  */
 static inline void atomic_sub(int i, atomic_t *v)
 {
-       asm volatile(LOCK_PREFIX "subl %1,%0"
+       asm volatile(LOCK_PREFIX "subl %1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                    "jno 0f\n"
+                    LOCK_PREFIX "addl %1,%0\n"
+                    "int $4\n0:\n"
+                    _ASM_EXTABLE(0b, 0b)
+#endif
+
+                    : "+m" (v->counter)
+                    : "ir" (i));
+}
+
+/**
+ * atomic_sub_unchecked - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_unchecked_t
+ *
+ * Atomically subtracts @i from @v.
+ */
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+       asm volatile(LOCK_PREFIX "subl %1,%0\n"
                     : "+m" (v->counter)
                     : "ir" (i));
 }
@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
  */
 static inline int atomic_sub_and_test(int i, atomic_t *v)
 {
-       GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
+       GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl",  v->counter, "er", i, "%0", "e");
 }
 
 /**
@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
  */
 static inline void atomic_inc(atomic_t *v)
 {
-       asm volatile(LOCK_PREFIX "incl %0"
+       asm volatile(LOCK_PREFIX "incl %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                    "jno 0f\n"
+                    LOCK_PREFIX "decl %0\n"
+                    "int $4\n0:\n"
+                    _ASM_EXTABLE(0b, 0b)
+#endif
+
+                    : "+m" (v->counter));
+}
+
+/**
+ * atomic_inc_unchecked - increment atomic variable
+ * @v: pointer of type atomic_unchecked_t
+ *
+ * Atomically increments @v by 1.
+ */
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+       asm volatile(LOCK_PREFIX "incl %0\n"
                     : "+m" (v->counter));
 }
 
@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
  */
 static inline void atomic_dec(atomic_t *v)
 {
-       asm volatile(LOCK_PREFIX "decl %0"
+       asm volatile(LOCK_PREFIX "decl %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                    "jno 0f\n"
+                    LOCK_PREFIX "incl %0\n"
+                    "int $4\n0:\n"
+                    _ASM_EXTABLE(0b, 0b)
+#endif
+
+                    : "+m" (v->counter));
+}
+
+/**
+ * atomic_dec_unchecked - decrement atomic variable
+ * @v: pointer of type atomic_unchecked_t
+ *
+ * Atomically decrements @v by 1.
+ */
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+       asm volatile(LOCK_PREFIX "decl %0\n"
                     : "+m" (v->counter));
 }
 
@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
  */
 static inline int atomic_dec_and_test(atomic_t *v)
 {
-       GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
+       GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
 }
 
 /**
@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
  */
 static inline int atomic_inc_and_test(atomic_t *v)
 {
-       GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
+       GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
+}
+
+/**
+ * atomic_inc_and_test_unchecked - increment and test
+ * @v: pointer of type atomic_unchecked_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+       GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
 }
 
 /**
@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
  */
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
-       GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
+       GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
 }
 
 /**
@@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static inline int atomic_add_return(int i, atomic_t *v)
+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
+{
+       return i + xadd_check_overflow(&v->counter, i);
+}
+
+/**
+ * atomic_add_return_unchecked - add integer and return
+ * @i: integer value to add
+ * @v: pointer of type atomic_unchecked_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
 {
        return i + xadd(&v->counter, i);
 }
@@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
  *
  * Atomically subtracts @i from @v and returns @v - @i
  */
-static inline int atomic_sub_return(int i, atomic_t *v)
+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
 {
        return atomic_add_return(-i, v);
 }
 
 #define atomic_inc_return(v)  (atomic_add_return(1, v))
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+       return atomic_add_return_unchecked(1, v);
+}
 #define atomic_dec_return(v)  (atomic_sub_return(1, v))
 
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+       return cmpxchg(&v->counter, old, new);
+}
+
+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
 {
        return cmpxchg(&v->counter, old, new);
 }
@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
        return xchg(&v->counter, new);
 }
 
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
+       return xchg(&v->counter, new);
+}
+
 /**
  * __atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
@@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
  */
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
-       int c, old;
+       int c, old, new;
        c = atomic_read(v);
        for (;;) {
-               if (unlikely(c == (u)))
+               if (unlikely(c == u))
                        break;
-               old = atomic_cmpxchg((v), c, c + (a));
+
+               asm volatile("addl %2,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                            "jno 0f\n"
+                            "subl %2,%0\n"
+                            "int $4\n0:\n"
+                            _ASM_EXTABLE(0b, 0b)
+#endif
+
+                            : "=r" (new)
+                            : "0" (c), "ir" (a));
+
+               old = atomic_cmpxchg(v, c, new);
                if (likely(old == c))
                        break;
                c = old;
@@ -206,6 +365,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
        return c;
 }
 
+/**
+ * atomic_inc_not_zero_hint - increment if not null
+ * @v: pointer of type atomic_t
+ * @hint: probable value of the atomic before the increment
+ *
+ * This version of atomic_inc_not_zero() gives a hint of probable
+ * value of the atomic. This helps processor to not read the memory
+ * before doing the atomic read/modify/write cycle, lowering
+ * number of bus transactions on some arches.
+ *
+ * Returns: 0 if increment was not done, 1 otherwise.
+ */
+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
+{
+       int val, c = hint, new;
+
+       /* sanity test, should be removed by compiler if hint is a constant */
+       if (!hint)
+               return __atomic_add_unless(v, 1, 0);
+
+       do {
+               asm volatile("incl %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                            "jno 0f\n"
+                            "decl %0\n"
+                            "int $4\n0:\n"
+                            _ASM_EXTABLE(0b, 0b)
+#endif
+
+                            : "=r" (new)
+                            : "0" (c));
+
+               val = atomic_cmpxchg(v, c, new);
+               if (val == c)
+                       return 1;
+               c = val;
+       } while (c);
+
+       return 0;
+}
+
 /**
  * atomic_inc_short - increment of a short integer
  * @v: pointer to type int
@@ -220,14 +422,37 @@ static inline short int atomic_inc_short(short int *v)
 }
 
 /* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr)                          \
-       asm volatile(LOCK_PREFIX "andl %0,%1"                   \
-                    : : "r" (~(mask)), "m" (*(addr)) : "memory")
-
-#define atomic_set_mask(mask, addr)                            \
-       asm volatile(LOCK_PREFIX "orl %0,%1"                    \
-                    : : "r" ((unsigned)(mask)), "m" (*(addr))  \
-                    : "memory")
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+       asm volatile(LOCK_PREFIX "andl %1,%0"
+                    : "+m" (v->counter)
+                    : "r" (~(mask))
+                    : "memory");
+}
+
+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
+{
+       asm volatile(LOCK_PREFIX "andl %1,%0"
+                    : "+m" (v->counter)
+                    : "r" (~(mask))
+                    : "memory");
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+       asm volatile(LOCK_PREFIX "orl %1,%0"
+                    : "+m" (v->counter)
+                    : "r" (mask)
+                    : "memory");
+}
+
+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
+{
+       asm volatile(LOCK_PREFIX "orl %1,%0"
+                    : "+m" (v->counter)
+                    : "r" (mask)
+                    : "memory");
+}
 
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
index b154de75c90cb54f3639bb49b04f6a01b3237fc4..bf18a5af8da0cd2d781903ba048876884a7865ad 100644 (file)
@@ -12,6 +12,14 @@ typedef struct {
        u64 __aligned(8) counter;
 } atomic64_t;
 
+#ifdef CONFIG_PAX_REFCOUNT
+typedef struct {
+       u64 __aligned(8) counter;
+} atomic64_unchecked_t;
+#else
+typedef atomic64_t atomic64_unchecked_t;
+#endif
+
 #define ATOMIC64_INIT(val)     { (val) }
 
 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
@@ -37,21 +45,31 @@ typedef struct {
        ATOMIC64_DECL_ONE(sym##_386)
 
 ATOMIC64_DECL_ONE(add_386);
+ATOMIC64_DECL_ONE(add_unchecked_386);
 ATOMIC64_DECL_ONE(sub_386);
+ATOMIC64_DECL_ONE(sub_unchecked_386);
 ATOMIC64_DECL_ONE(inc_386);
+ATOMIC64_DECL_ONE(inc_unchecked_386);
 ATOMIC64_DECL_ONE(dec_386);
+ATOMIC64_DECL_ONE(dec_unchecked_386);
 #endif
 
 #define alternative_atomic64(f, out, in...) \
        __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
 
 ATOMIC64_DECL(read);
+ATOMIC64_DECL(read_unchecked);
 ATOMIC64_DECL(set);
+ATOMIC64_DECL(set_unchecked);
 ATOMIC64_DECL(xchg);
 ATOMIC64_DECL(add_return);
+ATOMIC64_DECL(add_return_unchecked);
 ATOMIC64_DECL(sub_return);
+ATOMIC64_DECL(sub_return_unchecked);
 ATOMIC64_DECL(inc_return);
+ATOMIC64_DECL(inc_return_unchecked);
 ATOMIC64_DECL(dec_return);
+ATOMIC64_DECL(dec_return_unchecked);
 ATOMIC64_DECL(dec_if_positive);
 ATOMIC64_DECL(inc_not_zero);
 ATOMIC64_DECL(add_unless);
@@ -76,6 +94,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
        return cmpxchg64(&v->counter, o, n);
 }
 
+/**
+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
+ * @p: pointer to type atomic64_unchecked_t
+ * @o: expected value
+ * @n: new value
+ *
+ * Atomically sets @v to @n if it was equal to @o and returns
+ * the old value.
+ */
+
+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
+{
+       return cmpxchg64(&v->counter, o, n);
+}
+
 /**
  * atomic64_xchg - xchg atomic64 variable
  * @v: pointer to type atomic64_t
@@ -111,6 +144,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
                             : "eax", "edx", "memory");
 }
 
+/**
+ * atomic64_set_unchecked - set atomic64 variable
+ * @v: pointer to type atomic64_unchecked_t
+ * @n: value to assign
+ *
+ * Atomically sets the value of @v to @n.
+ */
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
+{
+       unsigned high = (unsigned)(i >> 32);
+       unsigned low = (unsigned)i;
+       alternative_atomic64(set, /* no output */,
+                            "S" (v), "b" (low), "c" (high)
+                            : "eax", "edx", "memory");
+}
+
 /**
  * atomic64_read - read atomic64 variable
  * @v: pointer to type atomic64_t
@@ -124,6 +173,19 @@ static inline long long atomic64_read(const atomic64_t *v)
        return r;
  }
 
+/**
+ * atomic64_read_unchecked - read atomic64 variable
+ * @v: pointer to type atomic64_unchecked_t
+ *
+ * Atomically reads the value of @v and returns it.
+ */
+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
+{
+       long long r;
+       alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
+       return r;
+ }
+
 /**
  * atomic64_add_return - add and return
  * @i: integer value to add
@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
        return i;
 }
 
+/**
+ * atomic64_add_return_unchecked - add and return
+ * @i: integer value to add
+ * @v: pointer to type atomic64_unchecked_t
+ *
+ * Atomically adds @i to @v and returns @i + *@v
+ */
+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
+{
+       alternative_atomic64(add_return_unchecked,
+                            ASM_OUTPUT2("+A" (i), "+c" (v)),
+                            ASM_NO_INPUT_CLOBBER("memory"));
+       return i;
+}
+
 /*
  * Other variants with different arithmetic operators:
  */
@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
        return a;
 }
 
+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
+{
+       long long a;
+       alternative_atomic64(inc_return_unchecked, "=&A" (a),
+                            "S" (v) : "memory", "ecx");
+       return a;
+}
+
 static inline long long atomic64_dec_return(atomic64_t *v)
 {
        long long a;
@@ -181,6 +266,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
        return i;
 }
 
+/**
+ * atomic64_add_unchecked - add integer to atomic64 variable
+ * @i: integer value to add
+ * @v: pointer to type atomic64_unchecked_t
+ *
+ * Atomically adds @i to @v.
+ */
+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
+{
+       __alternative_atomic64(add_unchecked, add_return_unchecked,
+                              ASM_OUTPUT2("+A" (i), "+c" (v)),
+                              ASM_NO_INPUT_CLOBBER("memory"));
+       return i;
+}
+
 /**
  * atomic64_sub - subtract the atomic64 variable
  * @i: integer value to subtract
index f8d273e18516dedf885bbafb16224c189913e14f..02f39f311410b26acc25ecc851fc1f88f873799e 100644 (file)
@@ -21,6 +21,18 @@ static inline long atomic64_read(const atomic64_t *v)
        return ACCESS_ONCE((v)->counter);
 }
 
+/**
+ * atomic64_read_unchecked - read atomic64 variable
+ * @v: pointer of type atomic64_unchecked_t
+ *
+ * Atomically reads the value of @v.
+ * Doesn't imply a read memory barrier.
+ */
+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+       return ACCESS_ONCE((v)->counter);
+}
+
 /**
  * atomic64_set - set atomic64 variable
  * @v: pointer to type atomic64_t
@@ -33,6 +45,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
        v->counter = i;
 }
 
+/**
+ * atomic64_set_unchecked - set atomic64 variable
+ * @v: pointer to type atomic64_unchecked_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
+{
+       v->counter = i;
+}
+
 /**
  * atomic64_add - add integer to atomic64 variable
  * @i: integer value to add
@@ -41,6 +65,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
  * Atomically adds @i to @v.
  */
 static inline void atomic64_add(long i, atomic64_t *v)
+{
+       asm volatile(LOCK_PREFIX "addq %1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                    "jno 0f\n"
+                    LOCK_PREFIX "subq %1,%0\n"
+                    "int $4\n0:\n"
+                    _ASM_EXTABLE(0b, 0b)
+#endif
+
+                    : "=m" (v->counter)
+                    : "er" (i), "m" (v->counter));
+}
+
+/**
+ * atomic64_add_unchecked - add integer to atomic64 variable
+ * @i: integer value to add
+ * @v: pointer to type atomic64_unchecked_t
+ *
+ * Atomically adds @i to @v.
+ */
+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
 {
        asm volatile(LOCK_PREFIX "addq %1,%0"
                     : "=m" (v->counter)
@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
  */
 static inline void atomic64_sub(long i, atomic64_t *v)
 {
-       asm volatile(LOCK_PREFIX "subq %1,%0"
+       asm volatile(LOCK_PREFIX "subq %1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                    "jno 0f\n"
+                    LOCK_PREFIX "addq %1,%0\n"
+                    "int $4\n0:\n"
+                    _ASM_EXTABLE(0b, 0b)
+#endif
+
+                    : "=m" (v->counter)
+                    : "er" (i), "m" (v->counter));
+}
+
+/**
+ * atomic64_sub_unchecked - subtract the atomic64 variable
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_unchecked_t
+ *
+ * Atomically subtracts @i from @v.
+ */
+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
+{
+       asm volatile(LOCK_PREFIX "subq %1,%0\n"
                     : "=m" (v->counter)
                     : "er" (i), "m" (v->counter));
 }
@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
  */
 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
 {
-       GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
+       GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
 }
 
 /**
@@ -82,6 +150,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
  * Atomically increments @v by 1.
  */
 static inline void atomic64_inc(atomic64_t *v)
+{
+       asm volatile(LOCK_PREFIX "incq %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                    "jno 0f\n"
+                    LOCK_PREFIX "decq %0\n"
+                    "int $4\n0:\n"
+                    _ASM_EXTABLE(0b, 0b)
+#endif
+
+                    : "=m" (v->counter)
+                    : "m" (v->counter));
+}
+
+/**
+ * atomic64_inc_unchecked - increment atomic64 variable
+ * @v: pointer to type atomic64_unchecked_t
+ *
+ * Atomically increments @v by 1.
+ */
+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
 {
        asm volatile(LOCK_PREFIX "incq %0"
                     : "=m" (v->counter)
@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
  */
 static inline void atomic64_dec(atomic64_t *v)
 {
-       asm volatile(LOCK_PREFIX "decq %0"
+       asm volatile(LOCK_PREFIX "decq %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                    "jno 0f\n"
+                    LOCK_PREFIX "incq %0\n"
+                    "int $4\n0:\n"
+                    _ASM_EXTABLE(0b, 0b)
+#endif
+
+                    : "=m" (v->counter)
+                    : "m" (v->counter));
+}
+
+/**
+ * atomic64_dec_unchecked - decrement atomic64 variable
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically decrements @v by 1.
+ */
+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
+{
+       asm volatile(LOCK_PREFIX "decq %0\n"
                     : "=m" (v->counter)
                     : "m" (v->counter));
 }
@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
  */
 static inline int atomic64_dec_and_test(atomic64_t *v)
 {
-       GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
+       GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
 }
 
 /**
@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
  */
 static inline int atomic64_inc_and_test(atomic64_t *v)
 {
-       GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
+       GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
 }
 
 /**
@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
  */
 static inline int atomic64_add_negative(long i, atomic64_t *v)
 {
-       GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
+       GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq",  v->counter, "er", i, "%0", "s");
 }
 
 /**
@@ -149,6 +259,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
  * Atomically adds @i to @v and returns @i + @v
  */
 static inline long atomic64_add_return(long i, atomic64_t *v)
+{
+       return i + xadd_check_overflow(&v->counter, i);
+}
+
+/**
+ * atomic64_add_return_unchecked - add and return
+ * @i: integer value to add
+ * @v: pointer to type atomic64_unchecked_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
 {
        return i + xadd(&v->counter, i);
 }
@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
 }
 
 #define atomic64_inc_return(v)  (atomic64_add_return(1, (v)))
+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
+{
+       return atomic64_add_return_unchecked(1, v);
+}
 #define atomic64_dec_return(v)  (atomic64_sub_return(1, (v)))
 
 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
        return cmpxchg(&v->counter, old, new);
 }
 
+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
+{
+       return cmpxchg(&v->counter, old, new);
+}
+
 static inline long atomic64_xchg(atomic64_t *v, long new)
 {
        return xchg(&v->counter, new);
@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
  */
 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
-       long c, old;
+       long c, old, new;
        c = atomic64_read(v);
        for (;;) {
-               if (unlikely(c == (u)))
+               if (unlikely(c == u))
                        break;
-               old = atomic64_cmpxchg((v), c, c + (a));
+
+               asm volatile("add %2,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                            "jno 0f\n"
+                            "sub %2,%0\n"
+                            "int $4\n0:\n"
+                            _ASM_EXTABLE(0b, 0b)
+#endif
+
+                            : "=r" (new)
+                            : "0" (c), "ir" (a));
+
+               old = atomic64_cmpxchg(v, c, new);
                if (likely(old == c))
                        break;
                c = old;
        }
-       return c != (u);
+       return c != u;
 }
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
index 2ab1eb33106eec42eff90d27b98cb698b5c4c835..1e8cc5d64f902218c055a7f07f2420037fc21f40 100644 (file)
@@ -57,7 +57,7 @@
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
        smp_mb();                                                       \
-       ACCESS_ONCE(*p) = (v);                                          \
+       ACCESS_ONCE_RW(*p) = (v);                                       \
 } while (0)
 
 #define smp_load_acquire(p)                                            \
@@ -74,7 +74,7 @@ do {                                                                  \
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
-       ACCESS_ONCE(*p) = (v);                                          \
+       ACCESS_ONCE_RW(*p) = (v);                                       \
 } while (0)
 
 #define smp_load_acquire(p)                                            \
index cfe3b954d5e41cbd96be1fdb147c164b63581814..d01b1189c04b92c1d733356f62415409e30da200 100644 (file)
@@ -50,7 +50,7 @@
  * a mask operation on a byte.
  */
 #define IS_IMMEDIATE(nr)               (__builtin_constant_p(nr))
-#define CONST_MASK_ADDR(nr, addr)      BITOP_ADDR((void *)(addr) + ((nr)>>3))
+#define CONST_MASK_ADDR(nr, addr)      BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
 #define CONST_MASK(nr)                 (1 << ((nr) & 7))
 
 /**
@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
  */
 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-       GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
+       GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
 }
 
 /**
@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
  */
 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-       GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
+       GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
 }
 
 /**
@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
  */
 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-       GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
+       GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
 }
 
 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-static inline unsigned long __ffs(unsigned long word)
+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
 {
        asm("rep; bsf %1,%0"
                : "=r" (word)
@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
  *
  * Undefined if no zero exists, so code should check against ~0UL first.
  */
-static inline unsigned long ffz(unsigned long word)
+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
 {
        asm("rep; bsf %1,%0"
                : "=r" (word)
@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
  *
  * Undefined if no set bit exists, so code should check against 0 first.
  */
-static inline unsigned long __fls(unsigned long word)
+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
 {
        asm("bsr %1,%0"
            : "=r" (word)
@@ -434,7 +434,7 @@ static inline int ffs(int x)
  * set bit if value is nonzero. The last (most significant) bit is
  * at position 32.
  */
-static inline int fls(int x)
+static inline int __intentional_overflow(-1) fls(int x)
 {
        int r;
 
@@ -476,7 +476,7 @@ static inline int fls(int x)
  * at position 64.
  */
 #ifdef CONFIG_X86_64
-static __always_inline int fls64(__u64 x)
+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
 {
        int bitpos = -1;
        /*
index 4fa687a47a62d85d4fd84062c2a5fc7c622c1cc7..60f2d39a64415e7d395de7943688509facdfa006 100644 (file)
@@ -6,10 +6,15 @@
 #include <uapi/asm/boot.h>
 
 /* Physical address where kernel should be loaded. */
-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
                                + (CONFIG_PHYSICAL_ALIGN - 1)) \
                                & ~(CONFIG_PHYSICAL_ALIGN - 1))
 
+#ifndef __ASSEMBLY__
+extern unsigned char __LOAD_PHYSICAL_ADDR[];
+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
+#endif
+
 /* Minimum kernel alignment, as a power of two */
 #ifdef CONFIG_X86_64
 #define MIN_KERNEL_ALIGN_LG2   PMD_SHIFT
index 48f99f15452e7d111e8bad21a4830d21231071cc..d78ebf9d1cfb7063e4409af5784ea670cbfad801 100644 (file)
@@ -5,12 +5,13 @@
 
 /* L1 cache line size */
 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_only __attribute__((__section__(".data..read_only")))
 
 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
 
 #ifdef CONFIG_X86_VSMP
 #ifdef CONFIG_SMP
index 76659b67fd11f6da9f9de43aacc74b3edd20697e..72b84395c9b782cc09bb11554fe2d6360d27b3a4 100644 (file)
@@ -82,107 +82,117 @@ For 32-bit we have the following conventions - kernel is built with
 #define RSP            152
 #define SS             160
 
-#define ARGOFFSET      R11
-#define SWFRAME                ORIG_RAX
+#define ARGOFFSET      R15
 
        .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
-       subq  $9*8+\addskip, %rsp
-       CFI_ADJUST_CFA_OFFSET   9*8+\addskip
-       movq_cfi rdi, 8*8
-       movq_cfi rsi, 7*8
-       movq_cfi rdx, 6*8
+       subq  $ORIG_RAX-ARGOFFSET+\addskip, %rsp
+       CFI_ADJUST_CFA_OFFSET   ORIG_RAX-ARGOFFSET+\addskip
+       movq_cfi rdi, RDI
+       movq_cfi rsi, RSI
+       movq_cfi rdx, RDX
 
        .if \save_rcx
-       movq_cfi rcx, 5*8
+       movq_cfi rcx, RCX
        .endif
 
        .if \rax_enosys
-       movq $-ENOSYS, 4*8(%rsp)
+       movq $-ENOSYS, RAX(%rsp)
        .else
-       movq_cfi rax, 4*8
+       movq_cfi rax, RAX
        .endif
 
        .if \save_r891011
-       movq_cfi r8,  3*8
-       movq_cfi r9,  2*8
-       movq_cfi r10, 1*8
-       movq_cfi r11, 0*8
+       movq_cfi r8,  R8
+       movq_cfi r9,  R9
+       movq_cfi r10, R10
+       movq_cfi r11, R11
        .endif
 
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+       movq_cfi r12, R12
+#endif
+
        .endm
 
-#define ARG_SKIP       (9*8)
+#define ARG_SKIP       ORIG_RAX
 
        .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
                            rstor_r8910=1, rstor_rdx=1
+
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+       movq_cfi_restore R12, r12
+#endif
+
        .if \rstor_r11
-       movq_cfi_restore 0*8, r11
+       movq_cfi_restore R11, r11
        .endif
 
        .if \rstor_r8910
-       movq_cfi_restore 1*8, r10
-       movq_cfi_restore 2*8, r9
-       movq_cfi_restore 3*8, r8
+       movq_cfi_restore R10, r10
+       movq_cfi_restore R9, r9
+       movq_cfi_restore R8, r8
        .endif
 
        .if \rstor_rax
-       movq_cfi_restore 4*8, rax
+       movq_cfi_restore RAX, rax
        .endif
 
        .if \rstor_rcx
-       movq_cfi_restore 5*8, rcx
+       movq_cfi_restore RCX, rcx
        .endif
 
        .if \rstor_rdx
-       movq_cfi_restore 6*8, rdx
+       movq_cfi_restore RDX, rdx
        .endif
 
-       movq_cfi_restore 7*8, rsi
-       movq_cfi_restore 8*8, rdi
+       movq_cfi_restore RSI, rsi
+       movq_cfi_restore RDI, rdi
 
-       .if ARG_SKIP+\addskip > 0
-       addq $ARG_SKIP+\addskip, %rsp
-       CFI_ADJUST_CFA_OFFSET   -(ARG_SKIP+\addskip)
+       .if ORIG_RAX+\addskip > 0
+       addq $ORIG_RAX+\addskip, %rsp
+       CFI_ADJUST_CFA_OFFSET   -(ORIG_RAX+\addskip)
        .endif
        .endm
 
-       .macro LOAD_ARGS offset, skiprax=0
-       movq \offset(%rsp),    %r11
-       movq \offset+8(%rsp),  %r10
-       movq \offset+16(%rsp), %r9
-       movq \offset+24(%rsp), %r8
-       movq \offset+40(%rsp), %rcx
-       movq \offset+48(%rsp), %rdx
-       movq \offset+56(%rsp), %rsi
-       movq \offset+64(%rsp), %rdi
+       .macro LOAD_ARGS skiprax=0
+       movq R11(%rsp),    %r11
+       movq R10(%rsp),  %r10
+       movq R9(%rsp), %r9
+       movq R8(%rsp), %r8
+       movq RCX(%rsp), %rcx
+       movq RDX(%rsp), %rdx
+       movq RSI(%rsp), %rsi
+       movq RDI(%rsp), %rdi
        .if \skiprax
        .else
-       movq \offset+72(%rsp), %rax
+       movq ORIG_RAX(%rsp), %rax
        .endif
        .endm
 
-#define REST_SKIP      (6*8)
-
        .macro SAVE_REST
-       subq $REST_SKIP, %rsp
-       CFI_ADJUST_CFA_OFFSET   REST_SKIP
-       movq_cfi rbx, 5*8
-       movq_cfi rbp, 4*8
-       movq_cfi r12, 3*8
-       movq_cfi r13, 2*8
-       movq_cfi r14, 1*8
-       movq_cfi r15, 0*8
+       movq_cfi rbx, RBX
+       movq_cfi rbp, RBP
+
+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+       movq_cfi r12, R12
+#endif
+
+       movq_cfi r13, R13
+       movq_cfi r14, R14
+       movq_cfi r15, R15
        .endm
 
        .macro RESTORE_REST
-       movq_cfi_restore 0*8, r15
-       movq_cfi_restore 1*8, r14
-       movq_cfi_restore 2*8, r13
-       movq_cfi_restore 3*8, r12
-       movq_cfi_restore 4*8, rbp
-       movq_cfi_restore 5*8, rbx
-       addq $REST_SKIP, %rsp
-       CFI_ADJUST_CFA_OFFSET   -(REST_SKIP)
+       movq_cfi_restore R15, r15
+       movq_cfi_restore R14, r14
+       movq_cfi_restore R13, r13
+
+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+       movq_cfi_restore R12, r12
+#endif
+
+       movq_cfi_restore RBP, rbp
+       movq_cfi_restore RBX, rbx
        .endm
 
        .macro SAVE_ALL
index f50de69517384b712c1266a3c98626b888d6953a..2b0a4587d33a6a4db358bc36d77e583ce8098d74 100644 (file)
@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
                                            int len, __wsum sum,
                                            int *src_err_ptr, int *dst_err_ptr);
 
+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
+                                                 int len, __wsum sum,
+                                                 int *src_err_ptr, int *dst_err_ptr);
+
+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
+                                                 int len, __wsum sum,
+                                                 int *src_err_ptr, int *dst_err_ptr);
+
 /*
  *     Note: when you get a NULL pointer exception here this means someone
  *     passed in an incorrect kernel address to one of these functions.
@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
 
        might_sleep();
        stac();
-       ret = csum_partial_copy_generic((__force void *)src, dst,
+       ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
                                        len, sum, err_ptr, NULL);
        clac();
 
@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
        might_sleep();
        if (access_ok(VERIFY_WRITE, dst, len)) {
                stac();
-               ret = csum_partial_copy_generic(src, (__force void *)dst,
+               ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
                                                len, sum, NULL, err_ptr);
                clac();
                return ret;
index 99c105d78b7e123eec41d9bb5b2c308e5205d514..2f667ac0d1009ac4768f4cd2915f5745fc6d3d15 100644 (file)
@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
        __compiletime_error("Bad argument size for cmpxchg");
 extern void __xadd_wrong_size(void)
        __compiletime_error("Bad argument size for xadd");
+extern void __xadd_check_overflow_wrong_size(void)
+       __compiletime_error("Bad argument size for xadd_check_overflow");
 extern void __add_wrong_size(void)
        __compiletime_error("Bad argument size for add");
+extern void __add_check_overflow_wrong_size(void)
+       __compiletime_error("Bad argument size for add_check_overflow");
 
 /*
  * Constants for operation sizes. On 32-bit, the 64-bit size it set to
@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
                __ret;                                                  \
        })
 
+#ifdef CONFIG_PAX_REFCOUNT
+#define __xchg_op_check_overflow(ptr, arg, op, lock)                   \
+       ({                                                              \
+               __typeof__ (*(ptr)) __ret = (arg);                      \
+               switch (sizeof(*(ptr))) {                               \
+               case __X86_CASE_L:                                      \
+                       asm volatile (lock #op "l %0, %1\n"             \
+                                     "jno 0f\n"                        \
+                                     "mov %0,%1\n"                     \
+                                     "int $4\n0:\n"                    \
+                                     _ASM_EXTABLE(0b, 0b)              \
+                                     : "+r" (__ret), "+m" (*(ptr))     \
+                                     : : "memory", "cc");              \
+                       break;                                          \
+               case __X86_CASE_Q:                                      \
+                       asm volatile (lock #op "q %q0, %1\n"            \
+                                     "jno 0f\n"                        \
+                                     "mov %0,%1\n"                     \
+                                     "int $4\n0:\n"                    \
+                                     _ASM_EXTABLE(0b, 0b)              \
+                                     : "+r" (__ret), "+m" (*(ptr))     \
+                                     : : "memory", "cc");              \
+                       break;                                          \
+               default:                                                \
+                       __ ## op ## _check_overflow_wrong_size();       \
+               }                                                       \
+               __ret;                                                  \
+       })
+#else
+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
+#endif
+
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
  * Since this is generally used to protect other memory information, we
@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
 #define xadd_sync(ptr, inc)    __xadd((ptr), (inc), "lock; ")
 #define xadd_local(ptr, inc)   __xadd((ptr), (inc), "")
 
+#define __xadd_check_overflow(ptr, inc, lock)  __xchg_op_check_overflow((ptr), (inc), xadd, lock)
+#define xadd_check_overflow(ptr, inc)          __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
+
 #define __add(ptr, inc, lock)                                          \
        ({                                                              \
                __typeof__ (*(ptr)) __ret = (inc);                      \
index 59c6c401f79f16d9b98533275d66d437cbaeff0c..5e0b22ca7629431e69065893a49067addf0d7388 100644 (file)
@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
 typedef u32            compat_uint_t;
 typedef u32            compat_ulong_t;
 typedef u64 __attribute__((aligned(4))) compat_u64;
-typedef u32            compat_uptr_t;
+typedef u32            __user compat_uptr_t;
 
 struct compat_timespec {
        compat_time_t   tv_sec;
index aede2c347bde307d9b74aa4ff4887b2b0b05eac6..40d7a8fe75c599cf4a475a38c7f04fa52fef10e3 100644 (file)
 #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
 #define X86_FEATURE_VMMCALL     ( 8*32+15) /* Prefer vmmcall to vmcall */
-
+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
 #define X86_FEATURE_FSGSBASE   ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
 #define X86_FEATURE_BMI1       ( 9*32+ 3) /* 1st group bit manipulation extensions */
 #define X86_FEATURE_HLE                ( 9*32+ 4) /* Hardware Lock Elision */
 #define X86_FEATURE_AVX2       ( 9*32+ 5) /* AVX2 instructions */
-#define X86_FEATURE_SMEP       ( 9*32+ 7) /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_SMEP       ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
 #define X86_FEATURE_BMI2       ( 9*32+ 8) /* 2nd group bit manipulation extensions */
 #define X86_FEATURE_ERMS       ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
 #define X86_FEATURE_INVPCID    ( 9*32+10) /* Invalidate Processor Context ID */
@@ -388,6 +388,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 #define cpu_has_cx16           boot_cpu_has(X86_FEATURE_CX16)
 #define cpu_has_eager_fpu      boot_cpu_has(X86_FEATURE_EAGER_FPU)
 #define cpu_has_topoext                boot_cpu_has(X86_FEATURE_TOPOEXT)
+#define cpu_has_pcid           boot_cpu_has(X86_FEATURE_PCID)
 
 #if __GNUC__ >= 4
 extern void warn_pre_alternatives(void);
@@ -439,7 +440,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 
 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
        t_warn:
-               warn_pre_alternatives();
+               if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
+                       warn_pre_alternatives();
                return false;
 #endif
 
@@ -459,7 +461,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
                             ".section .discard,\"aw\",@progbits\n"
                             " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
                             ".previous\n"
-                            ".section .altinstr_replacement,\"ax\"\n"
+                            ".section .altinstr_replacement,\"a\"\n"
                             "3: movb $1,%0\n"
                             "4:\n"
                             ".previous\n"
@@ -496,7 +498,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
                         " .byte 2b - 1b\n"             /* src len */
                         " .byte 4f - 3f\n"             /* repl len */
                         ".previous\n"
-                        ".section .altinstr_replacement,\"ax\"\n"
+                        ".section .altinstr_replacement,\"a\"\n"
                         "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
                         "4:\n"
                         ".previous\n"
@@ -529,7 +531,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
                             ".section .discard,\"aw\",@progbits\n"
                             " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
                             ".previous\n"
-                            ".section .altinstr_replacement,\"ax\"\n"
+                            ".section .altinstr_replacement,\"a\"\n"
                             "3: movb $0,%0\n"
                             "4:\n"
                             ".previous\n"
@@ -543,7 +545,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
                             ".section .discard,\"aw\",@progbits\n"
                             " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
                             ".previous\n"
-                            ".section .altinstr_replacement,\"ax\"\n"
+                            ".section .altinstr_replacement,\"a\"\n"
                             "5: movb $1,%0\n"
                             "6:\n"
                             ".previous\n"
index a94b82e8f156f3888e0ab90ac879e39dd05ccec1..59ecefa8ce7a940ee5eb2c2ce2fff30b74d36c0d 100644 (file)
@@ -4,6 +4,7 @@
 #include <asm/desc_defs.h>
 #include <asm/ldt.h>
 #include <asm/mmu.h>
+#include <asm/pgtable.h>
 
 #include <linux/smp.h>
 #include <linux/percpu.h>
@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
 
        desc->type              = (info->read_exec_only ^ 1) << 1;
        desc->type             |= info->contents << 2;
+       desc->type             |= info->seg_not_present ^ 1;
 
        desc->s                 = 1;
        desc->dpl               = 0x3;
@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
 }
 
 extern struct desc_ptr idt_descr;
-extern gate_desc idt_table[];
-extern struct desc_ptr debug_idt_descr;
-extern gate_desc debug_idt_table[];
-
-struct gdt_page {
-       struct desc_struct gdt[GDT_ENTRIES];
-} __attribute__((aligned(PAGE_SIZE)));
-
-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
+extern gate_desc idt_table[IDT_ENTRIES];
+extern const struct desc_ptr debug_idt_descr;
+extern gate_desc debug_idt_table[IDT_ENTRIES];
 
+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
 {
-       return per_cpu(gdt_page, cpu).gdt;
+       return cpu_gdt_table[cpu];
 }
 
 #ifdef CONFIG_X86_64
@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
                             unsigned long base, unsigned dpl, unsigned flags,
                             unsigned short seg)
 {
-       gate->a = (seg << 16) | (base & 0xffff);
-       gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
+       gate->gate.offset_low   = base;
+       gate->gate.seg          = seg;
+       gate->gate.reserved     = 0;
+       gate->gate.type         = type;
+       gate->gate.s            = 0;
+       gate->gate.dpl          = dpl;
+       gate->gate.p            = 1;
+       gate->gate.offset_high  = base >> 16;
 }
 
 #endif
@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
 
 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
 {
+       pax_open_kernel();
        memcpy(&idt[entry], gate, sizeof(*gate));
+       pax_close_kernel();
 }
 
 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
 {
+       pax_open_kernel();
        memcpy(&ldt[entry], desc, 8);
+       pax_close_kernel();
 }
 
 static inline void
@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
        default:        size = sizeof(*gdt);            break;
        }
 
+       pax_open_kernel();
        memcpy(&gdt[entry], desc, size);
+       pax_close_kernel();
 }
 
 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
 
 static inline void native_load_tr_desc(void)
 {
+       pax_open_kernel();
        asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+       pax_close_kernel();
 }
 
 static inline void native_load_gdt(const struct desc_ptr *dtr)
@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
        struct desc_struct *gdt = get_cpu_gdt_table(cpu);
        unsigned int i;
 
+       pax_open_kernel();
        for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
                gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
+       pax_close_kernel();
 }
 
 /* This intentionally ignores lm, since 32-bit apps don't have that field. */
@@ -295,7 +308,7 @@ static inline void load_LDT(mm_context_t *pc)
        preempt_enable();
 }
 
-static inline unsigned long get_desc_base(const struct desc_struct *desc)
+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
 {
        return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
 }
@@ -319,7 +332,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
 }
 
 #ifdef CONFIG_X86_64
-static inline void set_nmi_gate(int gate, void *addr)
+static inline void set_nmi_gate(int gate, const void *addr)
 {
        gate_desc s;
 
@@ -329,14 +342,14 @@ static inline void set_nmi_gate(int gate, void *addr)
 #endif
 
 #ifdef CONFIG_TRACING
-extern struct desc_ptr trace_idt_descr;
-extern gate_desc trace_idt_table[];
+extern const struct desc_ptr trace_idt_descr;
+extern gate_desc trace_idt_table[IDT_ENTRIES];
 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
 {
        write_idt_entry(trace_idt_table, entry, gate);
 }
 
-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
                                   unsigned dpl, unsigned ist, unsigned seg)
 {
        gate_desc s;
@@ -356,7 +369,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
 #endif
 
-static inline void _set_gate(int gate, unsigned type, void *addr,
+static inline void _set_gate(int gate, unsigned type, const void *addr,
                             unsigned dpl, unsigned ist, unsigned seg)
 {
        gate_desc s;
@@ -379,9 +392,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
 #define set_intr_gate(n, addr)                                         \
        do {                                                            \
                BUG_ON((unsigned)n > 0xFF);                             \
-               _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0,        \
+               _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0,  \
                          __KERNEL_CS);                                 \
-               _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
+               _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
                                0, 0, __KERNEL_CS);                     \
        } while (0)
 
@@ -409,19 +422,19 @@ static inline void alloc_system_vector(int vector)
 /*
  * This routine sets up an interrupt gate at directory privilege level 3.
  */
-static inline void set_system_intr_gate(unsigned int n, void *addr)
+static inline void set_system_intr_gate(unsigned int n, const void *addr)
 {
        BUG_ON((unsigned)n > 0xFF);
        _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
 }
 
-static inline void set_system_trap_gate(unsigned int n, void *addr)
+static inline void set_system_trap_gate(unsigned int n, const void *addr)
 {
        BUG_ON((unsigned)n > 0xFF);
        _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
 }
 
-static inline void set_trap_gate(unsigned int n, void *addr)
+static inline void set_trap_gate(unsigned int n, const void *addr)
 {
        BUG_ON((unsigned)n > 0xFF);
        _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
@@ -430,16 +443,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
 {
        BUG_ON((unsigned)n > 0xFF);
-       _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
+       _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
 }
 
-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
 {
        BUG_ON((unsigned)n > 0xFF);
        _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
 }
 
-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
 {
        BUG_ON((unsigned)n > 0xFF);
        _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
@@ -511,4 +524,17 @@ static inline void load_current_idt(void)
        else
                load_idt((const struct desc_ptr *)&idt_descr);
 }
+
+#ifdef CONFIG_X86_32
+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
+{
+       struct desc_struct d;
+
+       if (likely(limit))
+               limit = (limit - 1UL) >> PAGE_SHIFT;
+       pack_descriptor(&d, base, limit, 0xFB, 0xC);
+       write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
+}
+#endif
+
 #endif /* _ASM_X86_DESC_H */
index 278441f39856edf31edca4f5391479c356b19a61..b95a174dfc2d69d416c7b9e57f795467bd19a8a4 100644 (file)
@@ -31,6 +31,12 @@ struct desc_struct {
                        unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
                        unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
                };
+               struct {
+                       u16 offset_low;
+                       u16 seg;
+                       unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
+                       unsigned offset_high: 16;
+               } gate;
        };
 } __attribute__((packed));
 
index ced283ac79dfff3ca6c580da509136845ed1ce5b..ffe04ccf6bd1b63f84528bcfb563ca14959f977d 100644 (file)
@@ -39,7 +39,7 @@
        __mod;                                                  \
 })
 
-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 {
        union {
                u64 v64;
index ca3347a9dab5211399e9e93a5e53d71ca1943095..1a5082a53e29909de86467e555c21dca2193f44b 100644 (file)
@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
 
 #include <asm/vdso.h>
 
-#ifdef CONFIG_X86_64
-extern unsigned int vdso64_enabled;
-#endif
 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
 extern unsigned int vdso32_enabled;
 #endif
@@ -249,7 +246,25 @@ extern int force_personality32;
    the loader.  We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk.  */
 
+#ifdef CONFIG_PAX_SEGMEXEC
+#define ELF_ET_DYN_BASE                ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
+#else
 #define ELF_ET_DYN_BASE                (TASK_SIZE / 3 * 2)
+#endif
+
+#ifdef CONFIG_PAX_ASLR
+#ifdef CONFIG_X86_32
+#define PAX_ELF_ET_DYN_BASE    0x10000000UL
+
+#define PAX_DELTA_MMAP_LEN     (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
+#define PAX_DELTA_STACK_LEN    (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
+#else
+#define PAX_ELF_ET_DYN_BASE    0x400000UL
+
+#define PAX_DELTA_MMAP_LEN     ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
+#define PAX_DELTA_STACK_LEN    ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
+#endif
+#endif
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports.  This could be done in user space,
@@ -298,17 +313,13 @@ do {                                                                      \
 
 #define ARCH_DLINFO                                                    \
 do {                                                                   \
-       if (vdso64_enabled)                                             \
-               NEW_AUX_ENT(AT_SYSINFO_EHDR,                            \
-                           (unsigned long __force)current->mm->context.vdso); \
+       NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);        \
 } while (0)
 
 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
 #define ARCH_DLINFO_X32                                                        \
 do {                                                                   \
-       if (vdso64_enabled)                                             \
-               NEW_AUX_ENT(AT_SYSINFO_EHDR,                            \
-                           (unsigned long __force)current->mm->context.vdso); \
+       NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);        \
 } while (0)
 
 #define AT_SYSINFO             32
@@ -323,10 +334,10 @@ else                                                                      \
 
 #endif /* !CONFIG_X86_32 */
 
-#define VDSO_CURRENT_BASE      ((unsigned long)current->mm->context.vdso)
+#define VDSO_CURRENT_BASE      (current->mm->context.vdso)
 
 #define VDSO_ENTRY                                                     \
-       ((unsigned long)current->mm->context.vdso +                     \
+       (current->mm->context.vdso +                                    \
         selected_vdso32->sym___kernel_vsyscall)
 
 struct linux_binprm;
@@ -338,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
                                              int uses_interp);
 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
 
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 /*
  * True on X86_32 or when emulating IA32 on X86_64
  */
index 77a99ac06d0070b93e90199fa59615af160e4c73..39ff7f5fabc1de93b226350e8e9ec34df3eab0cd 100644 (file)
@@ -1,6 +1,6 @@
 #ifndef _ASM_X86_EMERGENCY_RESTART_H
 #define _ASM_X86_EMERGENCY_RESTART_H
 
-extern void machine_emergency_restart(void);
+extern void machine_emergency_restart(void) __noreturn;
 
 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
index 1c7eefe3250295762c258a66af22a05d53882334..d0e4702883b9f213017b14713c7e86b853094c6f 100644 (file)
@@ -229,18 +229,18 @@ static struct fd_routine_l {
        int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
 } fd_routine[] = {
        {
-               request_dma,
-               free_dma,
-               get_dma_residue,
-               dma_mem_alloc,
-               hard_dma_setup
+               ._request_dma = request_dma,
+               ._free_dma = free_dma,
+               ._get_dma_residue = get_dma_residue,
+               ._dma_mem_alloc = dma_mem_alloc,
+               ._dma_setup = hard_dma_setup
        },
        {
-               vdma_request_dma,
-               vdma_nop,
-               vdma_get_dma_residue,
-               vdma_mem_alloc,
-               vdma_dma_setup
+               ._request_dma = vdma_request_dma,
+               ._free_dma = vdma_nop,
+               ._get_dma_residue = vdma_get_dma_residue,
+               ._dma_mem_alloc = vdma_mem_alloc,
+               ._dma_setup = vdma_dma_setup
        }
 };
 
index e97622f577229e431cd2f49c7b3aa30bb9f31ce8..d0ba77a5be89fbb2e320609c1093dbd10b6d03d1 100644 (file)
@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
 #define user_insn(insn, output, input...)                              \
 ({                                                                     \
        int err;                                                        \
+       pax_open_userland();                                            \
        asm volatile(ASM_STAC "\n"                                      \
-                    "1:" #insn "\n\t"                                  \
+                    "1:"                                               \
+                    __copyuser_seg                                     \
+                    #insn "\n\t"                                       \
                     "2: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:  movl $-1,%[err]\n"                            \
@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
                     _ASM_EXTABLE(1b, 3b)                               \
                     : [err] "=r" (err), output                         \
                     : "0"(0), input);                                  \
+       pax_close_userland();                                           \
        err;                                                            \
 })
 
@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
                        "fnclex\n\t"
                        "emms\n\t"
                        "fildl %P[addr]"        /* set F?P to defined value */
-                       : : [addr] "m" (tsk->thread.fpu.has_fpu));
+                       : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
        }
 
        return fpu_restore_checking(&tsk->thread.fpu);
index b4c1f545343663057460376157de5d9d55a6f528..e290c0897c419980e5c759218d634bf5f3096a97 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/smap.h>
 
 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg)    \
+       typecheck(u32 __user *, uaddr);                         \
        asm volatile("\t" ASM_STAC "\n"                         \
                     "1:\t" insn "\n"                           \
                     "2:\t" ASM_CLAC "\n"                       \
                     "\tjmp\t2b\n"                              \
                     "\t.previous\n"                            \
                     _ASM_EXTABLE(1b, 3b)                       \
-                    : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
+                    : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))    \
                     : "i" (-EFAULT), "0" (oparg), "1" (0))
 
 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg)    \
+       typecheck(u32 __user *, uaddr);                         \
        asm volatile("\t" ASM_STAC "\n"                         \
                     "1:\tmovl  %2, %0\n"                       \
                     "\tmovl\t%0, %3\n"                         \
                     "\t" insn "\n"                             \
-                    "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"     \
+                    "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n"       \
                     "\tjnz\t1b\n"                              \
                     "3:\t" ASM_CLAC "\n"                       \
                     "\t.section .fixup,\"ax\"\n"               \
@@ -38,7 +40,7 @@
                     _ASM_EXTABLE(1b, 4b)                       \
                     _ASM_EXTABLE(2b, 4b)                       \
                     : "=&a" (oldval), "=&r" (ret),             \
-                      "+m" (*uaddr), "=&r" (tem)               \
+                      "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem)  \
                     : "r" (oparg), "i" (-EFAULT), "1" (0))
 
 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 
        pagefault_disable();
 
+       pax_open_userland();
        switch (op) {
        case FUTEX_OP_SET:
-               __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
+               __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
                break;
        case FUTEX_OP_ADD:
-               __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
+               __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
                                   uaddr, oparg);
                break;
        case FUTEX_OP_OR:
@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
        default:
                ret = -ENOSYS;
        }
+       pax_close_userland();
 
        pagefault_enable();
 
index 9662290e0b2075ab42608af776abbe4a4219b6fd..49ca5e5484db9d97f4b8d68b012f727f1f55c5d3 100644 (file)
@@ -160,8 +160,8 @@ static inline void unlock_vector_lock(void) {}
 #endif /* CONFIG_X86_LOCAL_APIC */
 
 /* Statistics */
-extern atomic_t irq_err_count;
-extern atomic_t irq_mis_count;
+extern atomic_unchecked_t irq_err_count;
+extern atomic_unchecked_t irq_mis_count;
 
 /* EISA */
 extern void eisa_set_level_irq(unsigned int irq);
index ccffa53750a89283feae5c1ea8b10f7e99cf57ea..3c90c87e3c9a8c991fc4a885c968dd6d5629bab1 100644 (file)
@@ -62,7 +62,7 @@ struct legacy_pic {
        void (*init)(int auto_eoi);
        int (*irq_pending)(unsigned int irq);
        void (*make_irq)(unsigned int irq);
-};
+} __do_const;
 
 extern struct legacy_pic *legacy_pic;
 extern struct legacy_pic null_legacy_pic;
index 34a5b93704d3ecb1d98190f6a12437a9acab7204..27e40a6bb0d84a372b08e948f7e7cbabe5ae13d1 100644 (file)
@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
 "m" (*(volatile type __force *)addr) barrier); }
 
 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
 
 build_mmio_read(__readb, "b", unsigned char, "=q", )
-build_mmio_read(__readw, "w", unsigned short, "=r", )
-build_mmio_read(__readl, "l", unsigned int, "=r", )
+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
 
 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
@@ -113,7 +113,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
  *     this function
  */
 
-static inline phys_addr_t virt_to_phys(volatile void *address)
+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
 {
        return __pa(address);
 }
@@ -189,7 +189,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
        return ioremap_nocache(offset, size);
 }
 
-extern void iounmap(volatile void __iomem *addr);
+extern void iounmap(const volatile void __iomem *addr);
 
 extern void set_iounmap_nonlazy(void);
 
@@ -199,6 +199,17 @@ extern void set_iounmap_nonlazy(void);
 
 #include <linux/vmalloc.h>
 
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
+{
+       return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
+}
+
+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
+{
+       return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
+}
+
 /*
  * Convert a virtual cached pointer to an uncached pointer
  */
index 0a8b519226b8feb37368ffbc4ca81011bc031fde..80e7d5bcaf0c8161707ac099409c8a59a602183a 100644 (file)
@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
        sti;                                    \
        sysexit
 
+#define GET_CR0_INTO_RDI               mov %cr0, %rdi
+#define SET_RDI_INTO_CR0               mov %rdi, %cr0
+#define GET_CR3_INTO_RDI               mov %cr3, %rdi
+#define SET_RDI_INTO_CR3               mov %rdi, %cr3
+
 #else
 #define INTERRUPT_RETURN               iret
 #define ENABLE_INTERRUPTS_SYSEXIT      sti; sysexit
index 4421b5da409d6424d91eac6fc91231d8e92d3658..8543006eeb222f042457799b73c499a70516c5e5 100644 (file)
@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
 #define RELATIVEJUMP_SIZE 5
 #define RELATIVECALL_OPCODE 0xe8
 #define RELATIVE_ADDR_SIZE 4
-#define MAX_STACK_SIZE 64
-#define MIN_STACK_SIZE(ADDR)                                          \
-       (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
-                             THREAD_SIZE - (unsigned long)(ADDR)))    \
-        ? (MAX_STACK_SIZE)                                            \
-        : (((unsigned long)current_thread_info()) +                   \
-           THREAD_SIZE - (unsigned long)(ADDR)))
+#define MAX_STACK_SIZE 64UL
+#define MIN_STACK_SIZE(ADDR)   min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
 
 #define flush_insn_slot(p)     do { } while (0)
 
index d89c6b828c96492a414fe9a3d92ca7ec3791b53c..e711c69329c619ea4e4e926995fb6d9191f60aee 100644 (file)
@@ -51,7 +51,7 @@
                          | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
 
 #define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
-#define CR3_PCID_INVD           (1UL << 63)
+#define CR3_PCID_INVD           (1ULL << 63)
 #define CR4_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
                          | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
index 4ad6560847b1f5f7c20644b52e27505339880e79..75c7bddcbd6ab757050e8b35af5f1d772ebae222 100644 (file)
@@ -10,33 +10,97 @@ typedef struct {
        atomic_long_t a;
 } local_t;
 
+typedef struct {
+       atomic_long_unchecked_t a;
+} local_unchecked_t;
+
 #define LOCAL_INIT(i)  { ATOMIC_LONG_INIT(i) }
 
 #define local_read(l)  atomic_long_read(&(l)->a)
+#define local_read_unchecked(l)        atomic_long_read_unchecked(&(l)->a)
 #define local_set(l, i)        atomic_long_set(&(l)->a, (i))
+#define local_set_unchecked(l, i)      atomic_long_set_unchecked(&(l)->a, (i))
 
 static inline void local_inc(local_t *l)
 {
-       asm volatile(_ASM_INC "%0"
+       asm volatile(_ASM_INC "%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                    "jno 0f\n"
+                    _ASM_DEC "%0\n"
+                    "int $4\n0:\n"
+                    _ASM_EXTABLE(0b, 0b)
+#endif
+
+                    : "+m" (l->a.counter));
+}
+
+static inline void local_inc_unchecked(local_unchecked_t *l)
+{
+       asm volatile(_ASM_INC "%0\n"
                     : "+m" (l->a.counter));
 }
 
 static inline void local_dec(local_t *l)
 {
-       asm volatile(_ASM_DEC "%0"
+       asm volatile(_ASM_DEC "%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                    "jno 0f\n"
+                    _ASM_INC "%0\n"
+                    "int $4\n0:\n"
+                    _ASM_EXTABLE(0b, 0b)
+#endif
+
+                    : "+m" (l->a.counter));
+}
+
+static inline void local_dec_unchecked(local_unchecked_t *l)
+{
+       asm volatile(_ASM_DEC "%0\n"
                     : "+m" (l->a.counter));
 }
 
 static inline void local_add(long i, local_t *l)
 {
-       asm volatile(_ASM_ADD "%1,%0"
+       asm volatile(_ASM_ADD "%1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                    "jno 0f\n"
+                    _ASM_SUB "%1,%0\n"
+                    "int $4\n0:\n"
+                    _ASM_EXTABLE(0b, 0b)
+#endif
+
+                    : "+m" (l->a.counter)
+                    : "ir" (i));
+}
+
+static inline void local_add_unchecked(long i, local_unchecked_t *l)
+{
+       asm volatile(_ASM_ADD "%1,%0\n"
                     : "+m" (l->a.counter)
                     : "ir" (i));
 }
 
 static inline void local_sub(long i, local_t *l)
 {
-       asm volatile(_ASM_SUB "%1,%0"
+       asm volatile(_ASM_SUB "%1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                    "jno 0f\n"
+                    _ASM_ADD "%1,%0\n"
+                    "int $4\n0:\n"
+                    _ASM_EXTABLE(0b, 0b)
+#endif
+
+                    : "+m" (l->a.counter)
+                    : "ir" (i));
+}
+
+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
+{
+       asm volatile(_ASM_SUB "%1,%0\n"
                     : "+m" (l->a.counter)
                     : "ir" (i));
 }
@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
  */
 static inline int local_sub_and_test(long i, local_t *l)
 {
-       GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
+       GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
 }
 
 /**
@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
  */
 static inline int local_dec_and_test(local_t *l)
 {
-       GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
+       GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
 }
 
 /**
@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
  */
 static inline int local_inc_and_test(local_t *l)
 {
-       GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
+       GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
 }
 
 /**
@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
  */
 static inline int local_add_negative(long i, local_t *l)
 {
-       GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
+       GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
 }
 
 /**
@@ -103,6 +167,30 @@ static inline int local_add_negative(long i, local_t *l)
  * Atomically adds @i to @l and returns @i + @l
  */
 static inline long local_add_return(long i, local_t *l)
+{
+       long __i = i;
+       asm volatile(_ASM_XADD "%0, %1\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                    "jno 0f\n"
+                    _ASM_MOV "%0,%1\n"
+                    "int $4\n0:\n"
+                    _ASM_EXTABLE(0b, 0b)
+#endif
+
+                    : "+r" (i), "+m" (l->a.counter)
+                    : : "memory");
+       return i + __i;
+}
+
+/**
+ * local_add_return_unchecked - add and return
+ * @i: integer value to add
+ * @l: pointer to type local_unchecked_t
+ *
+ * Atomically adds @i to @l and returns @i + @l
+ */
+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
 {
        long __i = i;
        asm volatile(_ASM_XADD "%0, %1;"
@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
 
 #define local_cmpxchg(l, o, n) \
        (cmpxchg_local(&((l)->a.counter), (o), (n)))
+#define local_cmpxchg_unchecked(l, o, n) \
+       (cmpxchg_local(&((l)->a.counter), (o), (n)))
 /* Always has a lock prefix */
 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
 
index 876e74e8eec76696b5523210292e71e1c861d5f6..e20bfb19e1a66dd304b3b77cc2be411b121e0548 100644 (file)
@@ -9,7 +9,7 @@
  * we put the segment information here.
  */
 typedef struct {
-       void *ldt;
+       struct desc_struct *ldt;
        int size;
 
 #ifdef CONFIG_X86_64
@@ -18,7 +18,19 @@ typedef struct {
 #endif
 
        struct mutex lock;
-       void __user *vdso;
+       unsigned long vdso;
+
+#ifdef CONFIG_X86_32
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+       unsigned long user_cs_base;
+       unsigned long user_cs_limit;
+
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
+       cpumask_t cpu_user_cs_mask;
+#endif
+
+#endif
+#endif
 } mm_context_t;
 
 #ifdef CONFIG_SMP
index 4b75d591eb5ed1e4757ef8b658d08a2ad02a84cd..8ffacb68dea6e048cc1558bd4f583b8a201f8d8b 100644 (file)
@@ -27,6 +27,20 @@ void destroy_context(struct mm_struct *mm);
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+       if (!(static_cpu_has(X86_FEATURE_PCID))) {
+               unsigned int i;
+               pgd_t *pgd;
+
+               pax_open_kernel();
+               pgd = get_cpu_pgd(smp_processor_id(), kernel);
+               for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
+                       set_pgd_batched(pgd+i, native_make_pgd(0));
+               pax_close_kernel();
+       }
+#endif
+
 #ifdef CONFIG_SMP
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
@@ -37,16 +51,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
 {
        unsigned cpu = smp_processor_id();
+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
+       int tlbstate = TLBSTATE_OK;
+#endif
 
        if (likely(prev != next)) {
 #ifdef CONFIG_SMP
+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
+               tlbstate = this_cpu_read(cpu_tlbstate.state);
+#endif
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                this_cpu_write(cpu_tlbstate.active_mm, next);
 #endif
                cpumask_set_cpu(cpu, mm_cpumask(next));
 
                /* Re-load page tables */
+#ifdef CONFIG_PAX_PER_CPU_PGD
+               pax_open_kernel();
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+               if (static_cpu_has(X86_FEATURE_PCID))
+                       __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
+               else
+#endif
+
+               __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
+               __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
+               pax_close_kernel();
+               BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+               if (static_cpu_has(X86_FEATURE_PCID)) {
+                       if (static_cpu_has(X86_FEATURE_INVPCID)) {
+                               u64 descriptor[2];
+                               descriptor[0] = PCID_USER;
+                               asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
+                               if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
+                                       descriptor[0] = PCID_KERNEL;
+                                       asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
+                               }
+                       } else {
+                               write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
+                               if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
+                                       write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
+                               else
+                                       write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
+                       }
+               } else
+#endif
+
+                       load_cr3(get_cpu_pgd(cpu, kernel));
+#else
                load_cr3(next->pgd);
+#endif
                trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 
                /* Stop flush ipis for the previous mm */
@@ -64,9 +121,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                 */
                if (unlikely(prev->context.ldt != next->context.ldt))
                        load_LDT_nolock(&next->context);
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
+               if (!(__supported_pte_mask & _PAGE_NX)) {
+                       smp_mb__before_atomic();
+                       cpu_clear(cpu, prev->context.cpu_user_cs_mask);
+                       smp_mb__after_atomic();
+                       cpu_set(cpu, next->context.cpu_user_cs_mask);
+               }
+#endif
+
+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
+               if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
+                            prev->context.user_cs_limit != next->context.user_cs_limit))
+                       set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
+#ifdef CONFIG_SMP
+               else if (unlikely(tlbstate != TLBSTATE_OK))
+                       set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
+#endif
+#endif
+
        }
+       else {
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+               pax_open_kernel();
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+               if (static_cpu_has(X86_FEATURE_PCID))
+                       __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
+               else
+#endif
+
+               __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
+               __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
+               pax_close_kernel();
+               BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+               if (static_cpu_has(X86_FEATURE_PCID)) {
+                       if (static_cpu_has(X86_FEATURE_INVPCID)) {
+                               u64 descriptor[2];
+                               descriptor[0] = PCID_USER;
+                               asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
+                               if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
+                                       descriptor[0] = PCID_KERNEL;
+                                       asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
+                               }
+                       } else {
+                               write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
+                               if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
+                                       write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
+                               else
+                                       write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
+                       }
+               } else
+#endif
+
+                       load_cr3(get_cpu_pgd(cpu, kernel));
+#endif
+
 #ifdef CONFIG_SMP
-         else {
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
 
@@ -83,12 +198,29 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                         * tlb flush IPI delivery. We must reload CR3
                         * to make sure to use no freed page tables.
                         */
+
+#ifndef CONFIG_PAX_PER_CPU_PGD
                        load_cr3(next->pgd);
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+#endif
+
                        load_LDT_nolock(&next->context);
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
+                       if (!(__supported_pte_mask & _PAGE_NX))
+                               cpu_set(cpu, next->context.cpu_user_cs_mask);
+#endif
+
+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
+#ifdef CONFIG_PAX_PAGEEXEC
+                       if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
+#endif
+                               set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
+#endif
+
                }
-       }
 #endif
+       }
 }
 
 #define activate_mm(prev, next)                        \
index e3b7819caeef1c813c309c04519af9ef93183a9a..b257c643d17ea946855b358889751a65a7159d6e 100644 (file)
@@ -5,6 +5,7 @@
 
 #ifdef CONFIG_X86_64
 /* X86_64 does not define MODULE_PROC_FAMILY */
+#define MODULE_PROC_FAMILY ""
 #elif defined CONFIG_M486
 #define MODULE_PROC_FAMILY "486 "
 #elif defined CONFIG_M586
 #error unknown processor family
 #endif
 
-#ifdef CONFIG_X86_32
-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
+#else
+#define MODULE_PAX_KERNEXEC ""
 #endif
 
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+#define MODULE_PAX_UDEREF "UDEREF "
+#else
+#define MODULE_PAX_UDEREF ""
+#endif
+
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
+
 #endif /* _ASM_X86_MODULE_H */
index 5f2fc4441b11016608f476848e3a60bd1e08d7a5..106caa61eb4c7ede38b1278c161646f487819d7f 100644 (file)
@@ -36,26 +36,35 @@ enum {
 
 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
 
+struct nmiaction;
+
+struct nmiwork {
+       const struct nmiaction  *action;
+       u64                     max_duration;
+       struct irq_work         irq_work;
+};
+
 struct nmiaction {
        struct list_head        list;
        nmi_handler_t           handler;
-       u64                     max_duration;
-       struct irq_work         irq_work;
        unsigned long           flags;
        const char              *name;
-};
+       struct nmiwork          *work;
+} __do_const;
 
 #define register_nmi_handler(t, fn, fg, n, init...)    \
 ({                                                     \
-       static struct nmiaction init fn##_na = {        \
+       static struct nmiwork fn##_nw;                  \
+       static const struct nmiaction init fn##_na = {  \
                .handler = (fn),                        \
                .name = (n),                            \
                .flags = (fg),                          \
+               .work = &fn##_nw,                       \
        };                                              \
        __register_nmi_handler((t), &fn##_na);          \
 })
 
-int __register_nmi_handler(unsigned int, struct nmiaction *);
+int __register_nmi_handler(unsigned int, const struct nmiaction *);
 
 void unregister_nmi_handler(unsigned int, const char *);
 
index 802dde30c92877588be2d068a692cf9ed4418b7d..9183e68dcffa430a11134d70aa04233b8f6c49a8 100644 (file)
@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
        __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
 
 #define __va(x)                        ((void *)((unsigned long)(x)+PAGE_OFFSET))
+#define __early_va(x)          ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
 
 #define __boot_va(x)           __va(x)
 #define __boot_pa(x)           __pa(x)
@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
  * virt_to_page(kaddr) returns a valid pointer if and only if
  * virt_addr_valid(kaddr) returns true.
  */
-#define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
 extern bool __virt_addr_valid(unsigned long kaddr);
 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
 
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
+#define virt_to_page(kaddr)    \
+       ({ \
+               const void *__kaddr = (const void *)(kaddr); \
+               BUG_ON(!virt_addr_valid(__kaddr)); \
+               pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
+       })
+#else
+#define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #include <asm-generic/memory_model.h>
index b3bebf9e57466985cfcf6a54dd5d4410dd5417c8..e1f5d956f051eb6ec3dc44389dcf98136eedfb3d 100644 (file)
@@ -7,9 +7,9 @@
 
 /* duplicated to the one in bootmem.h */
 extern unsigned long max_pfn;
-extern unsigned long phys_base;
+extern const unsigned long phys_base;
 
-static inline unsigned long __phys_addr_nodebug(unsigned long x)
+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
 {
        unsigned long y = x - __START_KERNEL_map;
 
index 32444ae939ca90494e4cf913eb9ea8be1113f6a4..1a1624bba39851c664974f65b994db535009cd48 100644 (file)
@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
        return (pmd_t) { ret };
 }
 
-static inline pmdval_t pmd_val(pmd_t pmd)
+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
 {
        pmdval_t ret;
 
@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
                            val);
 }
 
+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
+{
+       pgdval_t val = native_pgd_val(pgd);
+
+       if (sizeof(pgdval_t) > sizeof(long))
+               PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
+                           val, (u64)val >> 32);
+       else
+               PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
+                           val);
+}
+
 static inline void pgd_clear(pgd_t *pgdp)
 {
        set_pgd(pgdp, __pgd(0));
@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
        pv_mmu_ops.set_fixmap(idx, phys, flags);
 }
 
+#ifdef CONFIG_PAX_KERNEXEC
+static inline unsigned long pax_open_kernel(void)
+{
+       return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
+}
+
+static inline unsigned long pax_close_kernel(void)
+{
+       return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
+}
+#else
+static inline unsigned long pax_open_kernel(void) { return 0; }
+static inline unsigned long pax_close_kernel(void) { return 0; }
+#endif
+
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
@@ -906,7 +933,7 @@ extern void default_banner(void);
 
 #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
-#define PARA_INDIRECT(addr)    *%cs:addr
+#define PARA_INDIRECT(addr)    *%ss:addr
 #endif
 
 #define INTERRUPT_RETURN                                               \
@@ -981,6 +1008,21 @@ extern void default_banner(void);
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
+
+#define GET_CR0_INTO_RDI                               \
+       call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
+       mov %rax,%rdi
+
+#define SET_RDI_INTO_CR0                               \
+       call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
+
+#define GET_CR3_INTO_RDI                               \
+       call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
+       mov %rax,%rdi
+
+#define SET_RDI_INTO_CR3                               \
+       call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
+
 #endif /* CONFIG_X86_32 */
 
 #endif /* __ASSEMBLY__ */
index 7549b8b369e47cf540d5dbb747b870ab42eb5b6a..f0edfda6f1ccc31c6ef8ebceae6fa3288c9e07db 100644 (file)
@@ -84,7 +84,7 @@ struct pv_init_ops {
         */
        unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
                          unsigned long addr, unsigned len);
-};
+} __no_const __no_randomize_layout;
 
 
 struct pv_lazy_ops {
@@ -92,13 +92,13 @@ struct pv_lazy_ops {
        void (*enter)(void);
        void (*leave)(void);
        void (*flush)(void);
-};
+} __no_randomize_layout;
 
 struct pv_time_ops {
        unsigned long long (*sched_clock)(void);
        unsigned long long (*steal_clock)(int cpu);
        unsigned long (*get_tsc_khz)(void);
-};
+} __no_const __no_randomize_layout;
 
 struct pv_cpu_ops {
        /* hooks for various privileged instructions */
@@ -192,7 +192,7 @@ struct pv_cpu_ops {
 
        void (*start_context_switch)(struct task_struct *prev);
        void (*end_context_switch)(struct task_struct *next);
-};
+} __no_const __no_randomize_layout;
 
 struct pv_irq_ops {
        /*
@@ -215,7 +215,7 @@ struct pv_irq_ops {
 #ifdef CONFIG_X86_64
        void (*adjust_exception_frame)(void);
 #endif
-};
+} __no_randomize_layout;
 
 struct pv_apic_ops {
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -223,7 +223,7 @@ struct pv_apic_ops {
                                 unsigned long start_eip,
                                 unsigned long start_esp);
 #endif
-};
+} __no_const __no_randomize_layout;
 
 struct pv_mmu_ops {
        unsigned long (*read_cr2)(void);
@@ -313,6 +313,7 @@ struct pv_mmu_ops {
        struct paravirt_callee_save make_pud;
 
        void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
+       void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
 #endif /* PAGETABLE_LEVELS == 4 */
 #endif /* PAGETABLE_LEVELS >= 3 */
 
@@ -324,7 +325,13 @@ struct pv_mmu_ops {
           an mfn.  We can tell which is which from the index. */
        void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
                           phys_addr_t phys, pgprot_t flags);
-};
+
+#ifdef CONFIG_PAX_KERNEXEC
+       unsigned long (*pax_open_kernel)(void);
+       unsigned long (*pax_close_kernel)(void);
+#endif
+
+} __no_randomize_layout;
 
 struct arch_spinlock;
 #ifdef CONFIG_SMP
@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
 struct pv_lock_ops {
        struct paravirt_callee_save lock_spinning;
        void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
-};
+} __no_randomize_layout;
 
 /* This contains all the paravirt structures: we get a convenient
  * number for each function using the offset which we use to indicate
- * what to patch. */
+ * what to patch.
+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
+ */
+
 struct paravirt_patch_template {
        struct pv_init_ops pv_init_ops;
        struct pv_time_ops pv_time_ops;
@@ -349,7 +359,7 @@ struct paravirt_patch_template {
        struct pv_apic_ops pv_apic_ops;
        struct pv_mmu_ops pv_mmu_ops;
        struct pv_lock_ops pv_lock_ops;
-};
+} __no_randomize_layout;
 
 extern struct pv_info pv_info;
 extern struct pv_init_ops pv_init_ops;
index c4412e972bbd4a876d969ff7b3991bd54d1e942c..90e88c59580222b3b1eb65e3f5f168b354aecd90 100644 (file)
@@ -61,6 +61,13 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
 
 static inline void pmd_populate_kernel(struct mm_struct *mm,
                                       pmd_t *pmd, pte_t *pte)
+{
+       paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
+       set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
+}
+
+static inline void pmd_populate_user(struct mm_struct *mm,
+                                      pmd_t *pmd, pte_t *pte)
 {
        paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
        set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 
 #ifdef CONFIG_X86_PAE
 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
+{
+       pud_populate(mm, pudp, pmd);
+}
 #else  /* !CONFIG_X86_PAE */
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
        paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
        set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
 }
+
+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+       paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
+       set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
+}
 #endif /* CONFIG_X86_PAE */
 
 #if PAGETABLE_LEVELS > 3
@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
        set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
 }
 
+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+       paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
+       set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
+}
+
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
        return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
index 206a87fdd22dacf37ea5549c802aac75fcfca72c..1623b063f2bb97202374bd96ff886c16ca8c7842 100644 (file)
@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
 
 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
+       pax_open_kernel();
        *pmdp = pmd;
+       pax_close_kernel();
 }
 
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
index 81bb91b49a88fa22b2c07fae620e76d674692419..9392125659e4953e5f5d1eb11583ed34783a695a 100644 (file)
@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 
 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
+       pax_open_kernel();
        set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
+       pax_close_kernel();
 }
 
 static inline void native_set_pud(pud_t *pudp, pud_t pud)
 {
+       pax_open_kernel();
        set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
+       pax_close_kernel();
 }
 
 /*
index e8a5454acc9922437a5335503e08ac9246ccaf38..15393593f0221d32a8e53c5be51d8aead4fdb793 100644 (file)
@@ -47,6 +47,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
 
 #ifndef __PAGETABLE_PUD_FOLDED
 #define set_pgd(pgdp, pgd)             native_set_pgd(pgdp, pgd)
+#define set_pgd_batched(pgdp, pgd)     native_set_pgd_batched(pgdp, pgd)
 #define pgd_clear(pgd)                 native_pgd_clear(pgd)
 #endif
 
@@ -84,12 +85,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
 
 #define arch_end_context_switch(prev)  do {} while(0)
 
+#define pax_open_kernel()      native_pax_open_kernel()
+#define pax_close_kernel()     native_pax_close_kernel()
 #endif /* CONFIG_PARAVIRT */
 
+#define  __HAVE_ARCH_PAX_OPEN_KERNEL
+#define  __HAVE_ARCH_PAX_CLOSE_KERNEL
+
+#ifdef CONFIG_PAX_KERNEXEC
+static inline unsigned long native_pax_open_kernel(void)
+{
+       unsigned long cr0;
+
+       preempt_disable();
+       barrier();
+       cr0 = read_cr0() ^ X86_CR0_WP;
+       BUG_ON(cr0 & X86_CR0_WP);
+       write_cr0(cr0);
+       barrier();
+       return cr0 ^ X86_CR0_WP;
+}
+
+static inline unsigned long native_pax_close_kernel(void)
+{
+       unsigned long cr0;
+
+       barrier();
+       cr0 = read_cr0() ^ X86_CR0_WP;
+       BUG_ON(!(cr0 & X86_CR0_WP));
+       write_cr0(cr0);
+       barrier();
+       preempt_enable_no_resched();
+       return cr0 ^ X86_CR0_WP;
+}
+#else
+static inline unsigned long native_pax_open_kernel(void) { return 0; }
+static inline unsigned long native_pax_close_kernel(void) { return 0; }
+#endif
+
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
  */
+static inline int pte_user(pte_t pte)
+{
+       return pte_val(pte) & _PAGE_USER;
+}
+
 static inline int pte_dirty(pte_t pte)
 {
        return pte_flags(pte) & _PAGE_DIRTY;
@@ -161,6 +203,11 @@ static inline unsigned long pud_pfn(pud_t pud)
        return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
 }
 
+static inline unsigned long pgd_pfn(pgd_t pgd)
+{
+       return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
 #define pte_page(pte)  pfn_to_page(pte_pfn(pte))
 
 static inline int pmd_large(pmd_t pte)
@@ -214,9 +261,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
        return pte_clear_flags(pte, _PAGE_RW);
 }
 
+static inline pte_t pte_mkread(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_USER);
+}
+
 static inline pte_t pte_mkexec(pte_t pte)
 {
-       return pte_clear_flags(pte, _PAGE_NX);
+#ifdef CONFIG_X86_PAE
+       if (__supported_pte_mask & _PAGE_NX)
+               return pte_clear_flags(pte, _PAGE_NX);
+       else
+#endif
+               return pte_set_flags(pte, _PAGE_USER);
+}
+
+static inline pte_t pte_exprotect(pte_t pte)
+{
+#ifdef CONFIG_X86_PAE
+       if (__supported_pte_mask & _PAGE_NX)
+               return pte_set_flags(pte, _PAGE_NX);
+       else
+#endif
+               return pte_clear_flags(pte, _PAGE_USER);
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
@@ -446,6 +513,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
 #endif
 
 #ifndef __ASSEMBLY__
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
+enum cpu_pgd_type {kernel = 0, user = 1};
+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
+{
+       return cpu_pgd[cpu][type];
+}
+#endif
+
 #include <linux/mm_types.h>
 #include <linux/mmdebug.h>
 #include <linux/log2.h>
@@ -592,7 +669,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
  * Currently stuck as a macro due to indirect forward reference to
  * linux/mmzone.h's __section_mem_map_addr() definition:
  */
-#define pud_page(pud)          pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
+#define pud_page(pud)          pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
 
 /* Find an entry in the second-level page table.. */
 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
@@ -632,7 +709,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
  * Currently stuck as a macro due to indirect forward reference to
  * linux/mmzone.h's __section_mem_map_addr() definition:
  */
-#define pgd_page(pgd)          pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
+#define pgd_page(pgd)          pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
 
 /* to find an entry in a page-table-directory. */
 static inline unsigned long pud_index(unsigned long address)
@@ -647,7 +724,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
 
 static inline int pgd_bad(pgd_t pgd)
 {
-       return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
+       return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
 }
 
 static inline int pgd_none(pgd_t pgd)
@@ -670,7 +747,12 @@ static inline int pgd_none(pgd_t pgd)
  * pgd_offset() returns a (pgd_t *)
  * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
  */
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
+#endif
+
 /*
  * a shortcut which implies the use of the kernel's pgd, instead
  * of a process's
@@ -681,6 +763,23 @@ static inline int pgd_none(pgd_t pgd)
 #define KERNEL_PGD_BOUNDARY    pgd_index(PAGE_OFFSET)
 #define KERNEL_PGD_PTRS                (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
 
+#ifdef CONFIG_X86_32
+#define USER_PGD_PTRS          KERNEL_PGD_BOUNDARY
+#else
+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
+#define USER_PGD_PTRS          (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+#ifdef __ASSEMBLY__
+#define pax_user_shadow_base   pax_user_shadow_base(%rip)
+#else
+extern unsigned long pax_user_shadow_base;
+extern pgdval_t clone_pgd_mask;
+#endif
+#endif
+
+#endif
+
 #ifndef __ASSEMBLY__
 
 extern int direct_gbpages;
@@ -847,11 +946,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
  * dst and src can be on the same page, but the range must not overlap,
  * and must not cross a page boundary.
  */
-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
 {
-       memcpy(dst, src, count * sizeof(pgd_t));
+       pax_open_kernel();
+       while (count--)
+               *dst++ = *src++;
+       pax_close_kernel();
 }
 
+#ifdef CONFIG_PAX_PER_CPU_PGD
+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
+#endif
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
+#else
+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
+#endif
+
 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
 static inline int page_level_shift(enum pg_level level)
 {
index b6c0b404898a71330d8c1cd923bd268dd2a64adc..3535d47518e667b264abe762078a74139c8d7f3a 100644 (file)
@@ -25,9 +25,6 @@
 struct mm_struct;
 struct vm_area_struct;
 
-extern pgd_t swapper_pg_dir[1024];
-extern pgd_t initial_page_table[1024];
-
 static inline void pgtable_cache_init(void) { }
 static inline void check_pgt_cache(void) { }
 void paging_init(void);
@@ -45,6 +42,12 @@ void paging_init(void);
 # include <asm/pgtable-2level.h>
 #endif
 
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern pgd_t initial_page_table[PTRS_PER_PGD];
+#ifdef CONFIG_X86_PAE
+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
+#endif
+
 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address)                                   \
        ((pte_t *)kmap_atomic(pmd_page(*(dir))) +               \
@@ -59,12 +62,17 @@ void paging_init(void);
 /* Clear a kernel PTE and flush it from the TLB */
 #define kpte_clear_flush(ptep, vaddr)          \
 do {                                           \
+       pax_open_kernel();                      \
        pte_clear(&init_mm, (vaddr), (ptep));   \
+       pax_close_kernel();                     \
        __flush_tlb_one((vaddr));               \
 } while (0)
 
 #endif /* !__ASSEMBLY__ */
 
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
 /*
  * kern_addr_valid() is (1) for FLATMEM and (0) for
  * SPARSEMEM and DISCONTIGMEM
index 9fb2f2bc8245b5f24ececb2c97e4227e5abe8f5f..b04b4bf9019d161f80f58d1cc6a93d5c680441e5 100644 (file)
@@ -8,7 +8,7 @@
  */
 #ifdef CONFIG_X86_PAE
 # include <asm/pgtable-3level_types.h>
-# define PMD_SIZE      (1UL << PMD_SHIFT)
+# define PMD_SIZE      (_AC(1, UL) << PMD_SHIFT)
 # define PMD_MASK      (~(PMD_SIZE - 1))
 #else
 # include <asm/pgtable-2level_types.h>
@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
 # define VMALLOC_END   (FIXADDR_START - 2 * PAGE_SIZE)
 #endif
 
+#ifdef CONFIG_PAX_KERNEXEC
+#ifndef __ASSEMBLY__
+extern unsigned char MODULES_EXEC_VADDR[];
+extern unsigned char MODULES_EXEC_END[];
+#endif
+#include <asm/boot.h>
+#define ktla_ktva(addr)                (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
+#define ktva_ktla(addr)                (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
+#else
+#define ktla_ktva(addr)                (addr)
+#define ktva_ktla(addr)                (addr)
+#endif
+
 #define MODULES_VADDR  VMALLOC_START
 #define MODULES_END    VMALLOC_END
 #define MODULES_LEN    (MODULES_VADDR - MODULES_END)
index 4572b2f302379ea04da04d99ab5933b49b6ae063..44301130c92cde2f740322d6cada5e4e0fbd9f02 100644 (file)
 
 extern pud_t level3_kernel_pgt[512];
 extern pud_t level3_ident_pgt[512];
+extern pud_t level3_vmalloc_start_pgt[512];
+extern pud_t level3_vmalloc_end_pgt[512];
+extern pud_t level3_vmemmap_pgt[512];
+extern pud_t level2_vmemmap_pgt[512];
 extern pmd_t level2_kernel_pgt[512];
 extern pmd_t level2_fixmap_pgt[512];
-extern pmd_t level2_ident_pgt[512];
+extern pmd_t level2_ident_pgt[512*2];
 extern pte_t level1_fixmap_pgt[512];
-extern pgd_t init_level4_pgt[];
+extern pte_t level1_vsyscall_pgt[512];
+extern pgd_t init_level4_pgt[512];
 
 #define swapper_pg_dir init_level4_pgt
 
@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 
 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
+       pax_open_kernel();
        *pmdp = pmd;
+       pax_close_kernel();
 }
 
 static inline void native_pmd_clear(pmd_t *pmd)
@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 
 static inline void native_set_pud(pud_t *pudp, pud_t pud)
 {
+       pax_open_kernel();
        *pudp = pud;
+       pax_close_kernel();
 }
 
 static inline void native_pud_clear(pud_t *pud)
@@ -107,6 +116,13 @@ static inline void native_pud_clear(pud_t *pud)
 }
 
 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+       pax_open_kernel();
+       *pgdp = pgd;
+       pax_close_kernel();
+}
+
+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
 {
        *pgdp = pgd;
 }
index 602b6028c5b6e30c96d57031b85ec2f04793abd0..acb53edab06e2696c4bf0a51d99dea79c11d9ee6 100644 (file)
@@ -61,11 +61,16 @@ typedef struct { pteval_t pte; } pte_t;
 #define MODULES_VADDR    (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
 #define MODULES_END      _AC(0xffffffffff000000, UL)
 #define MODULES_LEN   (MODULES_END - MODULES_VADDR)
+#define MODULES_EXEC_VADDR MODULES_VADDR
+#define MODULES_EXEC_END MODULES_END
 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
 #define EFI_VA_START    ( -4 * (_AC(1, UL) << 30))
 #define EFI_VA_END      (-68 * (_AC(1, UL) << 30))
 
+#define ktla_ktva(addr)                (addr)
+#define ktva_ktla(addr)                (addr)
+
 #define EARLY_DYNAMIC_PAGE_TABLES      64
 
 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
index 25bcd4a89517af4839c639741c04c6cdfc8640fb..bf3f815ba38a6a7e74e503f78fe44ebbecc7cc5d 100644 (file)
 
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 #define _PAGE_NX       (_AT(pteval_t, 1) << _PAGE_BIT_NX)
-#else
+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
 #define _PAGE_NX       (_AT(pteval_t, 0))
+#else
+#define _PAGE_NX       (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
 #endif
 
 #define _PAGE_FILE     (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
@@ -167,6 +169,9 @@ enum page_cache_mode {
 #define PAGE_READONLY_EXEC     __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
                                         _PAGE_ACCESSED)
 
+#define PAGE_READONLY_NOEXEC PAGE_READONLY
+#define PAGE_SHARED_NOEXEC PAGE_SHARED
+
 #define __PAGE_KERNEL_EXEC                                             \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
 #define __PAGE_KERNEL          (__PAGE_KERNEL_EXEC | _PAGE_NX)
@@ -174,7 +179,7 @@ enum page_cache_mode {
 #define __PAGE_KERNEL_RO               (__PAGE_KERNEL & ~_PAGE_RW)
 #define __PAGE_KERNEL_RX               (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
 #define __PAGE_KERNEL_NOCACHE          (__PAGE_KERNEL | _PAGE_NOCACHE)
-#define __PAGE_KERNEL_VSYSCALL         (__PAGE_KERNEL_RX | _PAGE_USER)
+#define __PAGE_KERNEL_VSYSCALL         (__PAGE_KERNEL_RO | _PAGE_USER)
 #define __PAGE_KERNEL_VVAR             (__PAGE_KERNEL_RO | _PAGE_USER)
 #define __PAGE_KERNEL_LARGE            (__PAGE_KERNEL | _PAGE_PSE)
 #define __PAGE_KERNEL_LARGE_EXEC       (__PAGE_KERNEL_EXEC | _PAGE_PSE)
@@ -220,7 +225,7 @@ enum page_cache_mode {
 #ifdef CONFIG_X86_64
 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
 #else
-#define PTE_IDENT_ATTR  0x003          /* PRESENT+RW */
+#define PTE_IDENT_ATTR  0x063          /* PRESENT+RW+DIRTY+ACCESSED */
 #define PDE_IDENT_ATTR  0x063          /* PRESENT+RW+DIRTY+ACCESSED */
 #define PGD_IDENT_ATTR  0x001          /* PRESENT (no other attributes) */
 #endif
@@ -259,7 +264,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
 {
        return native_pgd_val(pgd) & PTE_FLAGS_MASK;
 }
+#endif
 
+#if PAGETABLE_LEVELS == 3
+#include <asm-generic/pgtable-nopud.h>
+#endif
+
+#if PAGETABLE_LEVELS == 2
+#include <asm-generic/pgtable-nopmd.h>
+#endif
+
+#ifndef __ASSEMBLY__
 #if PAGETABLE_LEVELS > 3
 typedef struct { pudval_t pud; } pud_t;
 
@@ -273,8 +288,6 @@ static inline pudval_t native_pud_val(pud_t pud)
        return pud.pud;
 }
 #else
-#include <asm-generic/pgtable-nopud.h>
-
 static inline pudval_t native_pud_val(pud_t pud)
 {
        return native_pgd_val(pud.pgd);
@@ -294,8 +307,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
        return pmd.pmd;
 }
 #else
-#include <asm-generic/pgtable-nopmd.h>
-
 static inline pmdval_t native_pmd_val(pmd_t pmd)
 {
        return native_pgd_val(pmd.pud.pgd);
@@ -402,7 +413,6 @@ typedef struct page *pgtable_t;
 
 extern pteval_t __supported_pte_mask;
 extern void set_nx(void);
-extern int nx_enabled;
 
 #define pgprot_writecombine    pgprot_writecombine
 extern pgprot_t pgprot_writecombine(pgprot_t prot);
index 8f32718425339f426778bbe09e2c468e5ff63814..368fb29d93c960a38bce5be45e898998d85d590f 100644 (file)
@@ -84,7 +84,7 @@ static __always_inline void __preempt_count_sub(int val)
  */
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-       GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
+       GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
 }
 
 /*
index a092a0cce0b759a20ce906efc03e3cb5e8d1d2af..8e9640b4e78225272d41464273fa7d2f616c4c33 100644 (file)
@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
        /* Index into per_cpu list: */
        u16                     cpu_index;
        u32                     microcode;
-};
+} __randomize_layout;
 
 #define X86_VENDOR_INTEL       0
 #define X86_VENDOR_CYRIX       1
@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
            : "memory");
 }
 
+/* invpcid (%rdx),%rax */
+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
+
+#define INVPCID_SINGLE_ADDRESS 0UL
+#define INVPCID_SINGLE_CONTEXT 1UL
+#define INVPCID_ALL_GLOBAL     2UL
+#define INVPCID_ALL_NONGLOBAL  3UL
+
+#define PCID_KERNEL            0UL
+#define PCID_USER              1UL
+#define PCID_NOFLUSH           (1UL << 63)
+
 static inline void load_cr3(pgd_t *pgdir)
 {
-       write_cr3(__pa(pgdir));
+       write_cr3(__pa(pgdir) | PCID_KERNEL);
 }
 
 #ifdef CONFIG_X86_32
@@ -282,7 +294,7 @@ struct tss_struct {
 
 } ____cacheline_aligned;
 
-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
+extern struct tss_struct init_tss[NR_CPUS];
 
 /*
  * Save the original ist values for checking stack pointers during debugging
@@ -479,6 +491,7 @@ struct thread_struct {
        unsigned short          ds;
        unsigned short          fsindex;
        unsigned short          gsindex;
+       unsigned short          ss;
 #endif
 #ifdef CONFIG_X86_32
        unsigned long           ip;
@@ -588,29 +601,8 @@ static inline void load_sp0(struct tss_struct *tss,
 extern unsigned long mmu_cr4_features;
 extern u32 *trampoline_cr4_features;
 
-static inline void set_in_cr4(unsigned long mask)
-{
-       unsigned long cr4;
-
-       mmu_cr4_features |= mask;
-       if (trampoline_cr4_features)
-               *trampoline_cr4_features = mmu_cr4_features;
-       cr4 = read_cr4();
-       cr4 |= mask;
-       write_cr4(cr4);
-}
-
-static inline void clear_in_cr4(unsigned long mask)
-{
-       unsigned long cr4;
-
-       mmu_cr4_features &= ~mask;
-       if (trampoline_cr4_features)
-               *trampoline_cr4_features = mmu_cr4_features;
-       cr4 = read_cr4();
-       cr4 &= ~mask;
-       write_cr4(cr4);
-}
+extern void set_in_cr4(unsigned long mask);
+extern void clear_in_cr4(unsigned long mask);
 
 typedef struct {
        unsigned long           seg;
@@ -838,11 +830,18 @@ static inline void spin_lock_prefetch(const void *x)
  */
 #define TASK_SIZE              PAGE_OFFSET
 #define TASK_SIZE_MAX          TASK_SIZE
+
+#ifdef CONFIG_PAX_SEGMEXEC
+#define SEGMEXEC_TASK_SIZE     (TASK_SIZE / 2)
+#define STACK_TOP              ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
+#else
 #define STACK_TOP              TASK_SIZE
-#define STACK_TOP_MAX          STACK_TOP
+#endif
+
+#define STACK_TOP_MAX          TASK_SIZE
 
 #define INIT_THREAD  {                                                   \
-       .sp0                    = sizeof(init_stack) + (long)&init_stack, \
+       .sp0                    = sizeof(init_stack) + (long)&init_stack - 8, \
        .vm86_info              = NULL,                                   \
        .sysenter_cs            = __KERNEL_CS,                            \
        .io_bitmap_ptr          = NULL,                                   \
@@ -856,7 +855,7 @@ static inline void spin_lock_prefetch(const void *x)
  */
 #define INIT_TSS  {                                                      \
        .x86_tss = {                                                      \
-               .sp0            = sizeof(init_stack) + (long)&init_stack, \
+               .sp0            = sizeof(init_stack) + (long)&init_stack - 8, \
                .ss0            = __KERNEL_DS,                            \
                .ss1            = __KERNEL_CS,                            \
                .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,               \
@@ -867,11 +866,7 @@ static inline void spin_lock_prefetch(const void *x)
 extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 #define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
-#define KSTK_TOP(info)                                                 \
-({                                                                     \
-       unsigned long *__ptr = (unsigned long *)(info);                 \
-       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
-})
+#define KSTK_TOP(info)         ((container_of(info, struct task_struct, tinfo))->thread.sp0)
 
 /*
  * The below -8 is to reserve 8 bytes on top of the ring0 stack.
@@ -886,7 +881,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 #define task_pt_regs(task)                                             \
 ({                                                                     \
        struct pt_regs *__regs__;                                       \
-       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
+       __regs__ = (struct pt_regs *)((task)->thread.sp0);              \
        __regs__ - 1;                                                   \
 })
 
@@ -902,13 +897,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
  * particular problem by preventing anything from being mapped
  * at the maximum canonical address.
  */
-#define TASK_SIZE_MAX  ((1UL << 47) - PAGE_SIZE)
+#define TASK_SIZE_MAX  ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
 
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
 #define IA32_PAGE_OFFSET       ((current->personality & ADDR_LIMIT_3GB) ? \
-                                       0xc0000000 : 0xFFFFe000)
+                                       0xc0000000 : 0xFFFFf000)
 
 #define TASK_SIZE              (test_thread_flag(TIF_ADDR32) ? \
                                        IA32_PAGE_OFFSET : TASK_SIZE_MAX)
@@ -919,11 +914,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 #define STACK_TOP_MAX          TASK_SIZE_MAX
 
 #define INIT_THREAD  { \
-       .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
+       .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
 }
 
 #define INIT_TSS  { \
-       .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
+       .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
 }
 
 /*
@@ -951,6 +946,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
  */
 #define TASK_UNMAPPED_BASE     (PAGE_ALIGN(TASK_SIZE / 3))
 
+#ifdef CONFIG_PAX_SEGMEXEC
+#define SEGMEXEC_TASK_UNMAPPED_BASE    (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
+#endif
+
 #define KSTK_EIP(task)         (task_pt_regs(task)->ip)
 
 /* Get/set a process' ability to use the timestamp counter instruction */
@@ -995,7 +994,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
        return 0;
 }
 
-extern unsigned long arch_align_stack(unsigned long sp);
+#define arch_align_stack(x) ((x) & ~0xfUL)
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
 void default_idle(void);
@@ -1005,6 +1004,6 @@ bool xen_set_default_idle(void);
 #define xen_set_default_idle 0
 #endif
 
-void stop_this_cpu(void *dummy);
+void stop_this_cpu(void *dummy) __noreturn;
 void df_debug(struct pt_regs *regs, long error_code);
 #endif /* _ASM_X86_PROCESSOR_H */
index 86fc2bb82287a687bd8ca0e976e8d32761678480..bd5049a2c54939dd2a7dd603e79924fa18b6f368 100644 (file)
@@ -89,28 +89,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
 }
 
 /*
- * user_mode_vm(regs) determines whether a register set came from user mode.
+ * user_mode(regs) determines whether a register set came from user mode.
  * This is true if V8086 mode was enabled OR if the register set was from
  * protected mode with RPL-3 CS value.  This tricky test checks that with
  * one comparison.  Many places in the kernel can bypass this full check
- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
+ * be used.
  */
-static inline int user_mode(struct pt_regs *regs)
+static inline int user_mode_novm(struct pt_regs *regs)
 {
 #ifdef CONFIG_X86_32
        return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
 #else
-       return !!(regs->cs & 3);
+       return !!(regs->cs & SEGMENT_RPL_MASK);
 #endif
 }
 
-static inline int user_mode_vm(struct pt_regs *regs)
+static inline int user_mode(struct pt_regs *regs)
 {
 #ifdef CONFIG_X86_32
        return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
                USER_RPL;
 #else
-       return user_mode(regs);
+       return user_mode_novm(regs);
 #endif
 }
 
@@ -126,15 +127,16 @@ static inline int v8086_mode(struct pt_regs *regs)
 #ifdef CONFIG_X86_64
 static inline bool user_64bit_mode(struct pt_regs *regs)
 {
+       unsigned long cs = regs->cs & 0xffff;
 #ifndef CONFIG_PARAVIRT
        /*
         * On non-paravirt systems, this is the only long mode CPL 3
         * selector.  We do not allow long mode selectors in the LDT.
         */
-       return regs->cs == __USER_CS;
+       return cs == __USER_CS;
 #else
        /* Headers are too twisted for this to go in paravirt.h. */
-       return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
+       return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
 #endif
 }
 
@@ -185,9 +187,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
         * Traps from the kernel do not save sp and ss.
         * Use the helper function to retrieve sp.
         */
-       if (offset == offsetof(struct pt_regs, sp) &&
-           regs->cs == __KERNEL_CS)
-               return kernel_stack_pointer(regs);
+       if (offset == offsetof(struct pt_regs, sp)) {
+               unsigned long cs = regs->cs & 0xffff;
+               if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
+                       return kernel_stack_pointer(regs);
+       }
 #endif
        return *(unsigned long *)((unsigned long)regs + offset);
 }
index ae0e241e228b809a054ff9c7699ad4483f581428..e80b10b9ed943be5dd1196b0a99467cfdbdf6fdb 100644 (file)
@@ -7,8 +7,8 @@
 #define queue_write_unlock queue_write_unlock
 static inline void queue_write_unlock(struct qrwlock *lock)
 {
-        barrier();
-        ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
+       barrier();
+       ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
 }
 #endif
 
index 9c6b890d5e7a0733ed7e92f1d3f1b1aa81bd349f..5305f53d4e1dd27787fde70f742706554ef49083 100644 (file)
@@ -22,16 +22,14 @@ struct real_mode_header {
 #endif
        /* APM/BIOS reboot */
        u32     machine_real_restart_asm;
-#ifdef CONFIG_X86_64
        u32     machine_real_restart_seg;
-#endif
 };
 
 /* This must match data at trampoline_32/64.S */
 struct trampoline_header {
 #ifdef CONFIG_X86_32
        u32 start;
-       u16 gdt_pad;
+       u16 boot_cs;
        u16 gdt_limit;
        u32 gdt_base;
 #else
index a82c4f1b4d83e96daf1d99126a290c45ffa38a6c..ac4505322b0c81dd2867bc26da02aeea2b2431e7 100644 (file)
@@ -6,13 +6,13 @@
 struct pt_regs;
 
 struct machine_ops {
-       void (*restart)(char *cmd);
-       void (*halt)(void);
-       void (*power_off)(void);
+       void (* __noreturn restart)(char *cmd);
+       void (* __noreturn halt)(void);
+       void (* __noreturn power_off)(void);
        void (*shutdown)(void);
        void (*crash_shutdown)(struct pt_regs *);
-       void (*emergency_restart)(void);
-};
+       void (* __noreturn emergency_restart)(void);
+} __no_const;
 
 extern struct machine_ops machine_ops;
 
index 8f7866a5b9a41df67475892353979c005ce8c09b..e442f20f5aa320bc8e8124c535abb094ab8d2b36 100644 (file)
@@ -3,7 +3,34 @@
 
 #ifdef CC_HAVE_ASM_GOTO
 
-#define __GEN_RMWcc(fullop, var, cc, ...)                              \
+#ifdef CONFIG_PAX_REFCOUNT
+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...)                  \
+do {                                                                   \
+       asm_volatile_goto (fullop                                       \
+                       ";jno 0f\n"                                     \
+                       fullantiop                                      \
+                       ";int $4\n0:\n"                                 \
+                       _ASM_EXTABLE(0b, 0b)                            \
+                        ";j" cc " %l[cc_label]"                        \
+                       : : "m" (var), ## __VA_ARGS__                   \
+                       : "memory" : cc_label);                         \
+       return 0;                                                       \
+cc_label:                                                              \
+       return 1;                                                       \
+} while (0)
+#else
+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...)                  \
+do {                                                                   \
+       asm_volatile_goto (fullop ";j" cc " %l[cc_label]"               \
+                       : : "m" (var), ## __VA_ARGS__                   \
+                       : "memory" : cc_label);                         \
+       return 0;                                                       \
+cc_label:                                                              \
+       return 1;                                                       \
+} while (0)
+#endif
+
+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...)                    \
 do {                                                                   \
        asm_volatile_goto (fullop "; j" cc " %l[cc_label]"              \
                        : : "m" (var), ## __VA_ARGS__                   \
@@ -13,15 +40,46 @@ cc_label:                                                           \
        return 1;                                                       \
 } while (0)
 
-#define GEN_UNARY_RMWcc(op, var, arg0, cc)                             \
-       __GEN_RMWcc(op " " arg0, var, cc)
+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc)                     \
+       __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
+
+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc)                   \
+       __GEN_RMWcc_unchecked(op " " arg0, var, cc)
+
+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc)         \
+       __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
 
-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)                 \
-       __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc)       \
+       __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
 
 #else /* !CC_HAVE_ASM_GOTO */
 
-#define __GEN_RMWcc(fullop, var, cc, ...)                              \
+#ifdef CONFIG_PAX_REFCOUNT
+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...)                  \
+do {                                                                   \
+       char c;                                                         \
+       asm volatile (fullop                                            \
+                       ";jno 0f\n"                                     \
+                       fullantiop                                      \
+                       ";int $4\n0:\n"                                 \
+                       _ASM_EXTABLE(0b, 0b)                            \
+                       "; set" cc " %1"                                \
+                       : "+m" (var), "=qm" (c)                         \
+                       : __VA_ARGS__ : "memory");                      \
+       return c != 0;                                                  \
+} while (0)
+#else
+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...)                  \
+do {                                                                   \
+       char c;                                                         \
+       asm volatile (fullop "; set" cc " %1"                           \
+                       : "+m" (var), "=qm" (c)                         \
+                       : __VA_ARGS__ : "memory");                      \
+       return c != 0;                                                  \
+} while (0)
+#endif
+
+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...)                    \
 do {                                                                   \
        char c;                                                         \
        asm volatile (fullop "; set" cc " %1"                           \
@@ -30,11 +88,17 @@ do {                                                                        \
        return c != 0;                                                  \
 } while (0)
 
-#define GEN_UNARY_RMWcc(op, var, arg0, cc)                             \
-       __GEN_RMWcc(op " " arg0, var, cc)
+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc)                     \
+       __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
+
+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc)                   \
+       __GEN_RMWcc_unchecked(op " " arg0, var, cc)
+
+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc)         \
+       __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
 
-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)                 \
-       __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc)       \
+       __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
 
 #endif /* CC_HAVE_ASM_GOTO */
 
index cad82c9c2fdef9cb157285379f3aaa4d8532a222..2e5c5c1de1ed58eeff9e8d496aaf4b3c1db66bf1 100644 (file)
@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
 {
        asm volatile("# beginning down_read\n\t"
                     LOCK_PREFIX _ASM_INC "(%1)\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                    "jno 0f\n"
+                    LOCK_PREFIX _ASM_DEC "(%1)\n"
+                    "int $4\n0:\n"
+                    _ASM_EXTABLE(0b, 0b)
+#endif
+
                     /* adds 0x00000001 */
                     "  jns        1f\n"
                     "  call call_rwsem_down_read_failed\n"
@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
                     "1:\n\t"
                     "  mov          %1,%2\n\t"
                     "  add          %3,%2\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                    "jno 0f\n"
+                    "sub %3,%2\n"
+                    "int $4\n0:\n"
+                    _ASM_EXTABLE(0b, 0b)
+#endif
+
                     "  jle          2f\n\t"
                     LOCK_PREFIX "  cmpxchg  %2,%0\n\t"
                     "  jnz          1b\n\t"
@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
        long tmp;
        asm volatile("# beginning down_write\n\t"
                     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                    "jno 0f\n"
+                    "mov %1,(%2)\n"
+                    "int $4\n0:\n"
+                    _ASM_EXTABLE(0b, 0b)
+#endif
+
                     /* adds 0xffff0001, returns the old value */
                     "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
                     /* was the active mask 0 before? */
@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
        long tmp;
        asm volatile("# beginning __up_read\n\t"
                     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                    "jno 0f\n"
+                    "mov %1,(%2)\n"
+                    "int $4\n0:\n"
+                    _ASM_EXTABLE(0b, 0b)
+#endif
+
                     /* subtracts 1, returns the old value */
                     "  jns        1f\n\t"
                     "  call call_rwsem_wake\n" /* expects old value in %edx */
@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
        long tmp;
        asm volatile("# beginning __up_write\n\t"
                     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                    "jno 0f\n"
+                    "mov %1,(%2)\n"
+                    "int $4\n0:\n"
+                    _ASM_EXTABLE(0b, 0b)
+#endif
+
                     /* subtracts 0xffff0001, returns the old value */
                     "  jns        1f\n\t"
                     "  call call_rwsem_wake\n" /* expects old value in %edx */
@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 {
        asm volatile("# beginning __downgrade_write\n\t"
                     LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                    "jno 0f\n"
+                    LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
+                    "int $4\n0:\n"
+                    _ASM_EXTABLE(0b, 0b)
+#endif
+
                     /*
                      * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
                      *     0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
  */
 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
 {
-       asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
+       asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+                    "jno 0f\n"
+                    LOCK_PREFIX _ASM_SUB "%1,%0\n"
+                    "int $4\n0:\n"
+                    _ASM_EXTABLE(0b, 0b)
+#endif
+
                     : "+m" (sem->count)
                     : "er" (delta));
 }
@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
  */
 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-       return delta + xadd(&sem->count, delta);
+       return delta + xadd_check_overflow(&sem->count, delta);
 }
 
 #endif /* __KERNEL__ */
index db257a58571f0b47429e2dcc70cf628f1a968fe4..b91bc77ee43c193679c58150bdd6fb5f2a3556f1 100644 (file)
  *  26 - ESPFIX small SS
  *  27 - per-cpu                       [ offset to per-cpu data area ]
  *  28 - stack_canary-20               [ for stack protector ]
- *  29 - unused
- *  30 - unused
+ *  29 - PCI BIOS CS
+ *  30 - PCI BIOS DS
  *  31 - TSS for double fault handler
  */
+#define GDT_ENTRY_KERNEXEC_EFI_CS      (1)
+#define GDT_ENTRY_KERNEXEC_EFI_DS      (2)
+#define __KERNEXEC_EFI_CS      (GDT_ENTRY_KERNEXEC_EFI_CS*8)
+#define __KERNEXEC_EFI_DS      (GDT_ENTRY_KERNEXEC_EFI_DS*8)
+
 #define GDT_ENTRY_TLS_MIN      6
 #define GDT_ENTRY_TLS_MAX      (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
 
@@ -88,6 +93,8 @@
 
 #define GDT_ENTRY_KERNEL_CS            (GDT_ENTRY_KERNEL_BASE+0)
 
+#define GDT_ENTRY_KERNEXEC_KERNEL_CS   (4)
+
 #define GDT_ENTRY_KERNEL_DS            (GDT_ENTRY_KERNEL_BASE+1)
 
 #define GDT_ENTRY_TSS                  (GDT_ENTRY_KERNEL_BASE+4)
 #define __KERNEL_STACK_CANARY          0
 #endif
 
+#define GDT_ENTRY_PCIBIOS_CS           (GDT_ENTRY_KERNEL_BASE+17)
+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
+
+#define GDT_ENTRY_PCIBIOS_DS           (GDT_ENTRY_KERNEL_BASE+18)
+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
+
 #define GDT_ENTRY_DOUBLEFAULT_TSS      31
 
 /*
  */
 
 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
-#define SEGMENT_IS_PNP_CODE(x)   (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
+#define SEGMENT_IS_PNP_CODE(x)   (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
 
 
 #else
 #define __USER32_CS   (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
 #define __USER32_DS    __USER_DS
 
+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
+
 #define GDT_ENTRY_TSS 8        /* needs two entries */
 #define GDT_ENTRY_LDT 10 /* needs two entries */
 #define GDT_ENTRY_TLS_MIN 12
 #define GDT_ENTRY_PER_CPU 15   /* Abused to load per CPU data from limit */
 #define __PER_CPU_SEG  (GDT_ENTRY_PER_CPU * 8 + 3)
 
+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
+
 /* TLS indexes for 64bit - hardcoded in arch_prctl */
 #define FS_TLS 0
 #define GS_TLS 1
 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
 
-#define GDT_ENTRIES 16
+#define GDT_ENTRIES 17
 
 #endif
 
 #define __KERNEL_CS    (GDT_ENTRY_KERNEL_CS*8)
+#define __KERNEXEC_KERNEL_CS   (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
 #define __KERNEL_DS    (GDT_ENTRY_KERNEL_DS*8)
+#define __UDEREF_KERNEL_DS     (GDT_ENTRY_UDEREF_KERNEL_DS*8)
 #define __USER_DS      (GDT_ENTRY_DEFAULT_USER_DS*8+3)
 #define __USER_CS      (GDT_ENTRY_DEFAULT_USER_CS*8+3)
 #ifndef CONFIG_PARAVIRT
@@ -256,7 +275,7 @@ static inline unsigned long get_limit(unsigned long segment)
 {
        unsigned long __limit;
        asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
-       return __limit + 1;
+       return __limit;
 }
 
 #endif /* !__ASSEMBLY__ */
index 8d3120f4e27053b3fae6d6334fe7a3a6dee33e82..352b44012c3e87769a81623aa7ba3379723305c7 100644 (file)
 
 #include <asm/alternative-asm.h>
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+#define ASM_PAX_OPEN_USERLAND                                  \
+       661: jmp 663f;                                          \
+       .pushsection .altinstr_replacement, "a" ;               \
+       662: pushq %rax; nop;                                   \
+       .popsection ;                                           \
+       .pushsection .altinstructions, "a" ;                    \
+       altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
+       .popsection ;                                           \
+       call __pax_open_userland;                               \
+       popq %rax;                                              \
+       663:
+
+#define ASM_PAX_CLOSE_USERLAND                                 \
+       661: jmp 663f;                                          \
+       .pushsection .altinstr_replacement, "a" ;               \
+       662: pushq %rax; nop;                                   \
+       .popsection;                                            \
+       .pushsection .altinstructions, "a" ;                    \
+       altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
+       .popsection;                                            \
+       call __pax_close_userland;                              \
+       popq %rax;                                              \
+       663:
+#else
+#define ASM_PAX_OPEN_USERLAND
+#define ASM_PAX_CLOSE_USERLAND
+#endif
+
 #ifdef CONFIG_X86_SMAP
 
 #define ASM_CLAC                                                       \
        661: ASM_NOP3 ;                                                 \
-       .pushsection .altinstr_replacement, "ax" ;                      \
+       .pushsection .altinstr_replacement, "a" ;                       \
        662: __ASM_CLAC ;                                               \
        .popsection ;                                                   \
        .pushsection .altinstructions, "a" ;                            \
@@ -38,7 +67,7 @@
 
 #define ASM_STAC                                                       \
        661: ASM_NOP3 ;                                                 \
-       .pushsection .altinstr_replacement, "ax" ;                      \
+       .pushsection .altinstr_replacement, "a" ;                       \
        662: __ASM_STAC ;                                               \
        .popsection ;                                                   \
        .pushsection .altinstructions, "a" ;                            \
 
 #include <asm/alternative.h>
 
+#define __HAVE_ARCH_PAX_OPEN_USERLAND
+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
+
+extern void __pax_open_userland(void);
+static __always_inline unsigned long pax_open_userland(void)
+{
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+       asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
+               :
+               : [open] "i" (__pax_open_userland)
+               : "memory", "rax");
+#endif
+
+       return 0;
+}
+
+extern void __pax_close_userland(void);
+static __always_inline unsigned long pax_close_userland(void)
+{
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+       asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
+               :
+               : [close] "i" (__pax_close_userland)
+               : "memory", "rax");
+#endif
+
+       return 0;
+}
+
 #ifdef CONFIG_X86_SMAP
 
 static __always_inline void clac(void)
index 8cd1cc3bc8356ffef29e349f08b3de43aeb79506..827e09e19c1dd767665a2dcbca0e5d7f9b682231 100644 (file)
@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
 /* cpus sharing the last level cache: */
 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
 
 static inline struct cpumask *cpu_sibling_mask(int cpu)
 {
@@ -78,7 +78,7 @@ struct smp_ops {
 
        void (*send_call_func_ipi)(const struct cpumask *mask);
        void (*send_call_func_single_ipi)(int cpu);
-};
+} __no_const;
 
 /* Globals due to paravirt */
 extern void set_cpu_sibling_map(int cpu);
@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
 extern int safe_smp_processor_id(void);
 
 #elif defined(CONFIG_X86_64_SMP)
-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
-
-#define stack_smp_processor_id()                                       \
-({                                                             \
-       struct thread_info *ti;                                         \
-       __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));      \
-       ti->cpu;                                                        \
-})
+#define raw_smp_processor_id()         (this_cpu_read(cpu_number))
+#define stack_smp_processor_id()       raw_smp_processor_id()
 #define safe_smp_processor_id()                smp_processor_id()
 
 #endif
index 6a998598f172424f198bf7e034676a8c47b2eb9b..03cb8076a4c9bed42bf3fe28ed5e589e5b45e2ad 100644 (file)
@@ -47,7 +47,7 @@
  * head_32 for boot CPU and setup_per_cpu_areas() for others.
  */
 #define GDT_STACK_CANARY_INIT                                          \
-       [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
+       [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
 
 /*
  * Initialize the stackprotector canary value.
@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
 
 static inline void load_stack_canary_segment(void)
 {
-#ifdef CONFIG_X86_32
+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
        asm volatile ("mov %0, %%gs" : : "r" (0));
 #endif
 }
index 70bbe39043a9cda384e9afcd0a2f633cfde20931..4ae2bd448aa47efdeb99a36da1aca9aa48ff3f5d 100644 (file)
 
 extern int kstack_depth_to_print;
 
-struct thread_info;
+struct task_struct;
 struct stacktrace_ops;
 
-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
-                                     unsigned long *stack,
-                                     unsigned long bp,
-                                     const struct stacktrace_ops *ops,
-                                     void *data,
-                                     unsigned long *end,
-                                     int *graph);
-
-extern unsigned long
-print_context_stack(struct thread_info *tinfo,
-                   unsigned long *stack, unsigned long bp,
-                   const struct stacktrace_ops *ops, void *data,
-                   unsigned long *end, int *graph);
-
-extern unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
-                      unsigned long *stack, unsigned long bp,
-                      const struct stacktrace_ops *ops, void *data,
-                      unsigned long *end, int *graph);
+typedef unsigned long walk_stack_t(struct task_struct *task,
+                                  void *stack_start,
+                                  unsigned long *stack,
+                                  unsigned long bp,
+                                  const struct stacktrace_ops *ops,
+                                  void *data,
+                                  unsigned long *end,
+                                  int *graph);
+
+extern walk_stack_t print_context_stack;
+extern walk_stack_t print_context_stack_bp;
 
 /* Generic stack tracer with callbacks */
 
@@ -40,7 +32,7 @@ struct stacktrace_ops {
        void (*address)(void *data, unsigned long address, int reliable);
        /* On negative return stop dumping */
        int (*stack)(void *data, char *name);
-       walk_stack_t    walk_stack;
+       walk_stack_t    *walk_stack;
 };
 
 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
index 751bf4b7bf114da12231a56f4217c2583ddeafb2..a1278b5ad523accd817cc0789bc2b3be84fcdc3d 100644 (file)
@@ -112,7 +112,7 @@ do {                                                                        \
             "call __switch_to\n\t"                                       \
             "movq "__percpu_arg([current_task])",%%rsi\n\t"              \
             __switch_canary                                              \
-            "movq %P[thread_info](%%rsi),%%r8\n\t"                       \
+            "movq "__percpu_arg([thread_info])",%%r8\n\t"                \
             "movq %%rax,%%rdi\n\t"                                       \
             "testl  %[_tif_fork],%P[ti_flags](%%r8)\n\t"                 \
             "jnz   ret_from_fork\n\t"                                    \
@@ -123,7 +123,7 @@ do {                                                                        \
               [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
               [ti_flags] "i" (offsetof(struct thread_info, flags)),      \
               [_tif_fork] "i" (_TIF_FORK),                               \
-              [thread_info] "i" (offsetof(struct task_struct, stack)),   \
+              [thread_info] "m" (current_tinfo),                         \
               [current_task] "m" (current_task)                          \
               __switch_canary_iparam                                     \
             : "memory", "cc" __EXTRA_CLOBBER)
index 547e344a6dc60d7db27d43c74d44c783326291bb..6be1175c0ad15bebb08c5f69919182c48c4257b6 100644 (file)
@@ -24,7 +24,6 @@ struct exec_domain;
 #include <linux/atomic.h>
 
 struct thread_info {
-       struct task_struct      *task;          /* main task structure */
        struct exec_domain      *exec_domain;   /* execution domain */
        __u32                   flags;          /* low level flags */
        __u32                   status;         /* thread synchronous flags */
@@ -33,13 +32,13 @@ struct thread_info {
        mm_segment_t            addr_limit;
        struct restart_block    restart_block;
        void __user             *sysenter_return;
+       unsigned long           lowest_stack;
        unsigned int            sig_on_uaccess_error:1;
        unsigned int            uaccess_err:1;  /* uaccess failed */
 };
 
-#define INIT_THREAD_INFO(tsk)                  \
+#define INIT_THREAD_INFO                       \
 {                                              \
-       .task           = &tsk,                 \
        .exec_domain    = &default_exec_domain, \
        .flags          = 0,                    \
        .cpu            = 0,                    \
@@ -50,7 +49,7 @@ struct thread_info {
        },                                      \
 }
 
-#define init_thread_info       (init_thread_union.thread_info)
+#define init_thread_info       (init_thread_union.stack)
 #define init_stack             (init_thread_union.stack)
 
 #else /* !__ASSEMBLY__ */
@@ -91,6 +90,7 @@ struct thread_info {
 #define TIF_SYSCALL_TRACEPOINT 28      /* syscall tracepoint instrumentation */
 #define TIF_ADDR32             29      /* 32-bit address space on 64 bits */
 #define TIF_X32                        30      /* 32-bit native x86-64 binary */
+#define TIF_GRSEC_SETXID       31      /* update credentials on syscall entry/exit */
 
 #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
@@ -115,17 +115,18 @@ struct thread_info {
 #define _TIF_SYSCALL_TRACEPOINT        (1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_ADDR32            (1 << TIF_ADDR32)
 #define _TIF_X32               (1 << TIF_X32)
+#define _TIF_GRSEC_SETXID      (1 << TIF_GRSEC_SETXID)
 
 /* work to do in syscall_trace_enter() */
 #define _TIF_WORK_SYSCALL_ENTRY        \
        (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT |   \
         _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT |     \
-        _TIF_NOHZ)
+        _TIF_NOHZ | _TIF_GRSEC_SETXID)
 
 /* work to do in syscall_trace_leave() */
 #define _TIF_WORK_SYSCALL_EXIT \
        (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP |    \
-        _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
+        _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK                                                 \
@@ -136,7 +137,7 @@ struct thread_info {
 /* work to do on any return to user space */
 #define _TIF_ALLWORK_MASK                                              \
        ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT |       \
-       _TIF_NOHZ)
+       _TIF_NOHZ | _TIF_GRSEC_SETXID)
 
 /* Only used for 64 bit */
 #define _TIF_DO_NOTIFY_MASK                                            \
@@ -151,7 +152,6 @@ struct thread_info {
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
 
 #define STACK_WARN             (THREAD_SIZE/8)
-#define KERNEL_STACK_OFFSET    (5*(BITS_PER_LONG/8))
 
 /*
  * macros/functions for gaining access to the thread information structure
@@ -162,26 +162,18 @@ struct thread_info {
 
 DECLARE_PER_CPU(unsigned long, kernel_stack);
 
+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
+
 static inline struct thread_info *current_thread_info(void)
 {
-       struct thread_info *ti;
-       ti = (void *)(this_cpu_read_stable(kernel_stack) +
-                     KERNEL_STACK_OFFSET - THREAD_SIZE);
-       return ti;
+       return this_cpu_read_stable(current_tinfo);
 }
 
 #else /* !__ASSEMBLY__ */
 
 /* how to get the thread information struct from ASM */
 #define GET_THREAD_INFO(reg) \
-       _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
-       _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
-
-/*
- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
- * a certain register (to be used in assembler memory operands).
- */
-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
+       _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
 
 #endif
 
@@ -237,5 +229,12 @@ static inline bool is_ia32_task(void)
 extern void arch_task_cache_init(void);
 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 extern void arch_release_task_struct(struct task_struct *tsk);
+
+#define __HAVE_THREAD_FUNCTIONS
+#define task_thread_info(task) (&(task)->tinfo)
+#define task_stack_page(task)  ((task)->stack)
+#define setup_thread_stack(p, org) do {} while (0)
+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
+
 #endif
 #endif /* _ASM_X86_THREAD_INFO_H */
index 04905bfc508b9925c697687b7c1d5754827ba417..1178cdf1600fccf80f3cfb6f1d1313d4b0be85c5 100644 (file)
 
 static inline void __native_flush_tlb(void)
 {
+       if (static_cpu_has(X86_FEATURE_INVPCID)) {
+               u64 descriptor[2];
+
+               descriptor[0] = PCID_KERNEL;
+               asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
+               return;
+       }
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+       if (static_cpu_has(X86_FEATURE_PCID)) {
+               unsigned int cpu = raw_get_cpu();
+
+               native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
+               native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
+               raw_put_cpu_no_resched();
+               return;
+       }
+#endif
+
        native_write_cr3(native_read_cr3());
 }
 
 static inline void __native_flush_tlb_global_irq_disabled(void)
 {
-       unsigned long cr4;
-
-       cr4 = native_read_cr4();
-       /* clear PGE */
-       native_write_cr4(cr4 & ~X86_CR4_PGE);
-       /* write old PGE again and flush TLBs */
-       native_write_cr4(cr4);
+       if (static_cpu_has(X86_FEATURE_INVPCID)) {
+               u64 descriptor[2];
+
+               descriptor[0] = PCID_KERNEL;
+               asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
+       } else {
+               unsigned long cr4;
+
+               cr4 = native_read_cr4();
+               /* clear PGE */
+               native_write_cr4(cr4 & ~X86_CR4_PGE);
+               /* write old PGE again and flush TLBs */
+               native_write_cr4(cr4);
+       }
 }
 
 static inline void __native_flush_tlb_global(void)
@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
 
 static inline void __native_flush_tlb_single(unsigned long addr)
 {
+       if (static_cpu_has(X86_FEATURE_INVPCID)) {
+               u64 descriptor[2];
+
+               descriptor[0] = PCID_KERNEL;
+               descriptor[1] = addr;
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+               if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
+                       if (addr < TASK_SIZE_MAX)
+                               descriptor[1] += pax_user_shadow_base;
+                       asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
+               }
+
+               descriptor[0] = PCID_USER;
+               descriptor[1] = addr;
+#endif
+
+               asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
+               return;
+       }
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+       if (static_cpu_has(X86_FEATURE_PCID)) {
+               unsigned int cpu = raw_get_cpu();
+
+               native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
+               asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+               native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
+               raw_put_cpu_no_resched();
+
+               if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
+                       addr += pax_user_shadow_base;
+       }
+#endif
+
        asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
 }
 
index 0d592e0a5b84fa3c3738ce8400e34df5bb552eb0..7430aad2de14448b78c86fb9098aa6cadb0ab1a3 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/compiler.h>
 #include <linux/thread_info.h>
 #include <linux/string.h>
+#include <linux/spinlock.h>
 #include <asm/asm.h>
 #include <asm/page.h>
 #include <asm/smap.h>
 
 #define get_ds()       (KERNEL_DS)
 #define get_fs()       (current_thread_info()->addr_limit)
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
+void __set_fs(mm_segment_t x);
+void set_fs(mm_segment_t x);
+#else
 #define set_fs(x)      (current_thread_info()->addr_limit = (x))
+#endif
 
 #define segment_eq(a, b)       ((a).seg == (b).seg)
 
@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
  * checks that the pointer is in the user space range - after calling
  * this function, memory access functions may still return -EFAULT.
  */
-#define access_ok(type, addr, size) \
-       likely(!__range_not_ok(addr, size, user_addr_max()))
+extern int _cond_resched(void);
+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
+#define access_ok(type, addr, size)                                    \
+({                                                                     \
+       unsigned long __size = size;                                    \
+       unsigned long __addr = (unsigned long)addr;                     \
+       bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
+       if (__ret_ao && __size) {                                       \
+               unsigned long __addr_ao = __addr & PAGE_MASK;           \
+               unsigned long __end_ao = __addr + __size - 1;           \
+               if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) {     \
+                       while (__addr_ao <= __end_ao) {                 \
+                               char __c_ao;                            \
+                               __addr_ao += PAGE_SIZE;                 \
+                               if (__size > PAGE_SIZE)                 \
+                                       _cond_resched();                \
+                               if (__get_user(__c_ao, (char __user *)__addr))  \
+                                       break;                          \
+                               if (type != VERIFY_WRITE) {             \
+                                       __addr = __addr_ao;             \
+                                       continue;                       \
+                               }                                       \
+                               if (__put_user(__c_ao, (char __user *)__addr))  \
+                                       break;                          \
+                               __addr = __addr_ao;                     \
+                       }                                               \
+               }                                                       \
+       }                                                               \
+       __ret_ao;                                                       \
+})
 
 /*
  * The exception table consists of pairs of addresses relative to the
@@ -176,10 +210,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
        register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);            \
        __chk_user_ptr(ptr);                                            \
        might_fault();                                                  \
+       pax_open_userland();                                            \
        asm volatile("call __get_user_%P3"                              \
                     : "=a" (__ret_gu), "=r" (__val_gu)                 \
                     : "0" (ptr), "i" (sizeof(*(ptr))));                \
        (x) = (__typeof__(*(ptr))) __val_gu;                            \
+       pax_close_userland();                                           \
        __ret_gu;                                                       \
 })
 
@@ -187,13 +223,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
        asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
                     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
 
-
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
+#define __copyuser_seg "gs;"
+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
+#else
+#define __copyuser_seg
+#define __COPYUSER_SET_ES
+#define __COPYUSER_RESTORE_ES
+#endif
 
 #ifdef CONFIG_X86_32
 #define __put_user_asm_u64(x, addr, err, errret)                       \
        asm volatile(ASM_STAC "\n"                                      \
-                    "1:        movl %%eax,0(%2)\n"                     \
-                    "2:        movl %%edx,4(%2)\n"                     \
+                    "1:        "__copyuser_seg"movl %%eax,0(%2)\n"     \
+                    "2:        "__copyuser_seg"movl %%edx,4(%2)\n"     \
                     "3: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "4:        movl %3,%0\n"                           \
@@ -206,8 +250,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 
 #define __put_user_asm_ex_u64(x, addr)                                 \
        asm volatile(ASM_STAC "\n"                                      \
-                    "1:        movl %%eax,0(%1)\n"                     \
-                    "2:        movl %%edx,4(%1)\n"                     \
+                    "1:        "__copyuser_seg"movl %%eax,0(%1)\n"     \
+                    "2:        "__copyuser_seg"movl %%edx,4(%1)\n"     \
                     "3: " ASM_CLAC "\n"                                \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     _ASM_EXTABLE_EX(2b, 3b)                            \
@@ -257,7 +301,8 @@ extern void __put_user_8(void);
        __typeof__(*(ptr)) __pu_val;                            \
        __chk_user_ptr(ptr);                                    \
        might_fault();                                          \
-       __pu_val = x;                                           \
+       __pu_val = (x);                                         \
+       pax_open_userland();                                    \
        switch (sizeof(*(ptr))) {                               \
        case 1:                                                 \
                __put_user_x(1, __pu_val, ptr, __ret_pu);       \
@@ -275,6 +320,7 @@ extern void __put_user_8(void);
                __put_user_x(X, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        }                                                       \
+       pax_close_userland();                                   \
        __ret_pu;                                               \
 })
 
@@ -355,8 +401,10 @@ do {                                                                       \
 } while (0)
 
 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)      \
+do {                                                                   \
+       pax_open_userland();                                            \
        asm volatile(ASM_STAC "\n"                                      \
-                    "1:        mov"itype" %2,%"rtype"1\n"              \
+                    "1:        "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
                     "2: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
@@ -364,8 +412,10 @@ do {                                                                       \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
-                    : "=r" (err), ltype(x)                             \
-                    : "m" (__m(addr)), "i" (errret), "0" (err))
+                    : "=r" (err), ltype (x)                            \
+                    : "m" (__m(addr)), "i" (errret), "0" (err));       \
+       pax_close_userland();                                           \
+} while (0)
 
 #define __get_user_size_ex(x, ptr, size)                               \
 do {                                                                   \
@@ -389,7 +439,7 @@ do {                                                                        \
 } while (0)
 
 #define __get_user_asm_ex(x, addr, itype, rtype, ltype)                        \
-       asm volatile("1:        mov"itype" %1,%"rtype"0\n"              \
+       asm volatile("1:        "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
                     "2:\n"                                             \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     : ltype(x) : "m" (__m(addr)))
@@ -406,13 +456,24 @@ do {                                                                      \
        int __gu_err;                                                   \
        unsigned long __gu_val;                                         \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
-       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
+       (x) = (__typeof__(*(ptr)))__gu_val;                             \
        __gu_err;                                                       \
 })
 
 /* FIXME: this hack is definitely wrong -AK */
 struct __large_struct { unsigned long buf[100]; };
-#define __m(x) (*(struct __large_struct __user *)(x))
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+#define ____m(x)                                       \
+({                                                     \
+       unsigned long ____x = (unsigned long)(x);       \
+       if (____x < pax_user_shadow_base)               \
+               ____x += pax_user_shadow_base;          \
+       (typeof(x))____x;                               \
+})
+#else
+#define ____m(x) (x)
+#endif
+#define __m(x) (*(struct __large_struct __user *)____m(x))
 
 /*
  * Tell gcc we read from memory instead of writing: this is because
@@ -420,8 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
  * aliasing issues.
  */
 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)      \
+do {                                                                   \
+       pax_open_userland();                                            \
        asm volatile(ASM_STAC "\n"                                      \
-                    "1:        mov"itype" %"rtype"1,%2\n"              \
+                    "1:        "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
                     "2: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
@@ -429,10 +492,12 @@ struct __large_struct { unsigned long buf[100]; };
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r"(err)                                        \
-                    : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
+                    : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
+       pax_close_userland();                                           \
+} while (0)
 
 #define __put_user_asm_ex(x, addr, itype, rtype, ltype)                        \
-       asm volatile("1:        mov"itype" %"rtype"0,%1\n"              \
+       asm volatile("1:        "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
                     "2:\n"                                             \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     : : ltype(x), "m" (__m(addr)))
@@ -442,11 +507,13 @@ struct __large_struct { unsigned long buf[100]; };
  */
 #define uaccess_try    do {                                            \
        current_thread_info()->uaccess_err = 0;                         \
+       pax_open_userland();                                            \
        stac();                                                         \
        barrier();
 
 #define uaccess_catch(err)                                             \
        clac();                                                         \
+       pax_close_userland();                                           \
        (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);    \
 } while (0)
 
@@ -471,8 +538,12 @@ struct __large_struct { unsigned long buf[100]; };
  * On error, the variable @x is set to zero.
  */
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+#define __get_user(x, ptr)     get_user((x), (ptr))
+#else
 #define __get_user(x, ptr)                                             \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#endif
 
 /**
  * __put_user: - Write a simple value into user space, with less checking.
@@ -494,8 +565,12 @@ struct __large_struct { unsigned long buf[100]; };
  * Returns zero on success, or -EFAULT on error.
  */
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+#define __put_user(x, ptr)     put_user((x), (ptr))
+#else
 #define __put_user(x, ptr)                                             \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#endif
 
 #define __get_user_unaligned __get_user
 #define __put_user_unaligned __put_user
@@ -513,7 +588,7 @@ struct __large_struct { unsigned long buf[100]; };
 #define get_user_ex(x, ptr)    do {                                    \
        unsigned long __gue_val;                                        \
        __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));       \
-       (x) = (__force __typeof__(*(ptr)))__gue_val;                    \
+       (x) = (__typeof__(*(ptr)))__gue_val;                            \
 } while (0)
 
 #define put_user_try           uaccess_try
@@ -531,7 +606,7 @@ extern __must_check long strlen_user(const char __user *str);
 extern __must_check long strnlen_user(const char __user *str, long n);
 
 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
 
 extern void __cmpxchg_wrong_size(void)
        __compiletime_error("Bad argument size for cmpxchg");
@@ -542,18 +617,19 @@ extern void __cmpxchg_wrong_size(void)
        __typeof__(ptr) __uval = (uval);                                \
        __typeof__(*(ptr)) __old = (old);                               \
        __typeof__(*(ptr)) __new = (new);                               \
+       pax_open_userland();                                            \
        switch (size) {                                                 \
        case 1:                                                         \
        {                                                               \
                asm volatile("\t" ASM_STAC "\n"                         \
-                       "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"          \
+                       "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
                        "2:\t" ASM_CLAC "\n"                            \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
-                       : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
+                       : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
                        : "i" (-EFAULT), "q" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
@@ -562,14 +638,14 @@ extern void __cmpxchg_wrong_size(void)
        case 2:                                                         \
        {                                                               \
                asm volatile("\t" ASM_STAC "\n"                         \
-                       "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"          \
+                       "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
                        "2:\t" ASM_CLAC "\n"                            \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
-                       : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
+                       : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
@@ -578,14 +654,14 @@ extern void __cmpxchg_wrong_size(void)
        case 4:                                                         \
        {                                                               \
                asm volatile("\t" ASM_STAC "\n"                         \
-                       "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"          \
+                       "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
                        "2:\t" ASM_CLAC "\n"                            \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
-                       : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
+                       : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
@@ -597,14 +673,14 @@ extern void __cmpxchg_wrong_size(void)
                        __cmpxchg_wrong_size();                         \
                                                                        \
                asm volatile("\t" ASM_STAC "\n"                         \
-                       "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"          \
+                       "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
                        "2:\t" ASM_CLAC "\n"                            \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
-                       : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
+                       : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
@@ -613,6 +689,7 @@ extern void __cmpxchg_wrong_size(void)
        default:                                                        \
                __cmpxchg_wrong_size();                                 \
        }                                                               \
+       pax_close_userland();                                           \
        *__uval = __old;                                                \
        __ret;                                                          \
 })
@@ -636,17 +713,6 @@ extern struct movsl_mask {
 
 #define ARCH_HAS_NOCACHE_UACCESS 1
 
-#ifdef CONFIG_X86_32
-# include <asm/uaccess_32.h>
-#else
-# include <asm/uaccess_64.h>
-#endif
-
-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
-                                          unsigned n);
-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
-                                        unsigned n);
-
 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
 # define copy_user_diag __compiletime_error
 #else
@@ -656,7 +722,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
 extern void copy_user_diag("copy_from_user() buffer size is too small")
 copy_from_user_overflow(void);
 extern void copy_user_diag("copy_to_user() buffer size is too small")
-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
+copy_to_user_overflow(void);
 
 #undef copy_user_diag
 
@@ -669,7 +735,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
 
 extern void
 __compiletime_warning("copy_to_user() buffer size is not provably correct")
-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
 
 #else
@@ -684,10 +750,16 @@ __copy_from_user_overflow(int size, unsigned long count)
 
 #endif
 
+#ifdef CONFIG_X86_32
+# include <asm/uaccess_32.h>
+#else
+# include <asm/uaccess_64.h>
+#endif
+
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       int sz = __compiletime_object_size(to);
+       size_t sz = __compiletime_object_size(to);
 
        might_fault();
 
@@ -709,12 +781,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
         * case, and do only runtime checking for non-constant sizes.
         */
 
-       if (likely(sz < 0 || sz >= n))
-               n = _copy_from_user(to, from, n);
-       else if(__builtin_constant_p(n))
-               copy_from_user_overflow();
-       else
-               __copy_from_user_overflow(sz, n);
+       if (likely(sz != (size_t)-1  && sz < n)) {
+                if(__builtin_constant_p(n))
+                       copy_from_user_overflow();
+               else
+                       __copy_from_user_overflow(sz, n);
+       } else if (access_ok(VERIFY_READ, from, n))
+               n = __copy_from_user(to, from, n);
+       else if ((long)n > 0)
+               memset(to, 0, n);
 
        return n;
 }
@@ -722,17 +797,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-       int sz = __compiletime_object_size(from);
+       size_t sz = __compiletime_object_size(from);
 
        might_fault();
 
        /* See the comment in copy_from_user() above. */
-       if (likely(sz < 0 || sz >= n))
-               n = _copy_to_user(to, from, n);
-       else if(__builtin_constant_p(n))
-               copy_to_user_overflow();
-       else
-               __copy_to_user_overflow(sz, n);
+       if (likely(sz != (size_t)-1  && sz < n)) {
+                if(__builtin_constant_p(n))
+                       copy_to_user_overflow();
+               else
+                       __copy_to_user_overflow(sz, n);
+       } else if (access_ok(VERIFY_WRITE, to, n))
+               n = __copy_to_user(to, from, n);
 
        return n;
 }
index 3c03a5de64d30c01c1408953bf75c4970f54bcb1..edb68ae1efd4868b0a1099373a8720066e68f90e 100644 (file)
@@ -40,9 +40,14 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
  * anything, so this is accurate.
  */
 
-static __always_inline unsigned long __must_check
+static __always_inline __size_overflow(3) unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
+       if ((long)n < 0)
+               return n;
+
+       check_object_size(from, n, true);
+
        if (__builtin_constant_p(n)) {
                unsigned long ret;
 
@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
        might_fault();
+
        return __copy_to_user_inatomic(to, from, n);
 }
 
-static __always_inline unsigned long
+static __always_inline __size_overflow(3) unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
+       if ((long)n < 0)
+               return n;
+
        /* Avoid zeroing the tail if the copy fails..
         * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
         * but as the zeroing behaviour is only significant when n is not
@@ -137,6 +146,12 @@ static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        might_fault();
+
+       if ((long)n < 0)
+               return n;
+
+       check_object_size(to, n, false);
+
        if (__builtin_constant_p(n)) {
                unsigned long ret;
 
@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
                                const void __user *from, unsigned long n)
 {
        might_fault();
+
+       if ((long)n < 0)
+               return n;
+
        if (__builtin_constant_p(n)) {
                unsigned long ret;
 
@@ -181,7 +200,10 @@ static __always_inline unsigned long
 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
                                  unsigned long n)
 {
-       return __copy_from_user_ll_nocache_nozero(to, from, n);
+       if ((long)n < 0)
+               return n;
+
+       return __copy_from_user_ll_nocache_nozero(to, from, n);
 }
 
 #endif /* _ASM_X86_UACCESS_32_H */
index 12a26b979bf163008ebd1dc5b7cdff3ca5876f0b..c36fff5cdb38282a5128e0aad1c9463b67d9879b 100644 (file)
@@ -10,6 +10,9 @@
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
 #include <asm/page.h>
+#include <asm/pgtable.h>
+
+#define set_fs(x)      (current_thread_info()->addr_limit = (x))
 
 /*
  * Copy To/From Userspace
@@ -23,8 +26,8 @@ copy_user_generic_string(void *to, const void *from, unsigned len);
 __must_check unsigned long
 copy_user_generic_unrolled(void *to, const void *from, unsigned len);
 
-static __always_inline __must_check unsigned long
-copy_user_generic(void *to, const void *from, unsigned len)
+static __always_inline __must_check __size_overflow(3) unsigned long
+copy_user_generic(void *to, const void *from, unsigned long len)
 {
        unsigned ret;
 
@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
 }
 
 __must_check unsigned long
-copy_in_user(void __user *to, const void __user *from, unsigned len);
+copy_in_user(void __user *to, const void __user *from, unsigned long len);
 
 static __always_inline __must_check
-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
 {
-       int ret = 0;
+       size_t sz = __compiletime_object_size(dst);
+       unsigned ret = 0;
+
+       if (size > INT_MAX)
+               return size;
+
+       check_object_size(dst, size, false);
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       if (!access_ok_noprefault(VERIFY_READ, src, size))
+               return size;
+#endif
+
+       if (unlikely(sz != (size_t)-1 && sz < size)) {
+                if(__builtin_constant_p(size))
+                       copy_from_user_overflow();
+               else
+                       __copy_from_user_overflow(sz, size);
+               return size;
+       }
 
        if (!__builtin_constant_p(size))
-               return copy_user_generic(dst, (__force void *)src, size);
+               return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
        switch (size) {
-       case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
+       case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
                              ret, "b", "b", "=q", 1);
                return ret;
-       case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
+       case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
                              ret, "w", "w", "=r", 2);
                return ret;
-       case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
+       case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
                              ret, "l", "k", "=r", 4);
                return ret;
-       case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+       case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
                              ret, "q", "", "=r", 8);
                return ret;
        case 10:
-               __get_user_asm(*(u64 *)dst, (u64 __user *)src,
+               __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
                               ret, "q", "", "=r", 10);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u16 *)(8 + (char *)dst),
-                              (u16 __user *)(8 + (char __user *)src),
+                              (const u16 __user *)(8 + (const char __user *)src),
                               ret, "w", "w", "=r", 2);
                return ret;
        case 16:
-               __get_user_asm(*(u64 *)dst, (u64 __user *)src,
+               __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u64 *)(8 + (char *)dst),
-                              (u64 __user *)(8 + (char __user *)src),
+                              (const u64 __user *)(8 + (const char __user *)src),
                               ret, "q", "", "=r", 8);
                return ret;
        default:
-               return copy_user_generic(dst, (__force void *)src, size);
+               return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
        }
 }
 
 static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
 {
        might_fault();
        return __copy_from_user_nocheck(dst, src, size);
 }
 
 static __always_inline __must_check
-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
 {
-       int ret = 0;
+       size_t sz = __compiletime_object_size(src);
+       unsigned ret = 0;
+
+       if (size > INT_MAX)
+               return size;
+
+       check_object_size(src, size, true);
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
+               return size;
+#endif
+
+       if (unlikely(sz != (size_t)-1 && sz < size)) {
+                if(__builtin_constant_p(size))
+                       copy_to_user_overflow();
+               else
+                       __copy_to_user_overflow(sz, size);
+               return size;
+       }
 
        if (!__builtin_constant_p(size))
-               return copy_user_generic((__force void *)dst, src, size);
+               return copy_user_generic((__force_kernel void *)____m(dst), src, size);
        switch (size) {
-       case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
+       case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
                              ret, "b", "b", "iq", 1);
                return ret;
-       case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
+       case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
                              ret, "w", "w", "ir", 2);
                return ret;
-       case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
+       case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
                              ret, "l", "k", "ir", 4);
                return ret;
-       case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
+       case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
                              ret, "q", "", "er", 8);
                return ret;
        case 10:
-               __put_user_asm(*(u64 *)src, (u64 __user *)dst,
+               __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 10);
                if (unlikely(ret))
                        return ret;
                asm("":::"memory");
-               __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
+               __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
                               ret, "w", "w", "ir", 2);
                return ret;
        case 16:
-               __put_user_asm(*(u64 *)src, (u64 __user *)dst,
+               __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 16);
                if (unlikely(ret))
                        return ret;
                asm("":::"memory");
-               __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
+               __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
                               ret, "q", "", "er", 8);
                return ret;
        default:
-               return copy_user_generic((__force void *)dst, src, size);
+               return copy_user_generic((__force_kernel void *)____m(dst), src, size);
        }
 }
 
 static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
 {
        might_fault();
        return __copy_to_user_nocheck(dst, src, size);
 }
 
 static __always_inline __must_check
-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
-       int ret = 0;
+       unsigned ret = 0;
 
        might_fault();
+
+       if (size > INT_MAX)
+               return size;
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       if (!access_ok_noprefault(VERIFY_READ, src, size))
+               return size;
+       if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
+               return size;
+#endif
+
        if (!__builtin_constant_p(size))
-               return copy_user_generic((__force void *)dst,
-                                        (__force void *)src, size);
+               return copy_user_generic((__force_kernel void *)____m(dst),
+                                        (__force_kernel const void *)____m(src), size);
        switch (size) {
        case 1: {
                u8 tmp;
-               __get_user_asm(tmp, (u8 __user *)src,
+               __get_user_asm(tmp, (const u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                if (likely(!ret))
                        __put_user_asm(tmp, (u8 __user *)dst,
@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
        }
        case 2: {
                u16 tmp;
-               __get_user_asm(tmp, (u16 __user *)src,
+               __get_user_asm(tmp, (const u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                if (likely(!ret))
                        __put_user_asm(tmp, (u16 __user *)dst,
@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 
        case 4: {
                u32 tmp;
-               __get_user_asm(tmp, (u32 __user *)src,
+               __get_user_asm(tmp, (const u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                if (likely(!ret))
                        __put_user_asm(tmp, (u32 __user *)dst,
@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
        }
        case 8: {
                u64 tmp;
-               __get_user_asm(tmp, (u64 __user *)src,
+               __get_user_asm(tmp, (const u64 __user *)src,
                               ret, "q", "", "=r", 8);
                if (likely(!ret))
                        __put_user_asm(tmp, (u64 __user *)dst,
@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
                return ret;
        }
        default:
-               return copy_user_generic((__force void *)dst,
-                                        (__force void *)src, size);
+               return copy_user_generic((__force_kernel void *)____m(dst),
+                                        (__force_kernel const void *)____m(src), size);
        }
 }
 
-static __must_check __always_inline int
-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
+static __must_check __always_inline unsigned long
+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
 {
        return __copy_from_user_nocheck(dst, src, size);
 }
 
-static __must_check __always_inline int
-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
+static __must_check __always_inline unsigned long
+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
 {
        return __copy_to_user_nocheck(dst, src, size);
 }
 
-extern long __copy_user_nocache(void *dst, const void __user *src,
-                               unsigned size, int zerorest);
+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
+                               unsigned long size, int zerorest);
 
-static inline int
-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
+static inline unsigned long
+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
 {
        might_fault();
+
+       if (size > INT_MAX)
+               return size;
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       if (!access_ok_noprefault(VERIFY_READ, src, size))
+               return size;
+#endif
+
        return __copy_user_nocache(dst, src, size, 1);
 }
 
-static inline int
+static inline unsigned long
 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
-                                 unsigned size)
+                                 unsigned long size)
 {
+       if (size > INT_MAX)
+               return size;
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       if (!access_ok_noprefault(VERIFY_READ, src, size))
+               return size;
+#endif
+
        return __copy_user_nocache(dst, src, size, 0);
 }
 
 unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
 
 #endif /* _ASM_X86_UACCESS_64_H */
index 5b238981542a2fa5b84134d65bf630b5a580fca5..77fdd78ba467f141642a8c4552cd23d209a6f7dc 100644 (file)
@@ -11,7 +11,7 @@
  * and shift, for example.
  */
 struct word_at_a_time {
-       const unsigned long one_bits, high_bits;
+       unsigned long one_bits, high_bits;
 };
 
 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
index f58a9c7a3c86658d6094be935fff23b50f785cc5..dc378042acb3175ac867ef92785bbaeecb58b971 100644 (file)
@@ -129,7 +129,7 @@ struct x86_init_ops {
        struct x86_init_timers          timers;
        struct x86_init_iommu           iommu;
        struct x86_init_pci             pci;
-};
+} __no_const;
 
 /**
  * struct x86_cpuinit_ops - platform specific cpu hotplug setups
@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
        void (*setup_percpu_clockev)(void);
        void (*early_percpu_clock_init)(void);
        void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
-};
+} __no_const;
 
 struct timespec;
 
@@ -168,7 +168,7 @@ struct x86_platform_ops {
        void (*save_sched_clock_state)(void);
        void (*restore_sched_clock_state)(void);
        void (*apic_post_init)(void);
-};
+} __no_const;
 
 struct pci_dev;
 struct msi_msg;
@@ -182,7 +182,7 @@ struct x86_msi_ops {
        void (*teardown_msi_irqs)(struct pci_dev *dev);
        void (*restore_msi_irqs)(struct pci_dev *dev);
        int  (*setup_hpet_msi)(unsigned int irq, unsigned int id);
-};
+} __no_const;
 
 struct IO_APIC_route_entry;
 struct io_apic_irq_attr;
@@ -203,7 +203,7 @@ struct x86_io_apic_ops {
                                       unsigned int destination, int vector,
                                       struct io_apic_irq_attr *attr);
        void            (*eoi_ioapic_pin)(int apic, int pin, int vector);
-};
+} __no_const;
 
 extern struct x86_init_ops x86_init;
 extern struct x86_cpuinit_ops x86_cpuinit;
index 5eea09915a158cdbf7fc24570010e3c211e050ec..ff7ef8d289671157724cd04acce45afa2ab4a236 100644 (file)
@@ -83,7 +83,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
  * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
  *   cases needing an extended handling.
  */
-static inline unsigned long __pfn_to_mfn(unsigned long pfn)
+static inline unsigned long __intentional_overflow(-1) __pfn_to_mfn(unsigned long pfn)
 {
        unsigned long mfn;
 
index c9a6d68b8d623b84d169f61c4680c194ea8d137a..cb57f42941153841699afdc84844486117462cd7 100644 (file)
@@ -223,12 +223,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
        if (unlikely(err))
                return -EFAULT;
 
+       pax_open_userland();
        __asm__ __volatile__(ASM_STAC "\n"
-                            "1:"XSAVE"\n"
+                            "1:"
+                            __copyuser_seg
+                            XSAVE"\n"
                             "2: " ASM_CLAC "\n"
                             xstate_fault
                             : "D" (buf), "a" (-1), "d" (-1), "0" (0)
                             : "memory");
+       pax_close_userland();
        return err;
 }
 
@@ -238,16 +242,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
 {
        int err = 0;
-       struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
+       struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
        u32 lmask = mask;
        u32 hmask = mask >> 32;
 
+       pax_open_userland();
        __asm__ __volatile__(ASM_STAC "\n"
-                            "1:"XRSTOR"\n"
+                            "1:"
+                            __copyuser_seg
+                            XRSTOR"\n"
                             "2: " ASM_CLAC "\n"
                             xstate_fault
                             : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
                             : "memory");       /* memory required? */
+       pax_close_userland();
        return err;
 }
 
index d993e33f523654cf5a985481d62e60810830f66b..8db1b187f82cd8eeec4e18ecc35fbebf7995eb99 100644 (file)
@@ -58,7 +58,7 @@ struct e820map {
 #define ISA_START_ADDRESS      0xa0000
 #define ISA_END_ADDRESS                0x100000
 
-#define BIOS_BEGIN             0x000a0000
+#define BIOS_BEGIN             0x000c0000
 #define BIOS_END               0x00100000
 
 #define BIOS_ROM_BASE          0xffe00000
index 7b0a55a8885115386f40a4207838e60ad66abc21..ad115bf779f36cde8e94af720187b52f7509d6e5 100644 (file)
@@ -49,7 +49,6 @@
 #define EFLAGS 144
 #define RSP 152
 #define SS 160
-#define ARGOFFSET R11
 #endif /* __ASSEMBLY__ */
 
 /* top of stack page */
index 5d4502c8b9835cb757d832b2a8884bffd65a5953..a567e09d229975318f3a2f4bc996044808600f57 100644 (file)
@@ -24,7 +24,7 @@ obj-y                 += time.o ioport.o ldt.o dumpstack.o nmi.o
 obj-y                  += setup.o x86_init.o i8259.o irqinit.o jump_label.o
 obj-$(CONFIG_IRQ_WORK)  += irq_work.o
 obj-y                  += probe_roms.o
-obj-$(CONFIG_X86_32)   += i386_ksyms_32.o
+obj-$(CONFIG_X86_32)   += sys_i386_32.o i386_ksyms_32.o
 obj-$(CONFIG_X86_64)   += sys_x86_64.o x8664_ksyms_64.o
 obj-$(CONFIG_X86_64)   += mcount_64.o
 obj-y                  += syscall_$(BITS).o vsyscall_gtod.o
index b5ddc9649227165f90860e4eb707c8e04ad6a5c0..490b4e4528b4fee28c4d768abfb2a257ab9f7dcf 100644 (file)
@@ -1351,7 +1351,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
  * If your system is blacklisted here, but you find that acpi=force
  * works for you, please contact linux-acpi@vger.kernel.org
  */
-static struct dmi_system_id __initdata acpi_dmi_table[] = {
+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
        /*
         * Boxes that need ACPI disabled
         */
@@ -1426,7 +1426,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
 };
 
 /* second table for DMI checks that should run after early-quirks */
-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
        /*
         * HP laptops which use a DSDT reporting as HP/SB400/10000,
         * which includes some code which overrides all temperature
index 31368207837c2fbcd93f73d94ccf9c2d572d5cf5..e2c6577eb88258e55c920869393f736466946a88 100644 (file)
@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
 #else /* CONFIG_64BIT */
 #ifdef CONFIG_SMP
        stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
+
+       pax_open_kernel();
        early_gdt_descr.address =
                        (unsigned long)get_cpu_gdt_table(smp_processor_id());
+       pax_close_kernel();
+
        initial_gs = per_cpu_offset(smp_processor_id());
 #endif
        initial_code = (unsigned long)wakeup_long64;
index 665c6b7d2ea93c25740a4b18043d5602d33d9711..eae4d56b540aa0949dc20a2b816963005949c2b5 100644 (file)
@@ -29,13 +29,11 @@ wakeup_pmode_return:
        # and restore the stack ... but you need gdt for this to work
        movl    saved_context_esp, %esp
 
-       movl    %cs:saved_magic, %eax
-       cmpl    $0x12345678, %eax
+       cmpl    $0x12345678, saved_magic
        jne     bogus_magic
 
        # jump to place where we left off
-       movl    saved_eip, %eax
-       jmp     *%eax
+       jmp     *(saved_eip)
 
 bogus_magic:
        jmp     bogus_magic
index 703130f469ecf71978b9d67bf0fb50da9b31cbc9..27a155d43cf89da0d111c68ac172b99ec6c83c41 100644 (file)
@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
         */
        for (a = start; a < end; a++) {
                instr = (u8 *)&a->instr_offset + a->instr_offset;
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+               instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+               if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
+                       instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+#endif
+
                replacement = (u8 *)&a->repl_offset + a->repl_offset;
                BUG_ON(a->replacementlen > a->instrlen);
                BUG_ON(a->instrlen > sizeof(insnbuf));
@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
                add_nops(insnbuf + a->replacementlen,
                         a->instrlen - a->replacementlen);
 
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+               if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
+                       instr = ktva_ktla(instr);
+#endif
+
                text_poke_early(instr, insnbuf, a->instrlen);
        }
 }
@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
        for (poff = start; poff < end; poff++) {
                u8 *ptr = (u8 *)poff + *poff;
 
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+               ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+               if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
+                       ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+#endif
+
                if (!*poff || ptr < text || ptr >= text_end)
                        continue;
                /* turn DS segment override prefix into lock prefix */
-               if (*ptr == 0x3e)
+               if (*ktla_ktva(ptr) == 0x3e)
                        text_poke(ptr, ((unsigned char []){0xf0}), 1);
        }
        mutex_unlock(&text_mutex);
@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
        for (poff = start; poff < end; poff++) {
                u8 *ptr = (u8 *)poff + *poff;
 
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+               ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+               if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
+                       ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+#endif
+
                if (!*poff || ptr < text || ptr >= text_end)
                        continue;
                /* turn lock prefix into DS segment override prefix */
-               if (*ptr == 0xf0)
+               if (*ktla_ktva(ptr) == 0xf0)
                        text_poke(ptr, ((unsigned char []){0x3E}), 1);
        }
        mutex_unlock(&text_mutex);
@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
 
                BUG_ON(p->len > MAX_PATCH_LEN);
                /* prep the buffer with the original instructions */
-               memcpy(insnbuf, p->instr, p->len);
+               memcpy(insnbuf, ktla_ktva(p->instr), p->len);
                used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
                                         (unsigned long)p->instr, p->len);
 
@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
        if (!uniproc_patched || num_possible_cpus() == 1)
                free_init_pages("SMP alternatives",
                                (unsigned long)__smp_locks,
-                               (unsigned long)__smp_locks_end);
+                               PAGE_ALIGN((unsigned long)__smp_locks_end));
 #endif
 
        apply_paravirt(__parainstructions, __parainstructions_end);
@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
  * instructions. And on the local CPU you need to be protected again NMI or MCE
  * handlers seeing an inconsistent instruction while you patch.
  */
-void *__init_or_module text_poke_early(void *addr, const void *opcode,
+void *__kprobes text_poke_early(void *addr, const void *opcode,
                                              size_t len)
 {
        unsigned long flags;
        local_irq_save(flags);
-       memcpy(addr, opcode, len);
+
+       pax_open_kernel();
+       memcpy(ktla_ktva(addr), opcode, len);
        sync_core();
+       pax_close_kernel();
+
        local_irq_restore(flags);
        /* Could also do a CLFLUSH here to speed up CPU recovery; but
           that causes hangs on some VIA CPUs. */
@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
  */
 void *text_poke(void *addr, const void *opcode, size_t len)
 {
-       unsigned long flags;
-       char *vaddr;
+       unsigned char *vaddr = ktla_ktva(addr);
        struct page *pages[2];
-       int i;
+       size_t i;
 
        if (!core_kernel_text((unsigned long)addr)) {
-               pages[0] = vmalloc_to_page(addr);
-               pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
+               pages[0] = vmalloc_to_page(vaddr);
+               pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
        } else {
-               pages[0] = virt_to_page(addr);
+               pages[0] = virt_to_page(vaddr);
                WARN_ON(!PageReserved(pages[0]));
-               pages[1] = virt_to_page(addr + PAGE_SIZE);
+               pages[1] = virt_to_page(vaddr + PAGE_SIZE);
        }
        BUG_ON(!pages[0]);
-       local_irq_save(flags);
-       set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
-       if (pages[1])
-               set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
-       vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
-       memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
-       clear_fixmap(FIX_TEXT_POKE0);
-       if (pages[1])
-               clear_fixmap(FIX_TEXT_POKE1);
-       local_flush_tlb();
-       sync_core();
-       /* Could also do a CLFLUSH here to speed up CPU recovery; but
-          that causes hangs on some VIA CPUs. */
+       text_poke_early(addr, opcode, len);
        for (i = 0; i < len; i++)
-               BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
-       local_irq_restore(flags);
+               BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
        return addr;
 }
 
@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
        if (likely(!bp_patching_in_progress))
                return 0;
 
-       if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
+       if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
                return 0;
 
        /* set up the specified breakpoint handler */
@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
  */
 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
 {
-       unsigned char int3 = 0xcc;
+       const unsigned char int3 = 0xcc;
 
        bp_int3_handler = handler;
        bp_int3_addr = (u8 *)addr + sizeof(int3);
index 29b5b18afa27dca80d384fade47906299a581e8f..3bdfc298741a8318fde91ec8a958dcd3174a2985 100644 (file)
@@ -201,7 +201,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
 /*
  * Debug level, exported for io_apic.c
  */
-unsigned int apic_verbosity;
+int apic_verbosity;
 
 int pic_mode;
 
@@ -1991,7 +1991,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
                apic_write(APIC_ESR, 0);
        v = apic_read(APIC_ESR);
        ack_APIC_irq();
-       atomic_inc(&irq_err_count);
+       atomic_inc_unchecked(&irq_err_count);
 
        apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
                    smp_processor_id(), v);
index de918c410eaed69863e9b95d26566c638d388c45..32eed23e93e05be6ca23bc30df19cc38dc3934ff 100644 (file)
@@ -154,7 +154,7 @@ static int flat_probe(void)
        return 1;
 }
 
-static struct apic apic_flat =  {
+static struct apic apic_flat __read_only =  {
        .name                           = "flat",
        .probe                          = flat_probe,
        .acpi_madt_oem_check            = flat_acpi_madt_oem_check,
@@ -260,7 +260,7 @@ static int physflat_probe(void)
        return 0;
 }
 
-static struct apic apic_physflat =  {
+static struct apic apic_physflat __read_only =  {
 
        .name                           = "physical flat",
        .probe                          = physflat_probe,
index b205cdbdbe6a522e51bedc05f2bf86c39c141f66..d8503ff40d5aa98eae3e0e8d1afa1fbe6b6a9fad 100644 (file)
@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
        WARN_ON_ONCE(cpu_has_apic && !disable_apic);
 }
 
-struct apic apic_noop = {
+struct apic apic_noop __read_only = {
        .name                           = "noop",
        .probe                          = noop_probe,
        .acpi_madt_oem_check            = NULL,
index c4a8d63f8220cf09880d7b3e8a6ad3f42e7a6a25..fe893ac456185c6b462a8c4a11b9ae60d2679c87 100644 (file)
@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
        return dmi_bigsmp;
 }
 
-static struct apic apic_bigsmp = {
+static struct apic apic_bigsmp __read_only = {
 
        .name                           = "bigsmp",
        .probe                          = probe_bigsmp,
index 3f5f60406ab17659e294e725fbe18655c94048ac..309c0e6aed8e7efd4050cf6f87b846384b8ae8a4 100644 (file)
@@ -1859,7 +1859,7 @@ int native_ioapic_set_affinity(struct irq_data *data,
        return ret;
 }
 
-atomic_t irq_mis_count;
+atomic_unchecked_t irq_mis_count;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
@@ -2000,7 +2000,7 @@ static void ack_ioapic_level(struct irq_data *data)
         * at the cpu.
         */
        if (!(v & (1 << (i & 0x1f)))) {
-               atomic_inc(&irq_mis_count);
+               atomic_inc_unchecked(&irq_mis_count);
 
                eoi_ioapic_irq(irq, cfg);
        }
index bda488680dbc1a42cdfe4fb175aa6dae73cb612d..f9c71954656ade822fd64c3740ae5d7ba29bcd28 100644 (file)
@@ -72,7 +72,7 @@ static int probe_default(void)
        return 1;
 }
 
-static struct apic apic_default = {
+static struct apic apic_default __read_only = {
 
        .name                           = "default",
        .probe                          = probe_default,
index 6cedd79145813cc792c891573ac03b3e18d9e529..023ff8e9a2e5e61d774aba4f8a74d22438b41796 100644 (file)
@@ -21,7 +21,7 @@
 
 static DEFINE_RAW_SPINLOCK(vector_lock);
 
-void lock_vector_lock(void)
+void lock_vector_lock(void) __acquires(vector_lock)
 {
        /* Used to the online set of cpus does not change
         * during assign_irq_vector.
@@ -29,7 +29,7 @@ void lock_vector_lock(void)
        raw_spin_lock(&vector_lock);
 }
 
-void unlock_vector_lock(void)
+void unlock_vector_lock(void) __releases(vector_lock)
 {
        raw_spin_unlock(&vector_lock);
 }
index e658f21681c82e1ad8fa28bde084c7933939548b..b695a1a11a5ab9c0689f10b81bdfaa239e04ff8e 100644 (file)
@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
        return notifier_from_errno(err);
 }
 
-static struct notifier_block __refdata x2apic_cpu_notifier = {
+static struct notifier_block x2apic_cpu_notifier = {
        .notifier_call = update_clusterinfo,
 };
 
@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
                cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
 }
 
-static struct apic apic_x2apic_cluster = {
+static struct apic apic_x2apic_cluster __read_only = {
 
        .name                           = "cluster x2apic",
        .probe                          = x2apic_cluster_probe,
index 6fae733e9194893dc761ccd06839df81232c7427..5ca17afbfb1e463ebe5f3335297476e4e5218f71 100644 (file)
@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
        return apic == &apic_x2apic_phys;
 }
 
-static struct apic apic_x2apic_phys = {
+static struct apic apic_x2apic_phys __read_only = {
 
        .name                           = "physical x2apic",
        .probe                          = x2apic_phys_probe,
index 8e9dcfd630e4b539e7936050b7bb1673660ae752..c61b3e45652ab276dbc2ffa6dffb5690f25a6d2f 100644 (file)
@@ -348,7 +348,7 @@ static int uv_probe(void)
        return apic == &apic_x2apic_uv_x;
 }
 
-static struct apic __refdata apic_x2apic_uv_x = {
+static struct apic apic_x2apic_uv_x __read_only = {
 
        .name                           = "UV large system",
        .probe                          = uv_probe,
index 927ec923594798bda4d5a4dff74cfbe577ad540f..0dc3bd4753f54d0a0a979af26df5486b2298a044 100644 (file)
@@ -432,7 +432,7 @@ static DEFINE_MUTEX(apm_mutex);
  * This is for buggy BIOS's that refer to (real mode) segment 0x40
  * even though they are called in protected mode.
  */
-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
                        (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
 
 static const char driver_version[] = "1.16ac"; /* no spaces */
@@ -610,7 +610,10 @@ static long __apm_bios_call(void *_call)
        BUG_ON(cpu != 0);
        gdt = get_cpu_gdt_table(cpu);
        save_desc_40 = gdt[0x40 / 8];
+
+       pax_open_kernel();
        gdt[0x40 / 8] = bad_bios_desc;
+       pax_close_kernel();
 
        apm_irq_save(flags);
        APM_DO_SAVE_SEGS;
@@ -619,7 +622,11 @@ static long __apm_bios_call(void *_call)
                          &call->esi);
        APM_DO_RESTORE_SEGS;
        apm_irq_restore(flags);
+
+       pax_open_kernel();
        gdt[0x40 / 8] = save_desc_40;
+       pax_close_kernel();
+
        put_cpu();
 
        return call->eax & 0xff;
@@ -686,7 +693,10 @@ static long __apm_bios_call_simple(void *_call)
        BUG_ON(cpu != 0);
        gdt = get_cpu_gdt_table(cpu);
        save_desc_40 = gdt[0x40 / 8];
+
+       pax_open_kernel();
        gdt[0x40 / 8] = bad_bios_desc;
+       pax_close_kernel();
 
        apm_irq_save(flags);
        APM_DO_SAVE_SEGS;
@@ -694,7 +704,11 @@ static long __apm_bios_call_simple(void *_call)
                                         &call->eax);
        APM_DO_RESTORE_SEGS;
        apm_irq_restore(flags);
+
+       pax_open_kernel();
        gdt[0x40 / 8] = save_desc_40;
+       pax_close_kernel();
+
        put_cpu();
        return error;
 }
@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
         * code to that CPU.
         */
        gdt = get_cpu_gdt_table(0);
+
+       pax_open_kernel();
        set_desc_base(&gdt[APM_CS >> 3],
                 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
        set_desc_base(&gdt[APM_CS_16 >> 3],
                 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
        set_desc_base(&gdt[APM_DS >> 3],
                 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
+       pax_close_kernel();
 
        proc_create("apm", 0, NULL, &apm_file_ops);
 
index 9f6b9341950f7895b247b1c320cc0f9cd75cda9d..cf5ffb3e0aeda966bd0aa12628294680adc9fd60 100644 (file)
@@ -32,6 +32,8 @@ void common(void) {
        OFFSET(TI_flags, thread_info, flags);
        OFFSET(TI_status, thread_info, status);
        OFFSET(TI_addr_limit, thread_info, addr_limit);
+       OFFSET(TI_lowest_stack, thread_info, lowest_stack);
+       DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
 
        BLANK();
        OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
@@ -52,8 +54,26 @@ void common(void) {
        OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
        OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
        OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
+
+#ifdef CONFIG_PAX_KERNEXEC
+       OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
+       OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
+#ifdef CONFIG_X86_64
+       OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
+#endif
 #endif
 
+#endif
+
+       BLANK();
+       DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
+       DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
+       DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
+
 #ifdef CONFIG_XEN
        BLANK();
        OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
index fdcbb4d27c9f80f35a587f4284fa2b958c44e774..036dd9325ef3eaacdc63f7a9c5eea947a7bff401 100644 (file)
@@ -80,6 +80,7 @@ int main(void)
        BLANK();
 #undef ENTRY
 
+       DEFINE(TSS_size, sizeof(struct tss_struct));
        OFFSET(TSS_ist, tss_struct, x86_tss.ist);
        BLANK();
 
index 80091ae54c2b0995ea56629a3f7e6969a484fb9b..0c5184fc59a98d111d615626cfcc5eb93ded95ff 100644 (file)
@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
 CFLAGS_REMOVE_perf_event.o = -pg
 endif
 
-# Make sure load_percpu_segment has no stackprotector
-nostackp := $(call cc-option, -fno-stack-protector)
-CFLAGS_common.o                := $(nostackp)
-
 obj-y                  := intel_cacheinfo.o scattered.o topology.o
 obj-y                  += common.o
 obj-y                  += rdrand.o
index 15c5df92f74ec84a1861ac521fb8f4a9d2895dea..d9a604a01e2d89f9d1a182e1885d6ac5aae7a3ad 100644 (file)
@@ -717,7 +717,7 @@ static void init_amd(struct cpuinfo_x86 *c)
 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
        /* AMD errata T13 (order #21922) */
-       if ((c->x86 == 6)) {
+       if (c->x86 == 6) {
                /* Duron Rev A0 */
                if (c->x86_model == 3 && c->x86_mask == 0)
                        size = 64;
index c6049650c093f79f849c4779d7800601311eb4d3..0b0e28a78b5276c63df6e119696d927bb3e13066 100644 (file)
@@ -90,60 +90,6 @@ static const struct cpu_dev default_cpu = {
 
 static const struct cpu_dev *this_cpu = &default_cpu;
 
-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
-#ifdef CONFIG_X86_64
-       /*
-        * We need valid kernel segments for data and code in long mode too
-        * IRET will check the segment types  kkeil 2000/10/28
-        * Also sysret mandates a special GDT layout
-        *
-        * TLS descriptors are currently at a different place compared to i386.
-        * Hopefully nobody expects them at a fixed place (Wine?)
-        */
-       [GDT_ENTRY_KERNEL32_CS]         = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
-       [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
-       [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
-       [GDT_ENTRY_DEFAULT_USER32_CS]   = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
-       [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
-       [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
-#else
-       [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
-       [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
-       [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
-       [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
-       /*
-        * Segments used for calling PnP BIOS have byte granularity.
-        * They code segments and data segments have fixed 64k limits,
-        * the transfer segment sizes are set at run time.
-        */
-       /* 32-bit code */
-       [GDT_ENTRY_PNPBIOS_CS32]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
-       /* 16-bit code */
-       [GDT_ENTRY_PNPBIOS_CS16]        = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
-       /* 16-bit data */
-       [GDT_ENTRY_PNPBIOS_DS]          = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
-       /* 16-bit data */
-       [GDT_ENTRY_PNPBIOS_TS1]         = GDT_ENTRY_INIT(0x0092, 0, 0),
-       /* 16-bit data */
-       [GDT_ENTRY_PNPBIOS_TS2]         = GDT_ENTRY_INIT(0x0092, 0, 0),
-       /*
-        * The APM segments have byte granularity and their bases
-        * are set at run time.  All have 64k limits.
-        */
-       /* 32-bit code */
-       [GDT_ENTRY_APMBIOS_BASE]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
-       /* 16-bit code */
-       [GDT_ENTRY_APMBIOS_BASE+1]      = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
-       /* data */
-       [GDT_ENTRY_APMBIOS_BASE+2]      = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
-
-       [GDT_ENTRY_ESPFIX_SS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
-       [GDT_ENTRY_PERCPU]              = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
-       GDT_STACK_CANARY_INIT
-#endif
-} };
-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
-
 static int __init x86_xsave_setup(char *s)
 {
        if (strlen(s))
@@ -305,6 +251,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
        }
 }
 
+#ifdef CONFIG_X86_64
+static __init int setup_disable_pcid(char *arg)
+{
+       setup_clear_cpu_cap(X86_FEATURE_PCID);
+       setup_clear_cpu_cap(X86_FEATURE_INVPCID);
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       if (clone_pgd_mask != ~(pgdval_t)0UL)
+               pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
+#endif
+
+       return 1;
+}
+__setup("nopcid", setup_disable_pcid);
+
+static void setup_pcid(struct cpuinfo_x86 *c)
+{
+       if (!cpu_has(c, X86_FEATURE_PCID)) {
+               clear_cpu_cap(c, X86_FEATURE_INVPCID);
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+               if (clone_pgd_mask != ~(pgdval_t)0UL) {
+                       pax_open_kernel();
+                       pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
+                       pax_close_kernel();
+                       printk("PAX: slow and weak UDEREF enabled\n");
+               } else
+                       printk("PAX: UDEREF disabled\n");
+#endif
+
+               return;
+       }
+
+       printk("PAX: PCID detected\n");
+       set_in_cr4(X86_CR4_PCIDE);
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       pax_open_kernel();
+       clone_pgd_mask = ~(pgdval_t)0UL;
+       pax_close_kernel();
+       if (pax_user_shadow_base)
+               printk("PAX: weak UDEREF enabled\n");
+       else {
+               set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
+               printk("PAX: strong UDEREF enabled\n");
+       }
+#endif
+
+       if (cpu_has(c, X86_FEATURE_INVPCID))
+               printk("PAX: INVPCID detected\n");
+}
+#endif
+
 /*
  * Some CPU features depend on higher CPUID levels, which may not always
  * be available due to CPUID level capping or broken virtualization
@@ -405,7 +404,7 @@ void switch_to_new_gdt(int cpu)
 {
        struct desc_ptr gdt_descr;
 
-       gdt_descr.address = (long)get_cpu_gdt_table(cpu);
+       gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);
        /* Reload the per-cpu base */
@@ -895,6 +894,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
        setup_smep(c);
        setup_smap(c);
 
+#ifdef CONFIG_X86_64
+       setup_pcid(c);
+#endif
+
        /*
         * The vendor-specific functions might have changed features.
         * Now we do "generic changes."
@@ -903,6 +906,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
        /* Filter out anything that depends on CPUID levels we don't have */
        filter_cpuid_features(c, true);
 
+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
+       setup_clear_cpu_cap(X86_FEATURE_SEP);
+#endif
+
        /* If the model name is still unset, do table lookup. */
        if (!c->x86_model_id[0]) {
                const char *p;
@@ -977,7 +984,7 @@ static void syscall32_cpu_init(void)
 void enable_sep_cpu(void)
 {
        int cpu = get_cpu();
-       struct tss_struct *tss = &per_cpu(init_tss, cpu);
+       struct tss_struct *tss = init_tss + cpu;
 
        if (!boot_cpu_has(X86_FEATURE_SEP)) {
                put_cpu();
@@ -1115,14 +1122,16 @@ static __init int setup_disablecpuid(char *arg)
 }
 __setup("clearcpuid=", setup_disablecpuid);
 
+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
+EXPORT_PER_CPU_SYMBOL(current_tinfo);
+
 DEFINE_PER_CPU(unsigned long, kernel_stack) =
-       (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+       (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
 EXPORT_PER_CPU_SYMBOL(kernel_stack);
 
 #ifdef CONFIG_X86_64
-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
-                                   (unsigned long) debug_idt_table };
+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
 
 DEFINE_PER_CPU_FIRST(union irq_stack_union,
                     irq_stack_union) __aligned(PAGE_SIZE) __visible;
@@ -1299,7 +1308,7 @@ void cpu_init(void)
         */
        load_ucode_ap();
 
-       t = &per_cpu(init_tss, cpu);
+       t = init_tss + cpu;
        oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
@@ -1331,7 +1340,6 @@ void cpu_init(void)
        wrmsrl(MSR_KERNEL_GS_BASE, 0);
        barrier();
 
-       x86_configure_nx();
        enable_x2apic();
 
        /*
@@ -1383,7 +1391,7 @@ void cpu_init(void)
 {
        int cpu = smp_processor_id();
        struct task_struct *curr = current;
-       struct tss_struct *t = &per_cpu(init_tss, cpu);
+       struct tss_struct *t = init_tss + cpu;
        struct thread_struct *thread = &curr->thread;
 
        wait_for_master_cpu(cpu);
index c7035073dfc17e28268237604bc3731fb78428aa..28535e3b8cd33d4315ad170c64873b8b7fe7afbc 100644 (file)
@@ -1026,6 +1026,22 @@ static struct attribute *default_attrs[] = {
 };
 
 #ifdef CONFIG_AMD_NB
+static struct attribute *default_attrs_amd_nb[] = {
+       &type.attr,
+       &level.attr,
+       &coherency_line_size.attr,
+       &physical_line_partition.attr,
+       &ways_of_associativity.attr,
+       &number_of_sets.attr,
+       &size.attr,
+       &shared_cpu_map.attr,
+       &shared_cpu_list.attr,
+       NULL,
+       NULL,
+       NULL,
+       NULL
+};
+
 static struct attribute **amd_l3_attrs(void)
 {
        static struct attribute **attrs;
@@ -1036,18 +1052,7 @@ static struct attribute **amd_l3_attrs(void)
 
        n = ARRAY_SIZE(default_attrs);
 
-       if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
-               n += 2;
-
-       if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
-               n += 1;
-
-       attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
-       if (attrs == NULL)
-               return attrs = default_attrs;
-
-       for (n = 0; default_attrs[n]; n++)
-               attrs[n] = default_attrs[n];
+       attrs = default_attrs_amd_nb;
 
        if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
                attrs[n++] = &cache_disable_0.attr;
@@ -1098,6 +1103,13 @@ static struct kobj_type ktype_cache = {
        .default_attrs  = default_attrs,
 };
 
+#ifdef CONFIG_AMD_NB
+static struct kobj_type ktype_cache_amd_nb = {
+       .sysfs_ops      = &sysfs_ops,
+       .default_attrs  = default_attrs_amd_nb,
+};
+#endif
+
 static struct kobj_type ktype_percpu_entry = {
        .sysfs_ops      = &sysfs_ops,
 };
@@ -1163,20 +1175,26 @@ static int cache_add_dev(struct device *dev)
                return retval;
        }
 
+#ifdef CONFIG_AMD_NB
+       amd_l3_attrs();
+#endif
+
        for (i = 0; i < num_cache_leaves; i++) {
+               struct kobj_type *ktype;
+
                this_object = INDEX_KOBJECT_PTR(cpu, i);
                this_object->cpu = cpu;
                this_object->index = i;
 
                this_leaf = CPUID4_INFO_IDX(cpu, i);
 
-               ktype_cache.default_attrs = default_attrs;
+               ktype = &ktype_cache;
 #ifdef CONFIG_AMD_NB
                if (this_leaf->base.nb)
-                       ktype_cache.default_attrs = amd_l3_attrs();
+                       ktype = &ktype_cache_amd_nb;
 #endif
                retval = kobject_init_and_add(&(this_object->kobj),
-                                             &ktype_cache,
+                                             ktype,
                                              per_cpu(ici_cache_kobject, cpu),
                                              "index%1lu", i);
                if (unlikely(retval)) {
index d2c611699cd9d2d49bfd1cee5b79c7fedf87ef71..62fd7aa6fd1b6835c2bb9a73448e074054dd9053 100644 (file)
@@ -45,6 +45,7 @@
 #include <asm/processor.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
+#include <asm/local.h>
 
 #include "mce-internal.h"
 
@@ -259,7 +260,7 @@ static void print_mce(struct mce *m)
                        !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
                                m->cs, m->ip);
 
-               if (m->cs == __KERNEL_CS)
+               if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
                        print_symbol("{%s}", m->ip);
                pr_cont("\n");
        }
@@ -292,10 +293,10 @@ static void print_mce(struct mce *m)
 
 #define PANIC_TIMEOUT 5 /* 5 seconds */
 
-static atomic_t mce_panicked;
+static atomic_unchecked_t mce_panicked;
 
 static int fake_panic;
-static atomic_t mce_fake_panicked;
+static atomic_unchecked_t mce_fake_panicked;
 
 /* Panic in progress. Enable interrupts and wait for final IPI */
 static void wait_for_panic(void)
@@ -319,7 +320,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
                /*
                 * Make sure only one CPU runs in machine check panic
                 */
-               if (atomic_inc_return(&mce_panicked) > 1)
+               if (atomic_inc_return_unchecked(&mce_panicked) > 1)
                        wait_for_panic();
                barrier();
 
@@ -327,7 +328,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
                console_verbose();
        } else {
                /* Don't log too much for fake panic */
-               if (atomic_inc_return(&mce_fake_panicked) > 1)
+               if (atomic_inc_return_unchecked(&mce_fake_panicked) > 1)
                        return;
        }
        /* First print corrected ones that are still unlogged */
@@ -366,7 +367,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
        if (!fake_panic) {
                if (panic_timeout == 0)
                        panic_timeout = mca_cfg.panic_timeout;
-               panic(msg);
+               panic("%s", msg);
        } else
                pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
 }
@@ -744,7 +745,7 @@ static int mce_timed_out(u64 *t)
         * might have been modified by someone else.
         */
        rmb();
-       if (atomic_read(&mce_panicked))
+       if (atomic_read_unchecked(&mce_panicked))
                wait_for_panic();
        if (!mca_cfg.monarch_timeout)
                goto out;
@@ -1722,7 +1723,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
 }
 
 /* Call the installed machine check handler for this CPU setup. */
-void (*machine_check_vector)(struct pt_regs *, long error_code) =
+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
                                                unexpected_machine_check;
 
 /*
@@ -1745,7 +1746,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
                return;
        }
 
+       pax_open_kernel();
        machine_check_vector = do_machine_check;
+       pax_close_kernel();
 
        __mcheck_cpu_init_generic();
        __mcheck_cpu_init_vendor(c);
@@ -1759,7 +1762,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
  */
 
 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
-static int mce_chrdev_open_count;      /* #times opened */
+static local_t mce_chrdev_open_count;  /* #times opened */
 static int mce_chrdev_open_exclu;      /* already open exclusive? */
 
 static int mce_chrdev_open(struct inode *inode, struct file *file)
@@ -1767,7 +1770,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
        spin_lock(&mce_chrdev_state_lock);
 
        if (mce_chrdev_open_exclu ||
-           (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
+           (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
                spin_unlock(&mce_chrdev_state_lock);
 
                return -EBUSY;
@@ -1775,7 +1778,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
 
        if (file->f_flags & O_EXCL)
                mce_chrdev_open_exclu = 1;
-       mce_chrdev_open_count++;
+       local_inc(&mce_chrdev_open_count);
 
        spin_unlock(&mce_chrdev_state_lock);
 
@@ -1786,7 +1789,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
 {
        spin_lock(&mce_chrdev_state_lock);
 
-       mce_chrdev_open_count--;
+       local_dec(&mce_chrdev_open_count);
        mce_chrdev_open_exclu = 0;
 
        spin_unlock(&mce_chrdev_state_lock);
@@ -2461,7 +2464,7 @@ static __init void mce_init_banks(void)
 
        for (i = 0; i < mca_cfg.banks; i++) {
                struct mce_bank *b = &mce_banks[i];
-               struct device_attribute *a = &b->attr;
+               device_attribute_no_const *a = &b->attr;
 
                sysfs_attr_init(&a->attr);
                a->attr.name    = b->attrname;
@@ -2568,7 +2571,7 @@ struct dentry *mce_get_debugfs_dir(void)
 static void mce_reset(void)
 {
        cpu_missing = 0;
-       atomic_set(&mce_fake_panicked, 0);
+       atomic_set_unchecked(&mce_fake_panicked, 0);
        atomic_set(&mce_executing, 0);
        atomic_set(&mce_callin, 0);
        atomic_set(&global_nwo, 0);
index a3042989398c1cdb7d33da49bf7dea1887e00aa5..49b6d06f2984abd107f4c4812b5202c2ee53cd0a 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/processor.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
+#include <asm/pgtable.h>
 
 /* By default disabled */
 int mce_p5_enabled __read_mostly;
@@ -48,7 +49,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
        if (!cpu_has(c, X86_FEATURE_MCE))
                return;
 
+       pax_open_kernel();
        machine_check_vector = pentium_machine_check;
+       pax_close_kernel();
        /* Make sure the vector pointer is visible before we enable MCEs: */
        wmb();
 
index 7dc5564d0cdf57c0e7ca8c181f87f3ebb6f6ceb2..1273569d7b9d45f1653e2c3f20405342fda8a750 100644 (file)
@@ -9,6 +9,7 @@
 #include <asm/processor.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
+#include <asm/pgtable.h>
 
 /* Machine check handler for WinChip C6: */
 static void winchip_machine_check(struct pt_regs *regs, long error_code)
@@ -22,7 +23,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
 {
        u32 lo, hi;
 
+       pax_open_kernel();
        machine_check_vector = winchip_machine_check;
+       pax_close_kernel();
        /* Make sure the vector pointer is visible before we enable MCEs: */
        wmb();
 
index 36a83617eb21cc19245794a89c986eba45179d3a..e7058c24ca07d769ad2a96d8f95548928b001bbb 100644 (file)
@@ -518,7 +518,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
        return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata mc_cpu_notifier = {
+static struct notifier_block mc_cpu_notifier = {
        .notifier_call  = mc_cpu_callback,
 };
 
index c6826d1e8082584268d1b5e8f3abeb176ee61187..8dc677eb93c52affa77e2777bcbe909bb72a3b75 100644 (file)
@@ -196,6 +196,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
                struct microcode_header_intel mc_header;
                unsigned int mc_size;
 
+               if (leftover < sizeof(mc_header)) {
+                       pr_err("error! Truncated header in microcode data file\n");
+                       break;
+               }
+
                if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
                        break;
 
@@ -293,13 +298,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
 
 static int get_ucode_user(void *to, const void *from, size_t n)
 {
-       return copy_from_user(to, from, n);
+       return copy_from_user(to, (const void __force_user *)from, n);
 }
 
 static enum ucode_state
 request_microcode_user(int cpu, const void __user *buf, size_t size)
 {
-       return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
+       return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
 }
 
 static void microcode_fini_cpu(int cpu)
index ec9df6f9cd47b35e7f4d6059eb094f922420c5cf..420eb933189ca487110607475ddbf33be8e8267b 100644 (file)
@@ -321,7 +321,11 @@ get_matching_model_microcode(int cpu, unsigned long start,
        unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
        int i;
 
-       while (leftover) {
+       while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
+
+               if (leftover < sizeof(mc_header))
+                       break;
+
                mc_header = (struct microcode_header_intel *)ucode_ptr;
 
                mc_size = get_totalsize(mc_header);
index ea5f363a194866303395358353a658750dcff3d4..cb0e9053fe63cca57985a08679a2bdc3e2a62a81 100644 (file)
@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
 u64 size_or_mask, size_and_mask;
 static bool mtrr_aps_delayed_init;
 
-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
 
 const struct mtrr_ops *mtrr_if;
 
index df5e41f31a27e71cd44aadd35cf43f311b437008..816c71999faab5c634607799f2ab54a121266864 100644 (file)
@@ -25,7 +25,7 @@ struct mtrr_ops {
        int     (*validate_add_page)(unsigned long base, unsigned long size,
                                     unsigned int type);
        int     (*have_wrcomb)(void);
-};
+} __do_const;
 
 extern int generic_get_free_region(unsigned long base, unsigned long size,
                                   int replace_reg);
index 143e5f5dc8551b568b7eabb84d83da764268d6b2..5825081de90eb92cdc21c78115c93f85c3e9cf6e 100644 (file)
@@ -1374,7 +1374,7 @@ static void __init pmu_check_apic(void)
 
 }
 
-static struct attribute_group x86_pmu_format_group = {
+static attribute_group_no_const x86_pmu_format_group = {
        .name = "format",
        .attrs = NULL,
 };
@@ -1473,7 +1473,7 @@ static struct attribute *events_attr[] = {
        NULL,
 };
 
-static struct attribute_group x86_pmu_events_group = {
+static attribute_group_no_const x86_pmu_events_group = {
        .name = "events",
        .attrs = events_attr,
 };
@@ -1997,7 +1997,7 @@ static unsigned long get_segment_base(unsigned int segment)
                if (idx > GDT_ENTRIES)
                        return 0;
 
-               desc = raw_cpu_ptr(gdt_page.gdt);
+               desc = get_cpu_gdt_table(smp_processor_id());
        }
 
        return get_desc_base(desc + idx);
@@ -2087,7 +2087,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
                        break;
 
                perf_callchain_store(entry, frame.return_address);
-               fp = frame.next_frame;
+               fp = (const void __force_user *)frame.next_frame;
        }
 }
 
index 97242a9242bdfde7dabba99e9a0d245ffe770c01..cf9c30e47247a11b8046f3e62f4cf331abb1108b 100644 (file)
@@ -402,7 +402,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
 {
        struct attribute **attrs;
-       struct attribute_group *attr_group;
+       attribute_group_no_const *attr_group;
        int i = 0, j;
 
        while (amd_iommu_v2_event_descs[i].attr.attr.name)
index 498b6d967138b1fff29659e81813c77e70ff1f58..412651581b4534e5b6710edc0cb20feab903e1ec 100644 (file)
@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
                x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
 
        if (boot_cpu_has(X86_FEATURE_PDCM)) {
-               u64 capabilities;
+               u64 capabilities = x86_pmu.intel_cap.capabilities;
 
-               rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
-               x86_pmu.intel_cap.capabilities = capabilities;
+               if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
+                       x86_pmu.intel_cap.capabilities = capabilities;
        }
 
        intel_ds_init();
index c4bb8b8e5017403b25847a97ccce42c96bba3837..9f7384d407224031eb827f06e036e27792e2f83d 100644 (file)
@@ -465,7 +465,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
        NULL,
 };
 
-static struct attribute_group rapl_pmu_events_group = {
+static attribute_group_no_const rapl_pmu_events_group __read_only = {
        .name = "events",
        .attrs = NULL, /* patched at runtime */
 };
index c635b8b49e931e7926efc3dc96475a8c577958e0..b78835e4717beb154b5b9c042dac82403c32c6af 100644 (file)
@@ -733,7 +733,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
 static int __init uncore_type_init(struct intel_uncore_type *type)
 {
        struct intel_uncore_pmu *pmus;
-       struct attribute_group *attr_group;
+       attribute_group_no_const *attr_group;
        struct attribute **attrs;
        int i, j;
 
index 6c8c1e7e69d85d3ad217eada0f0e55573c3daaf0..515b98a9fb14aa75247e4aa10083869509d1eabd 100644 (file)
@@ -114,7 +114,7 @@ struct intel_uncore_box {
 struct uncore_event_desc {
        struct kobj_attribute attr;
        const char *config;
-};
+} __do_const;
 
 ssize_t uncore_event_show(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf);
index 83741a71558fb0a311fb1bb079e125f24435344a..bd3507da39f01678257e3d8a65a3009ad1d1f6f1 100644 (file)
@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
        return notifier_from_errno(err);
 }
 
-static struct notifier_block __refdata cpuid_class_cpu_notifier =
+static struct notifier_block cpuid_class_cpu_notifier =
 {
        .notifier_call = cpuid_class_cpu_callback,
 };
index aceb2f90c7166afcfa844cd7da58bfe6358efaa6..c76d3e37c6e1dc99a7083f05a2f79b7cd82968e1 100644 (file)
@@ -105,7 +105,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
 #ifdef CONFIG_X86_32
        struct pt_regs fixed_regs;
 
-       if (!user_mode_vm(regs)) {
+       if (!user_mode(regs)) {
                crash_fixup_ss_esp(&fixed_regs, regs);
                regs = &fixed_regs;
        }
index afa64adb75eeb2bf830fbef1ef5a66960b14663e..dce67dd30b1478f63b9084d7606e3ae0a515ac39 100644 (file)
@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
                return -ENOMEM;
 
        if (userbuf) {
-               if (copy_to_user(buf, vaddr + offset, csize)) {
+               if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
                        iounmap(vaddr);
                        return -EFAULT;
                }
index f6dfd9334b67e54aa0a9eda6cfac7f1094cf80d9..892ade462acf2ed71197ec9f563cd64c0643bbf8 100644 (file)
@@ -12,7 +12,7 @@
 
 #define DOUBLEFAULT_STACKSIZE (1024)
 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
 
 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
 
@@ -22,7 +22,7 @@ static void doublefault_fn(void)
        unsigned long gdt, tss;
 
        native_store_gdt(&gdt_desc);
-       gdt = gdt_desc.address;
+       gdt = (unsigned long)gdt_desc.address;
 
        printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
 
@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
                /* 0x2 bit is always set */
                .flags          = X86_EFLAGS_SF | 0x2,
                .sp             = STACK_START,
-               .es             = __USER_DS,
+               .es             = __KERNEL_DS,
                .cs             = __KERNEL_CS,
                .ss             = __KERNEL_DS,
-               .ds             = __USER_DS,
+               .ds             = __KERNEL_DS,
                .fs             = __KERNEL_PERCPU,
 
                .__cr3          = __pa_nodebug(swapper_pg_dir),
index b74ebc7c4402e7eff3b21f4b87ba514e0c017056..2c958745d611340750e69bdce570902322ef5e55 100644 (file)
@@ -2,6 +2,9 @@
  *  Copyright (C) 1991, 1992  Linus Torvalds
  *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
  */
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+#define __INCLUDED_BY_HIDESYM 1
+#endif
 #include <linux/kallsyms.h>
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
 
 void printk_address(unsigned long address)
 {
-       pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
+       pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static void
 print_ftrace_graph_addr(unsigned long addr, void *data,
                        const struct stacktrace_ops *ops,
-                       struct thread_info *tinfo, int *graph)
+                       struct task_struct *task, int *graph)
 {
-       struct task_struct *task;
        unsigned long ret_addr;
        int index;
 
        if (addr != (unsigned long)return_to_handler)
                return;
 
-       task = tinfo->task;
        index = task->curr_ret_stack;
 
        if (!task->ret_stack || index < *graph)
@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
 static inline void
 print_ftrace_graph_addr(unsigned long addr, void *data,
                        const struct stacktrace_ops *ops,
-                       struct thread_info *tinfo, int *graph)
+                       struct task_struct *task, int *graph)
 { }
 #endif
 
@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
  */
 
-static inline int valid_stack_ptr(struct thread_info *tinfo,
-                       void *p, unsigned int size, void *end)
+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
 {
-       void *t = tinfo;
        if (end) {
                if (p < end && p >= (end-THREAD_SIZE))
                        return 1;
@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
 }
 
 unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task, void *stack_start,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data,
                unsigned long *end, int *graph)
 {
        struct stack_frame *frame = (struct stack_frame *)bp;
 
-       while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
+       while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
                unsigned long addr;
 
                addr = *stack;
@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
                        } else {
                                ops->address(data, addr, 0);
                        }
-                       print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+                       print_ftrace_graph_addr(addr, data, ops, task, graph);
                }
                stack++;
        }
@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
 EXPORT_SYMBOL_GPL(print_context_stack);
 
 unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task, void *stack_start,
                       unsigned long *stack, unsigned long bp,
                       const struct stacktrace_ops *ops, void *data,
                       unsigned long *end, int *graph)
@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
        struct stack_frame *frame = (struct stack_frame *)bp;
        unsigned long *ret_addr = &frame->return_address;
 
-       while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
+       while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
                unsigned long addr = *ret_addr;
 
                if (!__kernel_text_address(addr))
@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
                ops->address(data, addr, 1);
                frame = frame->next_frame;
                ret_addr = &frame->return_address;
-               print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+               print_ftrace_graph_addr(addr, data, ops, task, graph);
        }
 
        return (unsigned long)frame;
@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
 static void print_trace_address(void *data, unsigned long addr, int reliable)
 {
        touch_nmi_watchdog();
-       printk(data);
+       printk("%s", (char *)data);
        printk_stack_address(addr, reliable);
 }
 
@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
 EXPORT_SYMBOL_GPL(oops_begin);
 NOKPROBE_SYMBOL(oops_begin);
 
+extern void gr_handle_kernel_exploit(void);
+
 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
 {
        if (regs && kexec_should_crash(current))
@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");
-       do_exit(signr);
+
+       gr_handle_kernel_exploit();
+
+       do_group_exit(signr);
 }
 NOKPROBE_SYMBOL(oops_end);
 
@@ -275,7 +279,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
        print_modules();
        show_regs(regs);
 #ifdef CONFIG_X86_32
-       if (user_mode_vm(regs)) {
+       if (user_mode(regs)) {
                sp = regs->sp;
                ss = regs->ss & 0xffff;
        } else {
@@ -304,7 +308,7 @@ void die(const char *str, struct pt_regs *regs, long err)
        unsigned long flags = oops_begin();
        int sig = SIGSEGV;
 
-       if (!user_mode_vm(regs))
+       if (!user_mode(regs))
                report_bug(regs->ip, regs);
 
        if (__die(str, regs, err))
index 5abd4cd4230c69f3ff4730e97a1297be40013c44..c65733b49899bd4fab5214434bf20e253d03694d 100644 (file)
@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                bp = stack_frame(task, regs);
 
        for (;;) {
-               struct thread_info *context;
+               void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
                void *end_stack;
 
                end_stack = is_hardirq_stack(stack, cpu);
                if (!end_stack)
                        end_stack = is_softirq_stack(stack, cpu);
 
-               context = task_thread_info(task);
-               bp = ops->walk_stack(context, stack, bp, ops, data,
+               bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
                                     end_stack, &graph);
 
                /* Stop if not on irq stack */
@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
        int i;
 
        show_regs_print_info(KERN_EMERG);
-       __show_regs(regs, !user_mode_vm(regs));
+       __show_regs(regs, !user_mode(regs));
 
        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
-       if (!user_mode_vm(regs)) {
+       if (!user_mode(regs)) {
                unsigned int code_prologue = code_bytes * 43 / 64;
                unsigned int code_len = code_bytes;
                unsigned char c;
                u8 *ip;
+               unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
 
                pr_emerg("Stack:\n");
                show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
 
                pr_emerg("Code:");
 
-               ip = (u8 *)regs->ip - code_prologue;
+               ip = (u8 *)regs->ip - code_prologue + cs_base;
                if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
                        /* try starting at IP */
-                       ip = (u8 *)regs->ip;
+                       ip = (u8 *)regs->ip + cs_base;
                        code_len = code_len - code_prologue + 1;
                }
                for (i = 0; i < code_len; i++, ip++) {
@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
                                pr_cont("  Bad EIP value.");
                                break;
                        }
-                       if (ip == (u8 *)regs->ip)
+                       if (ip == (u8 *)regs->ip + cs_base)
                                pr_cont(" <%02x>", c);
                        else
                                pr_cont(" %02x", c);
@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
 {
        unsigned short ud2;
 
+       ip = ktla_ktva(ip);
        if (ip < PAGE_OFFSET)
                return 0;
        if (probe_kernel_address((unsigned short *)ip, ud2))
@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
 
        return ud2 == 0x0b0f;
 }
+
+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
+void pax_check_alloca(unsigned long size)
+{
+       unsigned long sp = (unsigned long)&sp, stack_left;
+
+       /* all kernel stacks are of the same size */
+       stack_left = sp & (THREAD_SIZE - 1);
+       BUG_ON(stack_left < 256 || size >= stack_left - 256);
+}
+EXPORT_SYMBOL(pax_check_alloca);
+#endif
index ff86f19b575849fca7e20a4086e09f798ae8291d..73eabf4c19e6dabfacf2b011e5ca85d02e7620d7 100644 (file)
@@ -153,12 +153,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                const struct stacktrace_ops *ops, void *data)
 {
        const unsigned cpu = get_cpu();
-       struct thread_info *tinfo;
        unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
        unsigned long dummy;
        unsigned used = 0;
        int graph = 0;
        int done = 0;
+       void *stack_start;
 
        if (!task)
                task = current;
@@ -179,7 +179,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
         * current stack address. If the stacks consist of nested
         * exceptions
         */
-       tinfo = task_thread_info(task);
        while (!done) {
                unsigned long *stack_end;
                enum stack_type stype;
@@ -202,7 +201,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                        if (ops->stack(data, id) < 0)
                                break;
 
-                       bp = ops->walk_stack(tinfo, stack, bp, ops,
+                       bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
                                             data, stack_end, &graph);
                        ops->stack(data, "<EOE>");
                        /*
@@ -210,6 +209,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                         * second-to-last pointer (index -2 to end) in the
                         * exception stack:
                         */
+                       if ((u16)stack_end[-1] != __KERNEL_DS)
+                               goto out;
                        stack = (unsigned long *) stack_end[-2];
                        done = 0;
                        break;
@@ -218,7 +219,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 
                        if (ops->stack(data, "IRQ") < 0)
                                break;
-                       bp = ops->walk_stack(tinfo, stack, bp,
+                       bp = ops->walk_stack(task, irq_stack, stack, bp,
                                     ops, data, stack_end, &graph);
                        /*
                         * We link to the next stack (which would be
@@ -240,7 +241,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
        /*
         * This handles the process stack:
         */
-       bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
+       stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
+       bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
+out:
        put_cpu();
 }
 EXPORT_SYMBOL(dump_trace);
@@ -344,8 +347,55 @@ int is_valid_bugaddr(unsigned long ip)
 {
        unsigned short ud2;
 
-       if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
+       if (probe_kernel_address((unsigned short *)ip, ud2))
                return 0;
 
        return ud2 == 0x0b0f;
 }
+
+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
+void pax_check_alloca(unsigned long size)
+{
+       unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
+       unsigned cpu, used;
+       char *id;
+
+       /* check the process stack first */
+       stack_start = (unsigned long)task_stack_page(current);
+       stack_end = stack_start + THREAD_SIZE;
+       if (likely(stack_start <= sp && sp < stack_end)) {
+               unsigned long stack_left = sp & (THREAD_SIZE - 1);
+               BUG_ON(stack_left < 256 || size >= stack_left - 256);
+               return;
+       }
+
+       cpu = get_cpu();
+
+       /* check the irq stacks */
+       stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
+       stack_start = stack_end - IRQ_STACK_SIZE;
+       if (stack_start <= sp && sp < stack_end) {
+               unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
+               put_cpu();
+               BUG_ON(stack_left < 256 || size >= stack_left - 256);
+               return;
+       }
+
+       /* check the exception stacks */
+       used = 0;
+       stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
+       stack_start = stack_end - EXCEPTION_STKSZ;
+       if (stack_end && stack_start <= sp && sp < stack_end) {
+               unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
+               put_cpu();
+               BUG_ON(stack_left < 256 || size >= stack_left - 256);
+               return;
+       }
+
+       put_cpu();
+
+       /* unknown stack */
+       BUG();
+}
+EXPORT_SYMBOL(pax_check_alloca);
+#endif
index dd2f07ae9d0cbd852566f40eb89ece22bc5f3b6c..845dc057e20c5f7791ffe0ba827e5cdb1e1bc0dd 100644 (file)
@@ -802,8 +802,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
 
 static void early_panic(char *msg)
 {
-       early_printk(msg);
-       panic(msg);
+       early_printk("%s", msg);
+       panic("%s", msg);
 }
 
 static int userdef __initdata;
index 01d1c187c9f9468f0488fa498d44e814ca9a2179..8073693cae93d665b7bfb0c2f01cfcd2c8779331 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/pci_regs.h>
 #include <linux/pci_ids.h>
 #include <linux/errno.h>
+#include <linux/sched.h>
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/fcntl.h>
index 000d4199b03e69905527d7972ccd918835e6cc1f..8f66802ae4672fd0b73613c984548ebf33200e76 100644 (file)
        /*CFI_REL_OFFSET gs, PT_GS*/
 .endm
 .macro SET_KERNEL_GS reg
+
+#ifdef CONFIG_CC_STACKPROTECTOR
        movl $(__KERNEL_STACK_CANARY), \reg
+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
+       movl $(__USER_DS), \reg
+#else
+       xorl \reg, \reg
+#endif
+
        movl \reg, %gs
 .endm
 
 #endif /* CONFIG_X86_32_LAZY_GS */
 
-.macro SAVE_ALL
+.macro pax_enter_kernel
+#ifdef CONFIG_PAX_KERNEXEC
+       call pax_enter_kernel
+#endif
+.endm
+
+.macro pax_exit_kernel
+#ifdef CONFIG_PAX_KERNEXEC
+       call pax_exit_kernel
+#endif
+.endm
+
+#ifdef CONFIG_PAX_KERNEXEC
+ENTRY(pax_enter_kernel)
+#ifdef CONFIG_PARAVIRT
+       pushl %eax
+       pushl %ecx
+       call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
+       mov %eax, %esi
+#else
+       mov %cr0, %esi
+#endif
+       bts $16, %esi
+       jnc 1f
+       mov %cs, %esi
+       cmp $__KERNEL_CS, %esi
+       jz 3f
+       ljmp $__KERNEL_CS, $3f
+1:     ljmp $__KERNEXEC_KERNEL_CS, $2f
+2:
+#ifdef CONFIG_PARAVIRT
+       mov %esi, %eax
+       call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
+#else
+       mov %esi, %cr0
+#endif
+3:
+#ifdef CONFIG_PARAVIRT
+       popl %ecx
+       popl %eax
+#endif
+       ret
+ENDPROC(pax_enter_kernel)
+
+ENTRY(pax_exit_kernel)
+#ifdef CONFIG_PARAVIRT
+       pushl %eax
+       pushl %ecx
+#endif
+       mov %cs, %esi
+       cmp $__KERNEXEC_KERNEL_CS, %esi
+       jnz 2f
+#ifdef CONFIG_PARAVIRT
+       call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
+       mov %eax, %esi
+#else
+       mov %cr0, %esi
+#endif
+       btr $16, %esi
+       ljmp $__KERNEL_CS, $1f
+1:
+#ifdef CONFIG_PARAVIRT
+       mov %esi, %eax
+       call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
+#else
+       mov %esi, %cr0
+#endif
+2:
+#ifdef CONFIG_PARAVIRT
+       popl %ecx
+       popl %eax
+#endif
+       ret
+ENDPROC(pax_exit_kernel)
+#endif
+
+       .macro pax_erase_kstack
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+       call pax_erase_kstack
+#endif
+       .endm
+
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+/*
+ * ebp: thread_info
+ */
+ENTRY(pax_erase_kstack)
+       pushl %edi
+       pushl %ecx
+       pushl %eax
+
+       mov TI_lowest_stack(%ebp), %edi
+       mov $-0xBEEF, %eax
+       std
+
+1:     mov %edi, %ecx
+       and $THREAD_SIZE_asm - 1, %ecx
+       shr $2, %ecx
+       repne scasl
+       jecxz 2f
+
+       cmp $2*16, %ecx
+       jc 2f
+
+       mov $2*16, %ecx
+       repe scasl
+       jecxz 2f
+       jne 1b
+
+2:     cld
+       or $2*4, %edi
+       mov %esp, %ecx
+       sub %edi, %ecx
+
+       cmp $THREAD_SIZE_asm, %ecx
+       jb 3f
+       ud2
+3:
+
+       shr $2, %ecx
+       rep stosl
+
+       mov TI_task_thread_sp0(%ebp), %edi
+       sub $128, %edi
+       mov %edi, TI_lowest_stack(%ebp)
+
+       popl %eax
+       popl %ecx
+       popl %edi
+       ret
+ENDPROC(pax_erase_kstack)
+#endif
+
+.macro __SAVE_ALL _DS
        cld
        PUSH_GS
        pushl_cfi %fs
        CFI_REL_OFFSET ecx, 0
        pushl_cfi %ebx
        CFI_REL_OFFSET ebx, 0
-       movl $(__USER_DS), %edx
+       movl $\_DS, %edx
        movl %edx, %ds
        movl %edx, %es
        movl $(__KERNEL_PERCPU), %edx
        SET_KERNEL_GS %edx
 .endm
 
+.macro SAVE_ALL
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+       __SAVE_ALL __KERNEL_DS
+       pax_enter_kernel
+#else
+       __SAVE_ALL __USER_DS
+#endif
+.endm
+
 .macro RESTORE_INT_REGS
        popl_cfi %ebx
        CFI_RESTORE ebx
@@ -297,7 +447,7 @@ ENTRY(ret_from_fork)
        popfl_cfi
        jmp syscall_exit
        CFI_ENDPROC
-END(ret_from_fork)
+ENDPROC(ret_from_fork)
 
 ENTRY(ret_from_kernel_thread)
        CFI_STARTPROC
@@ -340,7 +490,15 @@ ret_from_intr:
        andl $SEGMENT_RPL_MASK, %eax
 #endif
        cmpl $USER_RPL, %eax
+
+#ifdef CONFIG_PAX_KERNEXEC
+       jae resume_userspace
+
+       pax_exit_kernel
+       jmp resume_kernel
+#else
        jb resume_kernel                # not returning to v8086 or userspace
+#endif
 
 ENTRY(resume_userspace)
        LOCKDEP_SYS_EXIT
@@ -352,8 +510,8 @@ ENTRY(resume_userspace)
        andl $_TIF_WORK_MASK, %ecx      # is there any work to be done on
                                        # int/exception return?
        jne work_pending
-       jmp restore_all
-END(ret_from_exception)
+       jmp restore_all_pax
+ENDPROC(ret_from_exception)
 
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
@@ -365,7 +523,7 @@ need_resched:
        jz restore_all
        call preempt_schedule_irq
        jmp need_resched
-END(resume_kernel)
+ENDPROC(resume_kernel)
 #endif
        CFI_ENDPROC
 
@@ -395,30 +553,45 @@ sysenter_past_esp:
        /*CFI_REL_OFFSET cs, 0*/
        /*
         * Push current_thread_info()->sysenter_return to the stack.
-        * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
-        * pushed above; +8 corresponds to copy_thread's esp0 setting.
         */
-       pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
+       pushl_cfi $0
        CFI_REL_OFFSET eip, 0
 
        pushl_cfi %eax
        SAVE_ALL
+       GET_THREAD_INFO(%ebp)
+       movl TI_sysenter_return(%ebp),%ebp
+       movl %ebp,PT_EIP(%esp)
        ENABLE_INTERRUPTS(CLBR_NONE)
 
 /*
  * Load the potential sixth argument from user stack.
  * Careful about security.
  */
+       movl PT_OLDESP(%esp),%ebp
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       mov PT_OLDSS(%esp),%ds
+1:     movl %ds:(%ebp),%ebp
+       push %ss
+       pop %ds
+#else
        cmpl $__PAGE_OFFSET-3,%ebp
        jae syscall_fault
        ASM_STAC
 1:     movl (%ebp),%ebp
        ASM_CLAC
+#endif
+
        movl %ebp,PT_EBP(%esp)
        _ASM_EXTABLE(1b,syscall_fault)
 
        GET_THREAD_INFO(%ebp)
 
+#ifdef CONFIG_PAX_RANDKSTACK
+       pax_erase_kstack
+#endif
+
        testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
        jnz sysenter_audit
 sysenter_do_call:
@@ -434,12 +607,24 @@ sysenter_after_call:
        testl $_TIF_ALLWORK_MASK, %ecx
        jne sysexit_audit
 sysenter_exit:
+
+#ifdef CONFIG_PAX_RANDKSTACK
+       pushl_cfi %eax
+       movl %esp, %eax
+       call pax_randomize_kstack
+       popl_cfi %eax
+#endif
+
+       pax_erase_kstack
+
 /* if something modifies registers it must also disable sysexit */
        movl PT_EIP(%esp), %edx
        movl PT_OLDESP(%esp), %ecx
        xorl %ebp,%ebp
        TRACE_IRQS_ON
 1:     mov  PT_FS(%esp), %fs
+2:     mov  PT_DS(%esp), %ds
+3:     mov  PT_ES(%esp), %es
        PTGS_TO_GS
        ENABLE_INTERRUPTS_SYSEXIT
 
@@ -453,6 +638,9 @@ sysenter_audit:
        pushl_cfi PT_ESI(%esp)          /* a3: 5th arg */
        pushl_cfi PT_EDX+4(%esp)        /* a2: 4th arg */
        call __audit_syscall_entry
+
+       pax_erase_kstack
+
        popl_cfi %ecx /* get that remapped edx off the stack */
        popl_cfi %ecx /* get that remapped esi off the stack */
        movl PT_EAX(%esp),%eax          /* reload syscall number */
@@ -479,10 +667,16 @@ sysexit_audit:
 
        CFI_ENDPROC
 .pushsection .fixup,"ax"
-2:     movl $0,PT_FS(%esp)
+4:     movl $0,PT_FS(%esp)
+       jmp 1b
+5:     movl $0,PT_DS(%esp)
+       jmp 1b
+6:     movl $0,PT_ES(%esp)
        jmp 1b
 .popsection
-       _ASM_EXTABLE(1b,2b)
+       _ASM_EXTABLE(1b,4b)
+       _ASM_EXTABLE(2b,5b)
+       _ASM_EXTABLE(3b,6b)
        PTGS_TO_GS_EX
 ENDPROC(ia32_sysenter_target)
 
@@ -493,6 +687,11 @@ ENTRY(system_call)
        pushl_cfi %eax                  # save orig_eax
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
+
+#ifdef CONFIG_PAX_RANDKSTACK
+       pax_erase_kstack
+#endif
+
                                        # system call tracing in operation / emulation
        testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
        jnz syscall_trace_entry
@@ -512,6 +711,15 @@ syscall_exit:
        testl $_TIF_ALLWORK_MASK, %ecx  # current->work
        jne syscall_exit_work
 
+restore_all_pax:
+
+#ifdef CONFIG_PAX_RANDKSTACK
+       movl %esp, %eax
+       call pax_randomize_kstack
+#endif
+
+       pax_erase_kstack
+
 restore_all:
        TRACE_IRQS_IRET
 restore_all_notrace:
@@ -566,14 +774,34 @@ ldt_ss:
  * compensating for the offset by changing to the ESPFIX segment with
  * a base address that matches for the difference.
  */
-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
        mov %esp, %edx                  /* load kernel esp */
        mov PT_OLDESP(%esp), %eax       /* load userspace esp */
        mov %dx, %ax                    /* eax: new kernel esp */
        sub %eax, %edx                  /* offset (low word is 0) */
+#ifdef CONFIG_SMP
+       movl PER_CPU_VAR(cpu_number), %ebx
+       shll $PAGE_SHIFT_asm, %ebx
+       addl $cpu_gdt_table, %ebx
+#else
+       movl $cpu_gdt_table, %ebx
+#endif
        shr $16, %edx
-       mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
-       mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
+
+#ifdef CONFIG_PAX_KERNEXEC
+       mov %cr0, %esi
+       btr $16, %esi
+       mov %esi, %cr0
+#endif
+
+       mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
+       mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
+
+#ifdef CONFIG_PAX_KERNEXEC
+       bts $16, %esi
+       mov %esi, %cr0
+#endif
+
        pushl_cfi $__ESPFIX_SS
        pushl_cfi %eax                  /* new kernel esp */
        /* Disable interrupts, but do not irqtrace this section: we
@@ -603,20 +831,18 @@ work_resched:
        movl TI_flags(%ebp), %ecx
        andl $_TIF_WORK_MASK, %ecx      # is there any work to be done other
                                        # than syscall tracing?
-       jz restore_all
+       jz restore_all_pax
        testb $_TIF_NEED_RESCHED, %cl
        jnz work_resched
 
 work_notifysig:                                # deal with pending signals and
                                        # notify-resume requests
+       movl %esp, %eax
 #ifdef CONFIG_VM86
        testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
-       movl %esp, %eax
        jne work_notifysig_v86          # returning to kernel-space or
                                        # vm86-space
 1:
-#else
-       movl %esp, %eax
 #endif
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
@@ -637,7 +863,7 @@ work_notifysig_v86:
        movl %eax, %esp
        jmp 1b
 #endif
-END(work_pending)
+ENDPROC(work_pending)
 
        # perform syscall exit tracing
        ALIGN
@@ -645,11 +871,14 @@ syscall_trace_entry:
        movl $-ENOSYS,PT_EAX(%esp)
        movl %esp, %eax
        call syscall_trace_enter
+
+       pax_erase_kstack
+
        /* What it returned is what we'll actually use.  */
        cmpl $(NR_syscalls), %eax
        jnae syscall_call
        jmp syscall_exit
-END(syscall_trace_entry)
+ENDPROC(syscall_trace_entry)
 
        # perform syscall exit tracing
        ALIGN
@@ -662,26 +891,30 @@ syscall_exit_work:
        movl %esp, %eax
        call syscall_trace_leave
        jmp resume_userspace
-END(syscall_exit_work)
+ENDPROC(syscall_exit_work)
        CFI_ENDPROC
 
        RING0_INT_FRAME                 # can't unwind into user space anyway
 syscall_fault:
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       push %ss
+       pop %ds
+#endif
        ASM_CLAC
        GET_THREAD_INFO(%ebp)
        movl $-EFAULT,PT_EAX(%esp)
        jmp resume_userspace
-END(syscall_fault)
+ENDPROC(syscall_fault)
 
 syscall_badsys:
        movl $-ENOSYS,%eax
        jmp syscall_after_call
-END(syscall_badsys)
+ENDPROC(syscall_badsys)
 
 sysenter_badsys:
        movl $-ENOSYS,%eax
        jmp sysenter_after_call
-END(sysenter_badsys)
+ENDPROC(sysenter_badsys)
        CFI_ENDPROC
 
 .macro FIXUP_ESPFIX_STACK
@@ -694,8 +927,15 @@ END(sysenter_badsys)
  */
 #ifdef CONFIG_X86_ESPFIX32
        /* fixup the stack */
-       mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
-       mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
+#ifdef CONFIG_SMP
+       movl PER_CPU_VAR(cpu_number), %ebx
+       shll $PAGE_SHIFT_asm, %ebx
+       addl $cpu_gdt_table, %ebx
+#else
+       movl $cpu_gdt_table, %ebx
+#endif
+       mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
+       mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
        shl $16, %eax
        addl %esp, %eax                 /* the adjusted stack pointer */
        pushl_cfi $__KERNEL_DS
@@ -751,7 +991,7 @@ vector=vector+1
   .endr
 2:     jmp common_interrupt
 .endr
-END(irq_entries_start)
+ENDPROC(irq_entries_start)
 
 .previous
 END(interrupt)
@@ -808,7 +1048,7 @@ ENTRY(coprocessor_error)
        pushl_cfi $do_coprocessor_error
        jmp error_code
        CFI_ENDPROC
-END(coprocessor_error)
+ENDPROC(coprocessor_error)
 
 ENTRY(simd_coprocessor_error)
        RING0_INT_FRAME
@@ -821,7 +1061,7 @@ ENTRY(simd_coprocessor_error)
 .section .altinstructions,"a"
        altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
 .previous
-.section .altinstr_replacement,"ax"
+.section .altinstr_replacement,"a"
 663:   pushl $do_simd_coprocessor_error
 664:
 .previous
@@ -830,7 +1070,7 @@ ENTRY(simd_coprocessor_error)
 #endif
        jmp error_code
        CFI_ENDPROC
-END(simd_coprocessor_error)
+ENDPROC(simd_coprocessor_error)
 
 ENTRY(device_not_available)
        RING0_INT_FRAME
@@ -839,18 +1079,18 @@ ENTRY(device_not_available)
        pushl_cfi $do_device_not_available
        jmp error_code
        CFI_ENDPROC
-END(device_not_available)
+ENDPROC(device_not_available)
 
 #ifdef CONFIG_PARAVIRT
 ENTRY(native_iret)
        iret
        _ASM_EXTABLE(native_iret, iret_exc)
-END(native_iret)
+ENDPROC(native_iret)
 
 ENTRY(native_irq_enable_sysexit)
        sti
        sysexit
-END(native_irq_enable_sysexit)
+ENDPROC(native_irq_enable_sysexit)
 #endif
 
 ENTRY(overflow)
@@ -860,7 +1100,7 @@ ENTRY(overflow)
        pushl_cfi $do_overflow
        jmp error_code
        CFI_ENDPROC
-END(overflow)
+ENDPROC(overflow)
 
 ENTRY(bounds)
        RING0_INT_FRAME
@@ -869,7 +1109,7 @@ ENTRY(bounds)
        pushl_cfi $do_bounds
        jmp error_code
        CFI_ENDPROC
-END(bounds)
+ENDPROC(bounds)
 
 ENTRY(invalid_op)
        RING0_INT_FRAME
@@ -878,7 +1118,7 @@ ENTRY(invalid_op)
        pushl_cfi $do_invalid_op
        jmp error_code
        CFI_ENDPROC
-END(invalid_op)
+ENDPROC(invalid_op)
 
 ENTRY(coprocessor_segment_overrun)
        RING0_INT_FRAME
@@ -887,7 +1127,7 @@ ENTRY(coprocessor_segment_overrun)
        pushl_cfi $do_coprocessor_segment_overrun
        jmp error_code
        CFI_ENDPROC
-END(coprocessor_segment_overrun)
+ENDPROC(coprocessor_segment_overrun)
 
 ENTRY(invalid_TSS)
        RING0_EC_FRAME
@@ -895,7 +1135,7 @@ ENTRY(invalid_TSS)
        pushl_cfi $do_invalid_TSS
        jmp error_code
        CFI_ENDPROC
-END(invalid_TSS)
+ENDPROC(invalid_TSS)
 
 ENTRY(segment_not_present)
        RING0_EC_FRAME
@@ -903,7 +1143,7 @@ ENTRY(segment_not_present)
        pushl_cfi $do_segment_not_present
        jmp error_code
        CFI_ENDPROC
-END(segment_not_present)
+ENDPROC(segment_not_present)
 
 ENTRY(stack_segment)
        RING0_EC_FRAME
@@ -911,7 +1151,7 @@ ENTRY(stack_segment)
        pushl_cfi $do_stack_segment
        jmp error_code
        CFI_ENDPROC
-END(stack_segment)
+ENDPROC(stack_segment)
 
 ENTRY(alignment_check)
        RING0_EC_FRAME
@@ -919,7 +1159,7 @@ ENTRY(alignment_check)
        pushl_cfi $do_alignment_check
        jmp error_code
        CFI_ENDPROC
-END(alignment_check)
+ENDPROC(alignment_check)
 
 ENTRY(divide_error)
        RING0_INT_FRAME
@@ -928,7 +1168,7 @@ ENTRY(divide_error)
        pushl_cfi $do_divide_error
        jmp error_code
        CFI_ENDPROC
-END(divide_error)
+ENDPROC(divide_error)
 
 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)
@@ -938,7 +1178,7 @@ ENTRY(machine_check)
        pushl_cfi machine_check_vector
        jmp error_code
        CFI_ENDPROC
-END(machine_check)
+ENDPROC(machine_check)
 #endif
 
 ENTRY(spurious_interrupt_bug)
@@ -948,7 +1188,7 @@ ENTRY(spurious_interrupt_bug)
        pushl_cfi $do_spurious_interrupt_bug
        jmp error_code
        CFI_ENDPROC
-END(spurious_interrupt_bug)
+ENDPROC(spurious_interrupt_bug)
 
 #ifdef CONFIG_XEN
 /* Xen doesn't set %esp to be precisely what the normal sysenter
@@ -1054,7 +1294,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
 
 ENTRY(mcount)
        ret
-END(mcount)
+ENDPROC(mcount)
 
 ENTRY(ftrace_caller)
        pushl %eax
@@ -1084,7 +1324,7 @@ ftrace_graph_call:
 .globl ftrace_stub
 ftrace_stub:
        ret
-END(ftrace_caller)
+ENDPROC(ftrace_caller)
 
 ENTRY(ftrace_regs_caller)
        pushf   /* push flags before compare (in cs location) */
@@ -1182,7 +1422,7 @@ trace:
        popl %ecx
        popl %eax
        jmp ftrace_stub
-END(mcount)
+ENDPROC(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_TRACER */
 
@@ -1200,7 +1440,7 @@ ENTRY(ftrace_graph_caller)
        popl %ecx
        popl %eax
        ret
-END(ftrace_graph_caller)
+ENDPROC(ftrace_graph_caller)
 
 .globl return_to_handler
 return_to_handler:
@@ -1261,15 +1501,18 @@ error_code:
        movl $-1, PT_ORIG_EAX(%esp)     # no syscall to restart
        REG_TO_PTGS %ecx
        SET_KERNEL_GS %ecx
-       movl $(__USER_DS), %ecx
+       movl $(__KERNEL_DS), %ecx
        movl %ecx, %ds
        movl %ecx, %es
+
+       pax_enter_kernel
+
        TRACE_IRQS_OFF
        movl %esp,%eax                  # pt_regs pointer
        call *%edi
        jmp ret_from_exception
        CFI_ENDPROC
-END(page_fault)
+ENDPROC(page_fault)
 
 /*
  * Debug traps and NMI can happen at the one SYSENTER instruction
@@ -1312,7 +1555,7 @@ debug_stack_correct:
        call do_debug
        jmp ret_from_exception
        CFI_ENDPROC
-END(debug)
+ENDPROC(debug)
 
 /*
  * NMI is doubly nasty. It can happen _while_ we're handling
@@ -1352,6 +1595,9 @@ nmi_stack_correct:
        xorl %edx,%edx          # zero error code
        movl %esp,%eax          # pt_regs pointer
        call do_nmi
+
+       pax_exit_kernel
+
        jmp restore_all_notrace
        CFI_ENDPROC
 
@@ -1389,13 +1635,16 @@ nmi_espfix_stack:
        FIXUP_ESPFIX_STACK              # %eax == %esp
        xorl %edx,%edx                  # zero error code
        call do_nmi
+
+       pax_exit_kernel
+
        RESTORE_REGS
        lss 12+4(%esp), %esp            # back to espfix stack
        CFI_ADJUST_CFA_OFFSET -24
        jmp irq_return
 #endif
        CFI_ENDPROC
-END(nmi)
+ENDPROC(nmi)
 
 ENTRY(int3)
        RING0_INT_FRAME
@@ -1408,14 +1657,14 @@ ENTRY(int3)
        call do_int3
        jmp ret_from_exception
        CFI_ENDPROC
-END(int3)
+ENDPROC(int3)
 
 ENTRY(general_protection)
        RING0_EC_FRAME
        pushl_cfi $do_general_protection
        jmp error_code
        CFI_ENDPROC
-END(general_protection)
+ENDPROC(general_protection)
 
 #ifdef CONFIG_KVM_GUEST
 ENTRY(async_page_fault)
@@ -1424,6 +1673,6 @@ ENTRY(async_page_fault)
        pushl_cfi $do_async_page_fault
        jmp error_code
        CFI_ENDPROC
-END(async_page_fault)
+ENDPROC(async_page_fault)
 #endif
 
index 4ee9a2315f80dac88d3b8f10b6e48cc571558282..c78661071e2923b23d21cbd7af84ee0e5cbcc0ca 100644 (file)
@@ -59,6 +59,8 @@
 #include <asm/smap.h>
 #include <asm/pgtable_types.h>
 #include <linux/err.h>
+#include <asm/pgtable.h>
+#include <asm/alternative-asm.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -81,6 +83,431 @@ ENTRY(native_usergs_sysret64)
 ENDPROC(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
+       .macro ljmpq sel, off
+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
+       .byte 0x48; ljmp *1234f(%rip)
+       .pushsection .rodata
+       .align 16
+       1234: .quad \off; .word \sel
+       .popsection
+#else
+       pushq $\sel
+       pushq $\off
+       lretq
+#endif
+       .endm
+
+       .macro pax_enter_kernel
+       pax_set_fptr_mask
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+       call pax_enter_kernel
+#endif
+       .endm
+
+       .macro pax_exit_kernel
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+       call pax_exit_kernel
+#endif
+
+       .endm
+
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+ENTRY(pax_enter_kernel)
+       pushq %rdi
+
+#ifdef CONFIG_PARAVIRT
+       PV_SAVE_REGS(CLBR_RDI)
+#endif
+
+#ifdef CONFIG_PAX_KERNEXEC
+       GET_CR0_INTO_RDI
+       bts $16,%rdi
+       jnc 3f
+       mov %cs,%edi
+       cmp $__KERNEL_CS,%edi
+       jnz 2f
+1:
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       661: jmp 111f
+       .pushsection .altinstr_replacement, "a"
+       662: ASM_NOP2
+       .popsection
+       .pushsection .altinstructions, "a"
+       altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
+       .popsection
+       GET_CR3_INTO_RDI
+       cmp $0,%dil
+       jnz 112f
+       mov $__KERNEL_DS,%edi
+       mov %edi,%ss
+       jmp 111f
+112:   cmp $1,%dil
+       jz 113f
+       ud2
+113:   sub $4097,%rdi
+       bts $63,%rdi
+       SET_RDI_INTO_CR3
+       mov $__UDEREF_KERNEL_DS,%edi
+       mov %edi,%ss
+111:
+#endif
+
+#ifdef CONFIG_PARAVIRT
+       PV_RESTORE_REGS(CLBR_RDI)
+#endif
+
+       popq %rdi
+       pax_force_retaddr
+       retq
+
+#ifdef CONFIG_PAX_KERNEXEC
+2:     ljmpq __KERNEL_CS,1b
+3:     ljmpq __KERNEXEC_KERNEL_CS,4f
+4:     SET_RDI_INTO_CR0
+       jmp 1b
+#endif
+ENDPROC(pax_enter_kernel)
+
+ENTRY(pax_exit_kernel)
+       pushq %rdi
+
+#ifdef CONFIG_PARAVIRT
+       PV_SAVE_REGS(CLBR_RDI)
+#endif
+
+#ifdef CONFIG_PAX_KERNEXEC
+       mov %cs,%rdi
+       cmp $__KERNEXEC_KERNEL_CS,%edi
+       jz 2f
+       GET_CR0_INTO_RDI
+       bts $16,%rdi
+       jnc 4f
+1:
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       661: jmp 111f
+       .pushsection .altinstr_replacement, "a"
+       662: ASM_NOP2
+       .popsection
+       .pushsection .altinstructions, "a"
+       altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
+       .popsection
+       mov %ss,%edi
+       cmp $__UDEREF_KERNEL_DS,%edi
+       jnz 111f
+       GET_CR3_INTO_RDI
+       cmp $0,%dil
+       jz 112f
+       ud2
+112:   add $4097,%rdi
+       bts $63,%rdi
+       SET_RDI_INTO_CR3
+       mov $__KERNEL_DS,%edi
+       mov %edi,%ss
+111:
+#endif
+
+#ifdef CONFIG_PARAVIRT
+       PV_RESTORE_REGS(CLBR_RDI);
+#endif
+
+       popq %rdi
+       pax_force_retaddr
+       retq
+
+#ifdef CONFIG_PAX_KERNEXEC
+2:     GET_CR0_INTO_RDI
+       btr $16,%rdi
+       jnc 4f
+       ljmpq __KERNEL_CS,3f
+3:     SET_RDI_INTO_CR0
+       jmp 1b
+4:     ud2
+       jmp 4b
+#endif
+ENDPROC(pax_exit_kernel)
+#endif
+
+       .macro pax_enter_kernel_user
+       pax_set_fptr_mask
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       call pax_enter_kernel_user
+#endif
+       .endm
+
+       .macro pax_exit_kernel_user
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       call pax_exit_kernel_user
+#endif
+#ifdef CONFIG_PAX_RANDKSTACK
+       pushq %rax
+       pushq %r11
+       call pax_randomize_kstack
+       popq %r11
+       popq %rax
+#endif
+       .endm
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ENTRY(pax_enter_kernel_user)
+       pushq %rdi
+       pushq %rbx
+
+#ifdef CONFIG_PARAVIRT
+       PV_SAVE_REGS(CLBR_RDI)
+#endif
+
+       661: jmp 111f
+       .pushsection .altinstr_replacement, "a"
+       662: ASM_NOP2
+       .popsection
+       .pushsection .altinstructions, "a"
+       altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
+       .popsection
+       GET_CR3_INTO_RDI
+       cmp $1,%dil
+       jnz 4f
+       sub $4097,%rdi
+       bts $63,%rdi
+       SET_RDI_INTO_CR3
+       jmp 3f
+111:
+
+       GET_CR3_INTO_RDI
+       mov %rdi,%rbx
+       add $__START_KERNEL_map,%rbx
+       sub phys_base(%rip),%rbx
+
+#ifdef CONFIG_PARAVIRT
+       cmpl $0, pv_info+PARAVIRT_enabled
+       jz 1f
+       pushq %rdi
+       i = 0
+       .rept USER_PGD_PTRS
+       mov i*8(%rbx),%rsi
+       mov $0,%sil
+       lea i*8(%rbx),%rdi
+       call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
+       i = i + 1
+       .endr
+       popq %rdi
+       jmp 2f
+1:
+#endif
+
+       i = 0
+       .rept USER_PGD_PTRS
+       movb $0,i*8(%rbx)
+       i = i + 1
+       .endr
+
+2:     SET_RDI_INTO_CR3
+
+#ifdef CONFIG_PAX_KERNEXEC
+       GET_CR0_INTO_RDI
+       bts $16,%rdi
+       SET_RDI_INTO_CR0
+#endif
+
+3:
+
+#ifdef CONFIG_PARAVIRT
+       PV_RESTORE_REGS(CLBR_RDI)
+#endif
+
+       popq %rbx
+       popq %rdi
+       pax_force_retaddr
+       retq
+4:     ud2
+ENDPROC(pax_enter_kernel_user)
+
+ENTRY(pax_exit_kernel_user)
+       pushq %rdi
+       pushq %rbx
+
+#ifdef CONFIG_PARAVIRT
+       PV_SAVE_REGS(CLBR_RDI)
+#endif
+
+       GET_CR3_INTO_RDI
+       661: jmp 1f
+       .pushsection .altinstr_replacement, "a"
+       662: ASM_NOP2
+       .popsection
+       .pushsection .altinstructions, "a"
+       altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
+       .popsection
+       cmp $0,%dil
+       jnz 3f
+       add $4097,%rdi
+       bts $63,%rdi
+       SET_RDI_INTO_CR3
+       jmp 2f
+1:
+
+       mov %rdi,%rbx
+
+#ifdef CONFIG_PAX_KERNEXEC
+       GET_CR0_INTO_RDI
+       btr $16,%rdi
+       jnc 3f
+       SET_RDI_INTO_CR0
+#endif
+
+       add $__START_KERNEL_map,%rbx
+       sub phys_base(%rip),%rbx
+
+#ifdef CONFIG_PARAVIRT
+       cmpl $0, pv_info+PARAVIRT_enabled
+       jz 1f
+       i = 0
+       .rept USER_PGD_PTRS
+       mov i*8(%rbx),%rsi
+       mov $0x67,%sil
+       lea i*8(%rbx),%rdi
+       call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
+       i = i + 1
+       .endr
+       jmp 2f
+1:
+#endif
+
+       i = 0
+       .rept USER_PGD_PTRS
+       movb $0x67,i*8(%rbx)
+       i = i + 1
+       .endr
+2:
+
+#ifdef CONFIG_PARAVIRT
+       PV_RESTORE_REGS(CLBR_RDI)
+#endif
+
+       popq %rbx
+       popq %rdi
+       pax_force_retaddr
+       retq
+3:     ud2
+ENDPROC(pax_exit_kernel_user)
+#endif
+
+       .macro pax_enter_kernel_nmi
+       pax_set_fptr_mask
+
+#ifdef CONFIG_PAX_KERNEXEC
+       GET_CR0_INTO_RDI
+       bts $16,%rdi
+       jc 110f
+       SET_RDI_INTO_CR0
+       or $2,%ebx
+110:
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       661: jmp 111f
+       .pushsection .altinstr_replacement, "a"
+       662: ASM_NOP2
+       .popsection
+       .pushsection .altinstructions, "a"
+       altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
+       .popsection
+       GET_CR3_INTO_RDI
+       cmp $0,%dil
+       jz 111f
+       sub $4097,%rdi
+       or $4,%ebx
+       bts $63,%rdi
+       SET_RDI_INTO_CR3
+       mov $__UDEREF_KERNEL_DS,%edi
+       mov %edi,%ss
+111:
+#endif
+       .endm
+
+       .macro pax_exit_kernel_nmi
+#ifdef CONFIG_PAX_KERNEXEC
+       btr $1,%ebx
+       jnc 110f
+       GET_CR0_INTO_RDI
+       btr $16,%rdi
+       SET_RDI_INTO_CR0
+110:
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       btr $2,%ebx
+       jnc 111f
+       GET_CR3_INTO_RDI
+       add $4097,%rdi
+       bts $63,%rdi
+       SET_RDI_INTO_CR3
+       mov $__KERNEL_DS,%edi
+       mov %edi,%ss
+111:
+#endif
+       .endm
+
+       .macro pax_erase_kstack
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+       call pax_erase_kstack
+#endif
+       .endm
+
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+ENTRY(pax_erase_kstack)
+       pushq %rdi
+       pushq %rcx
+       pushq %rax
+       pushq %r11
+
+       GET_THREAD_INFO(%r11)
+       mov TI_lowest_stack(%r11), %rdi
+       mov $-0xBEEF, %rax
+       std
+
+1:     mov %edi, %ecx
+       and $THREAD_SIZE_asm - 1, %ecx
+       shr $3, %ecx
+       repne scasq
+       jecxz 2f
+
+       cmp $2*8, %ecx
+       jc 2f
+
+       mov $2*8, %ecx
+       repe scasq
+       jecxz 2f
+       jne 1b
+
+2:     cld
+       or $2*8, %rdi
+       mov %esp, %ecx
+       sub %edi, %ecx
+
+       cmp $THREAD_SIZE_asm, %rcx
+       jb 3f
+       ud2
+3:
+
+       shr $3, %ecx
+       rep stosq
+
+       mov TI_task_thread_sp0(%r11), %rdi
+       sub $256, %rdi
+       mov %rdi, TI_lowest_stack(%r11)
+
+       popq %r11
+       popq %rax
+       popq %rcx
+       popq %rdi
+       pax_force_retaddr
+       ret
+ENDPROC(pax_erase_kstack)
+#endif
 
 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -117,7 +544,7 @@ ENDPROC(native_usergs_sysret64)
 .endm
 
 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
-       bt   $9,EFLAGS-\offset(%rsp)    /* interrupts off? */
+       bt   $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp)    /* interrupts off? */
        jnc  1f
        TRACE_IRQS_ON_DEBUG
 1:
@@ -155,27 +582,6 @@ ENDPROC(native_usergs_sysret64)
        movq \tmp,R11+\offset(%rsp)
        .endm
 
-       .macro FAKE_STACK_FRAME child_rip
-       /* push in order ss, rsp, eflags, cs, rip */
-       xorl %eax, %eax
-       pushq_cfi $__KERNEL_DS /* ss */
-       /*CFI_REL_OFFSET        ss,0*/
-       pushq_cfi %rax /* rsp */
-       CFI_REL_OFFSET  rsp,0
-       pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
-       /*CFI_REL_OFFSET        rflags,0*/
-       pushq_cfi $__KERNEL_CS /* cs */
-       /*CFI_REL_OFFSET        cs,0*/
-       pushq_cfi \child_rip /* rip */
-       CFI_REL_OFFSET  rip,0
-       pushq_cfi %rax /* orig rax */
-       .endm
-
-       .macro UNFAKE_STACK_FRAME
-       addq $8*6, %rsp
-       CFI_ADJUST_CFA_OFFSET   -(6*8)
-       .endm
-
 /*
  * initial frame state for interrupts (and exceptions without error code)
  */
@@ -241,25 +647,26 @@ ENDPROC(native_usergs_sysret64)
 /* save partial stack frame */
        .macro SAVE_ARGS_IRQ
        cld
-       /* start from rbp in pt_regs and jump over */
-       movq_cfi rdi, (RDI-RBP)
-       movq_cfi rsi, (RSI-RBP)
-       movq_cfi rdx, (RDX-RBP)
-       movq_cfi rcx, (RCX-RBP)
-       movq_cfi rax, (RAX-RBP)
-       movq_cfi  r8,  (R8-RBP)
-       movq_cfi  r9,  (R9-RBP)
-       movq_cfi r10, (R10-RBP)
-       movq_cfi r11, (R11-RBP)
+       /* start from r15 in pt_regs and jump over */
+       movq_cfi rdi, RDI
+       movq_cfi rsi, RSI
+       movq_cfi rdx, RDX
+       movq_cfi rcx, RCX
+       movq_cfi rax, RAX
+       movq_cfi  r8,  R8
+       movq_cfi  r9,  R9
+       movq_cfi r10, R10
+       movq_cfi r11, R11
+       movq_cfi r12, R12
 
        /* Save rbp so that we can unwind from get_irq_regs() */
-       movq_cfi rbp, 0
+       movq_cfi rbp, RBP
 
        /* Save previous stack value */
        movq %rsp, %rsi
 
-       leaq -RBP(%rsp),%rdi    /* arg1 for handler */
-       testl $3, CS-RBP(%rsi)
+       movq %rsp,%rdi  /* arg1 for handler */
+       testb $3, CS(%rsi)
        je 1f
        SWAPGS
        /*
@@ -279,6 +686,18 @@ ENDPROC(native_usergs_sysret64)
                        0x06 /* DW_OP_deref */, \
                        0x08 /* DW_OP_const1u */, SS+8-RBP, \
                        0x22 /* DW_OP_plus */
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       testb $3, CS(%rdi)
+       jnz 1f
+       pax_enter_kernel
+       jmp 2f
+1:     pax_enter_kernel_user
+2:
+#else
+       pax_enter_kernel
+#endif
+
        /* We entered an interrupt context - irqs are off: */
        TRACE_IRQS_OFF
        .endm
@@ -308,9 +727,52 @@ ENTRY(save_paranoid)
        js 1f   /* negative -> in kernel */
        SWAPGS
        xorl %ebx,%ebx
-1:     ret
+1:
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       testb $3, CS+8(%rsp)
+       jnz 1f
+       pax_enter_kernel
+       jmp 2f
+1:     pax_enter_kernel_user
+2:
+#else
+       pax_enter_kernel
+#endif
+       pax_force_retaddr
+       ret
        CFI_ENDPROC
-END(save_paranoid)
+ENDPROC(save_paranoid)
+
+ENTRY(save_paranoid_nmi)
+       XCPT_FRAME 1 RDI+8
+       cld
+       movq_cfi rdi, RDI+8
+       movq_cfi rsi, RSI+8
+       movq_cfi rdx, RDX+8
+       movq_cfi rcx, RCX+8
+       movq_cfi rax, RAX+8
+       movq_cfi r8, R8+8
+       movq_cfi r9, R9+8
+       movq_cfi r10, R10+8
+       movq_cfi r11, R11+8
+       movq_cfi rbx, RBX+8
+       movq_cfi rbp, RBP+8
+       movq_cfi r12, R12+8
+       movq_cfi r13, R13+8
+       movq_cfi r14, R14+8
+       movq_cfi r15, R15+8
+       movl $1,%ebx
+       movl $MSR_GS_BASE,%ecx
+       rdmsr
+       testl %edx,%edx
+       js 1f   /* negative -> in kernel */
+       SWAPGS
+       xorl %ebx,%ebx
+1:     pax_enter_kernel_nmi
+       pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ENDPROC(save_paranoid_nmi)
 
 /*
  * A newly forked process directly context switches into this address.
@@ -331,7 +793,7 @@ ENTRY(ret_from_fork)
 
        RESTORE_REST
 
-       testl $3, CS-ARGOFFSET(%rsp)            # from kernel_thread?
+       testb $3, CS-ARGOFFSET(%rsp)            # from kernel_thread?
        jz   1f
 
        /*
@@ -344,15 +806,13 @@ ENTRY(ret_from_fork)
        jmp  int_ret_from_sys_call
 
 1:
-       subq $REST_SKIP, %rsp   # leave space for volatiles
-       CFI_ADJUST_CFA_OFFSET   REST_SKIP
        movq %rbp, %rdi
        call *%rbx
        movl $0, RAX(%rsp)
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
-END(ret_from_fork)
+ENDPROC(ret_from_fork)
 
 /*
  * System call entry. Up to 6 arguments in registers are supported.
@@ -389,7 +849,7 @@ END(ret_from_fork)
 ENTRY(system_call)
        CFI_STARTPROC   simple
        CFI_SIGNAL_FRAME
-       CFI_DEF_CFA     rsp,KERNEL_STACK_OFFSET
+       CFI_DEF_CFA     rsp,0
        CFI_REGISTER    rip,rcx
        /*CFI_REGISTER  rflags,r11*/
        SWAPGS_UNSAFE_STACK
@@ -402,16 +862,23 @@ GLOBAL(system_call_after_swapgs)
 
        movq    %rsp,PER_CPU_VAR(old_rsp)
        movq    PER_CPU_VAR(kernel_stack),%rsp
+       SAVE_ARGS 8*6, 0, rax_enosys=1
+       pax_enter_kernel_user
+
+#ifdef CONFIG_PAX_RANDKSTACK
+       pax_erase_kstack
+#endif
+
        /*
         * No need to follow this irqs off/on section - it's straight
         * and short:
         */
        ENABLE_INTERRUPTS(CLBR_NONE)
-       SAVE_ARGS 8, 0, rax_enosys=1
        movq_cfi rax,(ORIG_RAX-ARGOFFSET)
        movq  %rcx,RIP-ARGOFFSET(%rsp)
        CFI_REL_OFFSET rip,RIP-ARGOFFSET
-       testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+       GET_THREAD_INFO(%rcx)
+       testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
        jnz tracesys
 system_call_fastpath:
 #if __SYSCALL_MASK == ~0
@@ -435,10 +902,13 @@ sysret_check:
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
-       movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
+       GET_THREAD_INFO(%rcx)
+       movl TI_flags(%rcx),%edx
        andl %edi,%edx
        jnz  sysret_careful
        CFI_REMEMBER_STATE
+       pax_exit_kernel_user
+       pax_erase_kstack
        /*
         * sysretq will re-enable interrupts:
         */
@@ -497,12 +967,15 @@ sysret_audit:
 
        /* Do syscall tracing */
 tracesys:
-       leaq -REST_SKIP(%rsp), %rdi
+       movq %rsp, %rdi
        movq $AUDIT_ARCH_X86_64, %rsi
        call syscall_trace_enter_phase1
        test %rax, %rax
        jnz tracesys_phase2             /* if needed, run the slow path */
-       LOAD_ARGS 0                     /* else restore clobbered regs */
+
+       pax_erase_kstack
+
+       LOAD_ARGS                       /* else restore clobbered regs */
        jmp system_call_fastpath        /*      and return to the fast path */
 
 tracesys_phase2:
@@ -513,12 +986,14 @@ tracesys_phase2:
        movq %rax,%rdx
        call syscall_trace_enter_phase2
 
+       pax_erase_kstack
+
        /*
         * Reload arg registers from stack in case ptrace changed them.
         * We don't reload %rax because syscall_trace_entry_phase2() returned
         * the value it wants us to use in the table lookup.
         */
-       LOAD_ARGS ARGOFFSET, 1
+       LOAD_ARGS 1
        RESTORE_REST
 #if __SYSCALL_MASK == ~0
        cmpq $__NR_syscall_max,%rax
@@ -548,7 +1023,9 @@ GLOBAL(int_with_check)
        andl %edi,%edx
        jnz   int_careful
        andl    $~TS_COMPAT,TI_status(%rcx)
-       jmp   retint_swapgs
+       pax_exit_kernel_user
+       pax_erase_kstack
+       jmp   retint_swapgs_pax
 
        /* Either reschedule or signal or syscall exit tracking needed. */
        /* First do a reschedule test. */
@@ -594,7 +1071,7 @@ int_restore_rest:
        TRACE_IRQS_OFF
        jmp int_with_check
        CFI_ENDPROC
-END(system_call)
+ENDPROC(system_call)
 
        .macro FORK_LIKE func
 ENTRY(stub_\func)
@@ -607,9 +1084,10 @@ ENTRY(stub_\func)
        DEFAULT_FRAME 0 8               /* offset 8: return address */
        call sys_\func
        RESTORE_TOP_OF_STACK %r11, 8
-       ret $REST_SKIP          /* pop extended registers */
+       pax_force_retaddr
+       ret
        CFI_ENDPROC
-END(stub_\func)
+ENDPROC(stub_\func)
        .endm
 
        .macro FIXED_FRAME label,func
@@ -619,9 +1097,10 @@ ENTRY(\label)
        FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
        call \func
        RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
+       pax_force_retaddr
        ret
        CFI_ENDPROC
-END(\label)
+ENDPROC(\label)
        .endm
 
        FORK_LIKE  clone
@@ -629,19 +1108,6 @@ END(\label)
        FORK_LIKE  vfork
        FIXED_FRAME stub_iopl, sys_iopl
 
-ENTRY(ptregscall_common)
-       DEFAULT_FRAME 1 8       /* offset 8: return address */
-       RESTORE_TOP_OF_STACK %r11, 8
-       movq_cfi_restore R15+8, r15
-       movq_cfi_restore R14+8, r14
-       movq_cfi_restore R13+8, r13
-       movq_cfi_restore R12+8, r12
-       movq_cfi_restore RBP+8, rbp
-       movq_cfi_restore RBX+8, rbx
-       ret $REST_SKIP          /* pop extended registers */
-       CFI_ENDPROC
-END(ptregscall_common)
-
 ENTRY(stub_execve)
        CFI_STARTPROC
        addq $8, %rsp
@@ -653,7 +1119,7 @@ ENTRY(stub_execve)
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
-END(stub_execve)
+ENDPROC(stub_execve)
 
 ENTRY(stub_execveat)
        CFI_STARTPROC
@@ -667,7 +1133,7 @@ ENTRY(stub_execveat)
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
-END(stub_execveat)
+ENDPROC(stub_execveat)
 
 /*
  * sigreturn is special because it needs to restore all registers on return.
@@ -684,7 +1150,7 @@ ENTRY(stub_rt_sigreturn)
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
-END(stub_rt_sigreturn)
+ENDPROC(stub_rt_sigreturn)
 
 #ifdef CONFIG_X86_X32_ABI
 ENTRY(stub_x32_rt_sigreturn)
@@ -698,7 +1164,7 @@ ENTRY(stub_x32_rt_sigreturn)
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
-END(stub_x32_rt_sigreturn)
+ENDPROC(stub_x32_rt_sigreturn)
 
 ENTRY(stub_x32_execve)
        CFI_STARTPROC
@@ -763,7 +1229,7 @@ vector=vector+1
 2:     jmp common_interrupt
 .endr
        CFI_ENDPROC
-END(irq_entries_start)
+ENDPROC(irq_entries_start)
 
 .previous
 END(interrupt)
@@ -780,8 +1246,8 @@ END(interrupt)
 /* 0(%rsp): ~(interrupt number) */
        .macro interrupt func
        /* reserve pt_regs for scratch regs and rbp */
-       subq $ORIG_RAX-RBP, %rsp
-       CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
+       subq $ORIG_RAX, %rsp
+       CFI_ADJUST_CFA_OFFSET ORIG_RAX
        SAVE_ARGS_IRQ
        call \func
        .endm
@@ -804,14 +1270,14 @@ ret_from_intr:
 
        /* Restore saved previous stack */
        popq %rsi
-       CFI_DEF_CFA rsi,SS+8-RBP        /* reg/off reset after def_cfa_expr */
-       leaq ARGOFFSET-RBP(%rsi), %rsp
+       CFI_DEF_CFA rsi,SS+8    /* reg/off reset after def_cfa_expr */
+       movq %rsi, %rsp
        CFI_DEF_CFA_REGISTER    rsp
-       CFI_ADJUST_CFA_OFFSET   RBP-ARGOFFSET
+       CFI_ADJUST_CFA_OFFSET   -ARGOFFSET
 
 exit_intr:
        GET_THREAD_INFO(%rcx)
-       testl $3,CS-ARGOFFSET(%rsp)
+       testb $3,CS-ARGOFFSET(%rsp)
        je retint_kernel
 
        /* Interrupt came from user space */
@@ -833,12 +1299,35 @@ retint_swapgs:           /* return to user-space */
         * The iretq could re-enable interrupts:
         */
        DISABLE_INTERRUPTS(CLBR_ANY)
+       pax_exit_kernel_user
+retint_swapgs_pax:
        TRACE_IRQS_IRETQ
        SWAPGS
        jmp restore_args
 
 retint_restore_args:   /* return to kernel space */
        DISABLE_INTERRUPTS(CLBR_ANY)
+       pax_exit_kernel
+
+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
+       /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
+        * namely calling EFI runtime services with a phys mapping. We're
+        * starting off with NOPs and patch in the real instrumentation
+        * (BTS/OR) before starting any userland process; even before starting
+        * up the APs.
+        */
+       .pushsection .altinstr_replacement, "a"
+       601: pax_force_retaddr (RIP-ARGOFFSET)
+       602:
+       .popsection
+       603: .fill 602b-601b, 1, 0x90
+       .pushsection .altinstructions, "a"
+       altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
+       .popsection
+#else
+       pax_force_retaddr (RIP-ARGOFFSET)
+#endif
+
        /*
         * The iretq could re-enable interrupts:
         */
@@ -876,15 +1365,15 @@ native_irq_return_ldt:
        SWAPGS
        movq PER_CPU_VAR(espfix_waddr),%rdi
        movq %rax,(0*8)(%rdi)   /* RAX */
-       movq (2*8)(%rsp),%rax   /* RIP */
+       movq (2*8 + RIP-RIP)(%rsp),%rax /* RIP */
        movq %rax,(1*8)(%rdi)
-       movq (3*8)(%rsp),%rax   /* CS */
+       movq (2*8 + CS-RIP)(%rsp),%rax  /* CS */
        movq %rax,(2*8)(%rdi)
-       movq (4*8)(%rsp),%rax   /* RFLAGS */
+       movq (2*8 + EFLAGS-RIP)(%rsp),%rax      /* RFLAGS */
        movq %rax,(3*8)(%rdi)
-       movq (6*8)(%rsp),%rax   /* SS */
+       movq (2*8 + SS-RIP)(%rsp),%rax  /* SS */
        movq %rax,(5*8)(%rdi)
-       movq (5*8)(%rsp),%rax   /* RSP */
+       movq (2*8 + RSP-RIP)(%rsp),%rax /* RSP */
        movq %rax,(4*8)(%rdi)
        andl $0xffff0000,%eax
        popq_cfi %rdi
@@ -938,7 +1427,7 @@ ENTRY(retint_kernel)
        jmp exit_intr
 #endif
        CFI_ENDPROC
-END(common_interrupt)
+ENDPROC(common_interrupt)
 
 /*
  * APIC interrupts.
@@ -952,7 +1441,7 @@ ENTRY(\sym)
        interrupt \do_sym
        jmp ret_from_intr
        CFI_ENDPROC
-END(\sym)
+ENDPROC(\sym)
 .endm
 
 #ifdef CONFIG_TRACING
@@ -1025,7 +1514,7 @@ apicinterrupt IRQ_WORK_VECTOR \
 /*
  * Exception entry points.
  */
-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
 
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
 ENTRY(\sym)
@@ -1076,6 +1565,12 @@ ENTRY(\sym)
        .endif
 
        .if \shift_ist != -1
+#ifdef CONFIG_SMP
+       imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
+       lea init_tss(%r13), %r13
+#else
+       lea init_tss(%rip), %r13
+#endif
        subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
        .endif
 
@@ -1092,7 +1587,7 @@ ENTRY(\sym)
        .endif
 
        CFI_ENDPROC
-END(\sym)
+ENDPROC(\sym)
 .endm
 
 #ifdef CONFIG_TRACING
@@ -1133,9 +1628,10 @@ gs_change:
 2:     mfence          /* workaround */
        SWAPGS
        popfq_cfi
+       pax_force_retaddr
        ret
        CFI_ENDPROC
-END(native_load_gs_index)
+ENDPROC(native_load_gs_index)
 
        _ASM_EXTABLE(gs_change,bad_gs)
        .section .fixup,"ax"
@@ -1163,9 +1659,10 @@ ENTRY(do_softirq_own_stack)
        CFI_DEF_CFA_REGISTER    rsp
        CFI_ADJUST_CFA_OFFSET   -8
        decl PER_CPU_VAR(irq_count)
+       pax_force_retaddr
        ret
        CFI_ENDPROC
-END(do_softirq_own_stack)
+ENDPROC(do_softirq_own_stack)
 
 #ifdef CONFIG_XEN
 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
@@ -1203,7 +1700,7 @@ ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
        decl PER_CPU_VAR(irq_count)
        jmp  error_exit
        CFI_ENDPROC
-END(xen_do_hypervisor_callback)
+ENDPROC(xen_do_hypervisor_callback)
 
 /*
  * Hypervisor uses this for application faults while it executes.
@@ -1262,7 +1759,7 @@ ENTRY(xen_failsafe_callback)
        SAVE_ALL
        jmp error_exit
        CFI_ENDPROC
-END(xen_failsafe_callback)
+ENDPROC(xen_failsafe_callback)
 
 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
        xen_hvm_callback_vector xen_evtchn_do_upcall
@@ -1309,18 +1806,33 @@ ENTRY(paranoid_exit)
        DEFAULT_FRAME
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF_DEBUG
-       testl %ebx,%ebx                         /* swapgs needed? */
+       testl $1,%ebx                           /* swapgs needed? */
        jnz paranoid_restore
-       testl $3,CS(%rsp)
+       testb $3,CS(%rsp)
        jnz   paranoid_userspace
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       pax_exit_kernel
+       TRACE_IRQS_IRETQ 0
+       SWAPGS_UNSAFE_STACK
+       RESTORE_ALL 8
+       pax_force_retaddr_bts
+       jmp irq_return
+#endif
 paranoid_swapgs:
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       pax_exit_kernel_user
+#else
+       pax_exit_kernel
+#endif
        TRACE_IRQS_IRETQ 0
        SWAPGS_UNSAFE_STACK
        RESTORE_ALL 8
        jmp irq_return
 paranoid_restore:
+       pax_exit_kernel
        TRACE_IRQS_IRETQ_DEBUG 0
        RESTORE_ALL 8
+       pax_force_retaddr_bts
        jmp irq_return
 paranoid_userspace:
        GET_THREAD_INFO(%rcx)
@@ -1349,7 +1861,7 @@ paranoid_schedule:
        TRACE_IRQS_OFF
        jmp paranoid_userspace
        CFI_ENDPROC
-END(paranoid_exit)
+ENDPROC(paranoid_exit)
 
 /*
  * Exception entry point. This expects an error code/orig_rax on the stack.
@@ -1376,12 +1888,23 @@ ENTRY(error_entry)
        movq %r14, R14+8(%rsp)
        movq %r15, R15+8(%rsp)
        xorl %ebx,%ebx
-       testl $3,CS+8(%rsp)
+       testb $3,CS+8(%rsp)
        je error_kernelspace
 error_swapgs:
        SWAPGS
 error_sti:
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       testb $3, CS+8(%rsp)
+       jnz 1f
+       pax_enter_kernel
+       jmp 2f
+1:     pax_enter_kernel_user
+2:
+#else
+       pax_enter_kernel
+#endif
        TRACE_IRQS_OFF
+       pax_force_retaddr
        ret
 
 /*
@@ -1416,7 +1939,7 @@ error_bad_iret:
        decl %ebx       /* Return to usergs */
        jmp error_sti
        CFI_ENDPROC
-END(error_entry)
+ENDPROC(error_entry)
 
 
 /* ebx:        no swapgs flag (1: don't need swapgs, 0: need it) */
@@ -1427,7 +1950,7 @@ ENTRY(error_exit)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        GET_THREAD_INFO(%rcx)
-       testl %eax,%eax
+       testl $1,%eax
        jne retint_kernel
        LOCKDEP_SYS_EXIT_IRQ
        movl TI_flags(%rcx),%edx
@@ -1436,7 +1959,7 @@ ENTRY(error_exit)
        jnz retint_careful
        jmp retint_swapgs
        CFI_ENDPROC
-END(error_exit)
+ENDPROC(error_exit)
 
 /*
  * Test if a given stack is an NMI stack or not.
@@ -1494,9 +2017,11 @@ ENTRY(nmi)
         * If %cs was not the kernel segment, then the NMI triggered in user
         * space, which means it is definitely not nested.
         */
+       cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
+       je 1f
        cmpl $__KERNEL_CS, 16(%rsp)
        jne first_nmi
-
+1:
        /*
         * Check the special variable on the stack to see if NMIs are
         * executing.
@@ -1530,8 +2055,7 @@ nested_nmi:
 
 1:
        /* Set up the interrupted NMIs stack to jump to repeat_nmi */
-       leaq -1*8(%rsp), %rdx
-       movq %rdx, %rsp
+       subq $8, %rsp
        CFI_ADJUST_CFA_OFFSET 1*8
        leaq -10*8(%rsp), %rdx
        pushq_cfi $__KERNEL_DS
@@ -1549,6 +2073,7 @@ nested_nmi_out:
        CFI_RESTORE rdx
 
        /* No need to check faults here */
+#      pax_force_retaddr_bts
        INTERRUPT_RETURN
 
        CFI_RESTORE_STATE
@@ -1645,13 +2170,13 @@ end_repeat_nmi:
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        /*
-        * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
+        * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
         * as we should not be calling schedule in NMI context.
         * Even with normal interrupts enabled. An NMI should not be
         * setting NEED_RESCHED or anything that normal interrupts and
         * exceptions might do.
         */
-       call save_paranoid
+       call save_paranoid_nmi
        DEFAULT_FRAME 0
 
        /*
@@ -1661,9 +2186,9 @@ end_repeat_nmi:
         * NMI itself takes a page fault, the page fault that was preempted
         * will read the information from the NMI page fault and not the
         * origin fault. Save it off and restore it if it changes.
-        * Use the r12 callee-saved register.
+        * Use the r13 callee-saved register.
         */
-       movq %cr2, %r12
+       movq %cr2, %r13
 
        /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
        movq %rsp,%rdi
@@ -1672,29 +2197,34 @@ end_repeat_nmi:
 
        /* Did the NMI take a page fault? Restore cr2 if it did */
        movq %cr2, %rcx
-       cmpq %rcx, %r12
+       cmpq %rcx, %r13
        je 1f
-       movq %r12, %cr2
+       movq %r13, %cr2
 1:
        
-       testl %ebx,%ebx                         /* swapgs needed? */
+       testl $1,%ebx                           /* swapgs needed? */
        jnz nmi_restore
 nmi_swapgs:
        SWAPGS_UNSAFE_STACK
 nmi_restore:
+       pax_exit_kernel_nmi
        /* Pop the extra iret frame at once */
        RESTORE_ALL 6*8
+       testb $3, 8(%rsp)
+       jnz 1f
+       pax_force_retaddr_bts
+1:
 
        /* Clear the NMI executing stack variable */
        movq $0, 5*8(%rsp)
        jmp irq_return
        CFI_ENDPROC
-END(nmi)
+ENDPROC(nmi)
 
 ENTRY(ignore_sysret)
        CFI_STARTPROC
        mov $-ENOSYS,%eax
        sysret
        CFI_ENDPROC
-END(ignore_sysret)
+ENDPROC(ignore_sysret)
 
index f5d0730e7b084beddefb0f4b8ff6755b9166535a..5bce89c77013b5d687fc2752de75560abedda797 100644 (file)
@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex);
 #define ESPFIX_MAX_PAGES  DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
 static void *espfix_pages[ESPFIX_MAX_PAGES];
 
-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
-       __aligned(PAGE_SIZE);
+static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata;
 
 static unsigned int page_random, slot_random;
 
@@ -122,11 +121,17 @@ static void init_espfix_random(void)
 void __init init_espfix_bsp(void)
 {
        pgd_t *pgd_p;
+       unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
 
        /* Install the espfix pud into the kernel page directory */
-       pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
+       pgd_p = &init_level4_pgt[index];
        pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
 
+#ifdef CONFIG_PAX_PER_CPU_PGD
+       clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
+       clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
+#endif
+
        /* Randomize the locations */
        init_espfix_random();
 
@@ -194,7 +199,7 @@ void init_espfix_ap(void)
                set_pte(&pte_p[n*PTE_STRIDE], pte);
 
        /* Job is done for this CPU and any CPU which shares this page */
-       ACCESS_ONCE(espfix_pages[page]) = stack_page;
+       ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
 
 unlock_done:
        mutex_unlock(&espfix_init_mutex);
index 8b7b0a51e742cd26defe12b535f37a865ee3c172..2395f29856082baf85b3a97e44057689e787829f 100644 (file)
@@ -89,7 +89,7 @@ static unsigned long text_ip_addr(unsigned long ip)
         * kernel identity mapping to modify code.
         */
        if (within(ip, (unsigned long)_text, (unsigned long)_etext))
-               ip = (unsigned long)__va(__pa_symbol(ip));
+               ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
 
        return ip;
 }
@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
 {
        unsigned char replaced[MCOUNT_INSN_SIZE];
 
+       ip = ktla_ktva(ip);
+
        /*
         * Note: Due to modules and __init, code can
         *  disappear and change, we need to protect against faulting
@@ -230,7 +232,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
        unsigned char old[MCOUNT_INSN_SIZE];
        int ret;
 
-       memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
+       memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
 
        ftrace_update_func = ip;
        /* Make sure the breakpoints see the ftrace_update_func update */
@@ -311,7 +313,7 @@ static int add_break(unsigned long ip, const char *old)
        unsigned char replaced[MCOUNT_INSN_SIZE];
        unsigned char brk = BREAKPOINT_INSTRUCTION;
 
-       if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+       if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
                return -EFAULT;
 
        /* Make sure it is what we expect it to be */
index eda1a865641e2e86f01961265619ab0484b96631..8f6df48ebd58bee755ff09be6affd421961f3d9c 100644 (file)
@@ -67,12 +67,12 @@ again:
        pgd = *pgd_p;
 
        /*
-        * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
-        * critical -- __PAGE_OFFSET would point us back into the dynamic
+        * The use of __early_va rather than __va here is critical:
+        * __va would point us back into the dynamic
         * range and we might end up looping forever...
         */
        if (pgd)
-               pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
+               pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
@@ -82,13 +82,13 @@ again:
                pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
                for (i = 0; i < PTRS_PER_PUD; i++)
                        pud_p[i] = 0;
-               *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
+               *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
        }
        pud_p += pud_index(address);
        pud = *pud_p;
 
        if (pud)
-               pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
+               pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
@@ -98,7 +98,7 @@ again:
                pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
                for (i = 0; i < PTRS_PER_PMD; i++)
                        pmd_p[i] = 0;
-               *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
+               *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
        }
        pmd = (physaddr & PMD_MASK) + early_pmd_flags;
        pmd_p[pmd_index(address)] = pmd;
@@ -175,7 +175,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
        if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
                early_printk("Kernel alive\n");
 
-       clear_page(init_level4_pgt);
        /* set init_level4_pgt kernel high mapping*/
        init_level4_pgt[511] = early_level4_pgt[511];
 
index f36bd42d6f0c8b5fc5cd75dcf133b35a1f6bfe3b..0ab447408610d36cb26d0976e9689e7cd991854d 100644 (file)
 /* Physical address */
 #define pa(X) ((X) - __PAGE_OFFSET)
 
+#ifdef CONFIG_PAX_KERNEXEC
+#define ta(X) (X)
+#else
+#define ta(X) ((X) - __PAGE_OFFSET)
+#endif
+
 /*
  * References to members of the new_cpu_data structure.
  */
  * and small than max_low_pfn, otherwise will waste some page table entries
  */
 
-#if PTRS_PER_PMD > 1
-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
-#else
-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
-#endif
+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
 
 /* Number of possible pages in the lowmem region */
 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
@@ -77,6 +79,12 @@ KERNEL_PAGES = LOWMEM_PAGES
 INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
 
+/*
+ * Real beginning of normal "text" segment
+ */
+ENTRY(stext)
+ENTRY(_stext)
+
 /*
  * 32-bit kernel entrypoint; only used by the boot CPU.  On entry,
  * %esi points to the real-mode code as a 32-bit pointer.
@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
  * can.
  */
 __HEAD
+
+#ifdef CONFIG_PAX_KERNEXEC
+       jmp startup_32
+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
+.fill PAGE_SIZE-5,1,0xcc
+#endif
+
 ENTRY(startup_32)
        movl pa(stack_start),%ecx
        
@@ -106,6 +121,59 @@ ENTRY(startup_32)
 2:
        leal -__PAGE_OFFSET(%ecx),%esp
 
+#ifdef CONFIG_SMP
+       movl $pa(cpu_gdt_table),%edi
+       movl $__per_cpu_load,%eax
+       movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
+       rorl $16,%eax
+       movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
+       movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
+       movl $__per_cpu_end - 1,%eax
+       subl $__per_cpu_start,%eax
+       movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       movl $NR_CPUS,%ecx
+       movl $pa(cpu_gdt_table),%edi
+1:
+       movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
+       movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
+       movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
+       addl $PAGE_SIZE_asm,%edi
+       loop 1b
+#endif
+
+#ifdef CONFIG_PAX_KERNEXEC
+       movl $pa(boot_gdt),%edi
+       movl $__LOAD_PHYSICAL_ADDR,%eax
+       movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
+       rorl $16,%eax
+       movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
+       movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
+       rorl $16,%eax
+
+       ljmp $(__BOOT_CS),$1f
+1:
+
+       movl $NR_CPUS,%ecx
+       movl $pa(cpu_gdt_table),%edi
+       addl $__PAGE_OFFSET,%eax
+1:
+       movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
+       movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
+       movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
+       movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
+       rorl $16,%eax
+       movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
+       movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
+       movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
+       movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
+       rorl $16,%eax
+       addl $PAGE_SIZE_asm,%edi
+       loop 1b
+#endif
+
 /*
  * Clear BSS first so that there are no surprises...
  */
@@ -201,8 +269,11 @@ ENTRY(startup_32)
        movl %eax, pa(max_pfn_mapped)
 
        /* Do early initialization of the fixmap area */
-       movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
-       movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
+#ifdef CONFIG_COMPAT_VDSO
+       movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
+#else
+       movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
+#endif
 #else  /* Not PAE */
 
 page_pde_offset = (__PAGE_OFFSET >> 20);
@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
        movl %eax, pa(max_pfn_mapped)
 
        /* Do early initialization of the fixmap area */
-       movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
-       movl %eax,pa(initial_page_table+0xffc)
+#ifdef CONFIG_COMPAT_VDSO
+       movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
+#else
+       movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
+#endif
 #endif
 
 #ifdef CONFIG_PARAVIRT
@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
        cmpl $num_subarch_entries, %eax
        jae bad_subarch
 
-       movl pa(subarch_entries)(,%eax,4), %eax
-       subl $__PAGE_OFFSET, %eax
-       jmp *%eax
+       jmp *pa(subarch_entries)(,%eax,4)
 
 bad_subarch:
 WEAK(lguest_entry)
@@ -261,10 +333,10 @@ WEAK(xen_entry)
        __INITDATA
 
 subarch_entries:
-       .long default_entry             /* normal x86/PC */
-       .long lguest_entry              /* lguest hypervisor */
-       .long xen_entry                 /* Xen hypervisor */
-       .long default_entry             /* Moorestown MID */
+       .long ta(default_entry)         /* normal x86/PC */
+       .long ta(lguest_entry)          /* lguest hypervisor */
+       .long ta(xen_entry)             /* Xen hypervisor */
+       .long ta(default_entry)         /* Moorestown MID */
 num_subarch_entries = (. - subarch_entries) / 4
 .previous
 #else
@@ -354,6 +426,7 @@ default_entry:
        movl pa(mmu_cr4_features),%eax
        movl %eax,%cr4
 
+#ifdef CONFIG_X86_PAE
        testb $X86_CR4_PAE, %al         # check if PAE is enabled
        jz enable_paging
 
@@ -382,6 +455,9 @@ default_entry:
        /* Make changes effective */
        wrmsr
 
+       btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
+#endif
+
 enable_paging:
 
 /*
@@ -449,14 +525,20 @@ is486:
 1:     movl $(__KERNEL_DS),%eax        # reload all the segment registers
        movl %eax,%ss                   # after changing gdt.
 
-       movl $(__USER_DS),%eax          # DS/ES contains default USER segment
+#      movl $(__KERNEL_DS),%eax        # DS/ES contains default KERNEL segment
        movl %eax,%ds
        movl %eax,%es
 
        movl $(__KERNEL_PERCPU), %eax
        movl %eax,%fs                   # set this cpu's percpu
 
+#ifdef CONFIG_CC_STACKPROTECTOR
        movl $(__KERNEL_STACK_CANARY),%eax
+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
+       movl $(__USER_DS),%eax
+#else
+       xorl %eax,%eax
+#endif
        movl %eax,%gs
 
        xorl %eax,%eax                  # Clear LDT
@@ -512,8 +594,11 @@ setup_once:
         * relocation.  Manually set base address in stack canary
         * segment descriptor.
         */
-       movl $gdt_page,%eax
+       movl $cpu_gdt_table,%eax
        movl $stack_canary,%ecx
+#ifdef CONFIG_SMP
+       addl $__per_cpu_load,%ecx
+#endif
        movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
        shrl $16, %ecx
        movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
        cmpl $2,(%esp)          # X86_TRAP_NMI
        je is_nmi               # Ignore NMI
 
-       cmpl $2,%ss:early_recursion_flag
+       cmpl $1,%ss:early_recursion_flag
        je hlt_loop
        incl %ss:early_recursion_flag
 
@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
        pushl (20+6*4)(%esp)    /* trapno */
        pushl $fault_msg
        call printk
-#endif
        call dump_stack
+#endif
 hlt_loop:
        hlt
        jmp hlt_loop
@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
 /* This is the default interrupt "handler" :-) */
        ALIGN
 ignore_int:
-       cld
 #ifdef CONFIG_PRINTK
+       cmpl $2,%ss:early_recursion_flag
+       je hlt_loop
+       incl %ss:early_recursion_flag
+       cld
        pushl %eax
        pushl %ecx
        pushl %edx
@@ -617,9 +705,6 @@ ignore_int:
        movl $(__KERNEL_DS),%eax
        movl %eax,%ds
        movl %eax,%es
-       cmpl $2,early_recursion_flag
-       je hlt_loop
-       incl early_recursion_flag
        pushl 16(%esp)
        pushl 24(%esp)
        pushl 32(%esp)
@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
 /*
  * BSS section
  */
-__PAGE_ALIGNED_BSS
-       .align PAGE_SIZE
 #ifdef CONFIG_X86_PAE
+.section .initial_pg_pmd,"a",@progbits
 initial_pg_pmd:
        .fill 1024*KPMDS,4,0
 #else
+.section .initial_page_table,"a",@progbits
 ENTRY(initial_page_table)
        .fill 1024,4,0
 #endif
+.section .initial_pg_fixmap,"a",@progbits
 initial_pg_fixmap:
        .fill 1024,4,0
+.section .empty_zero_page,"a",@progbits
 ENTRY(empty_zero_page)
        .fill 4096,1,0
+.section .swapper_pg_dir,"a",@progbits
 ENTRY(swapper_pg_dir)
+#ifdef CONFIG_X86_PAE
+       .fill 4,8,0
+#else
        .fill 1024,4,0
+#endif
 
 /*
  * This starts the data section.
  */
 #ifdef CONFIG_X86_PAE
-__PAGE_ALIGNED_DATA
-       /* Page-aligned for the benefit of paravirt? */
-       .align PAGE_SIZE
+.section .initial_page_table,"a",@progbits
 ENTRY(initial_page_table)
        .long   pa(initial_pg_pmd+PGD_IDENT_ATTR),0     /* low identity map */
 # if KPMDS == 3
@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
 #  error "Kernel PMDs should be 1, 2 or 3"
 # endif
        .align PAGE_SIZE                /* needs to be page-sized too */
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+ENTRY(cpu_pgd)
+       .rept 2*NR_CPUS
+       .fill   4,8,0
+       .endr
+#endif
+
 #endif
 
 .data
 .balign 4
 ENTRY(stack_start)
-       .long init_thread_union+THREAD_SIZE
+       .long init_thread_union+THREAD_SIZE-8
 
 __INITRODATA
 int_msg:
@@ -727,7 +825,7 @@ fault_msg:
  * segment size, and 32-bit linear address value:
  */
 
-       .data
+.section .rodata,"a",@progbits
 .globl boot_gdt_descr
 .globl idt_descr
 
@@ -736,7 +834,7 @@ fault_msg:
        .word 0                         # 32 bit align gdt_desc.address
 boot_gdt_descr:
        .word __BOOT_DS+7
-       .long boot_gdt - __PAGE_OFFSET
+       .long pa(boot_gdt)
 
        .word 0                         # 32-bit align idt_desc.address
 idt_descr:
@@ -747,7 +845,7 @@ idt_descr:
        .word 0                         # 32 bit align gdt_desc.address
 ENTRY(early_gdt_descr)
        .word GDT_ENTRIES*8-1
-       .long gdt_page                  /* Overwritten for secondary CPUs */
+       .long cpu_gdt_table             /* Overwritten for secondary CPUs */
 
 /*
  * The boot_gdt must mirror the equivalent in setup.S and is
@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
        .align L1_CACHE_BYTES
 ENTRY(boot_gdt)
        .fill GDT_ENTRY_BOOT_CS,8,0
-       .quad 0x00cf9a000000ffff        /* kernel 4GB code at 0x00000000 */
-       .quad 0x00cf92000000ffff        /* kernel 4GB data at 0x00000000 */
+       .quad 0x00cf9b000000ffff        /* kernel 4GB code at 0x00000000 */
+       .quad 0x00cf93000000ffff        /* kernel 4GB data at 0x00000000 */
+
+       .align PAGE_SIZE_asm
+ENTRY(cpu_gdt_table)
+       .rept NR_CPUS
+       .quad 0x0000000000000000        /* NULL descriptor */
+       .quad 0x0000000000000000        /* 0x0b reserved */
+       .quad 0x0000000000000000        /* 0x13 reserved */
+       .quad 0x0000000000000000        /* 0x1b reserved */
+
+#ifdef CONFIG_PAX_KERNEXEC
+       .quad 0x00cf9b000000ffff        /* 0x20 alternate kernel 4GB code at 0x00000000 */
+#else
+       .quad 0x0000000000000000        /* 0x20 unused */
+#endif
+
+       .quad 0x0000000000000000        /* 0x28 unused */
+       .quad 0x0000000000000000        /* 0x33 TLS entry 1 */
+       .quad 0x0000000000000000        /* 0x3b TLS entry 2 */
+       .quad 0x0000000000000000        /* 0x43 TLS entry 3 */
+       .quad 0x0000000000000000        /* 0x4b reserved */
+       .quad 0x0000000000000000        /* 0x53 reserved */
+       .quad 0x0000000000000000        /* 0x5b reserved */
+
+       .quad 0x00cf9b000000ffff        /* 0x60 kernel 4GB code at 0x00000000 */
+       .quad 0x00cf93000000ffff        /* 0x68 kernel 4GB data at 0x00000000 */
+       .quad 0x00cffb000000ffff        /* 0x73 user 4GB code at 0x00000000 */
+       .quad 0x00cff3000000ffff        /* 0x7b user 4GB data at 0x00000000 */
+
+       .quad 0x0000000000000000        /* 0x80 TSS descriptor */
+       .quad 0x0000000000000000        /* 0x88 LDT descriptor */
+
+       /*
+        * Segments used for calling PnP BIOS have byte granularity.
+        * The code segments and data segments have fixed 64k limits,
+        * the transfer segment sizes are set at run time.
+        */
+       .quad 0x00409b000000ffff        /* 0x90 32-bit code */
+       .quad 0x00009b000000ffff        /* 0x98 16-bit code */
+       .quad 0x000093000000ffff        /* 0xa0 16-bit data */
+       .quad 0x0000930000000000        /* 0xa8 16-bit data */
+       .quad 0x0000930000000000        /* 0xb0 16-bit data */
+
+       /*
+        * The APM segments have byte granularity and their bases
+        * are set at run time.  All have 64k limits.
+        */
+       .quad 0x00409b000000ffff        /* 0xb8 APM CS    code */
+       .quad 0x00009b000000ffff        /* 0xc0 APM CS 16 code (16 bit) */
+       .quad 0x004093000000ffff        /* 0xc8 APM DS    data */
+
+       .quad 0x00c093000000ffff        /* 0xd0 - ESPFIX SS */
+       .quad 0x0040930000000000        /* 0xd8 - PERCPU */
+       .quad 0x0040910000000017        /* 0xe0 - STACK_CANARY */
+       .quad 0x0000000000000000        /* 0xe8 - PCIBIOS_CS */
+       .quad 0x0000000000000000        /* 0xf0 - PCIBIOS_DS */
+       .quad 0x0000000000000000        /* 0xf8 - GDT entry 31: double-fault TSS */
+
+       /* Be sure this is zeroed to avoid false validations in Xen */
+       .fill PAGE_SIZE_asm - GDT_SIZE,1,0
+       .endr
index a468c0a65c42e00df4e10afd9921d81a53dbba3d..8b5a8799133ea4b72318cf67a818ccc18c26623e 100644 (file)
@@ -20,6 +20,8 @@
 #include <asm/processor-flags.h>
 #include <asm/percpu.h>
 #include <asm/nops.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative-asm.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
 L3_START_KERNEL = pud_index(__START_KERNEL_map)
+L4_VMALLOC_START = pgd_index(VMALLOC_START)
+L3_VMALLOC_START = pud_index(VMALLOC_START)
+L4_VMALLOC_END = pgd_index(VMALLOC_END)
+L3_VMALLOC_END = pud_index(VMALLOC_END)
+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
 
        .text
        __HEAD
@@ -89,11 +97,24 @@ startup_64:
         * Fixup the physical addresses in the page table
         */
        addq    %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
+       addq    %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
+       addq    %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
+       addq    %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
+       addq    %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
+       addq    %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
 
-       addq    %rbp, level3_kernel_pgt + (510*8)(%rip)
-       addq    %rbp, level3_kernel_pgt + (511*8)(%rip)
+       addq    %rbp, level3_ident_pgt + (0*8)(%rip)
+#ifndef CONFIG_XEN
+       addq    %rbp, level3_ident_pgt + (1*8)(%rip)
+#endif
+
+       addq    %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
+
+       addq    %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
+       addq    %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
 
        addq    %rbp, level2_fixmap_pgt + (506*8)(%rip)
+       addq    %rbp, level2_fixmap_pgt + (507*8)(%rip)
 
        /*
         * Set up the identity mapping for the switchover.  These
@@ -174,11 +195,12 @@ ENTRY(secondary_startup_64)
         * after the boot processor executes this code.
         */
 
+       orq     $-1, %rbp
        movq    $(init_level4_pgt - __START_KERNEL_map), %rax
 1:
 
-       /* Enable PAE mode and PGE */
-       movl    $(X86_CR4_PAE | X86_CR4_PGE), %ecx
+       /* Enable PAE mode and PSE/PGE */
+       movl    $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
        movq    %rcx, %cr4
 
        /* Setup early boot stage 4 level pagetables. */
@@ -199,10 +221,19 @@ ENTRY(secondary_startup_64)
        movl    $MSR_EFER, %ecx
        rdmsr
        btsl    $_EFER_SCE, %eax        /* Enable System Call */
-       btl     $20,%edi                /* No Execute supported? */
+       btl     $(X86_FEATURE_NX & 31),%edi     /* No Execute supported? */
        jnc     1f
        btsl    $_EFER_NX, %eax
+       cmpq    $-1, %rbp
+       je      1f
        btsq    $_PAGE_BIT_NX,early_pmd_flags(%rip)
+       btsq    $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
+       btsq    $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
+       btsq    $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
+       btsq    $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
+       btsq    $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
+       btsq    $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
+       btsq    $_PAGE_BIT_NX, __supported_pte_mask(%rip)
 1:     wrmsr                           /* Make changes effective */
 
        /* Setup cr0 */
@@ -282,6 +313,7 @@ ENTRY(secondary_startup_64)
         *      REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
         *              address given in m16:64.
         */
+       pax_set_fptr_mask
        movq    initial_code(%rip),%rax
        pushq   $0              # fake return address to stop unwinder
        pushq   $__KERNEL_CS    # set correct cs
@@ -313,7 +345,7 @@ ENDPROC(start_cpu0)
        .quad   INIT_PER_CPU_VAR(irq_stack_union)
 
        GLOBAL(stack_start)
-       .quad  init_thread_union+THREAD_SIZE-8
+       .quad  init_thread_union+THREAD_SIZE-16
        .word  0
        __FINITDATA
 
@@ -391,7 +423,7 @@ ENTRY(early_idt_handler)
        call dump_stack
 #ifdef CONFIG_KALLSYMS 
        leaq early_idt_ripmsg(%rip),%rdi
-       movq 40(%rsp),%rsi      # %rip again
+       movq 88(%rsp),%rsi      # %rip again
        call __print_symbol
 #endif
 #endif /* EARLY_PRINTK */
@@ -420,6 +452,7 @@ ENDPROC(early_idt_handler)
 early_recursion_flag:
        .long 0
 
+       .section .rodata,"a",@progbits
 #ifdef CONFIG_EARLY_PRINTK
 early_idt_msg:
        .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
@@ -447,29 +480,52 @@ NEXT_PAGE(early_level4_pgt)
 NEXT_PAGE(early_dynamic_pgts)
        .fill   512*EARLY_DYNAMIC_PAGE_TABLES,8,0
 
-       .data
+       .section .rodata,"a",@progbits
 
-#ifndef CONFIG_XEN
 NEXT_PAGE(init_level4_pgt)
-       .fill   512,8,0
-#else
-NEXT_PAGE(init_level4_pgt)
-       .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
        .org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
        .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+       .org    init_level4_pgt + L4_VMALLOC_START*8, 0
+       .quad   level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
+       .org    init_level4_pgt + L4_VMALLOC_END*8, 0
+       .quad   level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
+       .org    init_level4_pgt + L4_VMEMMAP_START*8, 0
+       .quad   level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
        .org    init_level4_pgt + L4_START_KERNEL*8, 0
        /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
        .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
 
+#ifdef CONFIG_PAX_PER_CPU_PGD
+NEXT_PAGE(cpu_pgd)
+       .rept 2*NR_CPUS
+       .fill   512,8,0
+       .endr
+#endif
+
 NEXT_PAGE(level3_ident_pgt)
        .quad   level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+#ifdef CONFIG_XEN
        .fill   511, 8, 0
+#else
+       .quad   level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
+       .fill   510,8,0
+#endif
+
+NEXT_PAGE(level3_vmalloc_start_pgt)
+       .fill   512,8,0
+
+NEXT_PAGE(level3_vmalloc_end_pgt)
+       .fill   512,8,0
+
+NEXT_PAGE(level3_vmemmap_pgt)
+       .fill   L3_VMEMMAP_START,8,0
+       .quad   level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
+
 NEXT_PAGE(level2_ident_pgt)
-       /* Since I easily can, map the first 1G.
+       /* Since I easily can, map the first 2G.
         * Don't set NX because code runs from these pages.
         */
-       PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
-#endif
+       PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
 
 NEXT_PAGE(level3_kernel_pgt)
        .fill   L3_START_KERNEL,8,0
@@ -477,6 +533,9 @@ NEXT_PAGE(level3_kernel_pgt)
        .quad   level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
        .quad   level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
 
+NEXT_PAGE(level2_vmemmap_pgt)
+       .fill   512,8,0
+
 NEXT_PAGE(level2_kernel_pgt)
        /*
         * 512 MB kernel mapping. We spend a full page on this pagetable
@@ -494,28 +553,64 @@ NEXT_PAGE(level2_kernel_pgt)
 NEXT_PAGE(level2_fixmap_pgt)
        .fill   506,8,0
        .quad   level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
-       /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
-       .fill   5,8,0
+       .quad   level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
+       /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
+       .fill   4,8,0
 
 NEXT_PAGE(level1_fixmap_pgt)
        .fill   512,8,0
 
+NEXT_PAGE(level1_vsyscall_pgt)
+       .fill   512,8,0
+
 #undef PMDS
 
-       .data
+       .align PAGE_SIZE
+ENTRY(cpu_gdt_table)
+       .rept NR_CPUS
+       .quad   0x0000000000000000      /* NULL descriptor */
+       .quad   0x00cf9b000000ffff      /* __KERNEL32_CS */
+       .quad   0x00af9b000000ffff      /* __KERNEL_CS */
+       .quad   0x00cf93000000ffff      /* __KERNEL_DS */
+       .quad   0x00cffb000000ffff      /* __USER32_CS */
+       .quad   0x00cff3000000ffff      /* __USER_DS, __USER32_DS  */
+       .quad   0x00affb000000ffff      /* __USER_CS */
+
+#ifdef CONFIG_PAX_KERNEXEC
+       .quad   0x00af9b000000ffff      /* __KERNEXEC_KERNEL_CS */
+#else
+       .quad   0x0                     /* unused */
+#endif
+
+       .quad   0,0                     /* TSS */
+       .quad   0,0                     /* LDT */
+       .quad   0,0,0                   /* three TLS descriptors */
+       .quad   0x0000f40000000000      /* node/CPU stored in limit */
+       /* asm/segment.h:GDT_ENTRIES must match this */
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       .quad   0x00cf93000000ffff      /* __UDEREF_KERNEL_DS */
+#else
+       .quad   0x0                     /* unused */
+#endif
+
+       /* zero the remaining page */
+       .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
+       .endr
+
        .align 16
        .globl early_gdt_descr
 early_gdt_descr:
        .word   GDT_ENTRIES*8-1
 early_gdt_descr_base:
-       .quad   INIT_PER_CPU_VAR(gdt_page)
+       .quad   cpu_gdt_table
 
 ENTRY(phys_base)
        /* This must match the first entry in level2_kernel_pgt */
        .quad   0x0000000000000000
 
 #include "../../x86/xen/xen-head.S"
-       
-       __PAGE_ALIGNED_BSS
+
+       .section .rodata,"a",@progbits
 NEXT_PAGE(empty_zero_page)
        .skip PAGE_SIZE
index 05fd74f537d62122ade73f53dad17c97346c7a80..c3548b1aeb690dedb75ad92051a73ea485ebfb27 100644 (file)
@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
 EXPORT_SYMBOL(cmpxchg8b_emu);
 #endif
 
+EXPORT_SYMBOL_GPL(cpu_gdt_table);
+
 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_partial_copy_generic);
+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
 
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
 EXPORT_SYMBOL(___preempt_schedule_context);
 #endif
 #endif
+
+#ifdef CONFIG_PAX_KERNEXEC
+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
+#endif
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+EXPORT_SYMBOL(cpu_pgd);
+#endif
index a9a4229f6161b25b6e8a5752be7943f478525d21..6f4d4763fa5efce5542c6c1ad56020c9b2e5228c 100644 (file)
@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
 static inline bool interrupted_user_mode(void)
 {
        struct pt_regs *regs = get_irq_regs();
-       return regs && user_mode_vm(regs);
+       return regs && user_mode(regs);
 }
 
 /*
index e7cc5370cd2fcade87dc1cecae2ab85184f62d27..67d7372ce0b9c4b45193995424554d6cb9ebcff2 100644 (file)
@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
 static void make_8259A_irq(unsigned int irq)
 {
        disable_irq_nosync(irq);
-       io_apic_irqs &= ~(1<<irq);
+       io_apic_irqs &= ~(1UL<<irq);
        irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
        enable_irq(irq);
 }
@@ -208,7 +208,7 @@ spurious_8259A_irq:
                               "spurious 8259A interrupt: IRQ%d.\n", irq);
                        spurious_irq_mask |= irqmask;
                }
-               atomic_inc(&irq_err_count);
+               atomic_inc_unchecked(&irq_err_count);
                /*
                 * Theoretically we do not have to handle this IRQ,
                 * but in Linux this does not cause problems and is
@@ -349,14 +349,16 @@ static void init_8259A(int auto_eoi)
        /* (slave's support for AEOI in flat mode is to be investigated) */
        outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
 
+       pax_open_kernel();
        if (auto_eoi)
                /*
                 * In AEOI mode we just have to mask the interrupt
                 * when acking.
                 */
-               i8259A_chip.irq_mask_ack = disable_8259A_irq;
+               *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
        else
-               i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
+               *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
+       pax_close_kernel();
 
        udelay(100);            /* wait for 8259A to initialize */
 
index a979b5bd2fc06ee5d07e2eb6a292caa773113f1a..1d6db75432aed508faa1a11ac5880753ae33f944 100644 (file)
@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
  * Quirk table for systems that misbehave (lock up, etc.) if port
  * 0x80 is used:
  */
-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
        {
                .callback       = dmi_io_delay_0xed_port,
                .ident          = "Compaq Presario V6000",
index 4ddaf66ea35f696eac6afce6bb43f01d2d84f1aa..49d5c1866b53bd8642c0750432056937128d257b 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/capability.h>
+#include <linux/security.h>
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/ioport.h>
@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
                return -EINVAL;
        if (turn_on && !capable(CAP_SYS_RAWIO))
                return -EPERM;
+#ifdef CONFIG_GRKERNSEC_IO
+       if (turn_on && grsec_disable_privio) {
+               gr_handle_ioperm();
+               return -ENODEV;
+       }
+#endif
 
        /*
         * If it's the first ioperm() call in this thread's lifetime, set the
@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
         * because the ->io_bitmap_max value must match the bitmap
         * contents:
         */
-       tss = &per_cpu(init_tss, get_cpu());
+       tss = init_tss + get_cpu();
 
        if (turn_on)
                bitmap_clear(t->io_bitmap_ptr, from, num);
@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
        if (level > old) {
                if (!capable(CAP_SYS_RAWIO))
                        return -EPERM;
+#ifdef CONFIG_GRKERNSEC_IO
+               if (grsec_disable_privio) {
+                       gr_handle_iopl();
+                       return -ENODEV;
+               }
+#endif
        }
        regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
        t->iopl = level << 12;
index 705ef8d48e2dc464936672fb54eea908f8f03b4e..8672c9d53b0d8567f8d4f4dc9127c4e1cd2c9907 100644 (file)
@@ -22,7 +22,7 @@
 #define CREATE_TRACE_POINTS
 #include <asm/trace/irq_vectors.h>
 
-atomic_t irq_err_count;
+atomic_unchecked_t irq_err_count;
 
 /* Function pointer for generic interrupt vector handling */
 void (*x86_platform_ipi_callback)(void) = NULL;
@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
                seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
        seq_puts(p, "  Hypervisor callback interrupts\n");
 #endif
-       seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
+       seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
 #if defined(CONFIG_X86_IO_APIC)
-       seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
+       seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
 #endif
        return 0;
 }
@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
 
 u64 arch_irq_stat(void)
 {
-       u64 sum = atomic_read(&irq_err_count);
+       u64 sum = atomic_read_unchecked(&irq_err_count);
        return sum;
 }
 
index 63ce838e5a5423ad3f425368f1c5adffb8c8356e..2ea3e06fd9d0550b31fa52583f74c09300420e66 100644 (file)
@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
 
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 
+extern void gr_handle_kernel_exploit(void);
+
 int sysctl_panic_on_stackoverflow __read_mostly;
 
 /* Debugging check for stack overflow: is there less than 1KB free? */
@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
        __asm__ __volatile__("andl %%esp,%0" :
                             "=r" (sp) : "0" (THREAD_SIZE - 1));
 
-       return sp < (sizeof(struct thread_info) + STACK_WARN);
+       return sp < STACK_WARN;
 }
 
 static void print_stack_overflow(void)
 {
        printk(KERN_WARNING "low stack detected by irq handler\n");
        dump_stack();
+       gr_handle_kernel_exploit();
        if (sysctl_panic_on_stackoverflow)
                panic("low stack detected by irq handler - check messages\n");
 }
@@ -84,10 +87,9 @@ static inline void *current_stack(void)
 static inline int
 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 {
-       struct irq_stack *curstk, *irqstk;
+       struct irq_stack *irqstk;
        u32 *isp, *prev_esp, arg1, arg2;
 
-       curstk = (struct irq_stack *) current_stack();
        irqstk = __this_cpu_read(hardirq_stack);
 
        /*
@@ -96,15 +98,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */
-       if (unlikely(curstk == irqstk))
+       if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
                return 0;
 
-       isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
+       isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
 
        /* Save the next esp at the bottom of the stack */
        prev_esp = (u32 *)irqstk;
        *prev_esp = current_stack_pointer;
 
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       __set_fs(MAKE_MM_SEG(0));
+#endif
+
        if (unlikely(overflow))
                call_on_stack(print_stack_overflow, isp);
 
@@ -115,6 +121,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
                     :  "0" (irq),   "1" (desc),  "2" (isp),
                        "D" (desc->handle_irq)
                     : "memory", "cc", "ecx");
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       __set_fs(current_thread_info()->addr_limit);
+#endif
+
        return 1;
 }
 
@@ -123,32 +134,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
  */
 void irq_ctx_init(int cpu)
 {
-       struct irq_stack *irqstk;
-
        if (per_cpu(hardirq_stack, cpu))
                return;
 
-       irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
-                                              THREADINFO_GFP,
-                                              THREAD_SIZE_ORDER));
-       per_cpu(hardirq_stack, cpu) = irqstk;
-
-       irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
-                                              THREADINFO_GFP,
-                                              THREAD_SIZE_ORDER));
-       per_cpu(softirq_stack, cpu) = irqstk;
-
-       printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
-              cpu, per_cpu(hardirq_stack, cpu),  per_cpu(softirq_stack, cpu));
+       per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
+       per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
 }
 
 void do_softirq_own_stack(void)
 {
-       struct thread_info *curstk;
        struct irq_stack *irqstk;
        u32 *isp, *prev_esp;
 
-       curstk = current_stack();
        irqstk = __this_cpu_read(softirq_stack);
 
        /* build the stack frame on the softirq stack */
@@ -158,7 +155,16 @@ void do_softirq_own_stack(void)
        prev_esp = (u32 *)irqstk;
        *prev_esp = current_stack_pointer;
 
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       __set_fs(MAKE_MM_SEG(0));
+#endif
+
        call_on_stack(__do_softirq, isp);
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       __set_fs(current_thread_info()->addr_limit);
+#endif
+
 }
 
 bool handle_irq(unsigned irq, struct pt_regs *regs)
@@ -172,7 +178,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
        if (unlikely(!desc))
                return false;
 
-       if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
+       if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
                if (unlikely(overflow))
                        print_stack_overflow();
                desc->handle_irq(irq, desc);
index e4b503d5558c5d435eddc3c3c5be8c55f1de558d..824fce8647d98764a99cd187b5c6ce096c610eeb 100644 (file)
@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
 EXPORT_PER_CPU_SYMBOL(irq_regs);
 
+extern void gr_handle_kernel_exploit(void);
+
 int sysctl_panic_on_stackoverflow;
 
 /*
@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
        u64 estack_top, estack_bottom;
        u64 curbase = (u64)task_stack_page(current);
 
-       if (user_mode_vm(regs))
+       if (user_mode(regs))
                return;
 
        if (regs->sp >= curbase + sizeof(struct thread_info) +
@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
                irq_stack_top, irq_stack_bottom,
                estack_top, estack_bottom);
 
+       gr_handle_kernel_exploit();
+
        if (sysctl_panic_on_stackoverflow)
                panic("low stack detected by irq handler - check messages\n");
 #endif
index 26d5a55a273610b8d49da975a006f156adb4e482..a01160a1332f70b90019038ddc95a4e8ada43eb6 100644 (file)
@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
                         * Jump label is enabled for the first time.
                         * So we expect a default_nop...
                         */
-                       if (unlikely(memcmp((void *)entry->code, default_nop, 5)
+                       if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
                                     != 0))
                                bug_at((void *)entry->code, __LINE__);
                } else {
@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
                         * ...otherwise expect an ideal_nop. Otherwise
                         * something went horribly wrong.
                         */
-                       if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
+                       if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
                                     != 0))
                                bug_at((void *)entry->code, __LINE__);
                }
@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
                 * are converting the default nop to the ideal nop.
                 */
                if (init) {
-                       if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
+                       if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
                                bug_at((void *)entry->code, __LINE__);
                } else {
                        code.jump = 0xe9;
                        code.offset = entry->target -
                                (entry->code + JUMP_LABEL_NOP_SIZE);
-                       if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
+                       if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
                                bug_at((void *)entry->code, __LINE__);
                }
                memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
index 7ec1d5f8d28339bce0b74191a1d458c6c8e5d5df..5a7d13016cc1cc3d6a6a0e2ae382d91d2912ff90 100644 (file)
@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
 #ifdef CONFIG_X86_32
        switch (regno) {
        case GDB_SS:
-               if (!user_mode_vm(regs))
+               if (!user_mode(regs))
                        *(unsigned long *)mem = __KERNEL_DS;
                break;
        case GDB_SP:
-               if (!user_mode_vm(regs))
+               if (!user_mode(regs))
                        *(unsigned long *)mem = kernel_stack_pointer(regs);
                break;
        case GDB_GS:
@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
                bp->attr.bp_addr = breakinfo[breakno].addr;
                bp->attr.bp_len = breakinfo[breakno].len;
                bp->attr.bp_type = breakinfo[breakno].type;
-               info->address = breakinfo[breakno].addr;
+               if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
+                       info->address = ktla_ktva(breakinfo[breakno].addr);
+               else
+                       info->address = breakinfo[breakno].addr;
                info->len = breakinfo[breakno].len;
                info->type = breakinfo[breakno].type;
                val = arch_install_hw_breakpoint(bp);
@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
        case 'k':
                /* clear the trace bit */
                linux_regs->flags &= ~X86_EFLAGS_TF;
-               atomic_set(&kgdb_cpu_doing_single_step, -1);
+               atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
 
                /* set the trace bit if we're stepping */
                if (remcomInBuffer[0] == 's') {
                        linux_regs->flags |= X86_EFLAGS_TF;
-                       atomic_set(&kgdb_cpu_doing_single_step,
+                       atomic_set_unchecked(&kgdb_cpu_doing_single_step,
                                   raw_smp_processor_id());
                }
 
@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
 
        switch (cmd) {
        case DIE_DEBUG:
-               if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
+               if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
                        if (user_mode(regs))
                                return single_step_cont(regs, args);
                        break;
@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
 #endif /* CONFIG_DEBUG_RODATA */
 
        bpt->type = BP_BREAKPOINT;
-       err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+       err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
                                BREAK_INSTR_SIZE);
        if (err)
                return err;
-       err = probe_kernel_write((char *)bpt->bpt_addr,
+       err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
                                 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
 #ifdef CONFIG_DEBUG_RODATA
        if (!err)
@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
                return -EBUSY;
        text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
                  BREAK_INSTR_SIZE);
-       err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
+       err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
        if (err)
                return err;
        if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
        if (mutex_is_locked(&text_mutex))
                goto knl_write;
        text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
-       err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
+       err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
        if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
                goto knl_write;
        return err;
 knl_write:
 #endif /* CONFIG_DEBUG_RODATA */
-       return probe_kernel_write((char *)bpt->bpt_addr,
+       return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
                                  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
 }
 
index 98f654d466e585167153e58811902675bfeb5baa..ac043529fb8abd166d8b15f44d21cf3731b1847d 100644 (file)
@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
                s32 raddr;
        } __packed *insn;
 
-       insn = (struct __arch_relative_insn *)from;
+       insn = (struct __arch_relative_insn *)ktla_ktva(from);
+
+       pax_open_kernel();
        insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
        insn->op = op;
+       pax_close_kernel();
 }
 
 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
        kprobe_opcode_t opcode;
        kprobe_opcode_t *orig_opcodes = opcodes;
 
-       if (search_exception_tables((unsigned long)opcodes))
+       if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
                return 0;       /* Page fault may occur on this address. */
 
 retry:
@@ -242,9 +245,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
         *  for the first byte, we can recover the original instruction
         *  from it and kp->opcode.
         */
-       memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+       memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
        buf[0] = kp->opcode;
-       return (unsigned long)buf;
+       return ktva_ktla((unsigned long)buf);
 }
 
 /*
@@ -338,7 +341,9 @@ int __copy_instruction(u8 *dest, u8 *src)
        /* Another subsystem puts a breakpoint, failed to recover */
        if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
                return 0;
+       pax_open_kernel();
        memcpy(dest, insn.kaddr, insn.length);
+       pax_close_kernel();
 
 #ifdef CONFIG_X86_64
        if (insn_rip_relative(&insn)) {
@@ -365,7 +370,9 @@ int __copy_instruction(u8 *dest, u8 *src)
                        return 0;
                }
                disp = (u8 *) dest + insn_offset_displacement(&insn);
+               pax_open_kernel();
                *(s32 *) disp = (s32) newdisp;
+               pax_close_kernel();
        }
 #endif
        return insn.length;
@@ -507,7 +514,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
                 * nor set current_kprobe, because it doesn't use single
                 * stepping.
                 */
-               regs->ip = (unsigned long)p->ainsn.insn;
+               regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
                preempt_enable_no_resched();
                return;
        }
@@ -524,9 +531,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
        regs->flags &= ~X86_EFLAGS_IF;
        /* single step inline if the instruction is an int3 */
        if (p->opcode == BREAKPOINT_INSTRUCTION)
-               regs->ip = (unsigned long)p->addr;
+               regs->ip = ktla_ktva((unsigned long)p->addr);
        else
-               regs->ip = (unsigned long)p->ainsn.insn;
+               regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
 }
 NOKPROBE_SYMBOL(setup_singlestep);
 
@@ -576,7 +583,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
        struct kprobe *p;
        struct kprobe_ctlblk *kcb;
 
-       if (user_mode_vm(regs))
+       if (user_mode(regs))
                return 0;
 
        addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
@@ -611,7 +618,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
                                setup_singlestep(p, regs, kcb, 0);
                        return 1;
                }
-       } else if (*addr != BREAKPOINT_INSTRUCTION) {
+       } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
                /*
                 * The breakpoint instruction was removed right
                 * after we hit it.  Another cpu has removed
@@ -658,6 +665,9 @@ static void __used kretprobe_trampoline_holder(void)
                        "       movq %rax, 152(%rsp)\n"
                        RESTORE_REGS_STRING
                        "       popfq\n"
+#ifdef KERNEXEC_PLUGIN
+                       "       btsq $63,(%rsp)\n"
+#endif
 #else
                        "       pushf\n"
                        SAVE_REGS_STRING
@@ -798,7 +808,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
                             struct kprobe_ctlblk *kcb)
 {
        unsigned long *tos = stack_addr(regs);
-       unsigned long copy_ip = (unsigned long)p->ainsn.insn;
+       unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
        unsigned long orig_ip = (unsigned long)p->addr;
        kprobe_opcode_t *insn = p->ainsn.insn;
 
@@ -981,7 +991,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
        struct die_args *args = data;
        int ret = NOTIFY_DONE;
 
-       if (args->regs && user_mode_vm(args->regs))
+       if (args->regs && user_mode(args->regs))
                return ret;
 
        if (val == DIE_GPF) {
index 7c523bbf3dc8fc3f95acf0a374e9f6f1164c3f47..01b051b788b396ec4a1e1d4e4fd7f6a80ca5ac2f 100644 (file)
@@ -79,6 +79,7 @@ found:
 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
 {
+       pax_open_kernel();
 #ifdef CONFIG_X86_64
        *addr++ = 0x48;
        *addr++ = 0xbf;
@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
        *addr++ = 0xb8;
 #endif
        *(unsigned long *)addr = val;
+       pax_close_kernel();
 }
 
 asm (
@@ -339,7 +341,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
         * Verify if the address gap is in 2GB range, because this uses
         * a relative jump.
         */
-       rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
+       rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
        if (abs(rel) > 0x7fffffff) {
                __arch_remove_optimized_kprobe(op, 0);
                return -ERANGE;
@@ -356,16 +358,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
        op->optinsn.size = ret;
 
        /* Copy arch-dep-instance from template */
-       memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
+       pax_open_kernel();
+       memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
+       pax_close_kernel();
 
        /* Set probe information */
        synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
 
        /* Set probe function call */
-       synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
+       synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
 
        /* Set returning jmp instruction at the tail of out-of-line buffer */
-       synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
+       synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
                           (u8 *)op->kp.addr + op->optinsn.size);
 
        flush_icache_range((unsigned long) buf,
@@ -390,7 +394,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
                WARN_ON(kprobe_disabled(&op->kp));
 
                /* Backup instructions which will be replaced by jump address */
-               memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
+               memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
                       RELATIVE_ADDR_SIZE);
 
                insn_buf[0] = RELATIVEJUMP_OPCODE;
@@ -438,7 +442,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
                /* This kprobe is really able to run optimized path. */
                op = container_of(p, struct optimized_kprobe, kp);
                /* Detour through copied instructions */
-               regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
+               regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
                if (!reenter)
                        reset_current_kprobe();
                preempt_enable_no_resched();
index c2bedaea11f7b929709e64979e17e15ca4d8d60a..25e7ab60fe04699ca8c1723f013180ebb4d10a25 100644 (file)
@@ -184,7 +184,7 @@ out:
 
 static struct kobj_attribute type_attr = __ATTR_RO(type);
 
-static struct bin_attribute data_attr = {
+static bin_attribute_no_const data_attr __read_only = {
        .attr = {
                .name = "data",
                .mode = S_IRUGO,
index c37886d759ccac2736c36b357cc0f399786f2fb3..d851d32a669a221ef4ce98595539ff130ba9958e 100644 (file)
@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
        if (reload) {
 #ifdef CONFIG_SMP
                preempt_disable();
-               load_LDT(pc);
+               load_LDT_nolock(pc);
                if (!cpumask_equal(mm_cpumask(current->mm),
                                   cpumask_of(smp_processor_id())))
                        smp_call_function(flush_ldt, current->mm, 1);
                preempt_enable();
 #else
-               load_LDT(pc);
+               load_LDT_nolock(pc);
 #endif
        }
        if (oldsize) {
@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
                return err;
 
        for (i = 0; i < old->size; i++)
-               write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
+               write_ldt_entry(new->ldt, i, old->ldt + i);
        return 0;
 }
 
@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
                retval = copy_ldt(&mm->context, &old_mm->context);
                mutex_unlock(&old_mm->context.lock);
        }
+
+       if (tsk == current) {
+               mm->context.vdso = 0;
+
+#ifdef CONFIG_X86_32
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+               mm->context.user_cs_base = 0UL;
+               mm->context.user_cs_limit = ~0UL;
+
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
+               cpus_clear(mm->context.cpu_user_cs_mask);
+#endif
+
+#endif
+#endif
+
+       }
+
        return retval;
 }
 
@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
                }
        }
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
+               error = -EINVAL;
+               goto out_unlock;
+       }
+#endif
+
        if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
                error = -EINVAL;
                goto out_unlock;
index 469b23d6acc272b2113878182582d9fa7532f189..5449cfed1083cf9ca29ecf69899c2be72732f1c0 100644 (file)
@@ -26,7 +26,7 @@
 #include <asm/cacheflush.h>
 #include <asm/debugreg.h>
 
-static void set_idt(void *newidt, __u16 limit)
+static void set_idt(struct desc_struct *newidt, __u16 limit)
 {
        struct desc_ptr curidt;
 
@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
 }
 
 
-static void set_gdt(void *newgdt, __u16 limit)
+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
 {
        struct desc_ptr curgdt;
 
@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
        }
 
        control_page = page_address(image->control_code_page);
-       memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
+       memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
 
        relocate_kernel_ptr = control_page;
        page_list[PA_CONTROL_PAGE] = __pa(control_page);
index 94ea120fa21ff7a9de85cc123054e9d2133b2305..4154cea5de9cd98a478d9ad48679df7b103d7623 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/linkage.h>
 #include <asm/ptrace.h>
 #include <asm/ftrace.h>
-
+#include <asm/alternative-asm.h>
 
        .code64
        .section .entry.text, "ax"
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 ENTRY(function_hook)
+       pax_force_retaddr
        retq
-END(function_hook)
+ENDPROC(function_hook)
 
 ENTRY(ftrace_caller)
        /* save_mcount_regs fills in first two parameters */
@@ -181,8 +182,9 @@ GLOBAL(ftrace_graph_call)
 #endif
 
 GLOBAL(ftrace_stub)
+       pax_force_retaddr
        retq
-END(ftrace_caller)
+ENDPROC(ftrace_caller)
 
 ENTRY(ftrace_regs_caller)
        /* Save the current flags before any operations that can change them */
@@ -253,7 +255,7 @@ GLOBAL(ftrace_regs_caller_end)
 
        jmp ftrace_return
 
-END(ftrace_regs_caller)
+ENDPROC(ftrace_regs_caller)
 
 
 #else /* ! CONFIG_DYNAMIC_FTRACE */
@@ -272,18 +274,20 @@ fgraph_trace:
 #endif
 
 GLOBAL(ftrace_stub)
+       pax_force_retaddr
        retq
 
 trace:
        /* save_mcount_regs fills in first two parameters */
        save_mcount_regs
 
+       pax_force_fptr ftrace_trace_function
        call   *ftrace_trace_function
 
        restore_mcount_regs
 
        jmp fgraph_trace
-END(function_hook)
+ENDPROC(function_hook)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_TRACER */
 
@@ -305,8 +309,9 @@ ENTRY(ftrace_graph_caller)
 
        restore_mcount_regs
 
+       pax_force_retaddr
        retq
-END(ftrace_graph_caller)
+ENDPROC(ftrace_graph_caller)
 
 GLOBAL(return_to_handler)
        subq  $24, %rsp
@@ -322,5 +327,7 @@ GLOBAL(return_to_handler)
        movq 8(%rsp), %rdx
        movq (%rsp), %rax
        addq $24, %rsp
+       pax_force_fptr %rdi
        jmp *%rdi
+ENDPROC(return_to_handler)
 #endif
index e69f9882bf95a942ae1ce0f75efbf07301c9adc3..72902b76b04a5b4058b46e854a7683213b99aeb1 100644 (file)
@@ -81,17 +81,62 @@ static unsigned long int get_module_load_offset(void)
 }
 #endif
 
-void *module_alloc(unsigned long size)
+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
 {
-       if (PAGE_ALIGN(size) > MODULES_LEN)
+       if (!size || PAGE_ALIGN(size) > MODULES_LEN)
                return NULL;
        return __vmalloc_node_range(size, 1,
                                    MODULES_VADDR + get_module_load_offset(),
-                                   MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
-                                   PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+                                   MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+                                   prot, NUMA_NO_NODE,
                                    __builtin_return_address(0));
 }
 
+void *module_alloc(unsigned long size)
+{
+
+#ifdef CONFIG_PAX_KERNEXEC
+       return __module_alloc(size, PAGE_KERNEL);
+#else
+       return __module_alloc(size, PAGE_KERNEL_EXEC);
+#endif
+
+}
+
+#ifdef CONFIG_PAX_KERNEXEC
+#ifdef CONFIG_X86_32
+void *module_alloc_exec(unsigned long size)
+{
+       struct vm_struct *area;
+
+       if (size == 0)
+               return NULL;
+
+       area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
+return area ? area->addr : NULL;
+}
+EXPORT_SYMBOL(module_alloc_exec);
+
+void module_memfree_exec(void *module_region)
+{
+       vunmap(module_region);
+}
+EXPORT_SYMBOL(module_memfree_exec);
+#else
+void module_memfree_exec(void *module_region)
+{
+       module_memfree(module_region);
+}
+EXPORT_SYMBOL(module_memfree_exec);
+
+void *module_alloc_exec(unsigned long size)
+{
+       return __module_alloc(size, PAGE_KERNEL_RX);
+}
+EXPORT_SYMBOL(module_alloc_exec);
+#endif
+#endif
+
 #ifdef CONFIG_X86_32
 int apply_relocate(Elf32_Shdr *sechdrs,
                   const char *strtab,
@@ -102,14 +147,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
        unsigned int i;
        Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
        Elf32_Sym *sym;
-       uint32_t *location;
+       uint32_t *plocation, location;
 
        DEBUGP("Applying relocate section %u to %u\n",
               relsec, sechdrs[relsec].sh_info);
        for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
                /* This is where to make the change */
-               location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
-                       + rel[i].r_offset;
+               plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
+               location = (uint32_t)plocation;
+               if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
+                       plocation = ktla_ktva((void *)plocation);
                /* This is the symbol it is referring to.  Note that all
                   undefined symbols have been resolved.  */
                sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
@@ -118,11 +165,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
                switch (ELF32_R_TYPE(rel[i].r_info)) {
                case R_386_32:
                        /* We add the value into the location given */
-                       *location += sym->st_value;
+                       pax_open_kernel();
+                       *plocation += sym->st_value;
+                       pax_close_kernel();
                        break;
                case R_386_PC32:
                        /* Add the value, subtract its position */
-                       *location += sym->st_value - (uint32_t)location;
+                       pax_open_kernel();
+                       *plocation += sym->st_value - location;
+                       pax_close_kernel();
                        break;
                default:
                        pr_err("%s: Unknown relocation: %u\n",
@@ -167,21 +218,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                case R_X86_64_NONE:
                        break;
                case R_X86_64_64:
+                       pax_open_kernel();
                        *(u64 *)loc = val;
+                       pax_close_kernel();
                        break;
                case R_X86_64_32:
+                       pax_open_kernel();
                        *(u32 *)loc = val;
+                       pax_close_kernel();
                        if (val != *(u32 *)loc)
                                goto overflow;
                        break;
                case R_X86_64_32S:
+                       pax_open_kernel();
                        *(s32 *)loc = val;
+                       pax_close_kernel();
                        if ((s64)val != *(s32 *)loc)
                                goto overflow;
                        break;
                case R_X86_64_PC32:
                        val -= (u64)loc;
+                       pax_open_kernel();
                        *(u32 *)loc = val;
+                       pax_close_kernel();
+
 #if 0
                        if ((s64)val != *(s32 *)loc)
                                goto overflow;
index 113e70784854fb55bdc0eb08fe5e38bee4946126..0a690e125326572f135d0a6d5be272d4365d27cc 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/notifier.h>
 #include <linux/uaccess.h>
 #include <linux/gfp.h>
+#include <linux/grsecurity.h>
 
 #include <asm/processor.h>
 #include <asm/msr.h>
@@ -105,6 +106,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
        int err = 0;
        ssize_t bytes = 0;
 
+#ifdef CONFIG_GRKERNSEC_KMEM
+       gr_handle_msr_write();
+       return -EPERM;
+#endif
+
        if (count % 8)
                return -EINVAL; /* Invalid chunk size */
 
@@ -152,6 +158,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
                        err = -EBADF;
                        break;
                }
+#ifdef CONFIG_GRKERNSEC_KMEM
+               gr_handle_msr_write();
+               return -EPERM;
+#endif
                if (copy_from_user(&regs, uregs, sizeof regs)) {
                        err = -EFAULT;
                        break;
@@ -235,7 +245,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
        return notifier_from_errno(err);
 }
 
-static struct notifier_block __refdata msr_class_cpu_notifier = {
+static struct notifier_block msr_class_cpu_notifier = {
        .notifier_call = msr_class_cpu_callback,
 };
 
index c3e985d1751ced9dbab5ac0aa7c38f9623b449f4..110a36a3fe30b18d3cbd5133ee25766c1d2b2833 100644 (file)
@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
 
 static void nmi_max_handler(struct irq_work *w)
 {
-       struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
+       struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
        int remainder_ns, decimal_msecs;
-       u64 whole_msecs = ACCESS_ONCE(a->max_duration);
+       u64 whole_msecs = ACCESS_ONCE(n->max_duration);
 
        remainder_ns = do_div(whole_msecs, (1000 * 1000));
        decimal_msecs = remainder_ns / 1000;
 
        printk_ratelimited(KERN_INFO
                "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
-               a->handler, whole_msecs, decimal_msecs);
+               n->action->handler, whole_msecs, decimal_msecs);
 }
 
 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
                delta = sched_clock() - delta;
                trace_nmi_handler(a->handler, (int)delta, thishandled);
 
-               if (delta < nmi_longest_ns || delta < a->max_duration)
+               if (delta < nmi_longest_ns || delta < a->work->max_duration)
                        continue;
 
-               a->max_duration = delta;
-               irq_work_queue(&a->irq_work);
+               a->work->max_duration = delta;
+               irq_work_queue(&a->work->irq_work);
        }
 
        rcu_read_unlock();
@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
 }
 NOKPROBE_SYMBOL(nmi_handle);
 
-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
 {
        struct nmi_desc *desc = nmi_to_desc(type);
        unsigned long flags;
@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
        if (!action->handler)
                return -EINVAL;
 
-       init_irq_work(&action->irq_work, nmi_max_handler);
+       action->work->action = action;
+       init_irq_work(&action->work->irq_work, nmi_max_handler);
 
        spin_lock_irqsave(&desc->lock, flags);
 
@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
         * event confuses some handlers (kdump uses this flag)
         */
        if (action->flags & NMI_FLAG_FIRST)
-               list_add_rcu(&action->list, &desc->head);
+               pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
        else
-               list_add_tail_rcu(&action->list, &desc->head);
+               pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
        
        spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
                if (!strcmp(n->name, name)) {
                        WARN(in_nmi(),
                                "Trying to free NMI (%s) from NMI context!\n", n->name);
-                       list_del_rcu(&n->list);
+                       pax_list_del_rcu((struct list_head *)&n->list);
                        break;
                }
        }
@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
 dotraplinkage notrace void
 do_nmi(struct pt_regs *regs, long error_code)
 {
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+       if (!user_mode(regs)) {
+               unsigned long cs = regs->cs & 0xFFFF;
+               unsigned long ip = ktva_ktla(regs->ip);
+
+               if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
+                       regs->ip = ip;
+       }
+#endif
+
        nmi_nesting_preprocess(regs);
 
        nmi_enter();
index 6d9582ec0324936646da2b537602ae267b6a4a64..f7462879202afb18cc59ddf019449f6bddbbeb8c 100644 (file)
@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
 {
        /* trap all the unknown NMIs we may generate */
        register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
-                       __initdata);
+                       __initconst);
 }
 
 static void __init cleanup_nmi_testsuite(void)
@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
        unsigned long timeout;
 
        if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
-                                NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
+                                NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
                nmi_fail = FAILURE;
                return;
        }
index bbb6c7316341f806dc3cb3a5bdce52cbb28de2a3..24a58ef040311f4edabb9bcea0a3be696ae437bd 100644 (file)
@@ -8,7 +8,7 @@
 
 #include <asm/paravirt.h>
 
-struct pv_lock_ops pv_lock_ops = {
+struct pv_lock_ops pv_lock_ops __read_only = {
 #ifdef CONFIG_SMP
        .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
        .unlock_kick = paravirt_nop,
index 548d25f00c90ad010379b0b36884cabee82b6e02..f8fb99c5b67dd6e611c4b18fa27956f7371d768d 100644 (file)
@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
 {
        return x;
 }
+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
+#endif
 
 void __init default_banner(void)
 {
@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
 
        if (opfunc == NULL)
                /* If there's no function, patch it with a ud2a (BUG) */
-               ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
-       else if (opfunc == _paravirt_nop)
+               ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
+       else if (opfunc == (void *)_paravirt_nop)
                /* If the operation is a nop, then nop the callsite */
                ret = paravirt_patch_nop();
 
        /* identity functions just return their single argument */
-       else if (opfunc == _paravirt_ident_32)
+       else if (opfunc == (void *)_paravirt_ident_32)
                ret = paravirt_patch_ident_32(insnbuf, len);
-       else if (opfunc == _paravirt_ident_64)
+       else if (opfunc == (void *)_paravirt_ident_64)
+               ret = paravirt_patch_ident_64(insnbuf, len);
+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
+       else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
                ret = paravirt_patch_ident_64(insnbuf, len);
+#endif
 
        else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
                 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
        if (insn_len > len || start == NULL)
                insn_len = len;
        else
-               memcpy(insnbuf, start, insn_len);
+               memcpy(insnbuf, ktla_ktva(start), insn_len);
 
        return insn_len;
 }
@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
        return this_cpu_read(paravirt_lazy_mode);
 }
 
-struct pv_info pv_info = {
+struct pv_info pv_info __read_only = {
        .name = "bare hardware",
        .paravirt_enabled = 0,
        .kernel_rpl = 0,
@@ -311,16 +318,16 @@ struct pv_info pv_info = {
 #endif
 };
 
-struct pv_init_ops pv_init_ops = {
+struct pv_init_ops pv_init_ops __read_only = {
        .patch = native_patch,
 };
 
-struct pv_time_ops pv_time_ops = {
+struct pv_time_ops pv_time_ops __read_only = {
        .sched_clock = native_sched_clock,
        .steal_clock = native_steal_clock,
 };
 
-__visible struct pv_irq_ops pv_irq_ops = {
+__visible struct pv_irq_ops pv_irq_ops __read_only = {
        .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
        .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
        .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
 #endif
 };
 
-__visible struct pv_cpu_ops pv_cpu_ops = {
+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
        .cpuid = native_cpuid,
        .get_debugreg = native_get_debugreg,
        .set_debugreg = native_set_debugreg,
@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
 NOKPROBE_SYMBOL(native_set_debugreg);
 NOKPROBE_SYMBOL(native_load_idt);
 
-struct pv_apic_ops pv_apic_ops = {
+struct pv_apic_ops pv_apic_ops __read_only= {
 #ifdef CONFIG_X86_LOCAL_APIC
        .startup_ipi_hook = paravirt_nop,
 #endif
 };
 
-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_PAE
+/* 64-bit pagetable entries */
+#define PTE_IDENT      PV_CALLEE_SAVE(_paravirt_ident_64)
+#else
 /* 32-bit pagetable entries */
 #define PTE_IDENT      __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
+#endif
 #else
 /* 64-bit pagetable entries */
 #define PTE_IDENT      __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
 #endif
 
-struct pv_mmu_ops pv_mmu_ops = {
+struct pv_mmu_ops pv_mmu_ops __read_only = {
 
        .read_cr2 = native_read_cr2,
        .write_cr2 = native_write_cr2,
@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
        .make_pud = PTE_IDENT,
 
        .set_pgd = native_set_pgd,
+       .set_pgd_batched = native_set_pgd_batched,
 #endif
 #endif /* PAGETABLE_LEVELS >= 3 */
 
@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
        },
 
        .set_fixmap = native_set_fixmap,
+
+#ifdef CONFIG_PAX_KERNEXEC
+       .pax_open_kernel = native_pax_open_kernel,
+       .pax_close_kernel = native_pax_close_kernel,
+#endif
+
 };
 
 EXPORT_SYMBOL_GPL(pv_time_ops);
index a1da6737ba5b80c4ee636204d49d4813348ef903..b6f5831018d93099074fddb226bef386fdd9eb50 100644 (file)
@@ -9,7 +9,11 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
+
+#ifndef CONFIG_PAX_MEMORY_UDEREF
 DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
+#endif
+
 DEF_NATIVE(pv_cpu_ops, clts, "clts");
 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
 
@@ -57,7 +61,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                PATCH_SITE(pv_mmu_ops, read_cr3);
                PATCH_SITE(pv_mmu_ops, write_cr3);
                PATCH_SITE(pv_cpu_ops, clts);
+
+#ifndef CONFIG_PAX_MEMORY_UDEREF
                PATCH_SITE(pv_mmu_ops, flush_tlb_single);
+#endif
+
                PATCH_SITE(pv_cpu_ops, wbinvd);
 
        patch_site:
index 0497f719977dff8ca0094b536a6b7e40ac371ef2..7186c0d34241567fb6f015bc21bbcad621f83068 100644 (file)
@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
                        tce_space = be64_to_cpu(readq(target));
                        tce_space = tce_space & TAR_SW_BITS;
 
-                       tce_space = tce_space & (~specified_table_size);
+                       tce_space = tce_space & (~(unsigned long)specified_table_size);
                        info->tce_space = (u64 *)__va(tce_space);
                }
        }
index 35ccf75696eb8ab22af0e94e6246b5cb3f655b4f..7a15747dc4af869543fbee2ceef940c31c4a76b4 100644 (file)
@@ -2,7 +2,7 @@
 #include <asm/iommu_table.h>
 #include <linux/string.h>
 #include <linux/kallsyms.h>
-
+#include <linux/sched.h>
 
 #define DEBUG 1
 
index 77dd0ad58be4a6c9c8af113189c9805c1a535633..9ec47239994a1df2bb7bb1a347fc8a5fbc511f95 100644 (file)
@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
                                      struct dma_attrs *attrs)
 {
        if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
-               swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+               swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
        else
                dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
 }
index e127ddaa2d5ad3e139ff7a99fb5485b43fb19416..94e384d42e0cd0f8c2e6dcd675c5f757681d7a45 100644 (file)
@@ -36,7 +36,8 @@
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
+EXPORT_SYMBOL(init_tss);
 
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU(unsigned char, is_idle);
@@ -94,7 +95,7 @@ void arch_task_cache_init(void)
         task_xstate_cachep =
                kmem_cache_create("task_xstate", xstate_size,
                                  __alignof__(union thread_xstate),
-                                 SLAB_PANIC | SLAB_NOTRACK, NULL);
+                                 SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
        setup_xstate_comp();
 }
 
@@ -108,7 +109,7 @@ void exit_thread(void)
        unsigned long *bp = t->io_bitmap_ptr;
 
        if (bp) {
-               struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
+               struct tss_struct *tss = init_tss + get_cpu();
 
                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
@@ -128,6 +129,9 @@ void flush_thread(void)
 {
        struct task_struct *tsk = current;
 
+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
+       loadsegment(gs, 0);
+#endif
        flush_ptrace_hw_breakpoint(tsk);
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        drop_init_fpu(tsk);
@@ -274,7 +278,7 @@ static void __exit_idle(void)
 void exit_idle(void)
 {
        /* idle loop has pid 0 */
-       if (current->pid)
+       if (task_pid_nr(current))
                return;
        __exit_idle();
 }
@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
        return ret;
 }
 #endif
-void stop_this_cpu(void *dummy)
+__noreturn void stop_this_cpu(void *dummy)
 {
        local_irq_disable();
        /*
@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
 }
 early_param("idle", idle_setup);
 
-unsigned long arch_align_stack(unsigned long sp)
+#ifdef CONFIG_PAX_RANDKSTACK
+void pax_randomize_kstack(struct pt_regs *regs)
 {
-       if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-               sp -= get_random_int() % 8192;
-       return sp & ~0xf;
-}
+       struct thread_struct *thread = &current->thread;
+       unsigned long time;
 
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-       unsigned long range_end = mm->brk + 0x02000000;
-       return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
-}
+       if (!randomize_va_space)
+               return;
+
+       if (v8086_mode(regs))
+               return;
 
+       rdtscl(time);
+
+       /* P4 seems to return a 0 LSB, ignore it */
+#ifdef CONFIG_MPENTIUM4
+       time &= 0x3EUL;
+       time <<= 2;
+#elif defined(CONFIG_X86_64)
+       time &= 0xFUL;
+       time <<= 4;
+#else
+       time &= 0x1FUL;
+       time <<= 3;
+#endif
+
+       thread->sp0 ^= time;
+       load_sp0(init_tss + smp_processor_id(), thread);
+
+#ifdef CONFIG_X86_64
+       this_cpu_write(kernel_stack, thread->sp0);
+#endif
+}
+#endif
index 8f3ebfe710d0715d9b876fa6f0d551933f30a225..cbc731b7944f161d7345d89292952fb18e1e7729 100644 (file)
@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
 unsigned long thread_saved_pc(struct task_struct *tsk)
 {
        return ((unsigned long *)tsk->thread.sp)[3];
+//XXX  return tsk->thread.eip;
 }
 
 void __show_regs(struct pt_regs *regs, int all)
@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
        unsigned long sp;
        unsigned short ss, gs;
 
-       if (user_mode_vm(regs)) {
+       if (user_mode(regs)) {
                sp = regs->sp;
                ss = regs->ss & 0xffff;
-               gs = get_user_gs(regs);
        } else {
                sp = kernel_stack_pointer(regs);
                savesegment(ss, ss);
-               savesegment(gs, gs);
        }
+       gs = get_user_gs(regs);
 
        printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
                        (u16)regs->cs, regs->ip, regs->flags,
-                       smp_processor_id());
+                       raw_smp_processor_id());
        print_symbol("EIP is at %s\n", regs->ip);
 
        printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
@@ -132,21 +132,22 @@ void release_thread(struct task_struct *dead_task)
 int copy_thread(unsigned long clone_flags, unsigned long sp,
        unsigned long arg, struct task_struct *p)
 {
-       struct pt_regs *childregs = task_pt_regs(p);
+       struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
        struct task_struct *tsk;
        int err;
 
        p->thread.sp = (unsigned long) childregs;
        p->thread.sp0 = (unsigned long) (childregs+1);
+       p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
 
        if (unlikely(p->flags & PF_KTHREAD)) {
                /* kernel thread */
                memset(childregs, 0, sizeof(struct pt_regs));
                p->thread.ip = (unsigned long) ret_from_kernel_thread;
-               task_user_gs(p) = __KERNEL_STACK_CANARY;
-               childregs->ds = __USER_DS;
-               childregs->es = __USER_DS;
+               savesegment(gs, childregs->gs);
+               childregs->ds = __KERNEL_DS;
+               childregs->es = __KERNEL_DS;
                childregs->fs = __KERNEL_PERCPU;
                childregs->bx = sp;     /* function */
                childregs->bp = arg;
@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        struct thread_struct *prev = &prev_p->thread,
                                 *next = &next_p->thread;
        int cpu = smp_processor_id();
-       struct tss_struct *tss = &per_cpu(init_tss, cpu);
+       struct tss_struct *tss = init_tss + cpu;
        fpu_switch_t fpu;
 
        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         */
        lazy_save_gs(prev->gs);
 
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       __set_fs(task_thread_info(next_p)->addr_limit);
+#endif
+
        /*
         * Load the per-thread Thread-Local Storage descriptor.
         */
@@ -310,9 +315,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         */
        arch_end_context_switch(next_p);
 
-       this_cpu_write(kernel_stack,
-                 (unsigned long)task_stack_page(next_p) +
-                 THREAD_SIZE - KERNEL_STACK_OFFSET);
+       this_cpu_write(current_task, next_p);
+       this_cpu_write(current_tinfo, &next_p->tinfo);
+       this_cpu_write(kernel_stack, next->sp0);
 
        /*
         * Restore %gs if needed (which is common)
@@ -322,8 +327,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
        switch_fpu_finish(next_p, fpu);
 
-       this_cpu_write(current_task, next_p);
-
        return prev_p;
 }
 
@@ -353,4 +356,3 @@ unsigned long get_wchan(struct task_struct *p)
        } while (count++ < 16);
        return 0;
 }
-
index 5a2c02913af3bd43dfc1be40947f176e0542422f..ec8611d2e53e410397074f8ccc9f8735401eaefb 100644 (file)
@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
        struct pt_regs *childregs;
        struct task_struct *me = current;
 
-       p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+       p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
        childregs = task_pt_regs(p);
        p->thread.sp = (unsigned long) childregs;
        p->thread.usersp = me->thread.usersp;
+       p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
        set_tsk_thread_flag(p, TIF_FORK);
        p->thread.io_bitmap_ptr = NULL;
 
@@ -171,6 +172,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
        p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
        savesegment(es, p->thread.es);
        savesegment(ds, p->thread.ds);
+       savesegment(ss, p->thread.ss);
+       BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
 
        if (unlikely(p->flags & PF_KTHREAD)) {
@@ -277,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        struct thread_struct *prev = &prev_p->thread;
        struct thread_struct *next = &next_p->thread;
        int cpu = smp_processor_id();
-       struct tss_struct *tss = &per_cpu(init_tss, cpu);
+       struct tss_struct *tss = init_tss + cpu;
        unsigned fsindex, gsindex;
        fpu_switch_t fpu;
 
@@ -331,6 +334,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);
 
+       savesegment(ss, prev->ss);
+       if (unlikely(next->ss != prev->ss))
+               loadsegment(ss, next->ss);
+
        /*
         * Switch FS and GS.
         *
@@ -404,6 +411,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        prev->usersp = this_cpu_read(old_rsp);
        this_cpu_write(old_rsp, next->usersp);
        this_cpu_write(current_task, next_p);
+       this_cpu_write(current_tinfo, &next_p->tinfo);
 
        /*
         * If it were not for PREEMPT_ACTIVE we could guarantee that the
@@ -413,9 +421,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
        this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
 
-       this_cpu_write(kernel_stack,
-                 (unsigned long)task_stack_page(next_p) +
-                 THREAD_SIZE - KERNEL_STACK_OFFSET);
+       this_cpu_write(kernel_stack, next->sp0);
 
        /*
         * Now maybe reload the debug registers and handle I/O bitmaps
@@ -485,12 +491,11 @@ unsigned long get_wchan(struct task_struct *p)
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
        stack = (unsigned long)task_stack_page(p);
-       if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
+       if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
                return 0;
        fp = *(u64 *)(p->thread.sp);
        do {
-               if (fp < (unsigned long)stack ||
-                   fp >= (unsigned long)stack+THREAD_SIZE)
+               if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
                        return 0;
                ip = *(u64 *)(fp+8);
                if (!in_sched_functions(ip))
index e510618b2e91a7969bb8cf6c74a35f59e4bf1bea..5165ac0011bf479dd49308647e7df37e23d4a719 100644 (file)
@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
        unsigned long sp = (unsigned long)&regs->sp;
        u32 *prev_esp;
 
-       if (context == (sp & ~(THREAD_SIZE - 1)))
+       if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
                return sp;
 
-       prev_esp = (u32 *)(context);
+       prev_esp = *(u32 **)(context);
        if (prev_esp)
                return (unsigned long)prev_esp;
 
@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
                if (child->thread.gs != value)
                        return do_arch_prctl(child, ARCH_SET_GS, value);
                return 0;
+
+       case offsetof(struct user_regs_struct,ip):
+               /*
+                * Protect against any attempt to set ip to an
+                * impossible address.  There are dragons lurking if the
+                * address is noncanonical.  (This explicitly allows
+                * setting ip to TASK_SIZE_MAX, because user code can do
+                * that all by itself by running off the end of its
+                * address space.
+                */
+               if (value > TASK_SIZE_MAX)
+                       return -EIO;
+               break;
+
 #endif
        }
 
@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
 {
        int i;
-       int dr7 = 0;
+       unsigned long dr7 = 0;
        struct arch_hw_breakpoint *info;
 
        for (i = 0; i < HBP_NUM; i++) {
@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
                 unsigned long addr, unsigned long data)
 {
        int ret;
-       unsigned long __user *datap = (unsigned long __user *)data;
+       unsigned long __user *datap = (__force unsigned long __user *)data;
 
        switch (request) {
        /* read the word at location addr in the USER area. */
@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
                if ((int) addr < 0)
                        return -EIO;
                ret = do_get_thread_area(child, addr,
-                                       (struct user_desc __user *)data);
+                                       (__force struct user_desc __user *) data);
                break;
 
        case PTRACE_SET_THREAD_AREA:
                if ((int) addr < 0)
                        return -EIO;
                ret = do_set_thread_area(child, addr,
-                                       (struct user_desc __user *)data, 0);
+                                       (__force struct user_desc __user *) data, 0);
                break;
 #endif
 
@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 
 #ifdef CONFIG_X86_64
 
-static struct user_regset x86_64_regsets[] __read_mostly = {
+static user_regset_no_const x86_64_regsets[] __read_only = {
        [REGSET_GENERAL] = {
                .core_note_type = NT_PRSTATUS,
                .n = sizeof(struct user_regs_struct) / sizeof(long),
@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
 #endif /* CONFIG_X86_64 */
 
 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
-static struct user_regset x86_32_regsets[] __read_mostly = {
+static user_regset_no_const x86_32_regsets[] __read_only = {
        [REGSET_GENERAL] = {
                .core_note_type = NT_PRSTATUS,
                .n = sizeof(struct user_regs_struct32) / sizeof(u32),
@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
  */
 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
 
-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
 {
 #ifdef CONFIG_X86_64
        x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
        memset(info, 0, sizeof(*info));
        info->si_signo = SIGTRAP;
        info->si_code = si_code;
-       info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
+       info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
 }
 
 void user_single_step_siginfo(struct task_struct *tsk,
@@ -1455,6 +1469,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
        }
 }
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+extern void gr_delayed_cred_worker(void);
+#endif
+
 /*
  * We can return 0 to resume the syscall or anything else to go to phase
  * 2.  If we resume the syscall, we need to put something appropriate in
@@ -1562,6 +1580,11 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
 
        BUG_ON(regs != task_pt_regs(current));
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+       if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
+               gr_delayed_cred_worker();
+#endif
+
        /*
         * If we stepped into a sysenter/syscall insn, it trapped in
         * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
@@ -1620,6 +1643,11 @@ void syscall_trace_leave(struct pt_regs *regs)
         */
        user_exit();
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+       if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
+               gr_delayed_cred_worker();
+#endif
+
        audit_syscall_exit(regs);
 
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
index 2f355d229a587771680b28080d92fd06f345d7e7..e75ed0a081e44da3d27584513f4201e8b0751a76 100644 (file)
@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
        reset_hung_task_detector();
 }
 
-static atomic64_t last_value = ATOMIC64_INIT(0);
+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
 
 void pvclock_resume(void)
 {
-       atomic64_set(&last_value, 0);
+       atomic64_set_unchecked(&last_value, 0);
 }
 
 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
         * updating at the same time, and one of them could be slightly behind,
         * making the assumption that last_value always go forward fail to hold.
         */
-       last = atomic64_read(&last_value);
+       last = atomic64_read_unchecked(&last_value);
        do {
                if (ret < last)
                        return last;
-               last = atomic64_cmpxchg(&last_value, last, ret);
+               last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
        } while (unlikely(last != ret));
 
        return ret;
index bae6c609888e7fdff25784d5bd96fd8dcd5ea88a..b438619f1c6fe1a99b85bcc2a922fcd6435cd76a 100644 (file)
@@ -70,6 +70,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
 
 void __noreturn machine_real_restart(unsigned int type)
 {
+
+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
+       struct desc_struct *gdt;
+#endif
+
        local_irq_disable();
 
        /*
@@ -97,7 +102,29 @@ void __noreturn machine_real_restart(unsigned int type)
 
        /* Jump to the identity-mapped low memory code */
 #ifdef CONFIG_X86_32
-       asm volatile("jmpl *%0" : :
+
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+       gdt = get_cpu_gdt_table(smp_processor_id());
+       pax_open_kernel();
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       gdt[GDT_ENTRY_KERNEL_DS].type = 3;
+       gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
+       loadsegment(ds, __KERNEL_DS);
+       loadsegment(es, __KERNEL_DS);
+       loadsegment(ss, __KERNEL_DS);
+#endif
+#ifdef CONFIG_PAX_KERNEXEC
+       gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
+       gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
+       gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
+       gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
+       gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
+       gdt[GDT_ENTRY_KERNEL_CS].g = 1;
+#endif
+       pax_close_kernel();
+#endif
+
+       asm volatile("ljmpl *%0" : :
                     "rm" (real_mode_header->machine_real_restart_asm),
                     "a" (type));
 #else
@@ -501,7 +528,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
  * This means that this function can never return, it can misbehave
  * by not rebooting properly and hanging.
  */
-static void native_machine_emergency_restart(void)
+static void __noreturn native_machine_emergency_restart(void)
 {
        int i;
        int attempt = 0;
@@ -621,13 +648,13 @@ void native_machine_shutdown(void)
 #endif
 }
 
-static void __machine_emergency_restart(int emergency)
+static void __noreturn __machine_emergency_restart(int emergency)
 {
        reboot_emergency = emergency;
        machine_ops.emergency_restart();
 }
 
-static void native_machine_restart(char *__unused)
+static void __noreturn native_machine_restart(char *__unused)
 {
        pr_notice("machine restart\n");
 
@@ -636,7 +663,7 @@ static void native_machine_restart(char *__unused)
        __machine_emergency_restart(0);
 }
 
-static void native_machine_halt(void)
+static void __noreturn native_machine_halt(void)
 {
        /* Stop other cpus and apics */
        machine_shutdown();
@@ -646,7 +673,7 @@ static void native_machine_halt(void)
        stop_this_cpu(NULL);
 }
 
-static void native_machine_power_off(void)
+static void __noreturn native_machine_power_off(void)
 {
        if (pm_power_off) {
                if (!reboot_force)
@@ -655,9 +682,10 @@ static void native_machine_power_off(void)
        }
        /* A fallback in case there is no PM info available */
        tboot_shutdown(TB_SHUTDOWN_HALT);
+       unreachable();
 }
 
-struct machine_ops machine_ops = {
+struct machine_ops machine_ops __read_only = {
        .power_off = native_machine_power_off,
        .shutdown = native_machine_shutdown,
        .emergency_restart = native_machine_emergency_restart,
index c8e41e90f59ceb9da7be768fcfe197a83602742e..64049ef3734216d4cb6df1451a9642657eee5efc 100644 (file)
@@ -57,7 +57,7 @@ struct device_fixup {
        unsigned int vendor;
        unsigned int device;
        void (*reboot_fixup)(struct pci_dev *);
-};
+} __do_const;
 
 /*
  * PCI ids solely used for fixups_table go here
index 3fd2c693e4752d01e071de68ef57e2db8b47b605..a44426401fc58c59e69c55e9485bfedceac8f93d 100644 (file)
@@ -96,8 +96,7 @@ relocate_kernel:
 
        /* jump to identity mapped page */
        addq    $(identity_mapped - relocate_kernel), %r8
-       pushq   %r8
-       ret
+       jmp     *%r8
 
 identity_mapped:
        /* set return address to 0 if not preserving context */
index ab4734e5411d76daa06624afc6d1eb4325d1a128..c4ca0eb17947817623f330bf06d071d199fd033b 100644 (file)
 #include <asm/mce.h>
 #include <asm/alternative.h>
 #include <asm/prom.h>
+#include <asm/boot.h>
 
 /*
  * max_low_pfn_mapped: highest direct mapped pfn under 4GB
@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
 #endif
 
 
-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
-__visible unsigned long mmu_cr4_features;
+#ifdef CONFIG_X86_64
+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
+#elif defined(CONFIG_X86_PAE)
+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
 #else
-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
+__visible unsigned long mmu_cr4_features __read_only;
 #endif
 
+void set_in_cr4(unsigned long mask)
+{
+       unsigned long cr4 = read_cr4();
+
+       if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
+               return;
+
+       pax_open_kernel();
+       mmu_cr4_features |= mask;
+       pax_close_kernel();
+
+       if (trampoline_cr4_features)
+               *trampoline_cr4_features = mmu_cr4_features;
+       cr4 |= mask;
+       write_cr4(cr4);
+}
+EXPORT_SYMBOL(set_in_cr4);
+
+void clear_in_cr4(unsigned long mask)
+{
+       unsigned long cr4 = read_cr4();
+
+       if (!(cr4 & mask) && cr4 == mmu_cr4_features)
+               return;
+
+       pax_open_kernel();
+       mmu_cr4_features &= ~mask;
+       pax_close_kernel();
+
+       if (trampoline_cr4_features)
+               *trampoline_cr4_features = mmu_cr4_features;
+       cr4 &= ~mask;
+       write_cr4(cr4);
+}
+EXPORT_SYMBOL(clear_in_cr4);
+
 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
 int bootloader_type, bootloader_version;
 
@@ -772,7 +811,7 @@ static void __init trim_bios_range(void)
         * area (640->1Mb) as ram even though it is not.
         * take them out.
         */
-       e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
+       e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
 
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 }
@@ -780,7 +819,7 @@ static void __init trim_bios_range(void)
 /* called before trim_bios_range() to spare extra sanitize */
 static void __init e820_add_kernel_range(void)
 {
-       u64 start = __pa_symbol(_text);
+       u64 start = __pa_symbol(ktla_ktva(_text));
        u64 size = __pa_symbol(_end) - start;
 
        /*
@@ -856,8 +895,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
 
 void __init setup_arch(char **cmdline_p)
 {
+#ifdef CONFIG_X86_32
+       memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
+#else
        memblock_reserve(__pa_symbol(_text),
                         (unsigned long)__bss_stop - (unsigned long)_text);
+#endif
 
        early_reserve_initrd();
 
@@ -955,16 +998,16 @@ void __init setup_arch(char **cmdline_p)
 
        if (!boot_params.hdr.root_flags)
                root_mountflags &= ~MS_RDONLY;
-       init_mm.start_code = (unsigned long) _text;
-       init_mm.end_code = (unsigned long) _etext;
+       init_mm.start_code = ktla_ktva((unsigned long) _text);
+       init_mm.end_code = ktla_ktva((unsigned long) _etext);
        init_mm.end_data = (unsigned long) _edata;
        init_mm.brk = _brk_end;
 
        mpx_mm_init(&init_mm);
 
-       code_resource.start = __pa_symbol(_text);
-       code_resource.end = __pa_symbol(_etext)-1;
-       data_resource.start = __pa_symbol(_etext);
+       code_resource.start = __pa_symbol(ktla_ktva(_text));
+       code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
+       data_resource.start = __pa_symbol(_sdata);
        data_resource.end = __pa_symbol(_edata)-1;
        bss_resource.start = __pa_symbol(__bss_start);
        bss_resource.end = __pa_symbol(__bss_stop)-1;
index e4fcb87ba7a61bc3f0f3c06bc860240e6ea6559e..9c06c55810469ac354c47caccc2b8fd21dd17214 100644 (file)
 #include <asm/cpu.h>
 #include <asm/stackprotector.h>
 
-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
 EXPORT_PER_CPU_SYMBOL(cpu_number);
+#endif
 
-#ifdef CONFIG_X86_64
 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
-#else
-#define BOOT_PERCPU_OFFSET 0
-#endif
 
 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
 
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
        [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
 };
 EXPORT_SYMBOL(__per_cpu_offset);
@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
 {
 #ifdef CONFIG_NEED_MULTIPLE_NODES
        pg_data_t *last = NULL;
-       unsigned int cpu;
+       int cpu;
 
        for_each_possible_cpu(cpu) {
                int node = early_cpu_to_node(cpu);
@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
 {
 #ifdef CONFIG_X86_32
        struct desc_struct gdt;
+       unsigned long base = per_cpu_offset(cpu);
 
-       pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
-                       0x2 | DESCTYPE_S, 0x8);
-       gdt.s = 1;
+       pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
+                       0x83 | DESCTYPE_S, 0xC);
        write_gdt_entry(get_cpu_gdt_table(cpu),
                        GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
 #endif
@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
        /* alrighty, percpu areas up and running */
        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu) {
+#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_X86_32
+               unsigned long canary = per_cpu(stack_canary.canary, cpu);
+#endif
+#endif
                per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
                per_cpu(cpu_number, cpu) = cpu;
@@ -258,6 +261,12 @@ void __init setup_per_cpu_areas(void)
                 * So set them all (boot cpu and all APs).
                 */
                set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
+#endif
+#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_X86_32
+               if (!cpu)
+                       per_cpu(stack_canary.canary, cpu) = canary;
+#endif
 #endif
                /*
                 * Up to this point, the boot CPU has been using .init.data
index ed37a768d0fc03dae75cda0b3402dd73202b3c96..39f936e320b77419ae48593d2e16f241bb8aff4d 100644 (file)
@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
         * Align the stack pointer according to the i386 ABI,
         * i.e. so that on function entry ((sp + 4) & 15) == 0.
         */
-       sp = ((sp + 4) & -16ul) - 4;
+       sp = ((sp - 12) & -16ul) - 4;
 #else /* !CONFIG_X86_32 */
        sp = round_down(sp, 16) - 8;
 #endif
@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
        }
 
        if (current->mm->context.vdso)
-               restorer = current->mm->context.vdso +
-                       selected_vdso32->sym___kernel_sigreturn;
+               restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
        else
-               restorer = &frame->retcode;
+               restorer = (void __user *)&frame->retcode;
        if (ksig->ka.sa.sa_flags & SA_RESTORER)
                restorer = ksig->ka.sa.sa_restorer;
 
@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
         * reasons and because gdb uses it as a signature to notice
         * signal handler stack frames.
         */
-       err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
+       err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
 
        if (err)
                return -EFAULT;
@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
                save_altstack_ex(&frame->uc.uc_stack, regs->sp);
 
                /* Set up to return from userspace.  */
-               restorer = current->mm->context.vdso +
-                       selected_vdso32->sym___kernel_rt_sigreturn;
+               if (current->mm->context.vdso)
+                       restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
+               else
+                       restorer = (void __user *)&frame->retcode;
                if (ksig->ka.sa.sa_flags & SA_RESTORER)
                        restorer = ksig->ka.sa.sa_restorer;
                put_user_ex(restorer, &frame->pretcode);
@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
                 * reasons and because gdb uses it as a signature to notice
                 * signal handler stack frames.
                 */
-               put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
+               put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
        } put_user_catch(err);
        
        err |= copy_siginfo_to_user(&frame->info, &ksig->info);
@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
 {
        int usig = signr_convert(ksig->sig);
        sigset_t *set = sigmask_to_save();
-       compat_sigset_t *cset = (compat_sigset_t *) set;
+       sigset_t sigcopy;
+       compat_sigset_t *cset;
+
+       sigcopy = *set;
+
+       cset = (compat_sigset_t *) &sigcopy;
 
        /* Set up the stack frame */
        if (is_ia32_frame()) {
@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
        } else if (is_x32_frame()) {
                return x32_setup_rt_frame(ksig, cset, regs);
        } else {
-               return __setup_rt_frame(ksig->sig, ksig, set, regs);
+               return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
        }
 }
 
index be8e1bde07aa47ff373f0245e0f4b7d6d2edcfd5..a3d93fa4767c3d7afe6ba9d55c265eaeb3cc4683 100644 (file)
@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
 
 __setup("nonmi_ipi", nonmi_ipi_setup);
 
-struct smp_ops smp_ops = {
+struct smp_ops smp_ops __read_only = {
        .smp_prepare_boot_cpu   = native_smp_prepare_boot_cpu,
        .smp_prepare_cpus       = native_smp_prepare_cpus,
        .smp_cpus_done          = native_smp_cpus_done,
index 6d7022c683e31555967f20edfc18b490576bc10b..4feb6beae892b682db71ddb5b62540ed561c1e69 100644 (file)
@@ -194,14 +194,17 @@ static void notrace start_secondary(void *unused)
 
        enable_start_cpu0 = 0;
 
-#ifdef CONFIG_X86_32
+       /* otherwise gcc will move up smp_processor_id before the cpu_init */
+       barrier();
+
        /* switch away from the initial page table */
+#ifdef CONFIG_PAX_PER_CPU_PGD
+       load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
+#else
        load_cr3(swapper_pg_dir);
-       __flush_tlb_all();
 #endif
+       __flush_tlb_all();
 
-       /* otherwise gcc will move up smp_processor_id before the cpu_init */
-       barrier();
        /*
         * Check TSC synchronization with the BP:
         */
@@ -765,8 +768,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
        alternatives_enable_smp();
 
        idle->thread.sp = (unsigned long) (((struct pt_regs *)
-                         (THREAD_SIZE  task_stack_page(idle))) - 1);
+                         (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
        per_cpu(current_task, cpu) = idle;
+       per_cpu(current_tinfo, cpu) = &idle->tinfo;
 
 #ifdef CONFIG_X86_32
        /* Stack for startup_32 can be just as for start_secondary onwards */
@@ -775,10 +779,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
        clear_tsk_thread_flag(idle, TIF_FORK);
        initial_gs = per_cpu_offset(cpu);
 #endif
-       per_cpu(kernel_stack, cpu) =
-               (unsigned long)task_stack_page(idle) -
-               KERNEL_STACK_OFFSET + THREAD_SIZE;
+       per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
+       pax_open_kernel();
        early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
+       pax_close_kernel();
        initial_code = (unsigned long)start_secondary;
        stack_start  = idle->thread.sp;
 
@@ -918,6 +922,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
        /* the FPU context is blank, nobody can own it */
        __cpu_disable_lazy_restore(cpu);
 
+#ifdef CONFIG_PAX_PER_CPU_PGD
+       clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
+                       swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+                       KERNEL_PGD_PTRS);
+       clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
+                       swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+                       KERNEL_PGD_PTRS);
+#endif
+
        err = do_boot_cpu(apicid, cpu, tidle);
        if (err) {
                pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
index 9b4d51d0c0d013274f7ba46c2e58319f0d1d9145..5d28b589a27bb2568bc67dcd95c576424e11e817 100644 (file)
@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
                struct desc_struct *desc;
                unsigned long base;
 
-               seg &= ~7UL;
+               seg >>= 3;
 
                mutex_lock(&child->mm->context.lock);
-               if (unlikely((seg >> 3) >= child->mm->context.size))
+               if (unlikely(seg >= child->mm->context.size))
                        addr = -1L; /* bogus selector, access would fault */
                else {
                        desc = child->mm->context.ldt + seg;
@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
                        addr += base;
                }
                mutex_unlock(&child->mm->context.lock);
-       }
+       } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
+               addr = ktla_ktva(addr);
 
        return addr;
 }
@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
        unsigned char opcode[15];
        unsigned long addr = convert_ip_to_linear(child, regs);
 
+       if (addr == -EINVAL)
+               return 0;
+
        copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
        for (i = 0; i < copied; i++) {
                switch (opcode[i]) {
index 30277e27431acde9a9320e0b1be4470bddb40e3a..5664a29429e3d29f601f8200934d3cdf9c2abc98 100644 (file)
@@ -81,8 +81,8 @@ out:
        return error;
 }
 
-static void find_start_end(unsigned long flags, unsigned long *begin,
-                          unsigned long *end)
+static void find_start_end(struct mm_struct *mm, unsigned long flags,
+                          unsigned long *begin, unsigned long *end)
 {
        if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
                unsigned long new_begin;
@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
                                *begin = new_begin;
                }
        } else {
-               *begin = current->mm->mmap_legacy_base;
+               *begin = mm->mmap_legacy_base;
                *end = TASK_SIZE;
        }
 }
@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        struct vm_area_struct *vma;
        struct vm_unmapped_area_info info;
        unsigned long begin, end;
+       unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 
        if (flags & MAP_FIXED)
                return addr;
 
-       find_start_end(flags, &begin, &end);
+       find_start_end(mm, flags, &begin, &end);
 
        if (len > end)
                return -ENOMEM;
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
-               if (end - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+               if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
                        return addr;
        }
 
@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        info.high_limit = end;
        info.align_mask = filp ? get_align_mask() : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
+       info.threadstack_offset = offset;
        return vm_unmapped_area(&info);
 }
 
@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;
+       unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 
        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
                goto bottomup;
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                               (!vma || addr + len <= vma->vm_start))
+               if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
                        return addr;
        }
 
@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        info.high_limit = mm->mmap_base;
        info.align_mask = filp ? get_align_mask() : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
+       info.threadstack_offset = offset;
        addr = vm_unmapped_area(&info);
        if (!(addr & ~PAGE_MASK))
                return addr;
index 91a4496db43429de294e481075782295b76ff0e6..bb87552d0a242518e7d0eb83c7c22cb3b06376f2 100644 (file)
@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
 
 void tboot_shutdown(u32 shutdown_type)
 {
-       void (*shutdown)(void);
+       void (* __noreturn shutdown)(void);
 
        if (!tboot_enabled())
                return;
@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
 
        switch_to_tboot_pt();
 
-       shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
+       shutdown = (void *)(unsigned long)tboot->shutdown_entry;
        shutdown();
 
        /* should not reach here */
@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
        return -ENODEV;
 }
 
-static atomic_t ap_wfs_count;
+static atomic_unchecked_t ap_wfs_count;
 
 static int tboot_wait_for_aps(int num_aps)
 {
@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
 {
        switch (action) {
        case CPU_DYING:
-               atomic_inc(&ap_wfs_count);
+               atomic_inc_unchecked(&ap_wfs_count);
                if (num_online_cpus() == 1)
-                       if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
+                       if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
                                return NOTIFY_BAD;
                break;
        }
@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
 
        tboot_create_trampoline();
 
-       atomic_set(&ap_wfs_count, 0);
+       atomic_set_unchecked(&ap_wfs_count, 0);
        register_hotcpu_notifier(&tboot_cpu_notifier);
 
 #ifdef CONFIG_DEBUG_FS
index 25adc0e16eaa6349e78bd909860f6710705acb8e..1df434906d0341e3c4435e84e3ba9a9abc76c704 100644 (file)
@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
 {
        unsigned long pc = instruction_pointer(regs);
 
-       if (!user_mode_vm(regs) && in_lock_functions(pc)) {
+       if (!user_mode(regs) && in_lock_functions(pc)) {
 #ifdef CONFIG_FRAME_POINTER
-               return *(unsigned long *)(regs->bp + sizeof(long));
+               return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
 #else
                unsigned long *sp =
                        (unsigned long *)kernel_stack_pointer(regs);
@@ -41,10 +41,16 @@ unsigned long profile_pc(struct pt_regs *regs)
                 * or above a saved flags. Eflags has bits 22-31 zero,
                 * kernel addresses don't.
                 */
+
+#ifdef CONFIG_PAX_KERNEXEC
+               return ktla_ktva(sp[0]);
+#else
                if (sp[0] >> 22)
                        return sp[0];
                if (sp[1] >> 22)
                        return sp[1];
+#endif
+
 #endif
        }
        return pc;
index 7fc5e843f247b358288b23e459eebfefcf6631f0..c6e445a43214188b9c7aafd0af368069e1837fa5 100644 (file)
@@ -139,6 +139,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
+               return -EINVAL;
+#endif
+
        set_tls_desc(p, idx, &info, 1);
 
        return 0;
@@ -256,7 +261,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
 
        if (kbuf)
                info = kbuf;
-       else if (__copy_from_user(infobuf, ubuf, count))
+       else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
                return -EFAULT;
        else
                info = infobuf;
index 1c113db9ed573c3d8723fccc177431861df2bd19..287b42e465ca822c5365f1e802794c2f7ab6b790 100644 (file)
@@ -9,11 +9,11 @@
 #include <linux/atomic.h>
 
 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
                                (unsigned long) trace_idt_table };
 
 /* No need to be aligned, but done to keep all IDTs defined the same way. */
-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
 
 static int trace_irq_vector_refcount;
 static DEFINE_MUTEX(irq_vector_mutex);
index 88900e288021f23a2f22aebf739e25070f456971..aa4149d6ba1198796107d882ca3345e78bd0e49a 100644 (file)
@@ -68,7 +68,7 @@
 #include <asm/proto.h>
 
 /* No need to be aligned, but done to keep all IDTs defined the same way. */
-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
 #else
 #include <asm/processor-flags.h>
 #include <asm/setup.h>
@@ -77,7 +77,7 @@ asmlinkage int system_call(void);
 #endif
 
 /* Must be page-aligned because the real IDT is used in a fixmap. */
-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
 
 DECLARE_BITMAP(used_vectors, NR_VECTORS);
 EXPORT_SYMBOL_GPL(used_vectors);
@@ -109,11 +109,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
 }
 
 static nokprobe_inline int
-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
                  struct pt_regs *regs, long error_code)
 {
 #ifdef CONFIG_X86_32
-       if (regs->flags & X86_VM_MASK) {
+       if (v8086_mode(regs)) {
                /*
                 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
                 * On nmi (interrupt 2), do_trap should not be called.
@@ -126,12 +126,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
                return -1;
        }
 #endif
-       if (!user_mode(regs)) {
+       if (!user_mode_novm(regs)) {
                if (!fixup_exception(regs)) {
                        tsk->thread.error_code = error_code;
                        tsk->thread.trap_nr = trapnr;
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+                       if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
+                               str = "PAX: suspicious stack segment fault";
+#endif
+
                        die(str, regs, error_code);
                }
+
+#ifdef CONFIG_PAX_REFCOUNT
+               if (trapnr == X86_TRAP_OF)
+                       pax_report_refcount_overflow(regs);
+#endif
+
                return 0;
        }
 
@@ -170,7 +182,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
 }
 
 static void
-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
        long error_code, siginfo_t *info)
 {
        struct task_struct *tsk = current;
@@ -194,7 +206,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
        if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
            printk_ratelimit()) {
                pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
-                       tsk->comm, tsk->pid, str,
+                       tsk->comm, task_pid_nr(tsk), str,
                        regs->ip, regs->sp, error_code);
                print_vma_addr(" in ", regs->ip);
                pr_cont("\n");
@@ -274,6 +286,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = X86_TRAP_DF;
 
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
+       if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
+               die("grsec: kernel stack overflow detected", regs, error_code); 
+#endif
+
 #ifdef CONFIG_DOUBLEFAULT
        df_debug(regs, error_code);
 #endif
@@ -379,7 +396,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
        conditional_sti(regs);
 
 #ifdef CONFIG_X86_32
-       if (regs->flags & X86_VM_MASK) {
+       if (v8086_mode(regs)) {
                local_irq_enable();
                handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
                goto exit;
@@ -387,18 +404,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
 #endif
 
        tsk = current;
-       if (!user_mode(regs)) {
+       if (!user_mode_novm(regs)) {
                if (fixup_exception(regs))
                        goto exit;
 
                tsk->thread.error_code = error_code;
                tsk->thread.trap_nr = X86_TRAP_GP;
                if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
-                              X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
+                              X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+               if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
+                       die("PAX: suspicious general protection fault", regs, error_code);
+               else
+#endif
+
                        die("general protection fault", regs, error_code);
+               }
                goto exit;
        }
 
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
+       if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
+               struct mm_struct *mm = tsk->mm;
+               unsigned long limit;
+
+               down_write(&mm->mmap_sem);
+               limit = mm->context.user_cs_limit;
+               if (limit < TASK_SIZE) {
+                       track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
+                       up_write(&mm->mmap_sem);
+                       return;
+               }
+               up_write(&mm->mmap_sem);
+       }
+#endif
+
        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = X86_TRAP_GP;
 
@@ -510,13 +551,16 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
                container_of(task_pt_regs(current),
                             struct bad_iret_stack, regs);
 
+       if ((current->thread.sp0 ^ (unsigned long)s) < THREAD_SIZE)
+               new_stack = s;
+
        /* Copy the IRET target to the new stack. */
        memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
 
        /* Copy the remainder of the stack from the current stack. */
        memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
 
-       BUG_ON(!user_mode_vm(&new_stack->regs));
+       BUG_ON(!user_mode(&new_stack->regs));
        return new_stack;
 }
 NOKPROBE_SYMBOL(fixup_bad_iret);
@@ -602,7 +646,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
        /* It's safe to allow irq's after DR6 has been saved */
        preempt_conditional_sti(regs);
 
-       if (regs->flags & X86_VM_MASK) {
+       if (v8086_mode(regs)) {
                handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
                                        X86_TRAP_DB);
                preempt_conditional_cli(regs);
@@ -617,7 +661,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
         * We already checked v86 mode above, so we can check for kernel mode
         * by just checking the CPL of CS.
         */
-       if ((dr6 & DR_STEP) && !user_mode(regs)) {
+       if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
                tsk->thread.debugreg6 &= ~DR_STEP;
                set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
                regs->flags &= ~X86_EFLAGS_TF;
@@ -650,7 +694,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
                return;
        conditional_sti(regs);
 
-       if (!user_mode_vm(regs))
+       if (!user_mode(regs))
        {
                if (!fixup_exception(regs)) {
                        task->thread.error_code = error_code;
index 505449700e0cf4e66ea6284135482ac172fe756a..139f8f83914ccb44b092bf6aaa4b186639dc2fe6 100644 (file)
@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
         */
        smp_wmb();
 
-       ACCESS_ONCE(c2n->head) = data;
+       ACCESS_ONCE_RW(c2n->head) = data;
 }
 
 /*
index 8b96a947021ffe0ad3d05397d3fc44b975a98141..792b410b22d3dfe3cc10051a796209345b7f1b6b 100644 (file)
@@ -845,7 +845,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
        int ret = NOTIFY_DONE;
 
        /* We are only interested in userspace traps */
-       if (regs && !user_mode_vm(regs))
+       if (regs && !user_mode(regs))
                return NOTIFY_DONE;
 
        switch (val) {
@@ -919,7 +919,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
 
        if (nleft != rasize) {
                pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
-                       "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
+                       "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
 
                force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
        }
index b9242bacbe59a2c02baee2e0d89fe8247b13106b..50c5eddeac2b57ecd2555b5961d8024c91be549d 100644 (file)
@@ -20,6 +20,7 @@
  *     arch/x86/boot/compressed/head_64.S: Boot cpu verification
  *     arch/x86/kernel/trampoline_64.S: secondary processor verification
  *     arch/x86/kernel/head_32.S: processor startup
+ *     arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
  *
  *     verify_cpu, returns the status of longmode and SSE in register %eax.
  *             0: Success    1: Failure
index e8edcf52e06911fe5446e543f40368ea69114225..27f934407ee9b85dbd47ea153e51520dee1dd5d0 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/ptrace.h>
 #include <linux/audit.h>
 #include <linux/stddef.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
                do_exit(SIGSEGV);
        }
 
-       tss = &per_cpu(init_tss, get_cpu());
+       tss = init_tss + get_cpu();
        current->thread.sp0 = current->thread.saved_sp0;
        current->thread.sysenter_cs = __KERNEL_CS;
        load_sp0(tss, &current->thread);
@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
 
        if (tsk->thread.saved_sp0)
                return -EPERM;
+
+#ifdef CONFIG_GRKERNSEC_VM86
+       if (!capable(CAP_SYS_RAWIO)) {
+               gr_handle_vm86();
+               return -EPERM;
+       }
+#endif
+
        tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
                                       offsetof(struct kernel_vm86_struct, vm86plus) -
                                       sizeof(info.regs));
@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
        int tmp;
        struct vm86plus_struct __user *v86;
 
+#ifdef CONFIG_GRKERNSEC_VM86
+       if (!capable(CAP_SYS_RAWIO)) {
+               gr_handle_vm86();
+               return -EPERM;
+       }
+#endif
+
        tsk = current;
        switch (cmd) {
        case VM86_REQUEST_IRQ:
@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
        tsk->thread.saved_fs = info->regs32->fs;
        tsk->thread.saved_gs = get_user_gs(info->regs32);
 
-       tss = &per_cpu(init_tss, get_cpu());
+       tss = init_tss + get_cpu();
        tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
        if (cpu_has_sep)
                tsk->thread.sysenter_cs = 0;
@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
                goto cannot_handle;
        if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
                goto cannot_handle;
-       intr_ptr = (unsigned long __user *) (i << 2);
+       intr_ptr = (__force unsigned long __user *) (i << 2);
        if (get_user(segoffs, intr_ptr))
                goto cannot_handle;
        if ((segoffs >> 16) == BIOSSEG)
index 00bf300fd8468db0e5bcd2fd9e32fc4f80e48adb..129df8ef42b319d5de95e44e2a2a213280b64a80 100644 (file)
 #include <asm/page_types.h>
 #include <asm/cache.h>
 #include <asm/boot.h>
+#include <asm/segment.h>
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+#define __KERNEL_TEXT_OFFSET   (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
+#else
+#define __KERNEL_TEXT_OFFSET   0
+#endif
 
 #undef i386     /* in case the preprocessor is a 32bit one */
 
@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
 
 PHDRS {
        text PT_LOAD FLAGS(5);          /* R_E */
+#ifdef CONFIG_X86_32
+       module PT_LOAD FLAGS(5);        /* R_E */
+#endif
+#ifdef CONFIG_XEN
+       rodata PT_LOAD FLAGS(5);        /* R_E */
+#else
+       rodata PT_LOAD FLAGS(4);        /* R__ */
+#endif
        data PT_LOAD FLAGS(6);          /* RW_ */
-#ifdef CONFIG_X86_64
+       init.begin PT_LOAD FLAGS(6);    /* RW_ */
 #ifdef CONFIG_SMP
        percpu PT_LOAD FLAGS(6);        /* RW_ */
 #endif
+       text.init PT_LOAD FLAGS(5);     /* R_E */
+       text.exit PT_LOAD FLAGS(5);     /* R_E */
        init PT_LOAD FLAGS(7);          /* RWE */
-#endif
        note PT_NOTE FLAGS(0);          /* ___ */
 }
 
 SECTIONS
 {
 #ifdef CONFIG_X86_32
-        . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
-        phys_startup_32 = startup_32 - LOAD_OFFSET;
+       . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
 #else
-        . = __START_KERNEL;
-        phys_startup_64 = startup_64 - LOAD_OFFSET;
+       . = __START_KERNEL;
 #endif
 
        /* Text and read-only data */
-       .text :  AT(ADDR(.text) - LOAD_OFFSET) {
-               _text = .;
+       .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
                /* bootstrapping code */
+#ifdef CONFIG_X86_32
+               phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
+#else
+               phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
+#endif
+               __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
+               _text = .;
                HEAD_TEXT
                . = ALIGN(8);
                _stext = .;
@@ -104,13 +124,47 @@ SECTIONS
                IRQENTRY_TEXT
                *(.fixup)
                *(.gnu.warning)
-               /* End of text section */
-               _etext = .;
        } :text = 0x9090
 
-       NOTES :text :note
+       . += __KERNEL_TEXT_OFFSET;
+
+#ifdef CONFIG_X86_32
+       . = ALIGN(PAGE_SIZE);
+       .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
+
+#ifdef CONFIG_PAX_KERNEXEC
+               MODULES_EXEC_VADDR = .;
+               BYTE(0)
+               . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
+               . = ALIGN(HPAGE_SIZE) - 1;
+               MODULES_EXEC_END = .;
+#endif
+
+       } :module
+#endif
 
-       EXCEPTION_TABLE(16) :text = 0x9090
+       .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
+               /* End of text section */
+               BYTE(0)
+               _etext = . - __KERNEL_TEXT_OFFSET;
+       }
+
+#ifdef CONFIG_X86_32
+       . = ALIGN(PAGE_SIZE);
+       .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
+               . = ALIGN(PAGE_SIZE);
+               *(.empty_zero_page)
+               *(.initial_pg_fixmap)
+               *(.initial_pg_pmd)
+               *(.initial_page_table)
+               *(.swapper_pg_dir)
+       } :rodata
+#endif
+
+       . = ALIGN(PAGE_SIZE);
+       NOTES :rodata :note
+
+       EXCEPTION_TABLE(16) :rodata
 
 #if defined(CONFIG_DEBUG_RODATA)
        /* .text should occupy whole number of pages */
@@ -122,16 +176,20 @@ SECTIONS
 
        /* Data */
        .data : AT(ADDR(.data) - LOAD_OFFSET) {
+
+#ifdef CONFIG_PAX_KERNEXEC
+               . = ALIGN(HPAGE_SIZE);
+#else
+               . = ALIGN(PAGE_SIZE);
+#endif
+
                /* Start of data section */
                _sdata = .;
 
                /* init_task */
                INIT_TASK_DATA(THREAD_SIZE)
 
-#ifdef CONFIG_X86_32
-               /* 32 bit has nosave before _edata */
                NOSAVE_DATA
-#endif
 
                PAGE_ALIGNED_DATA(PAGE_SIZE)
 
@@ -174,12 +232,19 @@ SECTIONS
        . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
 
        /* Init code and data - will be freed after init */
-       . = ALIGN(PAGE_SIZE);
        .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
+               BYTE(0)
+
+#ifdef CONFIG_PAX_KERNEXEC
+               . = ALIGN(HPAGE_SIZE);
+#else
+               . = ALIGN(PAGE_SIZE);
+#endif
+
                __init_begin = .; /* paired with __init_end */
-       }
+       } :init.begin
 
-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
        /*
         * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
         * output PHDR, so the next output section - .init.text - should
@@ -190,12 +255,27 @@ SECTIONS
               "per-CPU data too large - increase CONFIG_PHYSICAL_START")
 #endif
 
-       INIT_TEXT_SECTION(PAGE_SIZE)
-#ifdef CONFIG_X86_64
-       :init
-#endif
+       . = ALIGN(PAGE_SIZE);
+       init_begin = .;
+       .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
+               VMLINUX_SYMBOL(_sinittext) = .;
+               INIT_TEXT
+               . = ALIGN(PAGE_SIZE);
+       } :text.init
 
-       INIT_DATA_SECTION(16)
+       /*
+        * .exit.text is discard at runtime, not link time, to deal with
+        *  references from .altinstructions and .eh_frame
+        */
+       .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
+               EXIT_TEXT
+               VMLINUX_SYMBOL(_einittext) = .;
+               . = ALIGN(16);
+       } :text.exit
+       . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
+
+       . = ALIGN(PAGE_SIZE);
+       INIT_DATA_SECTION(16) :init
 
        .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
                __x86_cpu_dev_start = .;
@@ -266,19 +346,12 @@ SECTIONS
        }
 
        . = ALIGN(8);
-       /*
-        * .exit.text is discard at runtime, not link time, to deal with
-        *  references from .altinstructions and .eh_frame
-        */
-       .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
-               EXIT_TEXT
-       }
 
        .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
                EXIT_DATA
        }
 
-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
+#ifndef CONFIG_SMP
        PERCPU_SECTION(INTERNODE_CACHE_BYTES)
 #endif
 
@@ -297,16 +370,10 @@ SECTIONS
        .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
                __smp_locks = .;
                *(.smp_locks)
-               . = ALIGN(PAGE_SIZE);
                __smp_locks_end = .;
+               . = ALIGN(PAGE_SIZE);
        }
 
-#ifdef CONFIG_X86_64
-       .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
-               NOSAVE_DATA
-       }
-#endif
-
        /* BSS */
        . = ALIGN(PAGE_SIZE);
        .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
@@ -322,6 +389,7 @@ SECTIONS
                __brk_base = .;
                . += 64 * 1024;         /* 64k alignment slop space */
                *(.brk_reservation)     /* areas brk users have reserved */
+               . = ALIGN(HPAGE_SIZE);
                __brk_limit = .;
        }
 
@@ -348,13 +416,12 @@ SECTIONS
  * for the boot processor.
  */
 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
-INIT_PER_CPU(gdt_page);
 INIT_PER_CPU(irq_stack_union);
 
 /*
  * Build-time check on the image size:
  */
-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
           "kernel image bigger than KERNEL_IMAGE_SIZE");
 
 #ifdef CONFIG_SMP
index 2dcc6ff6fdcc6371a65a83f2e9c5288c1726e513..082dc7af9ac70e963ba3cac3ae452bae460efb4d 100644 (file)
 #define CREATE_TRACE_POINTS
 #include "vsyscall_trace.h"
 
-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
 
 static int __init vsyscall_setup(char *str)
 {
        if (str) {
                if (!strcmp("emulate", str))
                        vsyscall_mode = EMULATE;
-               else if (!strcmp("native", str))
-                       vsyscall_mode = NATIVE;
                else if (!strcmp("none", str))
                        vsyscall_mode = NONE;
                else
@@ -264,8 +262,7 @@ do_ret:
        return true;
 
 sigsegv:
-       force_sig(SIGSEGV, current);
-       return true;
+       do_group_exit(SIGKILL);
 }
 
 /*
@@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
 static struct vm_area_struct gate_vma = {
        .vm_start       = VSYSCALL_ADDR,
        .vm_end         = VSYSCALL_ADDR + PAGE_SIZE,
-       .vm_page_prot   = PAGE_READONLY_EXEC,
-       .vm_flags       = VM_READ | VM_EXEC,
+       .vm_page_prot   = PAGE_READONLY,
+       .vm_flags       = VM_READ,
        .vm_ops         = &gate_vma_ops,
 };
 
@@ -325,10 +322,7 @@ void __init map_vsyscall(void)
        unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
 
        if (vsyscall_mode != NONE)
-               __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
-                            vsyscall_mode == NATIVE
-                            ? PAGE_KERNEL_VSYSCALL
-                            : PAGE_KERNEL_VVAR);
+               __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
 
        BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
                     (unsigned long)VSYSCALL_ADDR);
index 040681928e9d971670eb1bab49b1c86e115dd55d..4d75aa6b86c5f17ed1bd756e94f5034a9b0c42b7 100644 (file)
@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
 EXPORT_SYMBOL(copy_user_generic_unrolled);
 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
 EXPORT_SYMBOL(__copy_user_nocache);
-EXPORT_SYMBOL(_copy_from_user);
-EXPORT_SYMBOL(_copy_to_user);
 
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(clear_page);
@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
 EXPORT_SYMBOL(___preempt_schedule_context);
 #endif
 #endif
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+EXPORT_SYMBOL(cpu_pgd);
+#endif
index 234b0722de53522d6d20bd060478ea3e97f8cbb9..b7ab191c581d77a84b4cdded76b4c61dd4bd70ab 100644 (file)
@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
 static void default_nmi_init(void) { };
 static int default_i8042_detect(void) { return 1; };
 
-struct x86_platform_ops x86_platform = {
+struct x86_platform_ops x86_platform __read_only = {
        .calibrate_tsc                  = native_calibrate_tsc,
        .get_wallclock                  = mach_get_cmos_time,
        .set_wallclock                  = mach_set_rtc_mmss,
@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
 EXPORT_SYMBOL_GPL(x86_platform);
 
 #if defined(CONFIG_PCI_MSI)
-struct x86_msi_ops x86_msi = {
+struct x86_msi_ops x86_msi __read_only = {
        .setup_msi_irqs         = native_setup_msi_irqs,
        .compose_msi_msg        = native_compose_msi_msg,
        .teardown_msi_irq       = native_teardown_msi_irq,
@@ -140,7 +140,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
 }
 #endif
 
-struct x86_io_apic_ops x86_io_apic_ops = {
+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
        .init                   = native_io_apic_init_mappings,
        .read                   = native_io_apic_read,
        .write                  = native_io_apic_write,
index 0de1fae2bdf000b5ed6cfae68d76ca21d5c246b9..298d037aeeb92bf2f80e98c4146844f257ea5252 100644 (file)
@@ -167,18 +167,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
 
        /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
        sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
-       err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
+       err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
 
        if (!use_xsave())
                return err;
 
-       err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
+       err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
 
        /*
         * Read the xstate_bv which we copied (directly from the cpu or
         * from the state in task struct) to the user buffers.
         */
-       err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
+       err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
 
        /*
         * For legacy compatible, we always set FP/SSE bits in the bit
@@ -193,7 +193,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
         */
        xstate_bv |= XSTATE_FPSSE;
 
-       err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
+       err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
 
        return err;
 }
@@ -202,6 +202,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
 {
        int err;
 
+       buf = (struct xsave_struct __user *)____m(buf);
        if (use_xsave())
                err = xsave_user(buf);
        else if (use_fxsr())
@@ -312,6 +313,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
  */
 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
 {
+       buf = (void __user *)____m(buf);
        if (use_xsave()) {
                if ((unsigned long)buf % 64 || fx_only) {
                        u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
index 8a80737ee6e6ec14bc7d9a6ffe08d9f580d3c890..bac4961779db3fd0e5591718a3414706821fefef 100644 (file)
@@ -182,15 +182,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
 {
-       int r;
+       int r, i;
 
        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -EFAULT;
-       if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
-                          cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
+       if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
+       for (i = 0; i < cpuid->nent; ++i) {
+               struct kvm_cpuid_entry2 cpuid_entry;
+               if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
+                       goto out;
+               vcpu->arch.cpuid_entries[i] = cpuid_entry;
+       }
        vcpu->arch.cpuid_nent = cpuid->nent;
        kvm_apic_set_version(vcpu);
        kvm_x86_ops->cpuid_update(vcpu);
@@ -203,15 +208,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
 {
-       int r;
+       int r, i;
 
        r = -E2BIG;
        if (cpuid->nent < vcpu->arch.cpuid_nent)
                goto out;
        r = -EFAULT;
-       if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
-                        vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
+       if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
+       for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
+               struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
+               if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
+                       goto out;
+       }
        return 0;
 
 out:
index b24c2d84dc20d7ab14af3aca6c4f3a588babf096..e1e4e2597a8b4b25dbf4c908138862a6056193cd 100644 (file)
@@ -3503,7 +3503,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
        int cr = ctxt->modrm_reg;
        u64 efer = 0;
 
-       static u64 cr_reserved_bits[] = {
+       static const u64 cr_reserved_bits[] = {
                0xffffffff00000000ULL,
                0, 0, 0, /* CR3 checked later */
                CR4_RESERVED_BITS,
index d52dcf0776ea930df81ded94ed22af0b9d11e48b..cec7e844844fe3bd55e3c99041e9f14ad4fc5003 100644 (file)
@@ -55,7 +55,7 @@
 #define APIC_BUS_CYCLE_NS 1
 
 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
-#define apic_debug(fmt, arg...)
+#define apic_debug(fmt, arg...) do {} while (0)
 
 #define APIC_LVT_NUM                   6
 /* 14 is the version for Xeon and Pentium 8.4.8*/
index fd49c867b25a11927fc2f6ef4522e1ef9ee80c11..77e1aa015adcaf06b19aea213b5bff2870b6f5e8 100644 (file)
@@ -343,7 +343,7 @@ retry_walk:
                if (unlikely(kvm_is_error_hva(host_addr)))
                        goto error;
 
-               ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
+               ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
                if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
                        goto error;
                walker->ptep_user[walker->level - 1] = ptep_user;
index 41dd0387cccb639b49fcb99c99035d3691f2464a..de331cf907b437ed7fa86b9498d60baca54424da 100644 (file)
@@ -3568,7 +3568,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
        int cpu = raw_smp_processor_id();
 
        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+
+       pax_open_kernel();
        sd->tss_desc->type = 9; /* available 32/64-bit TSS */
+       pax_close_kernel();
+
        load_TR_desc();
 }
 
@@ -3969,6 +3973,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 #endif
 
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
+       __set_fs(current_thread_info()->addr_limit);
+#endif
+
        reload_tss(vcpu);
 
        local_irq_disable();
index d4c58d884838d1539cb52e13df3c6893459a98af..eaf2568ce194e4b202a097ea7a92b9a03c391a32 100644 (file)
@@ -1380,12 +1380,12 @@ static void vmcs_write64(unsigned long field, u64 value)
 #endif
 }
 
-static void vmcs_clear_bits(unsigned long field, u32 mask)
+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
 {
        vmcs_writel(field, vmcs_readl(field) & ~mask);
 }
 
-static void vmcs_set_bits(unsigned long field, u32 mask)
+static void vmcs_set_bits(unsigned long field, unsigned long mask)
 {
        vmcs_writel(field, vmcs_readl(field) | mask);
 }
@@ -1645,7 +1645,11 @@ static void reload_tss(void)
        struct desc_struct *descs;
 
        descs = (void *)gdt->address;
+
+       pax_open_kernel();
        descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
+       pax_close_kernel();
+
        load_TR_desc();
 }
 
@@ -1881,6 +1885,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
                vmcs_writel(HOST_GDTR_BASE, gdt->address);   /* 22.2.4 */
 
+#ifdef CONFIG_PAX_PER_CPU_PGD
+               vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
+#endif
+
                rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
                vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
                vmx->loaded_vmcs->cpu = cpu;
@@ -2170,7 +2178,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
  * reads and returns guest's timestamp counter "register"
  * guest_tsc = host_tsc + tsc_offset    -- 21.3
  */
-static u64 guest_read_tsc(void)
+static u64 __intentional_overflow(-1) guest_read_tsc(void)
 {
        u64 host_tsc, tsc_offset;
 
@@ -4252,7 +4260,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
        unsigned long cr4;
 
        vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS);  /* 22.2.3 */
+
+#ifndef CONFIG_PAX_PER_CPU_PGD
        vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
+#endif
 
        /* Save the most likely value for this task's CR4 in the VMCS. */
        cr4 = read_cr4();
@@ -4279,7 +4290,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
        vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
        vmx->host_idt_base = dt.address;
 
-       vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
+       vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
 
        rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
        vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
@@ -5876,11 +5887,16 @@ static __init int hardware_setup(void)
                 * page upon invalidation.  No need to do anything if the
                 * processor does not have the APIC_ACCESS_ADDR VMCS field.
                 */
-               kvm_x86_ops->set_apic_access_page_addr = NULL;
+               pax_open_kernel();
+               *(void **)&kvm_x86_ops->set_apic_access_page_addr = NULL;
+               pax_close_kernel();
        }
 
-       if (!cpu_has_vmx_tpr_shadow())
-               kvm_x86_ops->update_cr8_intercept = NULL;
+       if (!cpu_has_vmx_tpr_shadow()) {
+               pax_open_kernel();
+               *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
+               pax_close_kernel();
+       }
 
        if (enable_ept && !cpu_has_vmx_ept_2m_page())
                kvm_disable_largepages();
@@ -5891,13 +5907,15 @@ static __init int hardware_setup(void)
        if (!cpu_has_vmx_apicv())
                enable_apicv = 0;
 
+       pax_open_kernel();
        if (enable_apicv)
-               kvm_x86_ops->update_cr8_intercept = NULL;
+               *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
        else {
-               kvm_x86_ops->hwapic_irr_update = NULL;
-               kvm_x86_ops->deliver_posted_interrupt = NULL;
-               kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
+               *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
+               *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
+               *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
        }
+       pax_close_kernel();
 
        if (nested)
                nested_vmx_setup_ctls_msrs();
@@ -7846,6 +7864,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
                "jmp 2f \n\t"
                "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
                "2: "
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+               "ljmp %[cs],$3f\n\t"
+               "3: "
+#endif
+
                /* Save guest registers, load host registers, keep flags */
                "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
                "pop %0 \n\t"
@@ -7898,6 +7922,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
                [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
                [wordsize]"i"(sizeof(ulong))
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+               ,[cs]"i"(__KERNEL_CS)
+#endif
+
              : "cc", "memory"
 #ifdef CONFIG_X86_64
                , "rax", "rbx", "rdi", "rsi"
@@ -7911,7 +7940,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
        if (debugctlmsr)
                update_debugctlmsr(debugctlmsr);
 
-#ifndef CONFIG_X86_64
+#ifdef CONFIG_X86_32
        /*
         * The sysexit path does not restore ds/es, so we must set them to
         * a reasonable value ourselves.
@@ -7920,8 +7949,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
         * may be executed in interrupt context, which saves and restore segments
         * around it, nullifying its effect.
         */
-       loadsegment(ds, __USER_DS);
-       loadsegment(es, __USER_DS);
+       loadsegment(ds, __KERNEL_DS);
+       loadsegment(es, __KERNEL_DS);
+       loadsegment(ss, __KERNEL_DS);
+
+#ifdef CONFIG_PAX_KERNEXEC
+       loadsegment(fs, __KERNEL_PERCPU);
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       __set_fs(current_thread_info()->addr_limit);
+#endif
+
 #endif
 
        vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
index c259814200bd340b54014a1869d914ec3674caed..9a0345b1836655873519a5d25dcff6fb5124dd22 100644 (file)
@@ -1882,8 +1882,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
 {
        struct kvm *kvm = vcpu->kvm;
        int lm = is_long_mode(vcpu);
-       u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
-               : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
+       u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
+               : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
        u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
                : kvm->arch.xen_hvm_config.blob_size_32;
        u32 page_num = data & ~PAGE_MASK;
@@ -2810,6 +2810,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
                if (n < msr_list.nmsrs)
                        goto out;
                r = -EFAULT;
+               if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
+                       goto out;
                if (copy_to_user(user_msr_list->indices, &msrs_to_save,
                                 num_msrs_to_save * sizeof(u32)))
                        goto out;
@@ -5746,7 +5748,7 @@ static struct notifier_block pvclock_gtod_notifier = {
 };
 #endif
 
-int kvm_arch_init(void *opaque)
+int kvm_arch_init(const void *opaque)
 {
        int r;
        struct kvm_x86_ops *ops = opaque;
index c1c1544b84859e9675ad71ac85cab1568994b623..f90c9d571a61846b6c98c3cef89b5817bfe9eb1f 100644 (file)
@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
  * Rebooting also tells the Host we're finished, but the RESTART flag tells the
  * Launcher to reboot us.
  */
-static void lguest_restart(char *reason)
+static __noreturn void lguest_restart(char *reason)
 {
        hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
+       BUG();
 }
 
 /*G:050
index 00933d5e992f7a8df394ee347717365925c0f58c..3a64af97c7009d7c3cd5e7cf639b4de85b5645ab 100644 (file)
@@ -48,6 +48,10 @@ BEGIN(read)
        movl  (v), %eax
        movl 4(v), %edx
 RET_ENDP
+BEGIN(read_unchecked)
+       movl  (v), %eax
+       movl 4(v), %edx
+RET_ENDP
 #undef v
 
 #define v %esi
@@ -55,6 +59,10 @@ BEGIN(set)
        movl %ebx,  (v)
        movl %ecx, 4(v)
 RET_ENDP
+BEGIN(set_unchecked)
+       movl %ebx,  (v)
+       movl %ecx, 4(v)
+RET_ENDP
 #undef v
 
 #define v  %esi
@@ -70,6 +78,20 @@ RET_ENDP
 BEGIN(add)
        addl %eax,  (v)
        adcl %edx, 4(v)
+
+#ifdef CONFIG_PAX_REFCOUNT
+       jno 0f
+       subl %eax,  (v)
+       sbbl %edx, 4(v)
+       int $4
+0:
+       _ASM_EXTABLE(0b, 0b)
+#endif
+
+RET_ENDP
+BEGIN(add_unchecked)
+       addl %eax,  (v)
+       adcl %edx, 4(v)
 RET_ENDP
 #undef v
 
@@ -77,6 +99,24 @@ RET_ENDP
 BEGIN(add_return)
        addl  (v), %eax
        adcl 4(v), %edx
+
+#ifdef CONFIG_PAX_REFCOUNT
+       into
+1234:
+       _ASM_EXTABLE(1234b, 2f)
+#endif
+
+       movl %eax,  (v)
+       movl %edx, 4(v)
+
+#ifdef CONFIG_PAX_REFCOUNT
+2:
+#endif
+
+RET_ENDP
+BEGIN(add_return_unchecked)
+       addl  (v), %eax
+       adcl 4(v), %edx
        movl %eax,  (v)
        movl %edx, 4(v)
 RET_ENDP
@@ -86,6 +126,20 @@ RET_ENDP
 BEGIN(sub)
        subl %eax,  (v)
        sbbl %edx, 4(v)
+
+#ifdef CONFIG_PAX_REFCOUNT
+       jno 0f
+       addl %eax,  (v)
+       adcl %edx, 4(v)
+       int $4
+0:
+       _ASM_EXTABLE(0b, 0b)
+#endif
+
+RET_ENDP
+BEGIN(sub_unchecked)
+       subl %eax,  (v)
+       sbbl %edx, 4(v)
 RET_ENDP
 #undef v
 
@@ -96,6 +150,27 @@ BEGIN(sub_return)
        sbbl $0, %edx
        addl  (v), %eax
        adcl 4(v), %edx
+
+#ifdef CONFIG_PAX_REFCOUNT
+       into
+1234:
+       _ASM_EXTABLE(1234b, 2f)
+#endif
+
+       movl %eax,  (v)
+       movl %edx, 4(v)
+
+#ifdef CONFIG_PAX_REFCOUNT
+2:
+#endif
+
+RET_ENDP
+BEGIN(sub_return_unchecked)
+       negl %edx
+       negl %eax
+       sbbl $0, %edx
+       addl  (v), %eax
+       adcl 4(v), %edx
        movl %eax,  (v)
        movl %edx, 4(v)
 RET_ENDP
@@ -105,6 +180,20 @@ RET_ENDP
 BEGIN(inc)
        addl $1,  (v)
        adcl $0, 4(v)
+
+#ifdef CONFIG_PAX_REFCOUNT
+       jno 0f
+       subl $1,  (v)
+       sbbl $0, 4(v)
+       int $4
+0:
+       _ASM_EXTABLE(0b, 0b)
+#endif
+
+RET_ENDP
+BEGIN(inc_unchecked)
+       addl $1,  (v)
+       adcl $0, 4(v)
 RET_ENDP
 #undef v
 
@@ -114,6 +203,26 @@ BEGIN(inc_return)
        movl 4(v), %edx
        addl $1, %eax
        adcl $0, %edx
+
+#ifdef CONFIG_PAX_REFCOUNT
+       into
+1234:
+       _ASM_EXTABLE(1234b, 2f)
+#endif
+
+       movl %eax,  (v)
+       movl %edx, 4(v)
+
+#ifdef CONFIG_PAX_REFCOUNT
+2:
+#endif
+
+RET_ENDP
+BEGIN(inc_return_unchecked)
+       movl  (v), %eax
+       movl 4(v), %edx
+       addl $1, %eax
+       adcl $0, %edx
        movl %eax,  (v)
        movl %edx, 4(v)
 RET_ENDP
@@ -123,6 +232,20 @@ RET_ENDP
 BEGIN(dec)
        subl $1,  (v)
        sbbl $0, 4(v)
+
+#ifdef CONFIG_PAX_REFCOUNT
+       jno 0f
+       addl $1,  (v)
+       adcl $0, 4(v)
+       int $4
+0:
+       _ASM_EXTABLE(0b, 0b)
+#endif
+
+RET_ENDP
+BEGIN(dec_unchecked)
+       subl $1,  (v)
+       sbbl $0, 4(v)
 RET_ENDP
 #undef v
 
@@ -132,6 +255,26 @@ BEGIN(dec_return)
        movl 4(v), %edx
        subl $1, %eax
        sbbl $0, %edx
+
+#ifdef CONFIG_PAX_REFCOUNT
+       into
+1234:
+       _ASM_EXTABLE(1234b, 2f)
+#endif
+
+       movl %eax,  (v)
+       movl %edx, 4(v)
+
+#ifdef CONFIG_PAX_REFCOUNT
+2:
+#endif
+
+RET_ENDP
+BEGIN(dec_return_unchecked)
+       movl  (v), %eax
+       movl 4(v), %edx
+       subl $1, %eax
+       sbbl $0, %edx
        movl %eax,  (v)
        movl %edx, 4(v)
 RET_ENDP
@@ -143,6 +286,13 @@ BEGIN(add_unless)
        adcl %edx, %edi
        addl  (v), %eax
        adcl 4(v), %edx
+
+#ifdef CONFIG_PAX_REFCOUNT
+       into
+1234:
+       _ASM_EXTABLE(1234b, 2f)
+#endif
+
        cmpl %eax, %ecx
        je 3f
 1:
@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
 1:
        addl $1, %eax
        adcl $0, %edx
+
+#ifdef CONFIG_PAX_REFCOUNT
+       into
+1234:
+       _ASM_EXTABLE(1234b, 2f)
+#endif
+
        movl %eax,  (v)
        movl %edx, 4(v)
        movl $1, %eax
@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
        movl 4(v), %edx
        subl $1, %eax
        sbbl $0, %edx
+
+#ifdef CONFIG_PAX_REFCOUNT
+       into
+1234:
+       _ASM_EXTABLE(1234b, 1f)
+#endif
+
        js 1f
        movl %eax,  (v)
        movl %edx, 4(v)
index f5cc9eb1d51bc02ef817bd664c7810f4f5b928b1..51fa319621fb0717647c26b51a05f3ff058930d4 100644 (file)
@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
        CFI_STARTPROC
 
        read64 %ecx
+       pax_force_retaddr
        ret
        CFI_ENDPROC
 ENDPROC(atomic64_read_cx8)
 
+ENTRY(atomic64_read_unchecked_cx8)
+       CFI_STARTPROC
+
+       read64 %ecx
+       pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ENDPROC(atomic64_read_unchecked_cx8)
+
 ENTRY(atomic64_set_cx8)
        CFI_STARTPROC
 
@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
        cmpxchg8b (%esi)
        jne 1b
 
+       pax_force_retaddr
        ret
        CFI_ENDPROC
 ENDPROC(atomic64_set_cx8)
 
+ENTRY(atomic64_set_unchecked_cx8)
+       CFI_STARTPROC
+
+1:
+/* we don't need LOCK_PREFIX since aligned 64-bit writes
+ * are atomic on 586 and newer */
+       cmpxchg8b (%esi)
+       jne 1b
+
+       pax_force_retaddr
+       ret
+       CFI_ENDPROC
+ENDPROC(atomic64_set_unchecked_cx8)
+
 ENTRY(atomic64_xchg_cx8)
        CFI_STARTPROC
 
@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
        cmpxchg8b (%esi)
        jne 1b
 
+       pax_force_retaddr
        ret
        CFI_ENDPROC
 ENDPROC(atomic64_xchg_cx8)
 
-.macro addsub_return func ins insc
-ENTRY(atomic64_\func\()_return_cx8)
+.macro addsub_return func ins insc unchecked=""
+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
        CFI_STARTPROC
        SAVE ebp
        SAVE ebx
@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
        movl %edx, %ecx
        \ins\()l %esi, %ebx
        \insc\()l %edi, %ecx
+
+.ifb \unchecked
+#ifdef CONFIG_PAX_REFCOUNT
+       into
+2:
+       _ASM_EXTABLE(2b, 3f)
+#endif
+.endif
+
        LOCK_PREFIX
        cmpxchg8b (%ebp)
        jne 1b
-
-10:
        movl %ebx, %eax
        movl %ecx, %edx
+
+.ifb \unchecked
+#ifdef CONFIG_PAX_REFCOUNT
+3:
+#endif
+.endif
+
        RESTORE edi
        RESTORE esi
        RESTORE ebx
        RESTORE ebp
+       pax_force_retaddr
        ret
        CFI_ENDPROC
-ENDPROC(atomic64_\func\()_return_cx8)
+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
 .endm
 
 addsub_return add add adc
 addsub_return sub sub sbb
+addsub_return add add adc _unchecked
+addsub_return sub sub sbb _unchecked
 
-.macro incdec_return func ins insc
-ENTRY(atomic64_\func\()_return_cx8)
+.macro incdec_return func ins insc unchecked=""
+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
        CFI_STARTPROC
        SAVE ebx
 
@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
        movl %edx, %ecx
        \ins\()l $1, %ebx
        \insc\()l $0, %ecx
+
+.ifb \unchecked
+#ifdef CONFIG_PAX_REFCOUNT
+       into
+2:
+       _ASM_EXTABLE(2b, 3f)
+#endif
+.endif
+
        LOCK_PREFIX
        cmpxchg8b (%esi)
        jne 1b
 
-10:
        movl %ebx, %eax
        movl %ecx, %edx
+
+.ifb \unchecked
+#ifdef CONFIG_PAX_REFCOUNT
+3:
+#endif
+.endif
+
        RESTORE ebx
+       pax_force_retaddr
        ret
        CFI_ENDPROC
-ENDPROC(atomic64_\func\()_return_cx8)
+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
 .endm
 
 incdec_return inc add adc
 incdec_return dec sub sbb
+incdec_return inc add adc _unchecked
+incdec_return dec sub sbb _unchecked
 
 ENTRY(atomic64_dec_if_positive_cx8)
        CFI_STARTPROC
@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
        movl %edx, %ecx
        subl $1, %ebx
        sbb $0, %ecx
+
+#ifdef CONFIG_PAX_REFCOUNT
+       into
+1234:
+       _ASM_EXTABLE(1234b, 2f)
+#endif
+
        js 2f
        LOCK_PREFIX
        cmpxchg8b (%esi)
@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
        movl %ebx, %eax
        movl %ecx, %edx
        RESTORE ebx
+       pax_force_retaddr
        ret
        CFI_ENDPROC
 ENDPROC(atomic64_dec_if_positive_cx8)
@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
        movl %edx, %ecx
        addl %ebp, %ebx
        adcl %edi, %ecx
+
+#ifdef CONFIG_PAX_REFCOUNT
+       into
+1234:
+       _ASM_EXTABLE(1234b, 3f)
+#endif
+
        LOCK_PREFIX
        cmpxchg8b (%esi)
        jne 1b
@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
        CFI_ADJUST_CFA_OFFSET -8
        RESTORE ebx
        RESTORE ebp
+       pax_force_retaddr
        ret
 4:
        cmpl %edx, 4(%esp)
@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
        xorl %ecx, %ecx
        addl $1, %ebx
        adcl %edx, %ecx
+
+#ifdef CONFIG_PAX_REFCOUNT
+       into
+1234:
+       _ASM_EXTABLE(1234b, 3f)
+#endif
+
        LOCK_PREFIX
        cmpxchg8b (%esi)
        jne 1b
@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
        movl $1, %eax
 3:
        RESTORE ebx
+       pax_force_retaddr
        ret
        CFI_ENDPROC
 ENDPROC(atomic64_inc_not_zero_cx8)
index e78b8eee66155df85844e8cf6dae4507fa313bc6..7e173a8fd3e974ee3edd7fbc6b6e6876eecfb2f9 100644 (file)
@@ -29,7 +29,8 @@
 #include <asm/dwarf2.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
-                               
+#include <asm/segment.h>
+
 /*
  * computes a partial checksum, e.g. for TCP/UDP fragments
  */
@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
 
 #define ARGBASE 16             
 #define FP             12
-               
-ENTRY(csum_partial_copy_generic)
+
+ENTRY(csum_partial_copy_generic_to_user)
        CFI_STARTPROC
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       pushl_cfi %gs
+       popl_cfi %es
+       jmp csum_partial_copy_generic
+#endif
+
+ENTRY(csum_partial_copy_generic_from_user)
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       pushl_cfi %gs
+       popl_cfi %ds
+#endif
+
+ENTRY(csum_partial_copy_generic)
        subl  $4,%esp   
        CFI_ADJUST_CFA_OFFSET 4
        pushl_cfi %edi
@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
        jmp 4f
 SRC(1: movw (%esi), %bx        )
        addl $2, %esi
-DST(   movw %bx, (%edi)        )
+DST(   movw %bx, %es:(%edi)    )
        addl $2, %edi
        addw %bx, %ax   
        adcl $0, %eax
@@ -329,30 +345,30 @@ DST(      movw %bx, (%edi)        )
 SRC(1: movl (%esi), %ebx       )
 SRC(   movl 4(%esi), %edx      )
        adcl %ebx, %eax
-DST(   movl %ebx, (%edi)       )
+DST(   movl %ebx, %es:(%edi)   )
        adcl %edx, %eax
-DST(   movl %edx, 4(%edi)      )
+DST(   movl %edx, %es:4(%edi)  )
 
 SRC(   movl 8(%esi), %ebx      )
 SRC(   movl 12(%esi), %edx     )
        adcl %ebx, %eax
-DST(   movl %ebx, 8(%edi)      )
+DST(   movl %ebx, %es:8(%edi)  )
        adcl %edx, %eax
-DST(   movl %edx, 12(%edi)     )
+DST(   movl %edx, %es:12(%edi) )
 
 SRC(   movl 16(%esi), %ebx     )
 SRC(   movl 20(%esi), %edx     )
        adcl %ebx, %eax
-DST(   movl %ebx, 16(%edi)     )
+DST(   movl %ebx, %es:16(%edi) )
        adcl %edx, %eax
-DST(   movl %edx, 20(%edi)     )
+DST(   movl %edx, %es:20(%edi) )
 
 SRC(   movl 24(%esi), %ebx     )
 SRC(   movl 28(%esi), %edx     )
        adcl %ebx, %eax
-DST(   movl %ebx, 24(%edi)     )
+DST(   movl %ebx, %es:24(%edi) )
        adcl %edx, %eax
-DST(   movl %edx, 28(%edi)     )
+DST(   movl %edx, %es:28(%edi) )
 
        lea 32(%esi), %esi
        lea 32(%edi), %edi
@@ -366,7 +382,7 @@ DST(        movl %edx, 28(%edi)     )
        shrl $2, %edx                   # This clears CF
 SRC(3: movl (%esi), %ebx       )
        adcl %ebx, %eax
-DST(   movl %ebx, (%edi)       )
+DST(   movl %ebx, %es:(%edi)   )
        lea 4(%esi), %esi
        lea 4(%edi), %edi
        dec %edx
@@ -378,12 +394,12 @@ DST(      movl %ebx, (%edi)       )
        jb 5f
 SRC(   movw (%esi), %cx        )
        leal 2(%esi), %esi
-DST(   movw %cx, (%edi)        )
+DST(   movw %cx, %es:(%edi)    )
        leal 2(%edi), %edi
        je 6f
        shll $16,%ecx
 SRC(5: movb (%esi), %cl        )
-DST(   movb %cl, (%edi)        )
+DST(   movb %cl, %es:(%edi)    )
 6:     addl %ecx, %eax
        adcl $0, %eax
 7:
@@ -394,7 +410,7 @@ DST(        movb %cl, (%edi)        )
 
 6001:
        movl ARGBASE+20(%esp), %ebx     # src_err_ptr
-       movl $-EFAULT, (%ebx)
+       movl $-EFAULT, %ss:(%ebx)
 
        # zero the complete destination - computing the rest
        # is too much work 
@@ -407,11 +423,15 @@ DST(      movb %cl, (%edi)        )
 
 6002:
        movl ARGBASE+24(%esp), %ebx     # dst_err_ptr
-       movl $-EFAULT,(%ebx)
+       movl $-EFAULT,%ss:(%ebx)
        jmp 5000b
 
 .previous
 
+       pushl_cfi %ss
+       popl_cfi %ds
+       pushl_cfi %ss
+       popl_cfi %es
        popl_cfi %ebx
        CFI_RESTORE ebx
        popl_cfi %esi
@@ -421,26 +441,43 @@ DST(      movb %cl, (%edi)        )
        popl_cfi %ecx                   # equivalent to addl $4,%esp
        ret     
        CFI_ENDPROC
-ENDPROC(csum_partial_copy_generic)
+ENDPROC(csum_partial_copy_generic_to_user)
 
 #else
 
 /* Version for PentiumII/PPro */
 
 #define ROUND1(x) \
+       nop; nop; nop;                          \
        SRC(movl x(%esi), %ebx  )       ;       \
        addl %ebx, %eax                 ;       \
-       DST(movl %ebx, x(%edi)  )       ; 
+       DST(movl %ebx, %es:x(%edi))     ;
 
 #define ROUND(x) \
+       nop; nop; nop;                          \
        SRC(movl x(%esi), %ebx  )       ;       \
        adcl %ebx, %eax                 ;       \
-       DST(movl %ebx, x(%edi)  )       ;
+       DST(movl %ebx, %es:x(%edi))     ;
 
 #define ARGBASE 12
-               
-ENTRY(csum_partial_copy_generic)
+
+ENTRY(csum_partial_copy_generic_to_user)
        CFI_STARTPROC
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       pushl_cfi %gs
+       popl_cfi %es
+       jmp csum_partial_copy_generic
+#endif
+
+ENTRY(csum_partial_copy_generic_from_user)
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       pushl_cfi %gs
+       popl_cfi %ds
+#endif
+
+ENTRY(csum_partial_copy_generic)
        pushl_cfi %ebx
        CFI_REL_OFFSET ebx, 0
        pushl_cfi %edi
@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
        subl %ebx, %edi  
        lea  -1(%esi),%edx
        andl $-32,%edx
-       lea 3f(%ebx,%ebx), %ebx
+       lea 3f(%ebx,%ebx,2), %ebx
        testl %esi, %esi 
        jmp *%ebx
 1:     addl $64,%esi
@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
        jb 5f
 SRC(   movw (%esi), %dx         )
        leal 2(%esi), %esi
-DST(   movw %dx, (%edi)         )
+DST(   movw %dx, %es:(%edi)     )
        leal 2(%edi), %edi
        je 6f
        shll $16,%edx
 5:
 SRC(   movb (%esi), %dl         )
-DST(   movb %dl, (%edi)         )
+DST(   movb %dl, %es:(%edi)     )
 6:     addl %edx, %eax
        adcl $0, %eax
 7:
 .section .fixup, "ax"
 6001:  movl    ARGBASE+20(%esp), %ebx  # src_err_ptr   
-       movl $-EFAULT, (%ebx)
+       movl $-EFAULT, %ss:(%ebx)
        # zero the complete destination (computing the rest is too much work)
        movl ARGBASE+8(%esp),%edi       # dst
        movl ARGBASE+12(%esp),%ecx      # len
@@ -502,10 +539,17 @@ DST(      movb %dl, (%edi)         )
        rep; stosb
        jmp 7b
 6002:  movl ARGBASE+24(%esp), %ebx     # dst_err_ptr
-       movl $-EFAULT, (%ebx)
+       movl $-EFAULT, %ss:(%ebx)
        jmp  7b                 
 .previous                              
 
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       pushl_cfi %ss
+       popl_cfi %ds
+       pushl_cfi %ss
+       popl_cfi %es
+#endif
+
        popl_cfi %esi
        CFI_RESTORE esi
        popl_cfi %edi
@@ -514,7 +558,7 @@ DST(        movb %dl, (%edi)         )
        CFI_RESTORE ebx
        ret
        CFI_ENDPROC
-ENDPROC(csum_partial_copy_generic)
+ENDPROC(csum_partial_copy_generic_to_user)
                                
 #undef ROUND
 #undef ROUND1          
index f2145cfa12a66830e834718340c4cc88e64a731a..cea889d92491a3aa1e321ca2a9f9e22133d89f89 100644 (file)
@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
        movl $4096/8,%ecx
        xorl %eax,%eax
        rep stosq
+       pax_force_retaddr
        ret
        CFI_ENDPROC
 ENDPROC(clear_page_c)
@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
        movl $4096,%ecx
        xorl %eax,%eax
        rep stosb
+       pax_force_retaddr
        ret
        CFI_ENDPROC
 ENDPROC(clear_page_c_e)
@@ -43,6 +45,7 @@ ENTRY(clear_page)
        leaq    64(%rdi),%rdi
        jnz     .Lloop
        nop
+       pax_force_retaddr
        ret
        CFI_ENDPROC
 .Lclear_page_end:
@@ -58,7 +61,7 @@ ENDPROC(clear_page)
 
 #include <asm/cpufeature.h>
 
-       .section .altinstr_replacement,"ax"
+       .section .altinstr_replacement,"a"
 1:     .byte 0xeb                                      /* jmp <disp8> */
        .byte (clear_page_c - clear_page) - (2f - 1b)   /* offset */
 2:     .byte 0xeb                                      /* jmp <disp8> */
index 40a172541ee2cb2c2342a0a9934948769e87337f..5d12ac41a216c22459c06aac34d5c3a93781b0e8 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
 #include <asm/percpu.h>
+#include <asm/alternative-asm.h>
 
 .text
 
@@ -46,12 +47,14 @@ CFI_STARTPROC
        CFI_REMEMBER_STATE
        popfq_cfi
        mov $1, %al
+       pax_force_retaddr
        ret
 
        CFI_RESTORE_STATE
 .Lnot_same:
        popfq_cfi
        xor %al,%al
+       pax_force_retaddr
        ret
 
 CFI_ENDPROC
index 176cca67212b7072687f42450ef56018c03ebaaa..e0d658ed3c4bb7fd9a48f184647000afa05fa92a 100644 (file)
@@ -9,6 +9,7 @@ copy_page_rep:
        CFI_STARTPROC
        movl    $4096/8, %ecx
        rep     movsq
+       pax_force_retaddr
        ret
        CFI_ENDPROC
 ENDPROC(copy_page_rep)
@@ -24,8 +25,8 @@ ENTRY(copy_page)
        CFI_ADJUST_CFA_OFFSET 2*8
        movq    %rbx,   (%rsp)
        CFI_REL_OFFSET rbx, 0
-       movq    %r12,   1*8(%rsp)
-       CFI_REL_OFFSET r12, 1*8
+       movq    %r13,   1*8(%rsp)
+       CFI_REL_OFFSET r13, 1*8
 
        movl    $(4096/64)-5,   %ecx
        .p2align 4
@@ -38,7 +39,7 @@ ENTRY(copy_page)
        movq    0x8*4(%rsi), %r9
        movq    0x8*5(%rsi), %r10
        movq    0x8*6(%rsi), %r11
-       movq    0x8*7(%rsi), %r12
+       movq    0x8*7(%rsi), %r13
 
        prefetcht0 5*64(%rsi)
 
@@ -49,7 +50,7 @@ ENTRY(copy_page)
        movq    %r9,  0x8*4(%rdi)
        movq    %r10, 0x8*5(%rdi)
        movq    %r11, 0x8*6(%rdi)
-       movq    %r12, 0x8*7(%rdi)
+       movq    %r13, 0x8*7(%rdi)
 
        leaq    64 (%rsi), %rsi
        leaq    64 (%rdi), %rdi
@@ -68,7 +69,7 @@ ENTRY(copy_page)
        movq    0x8*4(%rsi), %r9
        movq    0x8*5(%rsi), %r10
        movq    0x8*6(%rsi), %r11
-       movq    0x8*7(%rsi), %r12
+       movq    0x8*7(%rsi), %r13
 
        movq    %rax, 0x8*0(%rdi)
        movq    %rbx, 0x8*1(%rdi)
@@ -77,7 +78,7 @@ ENTRY(copy_page)
        movq    %r9,  0x8*4(%rdi)
        movq    %r10, 0x8*5(%rdi)
        movq    %r11, 0x8*6(%rdi)
-       movq    %r12, 0x8*7(%rdi)
+       movq    %r13, 0x8*7(%rdi)
 
        leaq    64(%rdi), %rdi
        leaq    64(%rsi), %rsi
@@ -85,10 +86,11 @@ ENTRY(copy_page)
 
        movq    (%rsp), %rbx
        CFI_RESTORE rbx
-       movq    1*8(%rsp), %r12
-       CFI_RESTORE r12
+       movq    1*8(%rsp), %r13
+       CFI_RESTORE r13
        addq    $2*8, %rsp
        CFI_ADJUST_CFA_OFFSET -2*8
+       pax_force_retaddr
        ret
 .Lcopy_page_end:
        CFI_ENDPROC
@@ -99,7 +101,7 @@ ENDPROC(copy_page)
 
 #include <asm/cpufeature.h>
 
-       .section .altinstr_replacement,"ax"
+       .section .altinstr_replacement,"a"
 1:     .byte 0xeb                                      /* jmp <disp8> */
        .byte (copy_page_rep - copy_page) - (2f - 1b)   /* offset */
 2:
index dee945d555941a078f40049b24b8b4731aba6f2c..a84067b8b87e321877cac37a51e3987c85dff2c7 100644 (file)
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
-
-/*
- * By placing feature2 after feature1 in altinstructions section, we logically
- * implement:
- * If CPU has feature2, jmp to alt2 is used
- * else if CPU has feature1, jmp to alt1 is used
- * else jmp to orig is used.
- */
-       .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
-0:
-       .byte 0xe9      /* 32bit jump */
-       .long \orig-1f  /* by default jump to orig */
-1:
-       .section .altinstr_replacement,"ax"
-2:     .byte 0xe9                      /* near jump with 32bit immediate */
-       .long \alt1-1b /* offset */   /* or alternatively to alt1 */
-3:     .byte 0xe9                      /* near jump with 32bit immediate */
-       .long \alt2-1b /* offset */   /* or alternatively to alt2 */
-       .previous
-
-       .section .altinstructions,"a"
-       altinstruction_entry 0b,2b,\feature1,5,5
-       altinstruction_entry 0b,3b,\feature2,5,5
-       .previous
-       .endm
+#include <asm/pgtable.h>
 
        .macro ALIGN_DESTINATION
 #ifdef FIX_ALIGNMENT
 #endif
        .endm
 
-/* Standard copy_to_user with segment limit checking */
-ENTRY(_copy_to_user)
-       CFI_STARTPROC
-       GET_THREAD_INFO(%rax)
-       movq %rdi,%rcx
-       addq %rdx,%rcx
-       jc bad_to_user
-       cmpq TI_addr_limit(%rax),%rcx
-       ja bad_to_user
-       ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
-               copy_user_generic_unrolled,copy_user_generic_string,    \
-               copy_user_enhanced_fast_string
-       CFI_ENDPROC
-ENDPROC(_copy_to_user)
-
-/* Standard copy_from_user with segment limit checking */
-ENTRY(_copy_from_user)
-       CFI_STARTPROC
-       GET_THREAD_INFO(%rax)
-       movq %rsi,%rcx
-       addq %rdx,%rcx
-       jc bad_from_user
-       cmpq TI_addr_limit(%rax),%rcx
-       ja bad_from_user
-       ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
-               copy_user_generic_unrolled,copy_user_generic_string,    \
-               copy_user_enhanced_fast_string
-       CFI_ENDPROC
-ENDPROC(_copy_from_user)
-
-       .section .fixup,"ax"
-       /* must zero dest */
-ENTRY(bad_from_user)
-bad_from_user:
-       CFI_STARTPROC
-       movl %edx,%ecx
-       xorl %eax,%eax
-       rep
-       stosb
-bad_to_user:
-       movl %edx,%eax
-       ret
-       CFI_ENDPROC
-ENDPROC(bad_from_user)
-       .previous
-
 /*
  * copy_user_generic_unrolled - memory copy with exception handling.
  * This version is for CPUs like P4 that don't have efficient micro
@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
  */
 ENTRY(copy_user_generic_unrolled)
        CFI_STARTPROC
+       ASM_PAX_OPEN_USERLAND
        ASM_STAC
        cmpl $8,%edx
        jb 20f          /* less then 8 bytes, go to byte copy loop */
@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
        jnz 21b
 23:    xor %eax,%eax
        ASM_CLAC
+       ASM_PAX_CLOSE_USERLAND
+       pax_force_retaddr
        ret
 
        .section .fixup,"ax"
@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
  */
 ENTRY(copy_user_generic_string)
        CFI_STARTPROC
+       ASM_PAX_OPEN_USERLAND
        ASM_STAC
        cmpl $8,%edx
        jb 2f           /* less than 8 bytes, go to byte copy loop */
@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
        movsb
        xorl %eax,%eax
        ASM_CLAC
+       ASM_PAX_CLOSE_USERLAND
+       pax_force_retaddr
        ret
 
        .section .fixup,"ax"
@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
  */
 ENTRY(copy_user_enhanced_fast_string)
        CFI_STARTPROC
+       ASM_PAX_OPEN_USERLAND
        ASM_STAC
        movl %edx,%ecx
 1:     rep
        movsb
        xorl %eax,%eax
        ASM_CLAC
+       ASM_PAX_CLOSE_USERLAND
+       pax_force_retaddr
        ret
 
        .section .fixup,"ax"
index 6a4f43c2d9e6d6eea725f5141eb98d9b12935b48..c70fb528860b31f4e6e37eb4fdb811eecb72e2b4 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
+#include <asm/alternative-asm.h>
 
 #define FIX_ALIGNMENT 1
 
@@ -16,6 +17,7 @@
 #include <asm/thread_info.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/pgtable.h>
 
        .macro ALIGN_DESTINATION
 #ifdef FIX_ALIGNMENT
  */
 ENTRY(__copy_user_nocache)
        CFI_STARTPROC
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       mov pax_user_shadow_base,%rcx
+       cmp %rcx,%rsi
+       jae 1f
+       add %rcx,%rsi
+1:
+#endif
+
+       ASM_PAX_OPEN_USERLAND
        ASM_STAC
        cmpl $8,%edx
        jb 20f          /* less then 8 bytes, go to byte copy loop */
@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
        jnz 21b
 23:    xorl %eax,%eax
        ASM_CLAC
+       ASM_PAX_CLOSE_USERLAND
        sfence
+       pax_force_retaddr
        ret
 
        .section .fixup,"ax"
index 2419d5fefae30ac2453fabf71c633ede013d2e4a..fe52d0e50f2efbc3e542459e3477ac52fc6a3aed 100644 (file)
@@ -9,6 +9,7 @@
 #include <asm/dwarf2.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
+#include <asm/alternative-asm.h>
 
 /*
  * Checksum copy with exception handling.
@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
        CFI_ADJUST_CFA_OFFSET 7*8
        movq  %rbx, 2*8(%rsp)
        CFI_REL_OFFSET rbx, 2*8
-       movq  %r12, 3*8(%rsp)
-       CFI_REL_OFFSET r12, 3*8
+       movq  %r15, 3*8(%rsp)
+       CFI_REL_OFFSET r15, 3*8
        movq  %r14, 4*8(%rsp)
        CFI_REL_OFFSET r14, 4*8
        movq  %r13, 5*8(%rsp)
@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
        movl  %edx, %ecx
 
        xorl  %r9d, %r9d
-       movq  %rcx, %r12
+       movq  %rcx, %r15
 
-       shrq  $6, %r12
+       shrq  $6, %r15
        jz      .Lhandle_tail       /* < 64 */
 
        clc
 
        /* main loop. clear in 64 byte blocks */
        /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
-       /* r11: temp3, rdx: temp4, r12 loopcnt */
+       /* r11: temp3, rdx: temp4, r15 loopcnt */
        /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
        .p2align 4
 .Lloop:
@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
        adcq  %r14, %rax
        adcq  %r13, %rax
 
-       decl %r12d
+       decl %r15d
 
        dest
        movq %rbx, (%rsi)
@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
 .Lende:
        movq 2*8(%rsp), %rbx
        CFI_RESTORE rbx
-       movq 3*8(%rsp), %r12
-       CFI_RESTORE r12
+       movq 3*8(%rsp), %r15
+       CFI_RESTORE r15
        movq 4*8(%rsp), %r14
        CFI_RESTORE r14
        movq 5*8(%rsp), %r13
@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
        CFI_RESTORE rbp
        addq $7*8, %rsp
        CFI_ADJUST_CFA_OFFSET -7*8
+       pax_force_retaddr
        ret
        CFI_RESTORE_STATE
 
index 1318f75d56e4f072885276a18ca00363f074fcd4..44c30fd26611bb2aeb0dab129d76d57d32358bc1 100644 (file)
@@ -52,10 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
                        len -= 2;
                }
        }
+       pax_open_userland();
        stac();
-       isum = csum_partial_copy_generic((__force const void *)src,
+       isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
                                dst, len, isum, errp, NULL);
        clac();
+       pax_close_userland();
        if (unlikely(*errp))
                goto out_err;
 
@@ -109,10 +111,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
        }
 
        *errp = 0;
+       pax_open_userland();
        stac();
-       ret = csum_partial_copy_generic(src, (void __force *)dst,
+       ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
                                        len, isum, NULL, errp);
        clac();
+       pax_close_userland();
        return ret;
 }
 EXPORT_SYMBOL(csum_partial_copy_to_user);
index a4512359656aea8fcda1d2c19d45f3bb4936bee3..1daa95616dfc6958f5294aaf94c333adad029792 100644 (file)
 #include <asm/thread_info.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/segment.h>
+#include <asm/pgtable.h>
+#include <asm/alternative-asm.h>
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
+#define __copyuser_seg gs;
+#else
+#define __copyuser_seg
+#endif
 
        .text
 ENTRY(__get_user_1)
        CFI_STARTPROC
+
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
        GET_THREAD_INFO(%_ASM_DX)
        cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
        jae bad_get_user
        ASM_STAC
-1:     movzbl (%_ASM_AX),%edx
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+       mov pax_user_shadow_base,%_ASM_DX
+       cmp %_ASM_DX,%_ASM_AX
+       jae 1234f
+       add %_ASM_DX,%_ASM_AX
+1234:
+#endif
+
+#endif
+
+1:     __copyuser_seg movzbl (%_ASM_AX),%edx
        xor %eax,%eax
        ASM_CLAC
+       pax_force_retaddr
        ret
        CFI_ENDPROC
 ENDPROC(__get_user_1)
@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
 ENTRY(__get_user_2)
        CFI_STARTPROC
        add $1,%_ASM_AX
+
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
        jc bad_get_user
        GET_THREAD_INFO(%_ASM_DX)
        cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
        jae bad_get_user
        ASM_STAC
-2:     movzwl -1(%_ASM_AX),%edx
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+       mov pax_user_shadow_base,%_ASM_DX
+       cmp %_ASM_DX,%_ASM_AX
+       jae 1234f
+       add %_ASM_DX,%_ASM_AX
+1234:
+#endif
+
+#endif
+
+2:     __copyuser_seg movzwl -1(%_ASM_AX),%edx
        xor %eax,%eax
        ASM_CLAC
+       pax_force_retaddr
        ret
        CFI_ENDPROC
 ENDPROC(__get_user_2)
@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
 ENTRY(__get_user_4)
        CFI_STARTPROC
        add $3,%_ASM_AX
+
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
        jc bad_get_user
        GET_THREAD_INFO(%_ASM_DX)
        cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
        jae bad_get_user
        ASM_STAC
-3:     movl -3(%_ASM_AX),%edx
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+       mov pax_user_shadow_base,%_ASM_DX
+       cmp %_ASM_DX,%_ASM_AX
+       jae 1234f
+       add %_ASM_DX,%_ASM_AX
+1234:
+#endif
+
+#endif
+
+3:     __copyuser_seg movl -3(%_ASM_AX),%edx
        xor %eax,%eax
        ASM_CLAC
+       pax_force_retaddr
        ret
        CFI_ENDPROC
 ENDPROC(__get_user_4)
@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
        GET_THREAD_INFO(%_ASM_DX)
        cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
        jae bad_get_user
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+       mov pax_user_shadow_base,%_ASM_DX
+       cmp %_ASM_DX,%_ASM_AX
+       jae 1234f
+       add %_ASM_DX,%_ASM_AX
+1234:
+#endif
+
        ASM_STAC
 4:     movq -7(%_ASM_AX),%rdx
        xor %eax,%eax
        ASM_CLAC
+       pax_force_retaddr
        ret
 #else
        add $7,%_ASM_AX
@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
        cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
        jae bad_get_user_8
        ASM_STAC
-4:     movl -7(%_ASM_AX),%edx
-5:     movl -3(%_ASM_AX),%ecx
+4:     __copyuser_seg movl -7(%_ASM_AX),%edx
+5:     __copyuser_seg movl -3(%_ASM_AX),%ecx
        xor %eax,%eax
        ASM_CLAC
+       pax_force_retaddr
        ret
 #endif
        CFI_ENDPROC
@@ -113,6 +175,7 @@ bad_get_user:
        xor %edx,%edx
        mov $(-EFAULT),%_ASM_AX
        ASM_CLAC
+       pax_force_retaddr
        ret
        CFI_ENDPROC
 END(bad_get_user)
@@ -124,6 +187,7 @@ bad_get_user_8:
        xor %ecx,%ecx
        mov $(-EFAULT),%_ASM_AX
        ASM_CLAC
+       pax_force_retaddr
        ret
        CFI_ENDPROC
 END(bad_get_user_8)
index 1313ae6b478b6c439741ee032a8c33b86868ee2c..84f25ead47401867c038157c9ce2162f3df0d4f2 100644 (file)
 
 #ifdef __KERNEL__
 #include <linux/string.h>
+#include <asm/pgtable_types.h>
 #else
 #include <string.h>
+#define ktla_ktva(addr) addr
 #endif
 #include <asm/inat.h>
 #include <asm/insn.h>
@@ -53,9 +55,9 @@
 void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
 {
        memset(insn, 0, sizeof(*insn));
-       insn->kaddr = kaddr;
-       insn->end_kaddr = kaddr + buf_len;
-       insn->next_byte = kaddr;
+       insn->kaddr = ktla_ktva(kaddr);
+       insn->end_kaddr = insn->kaddr + buf_len;
+       insn->next_byte = insn->kaddr;
        insn->x86_64 = x86_64 ? 1 : 0;
        insn->opnd_bytes = 4;
        if (x86_64)
index 05a95e713da885e8686bc2bba1fdf544c03d2d74..326f2fa956ed160b6b07988ff8faadc42dfe305c 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
+#include <asm/alternative-asm.h>
 
 /*
  * override generic version in lib/iomap_copy.c
@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
        CFI_STARTPROC
        movl %edx,%ecx
        rep movsd
+       pax_force_retaddr
        ret
        CFI_ENDPROC
 ENDPROC(__iowrite32_copy)
index 56313a3261888d0e7eb866788c24f806acf5d47a..0db417ec2e0b24aa7cdeff17f28b5a78d861fd3e 100644 (file)
@@ -24,7 +24,7 @@
  * This gets patched over the unrolled variant (below) via the
  * alternative instructions framework:
  */
-       .section .altinstr_replacement, "ax", @progbits
+       .section .altinstr_replacement, "a", @progbits
 .Lmemcpy_c:
        movq %rdi, %rax
        movq %rdx, %rcx
@@ -33,6 +33,7 @@
        rep movsq
        movl %edx, %ecx
        rep movsb
+       pax_force_retaddr
        ret
 .Lmemcpy_e:
        .previous
  * This gets patched over the unrolled variant (below) via the
  * alternative instructions framework:
  */
-       .section .altinstr_replacement, "ax", @progbits
+       .section .altinstr_replacement, "a", @progbits
 .Lmemcpy_c_e:
        movq %rdi, %rax
        movq %rdx, %rcx
        rep movsb
+       pax_force_retaddr
        ret
 .Lmemcpy_e_e:
        .previous
@@ -136,6 +138,7 @@ ENTRY(memcpy)
        movq %r9,       1*8(%rdi)
        movq %r10,      -2*8(%rdi, %rdx)
        movq %r11,      -1*8(%rdi, %rdx)
+       pax_force_retaddr
        retq
        .p2align 4
 .Lless_16bytes:
@@ -148,6 +151,7 @@ ENTRY(memcpy)
        movq -1*8(%rsi, %rdx),  %r9
        movq %r8,       0*8(%rdi)
        movq %r9,       -1*8(%rdi, %rdx)
+       pax_force_retaddr
        retq
        .p2align 4
 .Lless_8bytes:
@@ -161,6 +165,7 @@ ENTRY(memcpy)
        movl -4(%rsi, %rdx), %r8d
        movl %ecx, (%rdi)
        movl %r8d, -4(%rdi, %rdx)
+       pax_force_retaddr
        retq
        .p2align 4
 .Lless_3bytes:
@@ -179,6 +184,7 @@ ENTRY(memcpy)
        movb %cl, (%rdi)
 
 .Lend:
+       pax_force_retaddr
        retq
        CFI_ENDPROC
 ENDPROC(memcpy)
index 65268a6104f45e09d5e45f75cb63edf6527164a0..dd1de11d7d7d3b2d5e85de68b1c3e37a61d65afc 100644 (file)
@@ -202,14 +202,16 @@ ENTRY(memmove)
        movb (%rsi), %r11b
        movb %r11b, (%rdi)
 13:
+       pax_force_retaddr
        retq
        CFI_ENDPROC
 
-       .section .altinstr_replacement,"ax"
+       .section .altinstr_replacement,"a"
 .Lmemmove_begin_forward_efs:
        /* Forward moving data. */
        movq %rdx, %rcx
        rep movsb
+       pax_force_retaddr
        retq
 .Lmemmove_end_forward_efs:
        .previous
index 2dcb3808cbdab6c91b9fbbf0d58790466780bb55..2eb79fe145e1a611f6abc4cb0801fb3ce4322912 100644 (file)
@@ -16,7 +16,7 @@
  * 
  * rax   original destination
  */    
-       .section .altinstr_replacement, "ax", @progbits
+       .section .altinstr_replacement, "a", @progbits
 .Lmemset_c:
        movq %rdi,%r9
        movq %rdx,%rcx
@@ -30,6 +30,7 @@
        movl %edx,%ecx
        rep stosb
        movq %r9,%rax
+       pax_force_retaddr
        ret
 .Lmemset_e:
        .previous
  *
  * rax   original destination
  */
-       .section .altinstr_replacement, "ax", @progbits
+       .section .altinstr_replacement, "a", @progbits
 .Lmemset_c_e:
        movq %rdi,%r9
        movb %sil,%al
        movq %rdx,%rcx
        rep stosb
        movq %r9,%rax
+       pax_force_retaddr
        ret
 .Lmemset_e_e:
        .previous
@@ -118,6 +120,7 @@ ENTRY(__memset)
 
 .Lende:
        movq    %r10,%rax
+       pax_force_retaddr
        ret
 
        CFI_RESTORE_STATE
index c9f2d9ba8dd8c2da54b0bbd07c37be1d38aa49c4..e7fd2c0616b4032d359108d8b5559d5463a5447d 100644 (file)
@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 {
        void *p;
        int i;
+       unsigned long cr0;
 
        if (unlikely(in_interrupt()))
                return __memcpy(to, from, len);
@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
        kernel_fpu_begin();
 
        __asm__ __volatile__ (
-               "1: prefetch (%0)\n"            /* This set is 28 bytes */
-               "   prefetch 64(%0)\n"
-               "   prefetch 128(%0)\n"
-               "   prefetch 192(%0)\n"
-               "   prefetch 256(%0)\n"
+               "1: prefetch (%1)\n"            /* This set is 28 bytes */
+               "   prefetch 64(%1)\n"
+               "   prefetch 128(%1)\n"
+               "   prefetch 192(%1)\n"
+               "   prefetch 256(%1)\n"
                "2:  \n"
                ".section .fixup, \"ax\"\n"
-               "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
+               "3:  \n"
+
+#ifdef CONFIG_PAX_KERNEXEC
+               "   movl %%cr0, %0\n"
+               "   movl %0, %%eax\n"
+               "   andl $0xFFFEFFFF, %%eax\n"
+               "   movl %%eax, %%cr0\n"
+#endif
+
+               "   movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
+
+#ifdef CONFIG_PAX_KERNEXEC
+               "   movl %0, %%cr0\n"
+#endif
+
                "   jmp 2b\n"
                ".previous\n"
                        _ASM_EXTABLE(1b, 3b)
-                       : : "r" (from));
+                       : "=&r" (cr0) : "r" (from) : "ax");
 
        for ( ; i > 5; i--) {
                __asm__ __volatile__ (
-               "1:  prefetch 320(%0)\n"
-               "2:  movq (%0), %%mm0\n"
-               "  movq 8(%0), %%mm1\n"
-               "  movq 16(%0), %%mm2\n"
-               "  movq 24(%0), %%mm3\n"
-               "  movq %%mm0, (%1)\n"
-               "  movq %%mm1, 8(%1)\n"
-               "  movq %%mm2, 16(%1)\n"
-               "  movq %%mm3, 24(%1)\n"
-               "  movq 32(%0), %%mm0\n"
-               "  movq 40(%0), %%mm1\n"
-               "  movq 48(%0), %%mm2\n"
-               "  movq 56(%0), %%mm3\n"
-               "  movq %%mm0, 32(%1)\n"
-               "  movq %%mm1, 40(%1)\n"
-               "  movq %%mm2, 48(%1)\n"
-               "  movq %%mm3, 56(%1)\n"
+               "1:  prefetch 320(%1)\n"
+               "2:  movq (%1), %%mm0\n"
+               "  movq 8(%1), %%mm1\n"
+               "  movq 16(%1), %%mm2\n"
+               "  movq 24(%1), %%mm3\n"
+               "  movq %%mm0, (%2)\n"
+               "  movq %%mm1, 8(%2)\n"
+               "  movq %%mm2, 16(%2)\n"
+               "  movq %%mm3, 24(%2)\n"
+               "  movq 32(%1), %%mm0\n"
+               "  movq 40(%1), %%mm1\n"
+               "  movq 48(%1), %%mm2\n"
+               "  movq 56(%1), %%mm3\n"
+               "  movq %%mm0, 32(%2)\n"
+               "  movq %%mm1, 40(%2)\n"
+               "  movq %%mm2, 48(%2)\n"
+               "  movq %%mm3, 56(%2)\n"
                ".section .fixup, \"ax\"\n"
-               "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
+               "3:\n"
+
+#ifdef CONFIG_PAX_KERNEXEC
+               "   movl %%cr0, %0\n"
+               "   movl %0, %%eax\n"
+               "   andl $0xFFFEFFFF, %%eax\n"
+               "   movl %%eax, %%cr0\n"
+#endif
+
+               "   movw $0x05EB, 1b\n" /* jmp on 5 bytes */
+
+#ifdef CONFIG_PAX_KERNEXEC
+               "   movl %0, %%cr0\n"
+#endif
+
                "   jmp 2b\n"
                ".previous\n"
                        _ASM_EXTABLE(1b, 3b)
-                       : : "r" (from), "r" (to) : "memory");
+                       : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
 
                from += 64;
                to += 64;
@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
 static void fast_copy_page(void *to, void *from)
 {
        int i;
+       unsigned long cr0;
 
        kernel_fpu_begin();
 
@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
         * but that is for later. -AV
         */
        __asm__ __volatile__(
-               "1: prefetch (%0)\n"
-               "   prefetch 64(%0)\n"
-               "   prefetch 128(%0)\n"
-               "   prefetch 192(%0)\n"
-               "   prefetch 256(%0)\n"
+               "1: prefetch (%1)\n"
+               "   prefetch 64(%1)\n"
+               "   prefetch 128(%1)\n"
+               "   prefetch 192(%1)\n"
+               "   prefetch 256(%1)\n"
                "2:  \n"
                ".section .fixup, \"ax\"\n"
-               "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
+               "3:  \n"
+
+#ifdef CONFIG_PAX_KERNEXEC
+               "   movl %%cr0, %0\n"
+               "   movl %0, %%eax\n"
+               "   andl $0xFFFEFFFF, %%eax\n"
+               "   movl %%eax, %%cr0\n"
+#endif
+
+               "   movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
+
+#ifdef CONFIG_PAX_KERNEXEC
+               "   movl %0, %%cr0\n"
+#endif
+
                "   jmp 2b\n"
                ".previous\n"
-                       _ASM_EXTABLE(1b, 3b) : : "r" (from));
+                       _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
 
        for (i = 0; i < (4096-320)/64; i++) {
                __asm__ __volatile__ (
-               "1: prefetch 320(%0)\n"
-               "2: movq (%0), %%mm0\n"
-               "   movntq %%mm0, (%1)\n"
-               "   movq 8(%0), %%mm1\n"
-               "   movntq %%mm1, 8(%1)\n"
-               "   movq 16(%0), %%mm2\n"
-               "   movntq %%mm2, 16(%1)\n"
-               "   movq 24(%0), %%mm3\n"
-               "   movntq %%mm3, 24(%1)\n"
-               "   movq 32(%0), %%mm4\n"
-               "   movntq %%mm4, 32(%1)\n"
-               "   movq 40(%0), %%mm5\n"
-               "   movntq %%mm5, 40(%1)\n"
-               "   movq 48(%0), %%mm6\n"
-               "   movntq %%mm6, 48(%1)\n"
-               "   movq 56(%0), %%mm7\n"
-               "   movntq %%mm7, 56(%1)\n"
+               "1: prefetch 320(%1)\n"
+               "2: movq (%1), %%mm0\n"
+               "   movntq %%mm0, (%2)\n"
+               "   movq 8(%1), %%mm1\n"
+               "   movntq %%mm1, 8(%2)\n"
+               "   movq 16(%1), %%mm2\n"
+               "   movntq %%mm2, 16(%2)\n"
+               "   movq 24(%1), %%mm3\n"
+               "   movntq %%mm3, 24(%2)\n"
+               "   movq 32(%1), %%mm4\n"
+               "   movntq %%mm4, 32(%2)\n"
+               "   movq 40(%1), %%mm5\n"
+               "   movntq %%mm5, 40(%2)\n"
+               "   movq 48(%1), %%mm6\n"
+               "   movntq %%mm6, 48(%2)\n"
+               "   movq 56(%1), %%mm7\n"
+               "   movntq %%mm7, 56(%2)\n"
                ".section .fixup, \"ax\"\n"
-               "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
+               "3:\n"
+
+#ifdef CONFIG_PAX_KERNEXEC
+               "   movl %%cr0, %0\n"
+               "   movl %0, %%eax\n"
+               "   andl $0xFFFEFFFF, %%eax\n"
+               "   movl %%eax, %%cr0\n"
+#endif
+
+               "   movw $0x05EB, 1b\n" /* jmp on 5 bytes */
+
+#ifdef CONFIG_PAX_KERNEXEC
+               "   movl %0, %%cr0\n"
+#endif
+
                "   jmp 2b\n"
                ".previous\n"
-               _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
+               _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
 
                from += 64;
                to += 64;
@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
 static void fast_copy_page(void *to, void *from)
 {
        int i;
+       unsigned long cr0;
 
        kernel_fpu_begin();
 
        __asm__ __volatile__ (
-               "1: prefetch (%0)\n"
-               "   prefetch 64(%0)\n"
-               "   prefetch 128(%0)\n"
-               "   prefetch 192(%0)\n"
-               "   prefetch 256(%0)\n"
+               "1: prefetch (%1)\n"
+               "   prefetch 64(%1)\n"
+               "   prefetch 128(%1)\n"
+               "   prefetch 192(%1)\n"
+               "   prefetch 256(%1)\n"
                "2:  \n"
                ".section .fixup, \"ax\"\n"
-               "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
+               "3:  \n"
+
+#ifdef CONFIG_PAX_KERNEXEC
+               "   movl %%cr0, %0\n"
+               "   movl %0, %%eax\n"
+               "   andl $0xFFFEFFFF, %%eax\n"
+               "   movl %%eax, %%cr0\n"
+#endif
+
+               "   movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
+
+#ifdef CONFIG_PAX_KERNEXEC
+               "   movl %0, %%cr0\n"
+#endif
+
                "   jmp 2b\n"
                ".previous\n"
-                       _ASM_EXTABLE(1b, 3b) : : "r" (from));
+                       _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
 
        for (i = 0; i < 4096/64; i++) {
                __asm__ __volatile__ (
-               "1: prefetch 320(%0)\n"
-               "2: movq (%0), %%mm0\n"
-               "   movq 8(%0), %%mm1\n"
-               "   movq 16(%0), %%mm2\n"
-               "   movq 24(%0), %%mm3\n"
-               "   movq %%mm0, (%1)\n"
-               "   movq %%mm1, 8(%1)\n"
-               "   movq %%mm2, 16(%1)\n"
-               "   movq %%mm3, 24(%1)\n"
-               "   movq 32(%0), %%mm0\n"
-               "   movq 40(%0), %%mm1\n"
-               "   movq 48(%0), %%mm2\n"
-               "   movq 56(%0), %%mm3\n"
-               "   movq %%mm0, 32(%1)\n"
-               "   movq %%mm1, 40(%1)\n"
-               "   movq %%mm2, 48(%1)\n"
-               "   movq %%mm3, 56(%1)\n"
+               "1: prefetch 320(%1)\n"
+               "2: movq (%1), %%mm0\n"
+               "   movq 8(%1), %%mm1\n"
+               "   movq 16(%1), %%mm2\n"
+               "   movq 24(%1), %%mm3\n"
+               "   movq %%mm0, (%2)\n"
+               "   movq %%mm1, 8(%2)\n"
+               "   movq %%mm2, 16(%2)\n"
+               "   movq %%mm3, 24(%2)\n"
+               "   movq 32(%1), %%mm0\n"
+               "   movq 40(%1), %%mm1\n"
+               "   movq 48(%1), %%mm2\n"
+               "   movq 56(%1), %%mm3\n"
+               "   movq %%mm0, 32(%2)\n"
+               "   movq %%mm1, 40(%2)\n"
+               "   movq %%mm2, 48(%2)\n"
+               "   movq %%mm3, 56(%2)\n"
                ".section .fixup, \"ax\"\n"
-               "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
+               "3:\n"
+
+#ifdef CONFIG_PAX_KERNEXEC
+               "   movl %%cr0, %0\n"
+               "   movl %0, %%eax\n"
+               "   andl $0xFFFEFFFF, %%eax\n"
+               "   movl %%eax, %%cr0\n"
+#endif
+
+               "   movw $0x05EB, 1b\n" /* jmp on 5 bytes */
+
+#ifdef CONFIG_PAX_KERNEXEC
+               "   movl %0, %%cr0\n"
+#endif
+
                "   jmp 2b\n"
                ".previous\n"
                        _ASM_EXTABLE(1b, 3b)
-                       : : "r" (from), "r" (to) : "memory");
+                       : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
 
                from += 64;
                to += 64;
index f6d13eefad1063d5e525dbc0cfff1ccc12b34bc9..d78944054cfff46f5630a51a2f5763dcf73dd8b9 100644 (file)
@@ -3,6 +3,7 @@
 #include <asm/dwarf2.h>
 #include <asm/asm.h>
 #include <asm/msr.h>
+#include <asm/alternative-asm.h>
 
 #ifdef CONFIG_X86_64
 /*
@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
        movl    %edi, 28(%r10)
        popq_cfi %rbp
        popq_cfi %rbx
+       pax_force_retaddr
        ret
 3:
        CFI_RESTORE_STATE
index fc6ba17a7eec2a957fc2022d8a370bb1b9e5b15f..d4d989dd8822d34f3f33d9eca783b2d1b45bc960 100644 (file)
@@ -16,7 +16,9 @@
 #include <asm/errno.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
-
+#include <asm/segment.h>
+#include <asm/pgtable.h>
+#include <asm/alternative-asm.h>
 
 /*
  * __put_user_X
  * as they get called from within inline assembly.
  */
 
-#define ENTER  CFI_STARTPROC ; \
-               GET_THREAD_INFO(%_ASM_BX)
-#define EXIT   ASM_CLAC ;      \
-               ret ;           \
+#define ENTER  CFI_STARTPROC
+#define EXIT   ASM_CLAC ;              \
+               pax_force_retaddr ;     \
+               ret ;                   \
                CFI_ENDPROC
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+#define _DEST %_ASM_CX,%_ASM_BX
+#else
+#define _DEST %_ASM_CX
+#endif
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
+#define __copyuser_seg gs;
+#else
+#define __copyuser_seg
+#endif
+
 .text
 ENTRY(__put_user_1)
        ENTER
+
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+       GET_THREAD_INFO(%_ASM_BX)
        cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
        jae bad_put_user
        ASM_STAC
-1:     movb %al,(%_ASM_CX)
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+       mov pax_user_shadow_base,%_ASM_BX
+       cmp %_ASM_BX,%_ASM_CX
+       jb 1234f
+       xor %ebx,%ebx
+1234:
+#endif
+
+#endif
+
+1:     __copyuser_seg movb %al,(_DEST)
        xor %eax,%eax
        EXIT
 ENDPROC(__put_user_1)
 
 ENTRY(__put_user_2)
        ENTER
+
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+       GET_THREAD_INFO(%_ASM_BX)
        mov TI_addr_limit(%_ASM_BX),%_ASM_BX
        sub $1,%_ASM_BX
        cmp %_ASM_BX,%_ASM_CX
        jae bad_put_user
        ASM_STAC
-2:     movw %ax,(%_ASM_CX)
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+       mov pax_user_shadow_base,%_ASM_BX
+       cmp %_ASM_BX,%_ASM_CX
+       jb 1234f
+       xor %ebx,%ebx
+1234:
+#endif
+
+#endif
+
+2:     __copyuser_seg movw %ax,(_DEST)
        xor %eax,%eax
        EXIT
 ENDPROC(__put_user_2)
 
 ENTRY(__put_user_4)
        ENTER
+
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+       GET_THREAD_INFO(%_ASM_BX)
        mov TI_addr_limit(%_ASM_BX),%_ASM_BX
        sub $3,%_ASM_BX
        cmp %_ASM_BX,%_ASM_CX
        jae bad_put_user
        ASM_STAC
-3:     movl %eax,(%_ASM_CX)
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+       mov pax_user_shadow_base,%_ASM_BX
+       cmp %_ASM_BX,%_ASM_CX
+       jb 1234f
+       xor %ebx,%ebx
+1234:
+#endif
+
+#endif
+
+3:     __copyuser_seg movl %eax,(_DEST)
        xor %eax,%eax
        EXIT
 ENDPROC(__put_user_4)
 
 ENTRY(__put_user_8)
        ENTER
+
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+       GET_THREAD_INFO(%_ASM_BX)
        mov TI_addr_limit(%_ASM_BX),%_ASM_BX
        sub $7,%_ASM_BX
        cmp %_ASM_BX,%_ASM_CX
        jae bad_put_user
        ASM_STAC
-4:     mov %_ASM_AX,(%_ASM_CX)
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+       mov pax_user_shadow_base,%_ASM_BX
+       cmp %_ASM_BX,%_ASM_CX
+       jb 1234f
+       xor %ebx,%ebx
+1234:
+#endif
+
+#endif
+
+4:     __copyuser_seg mov %_ASM_AX,(_DEST)
 #ifdef CONFIG_X86_32
-5:     movl %edx,4(%_ASM_CX)
+5:     __copyuser_seg movl %edx,4(_DEST)
 #endif
        xor %eax,%eax
        EXIT
index 5dff5f042468acfedf6e4058643855d59c75315d..cadebf48f0efff2baf54cbca67795f31f0a92474 100644 (file)
@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
        __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
        CFI_RESTORE __ASM_REG(dx)
        restore_common_regs
+       pax_force_retaddr
        ret
        CFI_ENDPROC
 ENDPROC(call_rwsem_down_read_failed)
@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
        movq %rax,%rdi
        call rwsem_down_write_failed
        restore_common_regs
+       pax_force_retaddr
        ret
        CFI_ENDPROC
 ENDPROC(call_rwsem_down_write_failed)
@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
        movq %rax,%rdi
        call rwsem_wake
        restore_common_regs
-1:     ret
+1:     pax_force_retaddr
+       ret
        CFI_ENDPROC
 ENDPROC(call_rwsem_wake)
 
@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
        __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
        CFI_RESTORE __ASM_REG(dx)
        restore_common_regs
+       pax_force_retaddr
        ret
        CFI_ENDPROC
 ENDPROC(call_rwsem_downgrade_wake)
index b30b5ebd614ada18d25b3fb7403f7a32e32a96d9..2b5705221305a4043cc60154c99f490a22e3ff41 100644 (file)
@@ -9,6 +9,7 @@
 #include <asm/dwarf2.h>
 #include <asm/calling.h>
 #include <asm/asm.h>
+#include <asm/alternative-asm.h>
 
        /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
        .macro THUNK name, func, put_ret_addr_in_rdi=0
 \name:
        CFI_STARTPROC
 
-       /* this one pushes 9 elems, the next one would be %rIP */
-       SAVE_ARGS
+       /* this one pushes 15+1 elems, the next one would be %rIP */
+       SAVE_ARGS 8
 
        .if \put_ret_addr_in_rdi
-       movq_cfi_restore 9*8, rdi
+       movq_cfi_restore RIP, rdi
        .endif
 
        call \func
 
        /* SAVE_ARGS below is used only for the .cfi directives it contains. */
        CFI_STARTPROC
-       SAVE_ARGS
+       SAVE_ARGS 8
 restore:
-       RESTORE_ARGS
+       RESTORE_ARGS 1,8
+       pax_force_retaddr
        ret
        CFI_ENDPROC
        _ASM_NOKPROBE(restore)
index e2f5e21c03b3044a14ed12cb460ea2e3c0a0e13f..4b22130919b9ef91d05c9064a8c399c3c4257f54 100644 (file)
@@ -42,11 +42,13 @@ do {                                                                        \
        int __d0;                                                       \
        might_fault();                                                  \
        __asm__ __volatile__(                                           \
+               __COPYUSER_SET_ES                                       \
                ASM_STAC "\n"                                           \
                "0:     rep; stosl\n"                                   \
                "       movl %2,%0\n"                                   \
                "1:     rep; stosb\n"                                   \
                "2: " ASM_CLAC "\n"                                     \
+               __COPYUSER_RESTORE_ES                                   \
                ".section .fixup,\"ax\"\n"                              \
                "3:     lea 0(%2,%0,4),%0\n"                            \
                "       jmp 2b\n"                                       \
@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
 
 #ifdef CONFIG_X86_INTEL_USERCOPY
 static unsigned long
-__copy_user_intel(void __user *to, const void *from, unsigned long size)
+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
 {
        int d0, d1;
        __asm__ __volatile__(
@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
                       "       .align 2,0x90\n"
                       "3:     movl 0(%4), %%eax\n"
                       "4:     movl 4(%4), %%edx\n"
-                      "5:     movl %%eax, 0(%3)\n"
-                      "6:     movl %%edx, 4(%3)\n"
+                      "5:     "__copyuser_seg" movl %%eax, 0(%3)\n"
+                      "6:     "__copyuser_seg" movl %%edx, 4(%3)\n"
                       "7:     movl 8(%4), %%eax\n"
                       "8:     movl 12(%4),%%edx\n"
-                      "9:     movl %%eax, 8(%3)\n"
-                      "10:    movl %%edx, 12(%3)\n"
+                      "9:     "__copyuser_seg" movl %%eax, 8(%3)\n"
+                      "10:    "__copyuser_seg" movl %%edx, 12(%3)\n"
                       "11:    movl 16(%4), %%eax\n"
                       "12:    movl 20(%4), %%edx\n"
-                      "13:    movl %%eax, 16(%3)\n"
-                      "14:    movl %%edx, 20(%3)\n"
+                      "13:    "__copyuser_seg" movl %%eax, 16(%3)\n"
+                      "14:    "__copyuser_seg" movl %%edx, 20(%3)\n"
                       "15:    movl 24(%4), %%eax\n"
                       "16:    movl 28(%4), %%edx\n"
-                      "17:    movl %%eax, 24(%3)\n"
-                      "18:    movl %%edx, 28(%3)\n"
+                      "17:    "__copyuser_seg" movl %%eax, 24(%3)\n"
+                      "18:    "__copyuser_seg" movl %%edx, 28(%3)\n"
                       "19:    movl 32(%4), %%eax\n"
                       "20:    movl 36(%4), %%edx\n"
-                      "21:    movl %%eax, 32(%3)\n"
-                      "22:    movl %%edx, 36(%3)\n"
+                      "21:    "__copyuser_seg" movl %%eax, 32(%3)\n"
+                      "22:    "__copyuser_seg" movl %%edx, 36(%3)\n"
                       "23:    movl 40(%4), %%eax\n"
                       "24:    movl 44(%4), %%edx\n"
-                      "25:    movl %%eax, 40(%3)\n"
-                      "26:    movl %%edx, 44(%3)\n"
+                      "25:    "__copyuser_seg" movl %%eax, 40(%3)\n"
+                      "26:    "__copyuser_seg" movl %%edx, 44(%3)\n"
                       "27:    movl 48(%4), %%eax\n"
                       "28:    movl 52(%4), %%edx\n"
-                      "29:    movl %%eax, 48(%3)\n"
-                      "30:    movl %%edx, 52(%3)\n"
+                      "29:    "__copyuser_seg" movl %%eax, 48(%3)\n"
+                      "30:    "__copyuser_seg" movl %%edx, 52(%3)\n"
                       "31:    movl 56(%4), %%eax\n"
                       "32:    movl 60(%4), %%edx\n"
-                      "33:    movl %%eax, 56(%3)\n"
-                      "34:    movl %%edx, 60(%3)\n"
+                      "33:    "__copyuser_seg" movl %%eax, 56(%3)\n"
+                      "34:    "__copyuser_seg" movl %%edx, 60(%3)\n"
                       "       addl $-64, %0\n"
                       "       addl $64, %4\n"
                       "       addl $64, %3\n"
@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
                       "       shrl  $2, %0\n"
                       "       andl  $3, %%eax\n"
                       "       cld\n"
+                      __COPYUSER_SET_ES
                       "99:    rep; movsl\n"
                       "36:    movl %%eax, %0\n"
                       "37:    rep; movsb\n"
                       "100:\n"
+                      __COPYUSER_RESTORE_ES
+                      ".section .fixup,\"ax\"\n"
+                      "101:   lea 0(%%eax,%0,4),%0\n"
+                      "       jmp 100b\n"
+                      ".previous\n"
+                      _ASM_EXTABLE(1b,100b)
+                      _ASM_EXTABLE(2b,100b)
+                      _ASM_EXTABLE(3b,100b)
+                      _ASM_EXTABLE(4b,100b)
+                      _ASM_EXTABLE(5b,100b)
+                      _ASM_EXTABLE(6b,100b)
+                      _ASM_EXTABLE(7b,100b)
+                      _ASM_EXTABLE(8b,100b)
+                      _ASM_EXTABLE(9b,100b)
+                      _ASM_EXTABLE(10b,100b)
+                      _ASM_EXTABLE(11b,100b)
+                      _ASM_EXTABLE(12b,100b)
+                      _ASM_EXTABLE(13b,100b)
+                      _ASM_EXTABLE(14b,100b)
+                      _ASM_EXTABLE(15b,100b)
+                      _ASM_EXTABLE(16b,100b)
+                      _ASM_EXTABLE(17b,100b)
+                      _ASM_EXTABLE(18b,100b)
+                      _ASM_EXTABLE(19b,100b)
+                      _ASM_EXTABLE(20b,100b)
+                      _ASM_EXTABLE(21b,100b)
+                      _ASM_EXTABLE(22b,100b)
+                      _ASM_EXTABLE(23b,100b)
+                      _ASM_EXTABLE(24b,100b)
+                      _ASM_EXTABLE(25b,100b)
+                      _ASM_EXTABLE(26b,100b)
+                      _ASM_EXTABLE(27b,100b)
+                      _ASM_EXTABLE(28b,100b)
+                      _ASM_EXTABLE(29b,100b)
+                      _ASM_EXTABLE(30b,100b)
+                      _ASM_EXTABLE(31b,100b)
+                      _ASM_EXTABLE(32b,100b)
+                      _ASM_EXTABLE(33b,100b)
+                      _ASM_EXTABLE(34b,100b)
+                      _ASM_EXTABLE(35b,100b)
+                      _ASM_EXTABLE(36b,100b)
+                      _ASM_EXTABLE(37b,100b)
+                      _ASM_EXTABLE(99b,101b)
+                      : "=&c"(size), "=&D" (d0), "=&S" (d1)
+                      :  "1"(to), "2"(from), "0"(size)
+                      : "eax", "edx", "memory");
+       return size;
+}
+
+static unsigned long
+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
+{
+       int d0, d1;
+       __asm__ __volatile__(
+                      "       .align 2,0x90\n"
+                      "1:     "__copyuser_seg" movl 32(%4), %%eax\n"
+                      "       cmpl $67, %0\n"
+                      "       jbe 3f\n"
+                      "2:     "__copyuser_seg" movl 64(%4), %%eax\n"
+                      "       .align 2,0x90\n"
+                      "3:     "__copyuser_seg" movl 0(%4), %%eax\n"
+                      "4:     "__copyuser_seg" movl 4(%4), %%edx\n"
+                      "5:     movl %%eax, 0(%3)\n"
+                      "6:     movl %%edx, 4(%3)\n"
+                      "7:     "__copyuser_seg" movl 8(%4), %%eax\n"
+                      "8:     "__copyuser_seg" movl 12(%4),%%edx\n"
+                      "9:     movl %%eax, 8(%3)\n"
+                      "10:    movl %%edx, 12(%3)\n"
+                      "11:    "__copyuser_seg" movl 16(%4), %%eax\n"
+                      "12:    "__copyuser_seg" movl 20(%4), %%edx\n"
+                      "13:    movl %%eax, 16(%3)\n"
+                      "14:    movl %%edx, 20(%3)\n"
+                      "15:    "__copyuser_seg" movl 24(%4), %%eax\n"
+                      "16:    "__copyuser_seg" movl 28(%4), %%edx\n"
+                      "17:    movl %%eax, 24(%3)\n"
+                      "18:    movl %%edx, 28(%3)\n"
+                      "19:    "__copyuser_seg" movl 32(%4), %%eax\n"
+                      "20:    "__copyuser_seg" movl 36(%4), %%edx\n"
+                      "21:    movl %%eax, 32(%3)\n"
+                      "22:    movl %%edx, 36(%3)\n"
+                      "23:    "__copyuser_seg" movl 40(%4), %%eax\n"
+                      "24:    "__copyuser_seg" movl 44(%4), %%edx\n"
+                      "25:    movl %%eax, 40(%3)\n"
+                      "26:    movl %%edx, 44(%3)\n"
+                      "27:    "__copyuser_seg" movl 48(%4), %%eax\n"
+                      "28:    "__copyuser_seg" movl 52(%4), %%edx\n"
+                      "29:    movl %%eax, 48(%3)\n"
+                      "30:    movl %%edx, 52(%3)\n"
+                      "31:    "__copyuser_seg" movl 56(%4), %%eax\n"
+                      "32:    "__copyuser_seg" movl 60(%4), %%edx\n"
+                      "33:    movl %%eax, 56(%3)\n"
+                      "34:    movl %%edx, 60(%3)\n"
+                      "       addl $-64, %0\n"
+                      "       addl $64, %4\n"
+                      "       addl $64, %3\n"
+                      "       cmpl $63, %0\n"
+                      "       ja  1b\n"
+                      "35:    movl  %0, %%eax\n"
+                      "       shrl  $2, %0\n"
+                      "       andl  $3, %%eax\n"
+                      "       cld\n"
+                      "99:    rep; "__copyuser_seg" movsl\n"
+                      "36:    movl %%eax, %0\n"
+                      "37:    rep; "__copyuser_seg" movsb\n"
+                      "100:\n"
                       ".section .fixup,\"ax\"\n"
                       "101:   lea 0(%%eax,%0,4),%0\n"
                       "       jmp 100b\n"
@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
        int d0, d1;
        __asm__ __volatile__(
                       "        .align 2,0x90\n"
-                      "0:      movl 32(%4), %%eax\n"
+                      "0:      "__copyuser_seg" movl 32(%4), %%eax\n"
                       "        cmpl $67, %0\n"
                       "        jbe 2f\n"
-                      "1:      movl 64(%4), %%eax\n"
+                      "1:      "__copyuser_seg" movl 64(%4), %%eax\n"
                       "        .align 2,0x90\n"
-                      "2:      movl 0(%4), %%eax\n"
-                      "21:     movl 4(%4), %%edx\n"
+                      "2:      "__copyuser_seg" movl 0(%4), %%eax\n"
+                      "21:     "__copyuser_seg" movl 4(%4), %%edx\n"
                       "        movl %%eax, 0(%3)\n"
                       "        movl %%edx, 4(%3)\n"
-                      "3:      movl 8(%4), %%eax\n"
-                      "31:     movl 12(%4),%%edx\n"
+                      "3:      "__copyuser_seg" movl 8(%4), %%eax\n"
+                      "31:     "__copyuser_seg" movl 12(%4),%%edx\n"
                       "        movl %%eax, 8(%3)\n"
                       "        movl %%edx, 12(%3)\n"
-                      "4:      movl 16(%4), %%eax\n"
-                      "41:     movl 20(%4), %%edx\n"
+                      "4:      "__copyuser_seg" movl 16(%4), %%eax\n"
+                      "41:     "__copyuser_seg" movl 20(%4), %%edx\n"
                       "        movl %%eax, 16(%3)\n"
                       "        movl %%edx, 20(%3)\n"
-                      "10:     movl 24(%4), %%eax\n"
-                      "51:     movl 28(%4), %%edx\n"
+                      "10:     "__copyuser_seg" movl 24(%4), %%eax\n"
+                      "51:     "__copyuser_seg" movl 28(%4), %%edx\n"
                       "        movl %%eax, 24(%3)\n"
                       "        movl %%edx, 28(%3)\n"
-                      "11:     movl 32(%4), %%eax\n"
-                      "61:     movl 36(%4), %%edx\n"
+                      "11:     "__copyuser_seg" movl 32(%4), %%eax\n"
+                      "61:     "__copyuser_seg" movl 36(%4), %%edx\n"
                       "        movl %%eax, 32(%3)\n"
                       "        movl %%edx, 36(%3)\n"
-                      "12:     movl 40(%4), %%eax\n"
-                      "71:     movl 44(%4), %%edx\n"
+                      "12:     "__copyuser_seg" movl 40(%4), %%eax\n"
+                      "71:     "__copyuser_seg" movl 44(%4), %%edx\n"
                       "        movl %%eax, 40(%3)\n"
                       "        movl %%edx, 44(%3)\n"
-                      "13:     movl 48(%4), %%eax\n"
-                      "81:     movl 52(%4), %%edx\n"
+                      "13:     "__copyuser_seg" movl 48(%4), %%eax\n"
+                      "81:     "__copyuser_seg" movl 52(%4), %%edx\n"
                       "        movl %%eax, 48(%3)\n"
                       "        movl %%edx, 52(%3)\n"
-                      "14:     movl 56(%4), %%eax\n"
-                      "91:     movl 60(%4), %%edx\n"
+                      "14:     "__copyuser_seg" movl 56(%4), %%eax\n"
+                      "91:     "__copyuser_seg" movl 60(%4), %%edx\n"
                       "        movl %%eax, 56(%3)\n"
                       "        movl %%edx, 60(%3)\n"
                       "        addl $-64, %0\n"
@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
                       "        shrl  $2, %0\n"
                       "        andl $3, %%eax\n"
                       "        cld\n"
-                      "6:      rep; movsl\n"
+                      "6:      rep; "__copyuser_seg" movsl\n"
                       "        movl %%eax,%0\n"
-                      "7:      rep; movsb\n"
+                      "7:      rep; "__copyuser_seg" movsb\n"
                       "8:\n"
                       ".section .fixup,\"ax\"\n"
                       "9:      lea 0(%%eax,%0,4),%0\n"
@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
 
        __asm__ __volatile__(
               "        .align 2,0x90\n"
-              "0:      movl 32(%4), %%eax\n"
+              "0:      "__copyuser_seg" movl 32(%4), %%eax\n"
               "        cmpl $67, %0\n"
               "        jbe 2f\n"
-              "1:      movl 64(%4), %%eax\n"
+              "1:      "__copyuser_seg" movl 64(%4), %%eax\n"
               "        .align 2,0x90\n"
-              "2:      movl 0(%4), %%eax\n"
-              "21:     movl 4(%4), %%edx\n"
+              "2:      "__copyuser_seg" movl 0(%4), %%eax\n"
+              "21:     "__copyuser_seg" movl 4(%4), %%edx\n"
               "        movnti %%eax, 0(%3)\n"
               "        movnti %%edx, 4(%3)\n"
-              "3:      movl 8(%4), %%eax\n"
-              "31:     movl 12(%4),%%edx\n"
+              "3:      "__copyuser_seg" movl 8(%4), %%eax\n"
+              "31:     "__copyuser_seg" movl 12(%4),%%edx\n"
               "        movnti %%eax, 8(%3)\n"
               "        movnti %%edx, 12(%3)\n"
-              "4:      movl 16(%4), %%eax\n"
-              "41:     movl 20(%4), %%edx\n"
+              "4:      "__copyuser_seg" movl 16(%4), %%eax\n"
+              "41:     "__copyuser_seg" movl 20(%4), %%edx\n"
               "        movnti %%eax, 16(%3)\n"
               "        movnti %%edx, 20(%3)\n"
-              "10:     movl 24(%4), %%eax\n"
-              "51:     movl 28(%4), %%edx\n"
+              "10:     "__copyuser_seg" movl 24(%4), %%eax\n"
+              "51:     "__copyuser_seg" movl 28(%4), %%edx\n"
               "        movnti %%eax, 24(%3)\n"
               "        movnti %%edx, 28(%3)\n"
-              "11:     movl 32(%4), %%eax\n"
-              "61:     movl 36(%4), %%edx\n"
+              "11:     "__copyuser_seg" movl 32(%4), %%eax\n"
+              "61:     "__copyuser_seg" movl 36(%4), %%edx\n"
               "        movnti %%eax, 32(%3)\n"
               "        movnti %%edx, 36(%3)\n"
-              "12:     movl 40(%4), %%eax\n"
-              "71:     movl 44(%4), %%edx\n"
+              "12:     "__copyuser_seg" movl 40(%4), %%eax\n"
+              "71:     "__copyuser_seg" movl 44(%4), %%edx\n"
               "        movnti %%eax, 40(%3)\n"
               "        movnti %%edx, 44(%3)\n"
-              "13:     movl 48(%4), %%eax\n"
-              "81:     movl 52(%4), %%edx\n"
+              "13:     "__copyuser_seg" movl 48(%4), %%eax\n"
+              "81:     "__copyuser_seg" movl 52(%4), %%edx\n"
               "        movnti %%eax, 48(%3)\n"
               "        movnti %%edx, 52(%3)\n"
-              "14:     movl 56(%4), %%eax\n"
-              "91:     movl 60(%4), %%edx\n"
+              "14:     "__copyuser_seg" movl 56(%4), %%eax\n"
+              "91:     "__copyuser_seg" movl 60(%4), %%edx\n"
               "        movnti %%eax, 56(%3)\n"
               "        movnti %%edx, 60(%3)\n"
               "        addl $-64, %0\n"
@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
               "        shrl  $2, %0\n"
               "        andl $3, %%eax\n"
               "        cld\n"
-              "6:      rep; movsl\n"
+              "6:      rep; "__copyuser_seg" movsl\n"
               "        movl %%eax,%0\n"
-              "7:      rep; movsb\n"
+              "7:      rep; "__copyuser_seg" movsb\n"
               "8:\n"
               ".section .fixup,\"ax\"\n"
               "9:      lea 0(%%eax,%0,4),%0\n"
@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
 
        __asm__ __volatile__(
               "        .align 2,0x90\n"
-              "0:      movl 32(%4), %%eax\n"
+              "0:      "__copyuser_seg" movl 32(%4), %%eax\n"
               "        cmpl $67, %0\n"
               "        jbe 2f\n"
-              "1:      movl 64(%4), %%eax\n"
+              "1:      "__copyuser_seg" movl 64(%4), %%eax\n"
               "        .align 2,0x90\n"
-              "2:      movl 0(%4), %%eax\n"
-              "21:     movl 4(%4), %%edx\n"
+              "2:      "__copyuser_seg" movl 0(%4), %%eax\n"
+              "21:     "__copyuser_seg" movl 4(%4), %%edx\n"
               "        movnti %%eax, 0(%3)\n"
               "        movnti %%edx, 4(%3)\n"
-              "3:      movl 8(%4), %%eax\n"
-              "31:     movl 12(%4),%%edx\n"
+              "3:      "__copyuser_seg" movl 8(%4), %%eax\n"
+              "31:     "__copyuser_seg" movl 12(%4),%%edx\n"
               "        movnti %%eax, 8(%3)\n"
               "        movnti %%edx, 12(%3)\n"
-              "4:      movl 16(%4), %%eax\n"
-              "41:     movl 20(%4), %%edx\n"
+              "4:      "__copyuser_seg" movl 16(%4), %%eax\n"
+              "41:     "__copyuser_seg" movl 20(%4), %%edx\n"
               "        movnti %%eax, 16(%3)\n"
               "        movnti %%edx, 20(%3)\n"
-              "10:     movl 24(%4), %%eax\n"
-              "51:     movl 28(%4), %%edx\n"
+              "10:     "__copyuser_seg" movl 24(%4), %%eax\n"
+              "51:     "__copyuser_seg" movl 28(%4), %%edx\n"
               "        movnti %%eax, 24(%3)\n"
               "        movnti %%edx, 28(%3)\n"
-              "11:     movl 32(%4), %%eax\n"
-              "61:     movl 36(%4), %%edx\n"
+              "11:     "__copyuser_seg" movl 32(%4), %%eax\n"
+              "61:     "__copyuser_seg" movl 36(%4), %%edx\n"
               "        movnti %%eax, 32(%3)\n"
               "        movnti %%edx, 36(%3)\n"
-              "12:     movl 40(%4), %%eax\n"
-              "71:     movl 44(%4), %%edx\n"
+              "12:     "__copyuser_seg" movl 40(%4), %%eax\n"
+              "71:     "__copyuser_seg" movl 44(%4), %%edx\n"
               "        movnti %%eax, 40(%3)\n"
               "        movnti %%edx, 44(%3)\n"
-              "13:     movl 48(%4), %%eax\n"
-              "81:     movl 52(%4), %%edx\n"
+              "13:     "__copyuser_seg" movl 48(%4), %%eax\n"
+              "81:     "__copyuser_seg" movl 52(%4), %%edx\n"
               "        movnti %%eax, 48(%3)\n"
               "        movnti %%edx, 52(%3)\n"
-              "14:     movl 56(%4), %%eax\n"
-              "91:     movl 60(%4), %%edx\n"
+              "14:     "__copyuser_seg" movl 56(%4), %%eax\n"
+              "91:     "__copyuser_seg" movl 60(%4), %%edx\n"
               "        movnti %%eax, 56(%3)\n"
               "        movnti %%edx, 60(%3)\n"
               "        addl $-64, %0\n"
@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
               "        shrl  $2, %0\n"
               "        andl $3, %%eax\n"
               "        cld\n"
-              "6:      rep; movsl\n"
+              "6:      rep; "__copyuser_seg" movsl\n"
               "        movl %%eax,%0\n"
-              "7:      rep; movsb\n"
+              "7:      rep; "__copyuser_seg" movsb\n"
               "8:\n"
               ".section .fixup,\"ax\"\n"
               "9:      lea 0(%%eax,%0,4),%0\n"
@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
  */
 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
                                        unsigned long size);
-unsigned long __copy_user_intel(void __user *to, const void *from,
+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
+                                       unsigned long size);
+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
                                        unsigned long size);
 unsigned long __copy_user_zeroing_intel_nocache(void *to,
                                const void __user *from, unsigned long size);
 #endif /* CONFIG_X86_INTEL_USERCOPY */
 
 /* Generic arbitrary sized copy.  */
-#define __copy_user(to, from, size)                                    \
+#define __copy_user(to, from, size, prefix, set, restore)              \
 do {                                                                   \
        int __d0, __d1, __d2;                                           \
        __asm__ __volatile__(                                           \
+               set                                                     \
                "       cmp  $7,%0\n"                                   \
                "       jbe  1f\n"                                      \
                "       movl %1,%0\n"                                   \
                "       negl %0\n"                                      \
                "       andl $7,%0\n"                                   \
                "       subl %0,%3\n"                                   \
-               "4:     rep; movsb\n"                                   \
+               "4:     rep; "prefix"movsb\n"                           \
                "       movl %3,%0\n"                                   \
                "       shrl $2,%0\n"                                   \
                "       andl $3,%3\n"                                   \
                "       .align 2,0x90\n"                                \
-               "0:     rep; movsl\n"                                   \
+               "0:     rep; "prefix"movsl\n"                           \
                "       movl %3,%0\n"                                   \
-               "1:     rep; movsb\n"                                   \
+               "1:     rep; "prefix"movsb\n"                           \
                "2:\n"                                                  \
+               restore                                                 \
                ".section .fixup,\"ax\"\n"                              \
                "5:     addl %3,%0\n"                                   \
                "       jmp 2b\n"                                       \
@@ -538,14 +650,14 @@ do {                                                                      \
                "       negl %0\n"                                      \
                "       andl $7,%0\n"                                   \
                "       subl %0,%3\n"                                   \
-               "4:     rep; movsb\n"                                   \
+               "4:     rep; "__copyuser_seg"movsb\n"                   \
                "       movl %3,%0\n"                                   \
                "       shrl $2,%0\n"                                   \
                "       andl $3,%3\n"                                   \
                "       .align 2,0x90\n"                                \
-               "0:     rep; movsl\n"                                   \
+               "0:     rep; "__copyuser_seg"movsl\n"                   \
                "       movl %3,%0\n"                                   \
-               "1:     rep; movsb\n"                                   \
+               "1:     rep; "__copyuser_seg"movsb\n"                   \
                "2:\n"                                                  \
                ".section .fixup,\"ax\"\n"                              \
                "5:     addl %3,%0\n"                                   \
@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
 {
        stac();
        if (movsl_is_ok(to, from, n))
-               __copy_user(to, from, n);
+               __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
        else
-               n = __copy_user_intel(to, from, n);
+               n = __generic_copy_to_user_intel(to, from, n);
        clac();
        return n;
 }
@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
 {
        stac();
        if (movsl_is_ok(to, from, n))
-               __copy_user(to, from, n);
+               __copy_user(to, from, n, __copyuser_seg, "", "");
        else
-               n = __copy_user_intel((void __user *)to,
-                                     (const void *)from, n);
+               n = __generic_copy_from_user_intel(to, from, n);
        clac();
        return n;
 }
@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
        if (n > 64 && cpu_has_xmm2)
                n = __copy_user_intel_nocache(to, from, n);
        else
-               __copy_user(to, from, n);
+               __copy_user(to, from, n, __copyuser_seg, "", "");
 #else
-       __copy_user(to, from, n);
+       __copy_user(to, from, n, __copyuser_seg, "", "");
 #endif
        clac();
        return n;
 }
 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
 
-/**
- * copy_to_user: - Copy a block of data into user space.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only.  This function may sleep.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+void __set_fs(mm_segment_t x)
 {
-       if (access_ok(VERIFY_WRITE, to, n))
-               n = __copy_to_user(to, from, n);
-       return n;
+       switch (x.seg) {
+       case 0:
+               loadsegment(gs, 0);
+               break;
+       case TASK_SIZE_MAX:
+               loadsegment(gs, __USER_DS);
+               break;
+       case -1UL:
+               loadsegment(gs, __KERNEL_DS);
+               break;
+       default:
+               BUG();
+       }
 }
-EXPORT_SYMBOL(_copy_to_user);
+EXPORT_SYMBOL(__set_fs);
 
-/**
- * copy_from_user: - Copy a block of data from user space.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only.  This function may sleep.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
+void set_fs(mm_segment_t x)
 {
-       if (access_ok(VERIFY_READ, from, n))
-               n = __copy_from_user(to, from, n);
-       else
-               memset(to, 0, n);
-       return n;
+       current_thread_info()->addr_limit = x;
+       __set_fs(x);
 }
-EXPORT_SYMBOL(_copy_from_user);
+EXPORT_SYMBOL(set_fs);
+#endif
index c905e89e19feb5ff79778a11a162dfbaa88852a9..01ab9283a971f99f77c18f62ad80a513f16975dd 100644 (file)
@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
        might_fault();
        /* no memory constraint because it doesn't change any memory gcc knows
           about */
+       pax_open_userland();
        stac();
        asm volatile(
                "       testq  %[size8],%[size8]\n"
@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
                _ASM_EXTABLE(0b,3b)
                _ASM_EXTABLE(1b,2b)
                : [size8] "=&c"(size), [dst] "=&D" (__d0)
-               : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
+               : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
                  [zero] "r" (0UL), [eight] "r" (8UL));
        clac();
+       pax_close_userland();
        return size;
 }
 EXPORT_SYMBOL(__clear_user);
@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
 }
 EXPORT_SYMBOL(clear_user);
 
-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
 {
-       if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { 
-               return copy_user_generic((__force void *)to, (__force void *)from, len);
-       } 
-       return len;             
+       if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
+               return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
+       return len;
 }
 EXPORT_SYMBOL(copy_in_user);
 
@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
  * it is not necessary to optimize tail handling.
  */
 __visible unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
 {
        char c;
        unsigned zero_len;
 
+       clac();
+       pax_close_userland();
        for (; len; --len, to++) {
                if (__get_user_nocheck(c, from++, sizeof(char)))
                        break;
@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
        for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
                if (__put_user_nocheck(c, to++, sizeof(char)))
                        break;
-       clac();
        return len;
 }
index ecfdc46a024a45ae8f73f29b3b6107eb6e54ee17..55b930909dca16281ce143b89bc8d634b5cc9b4e 100644 (file)
@@ -32,3 +32,7 @@ obj-$(CONFIG_NUMA_EMU)                += numa_emulation.o
 obj-$(CONFIG_MEMTEST)          += memtest.o
 
 obj-$(CONFIG_X86_INTEL_MPX)    += mpx.o
+
+quote:="
+obj-$(CONFIG_X86_64)           += uderef_64.o
+CFLAGS_uderef_64.o             := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
index 903ec1e9c3263f7f7ce6e1f5a59e7a32b224dc85..c4166b27b1936a8d4d4509603afb9ed82fca0098 100644 (file)
@@ -6,12 +6,24 @@
 static inline unsigned long
 ex_insn_addr(const struct exception_table_entry *x)
 {
-       return (unsigned long)&x->insn + x->insn;
+       unsigned long reloc = 0;
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+       reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+#endif
+
+       return (unsigned long)&x->insn + x->insn + reloc;
 }
 static inline unsigned long
 ex_fixup_addr(const struct exception_table_entry *x)
 {
-       return (unsigned long)&x->fixup + x->fixup;
+       unsigned long reloc = 0;
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+       reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+#endif
+
+       return (unsigned long)&x->fixup + x->fixup + reloc;
 }
 
 int fixup_exception(struct pt_regs *regs)
@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
        unsigned long new_ip;
 
 #ifdef CONFIG_PNPBIOS
-       if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
+       if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
                extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
                extern u32 pnp_bios_is_utter_crap;
                pnp_bios_is_utter_crap = 1;
@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
                i += 4;
                p->fixup -= i;
                i += 4;
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+               BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
+               p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+               p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+#endif
+
        }
 }
 
index e3ff27a5b6348ffb2dcff6f592abafe48b6b6396..f38f7c05c1f184a269fcaf86980fe982faed6345 100644 (file)
 #include <linux/hugetlb.h>             /* hstate_index_to_shift        */
 #include <linux/prefetch.h>            /* prefetchw                    */
 #include <linux/context_tracking.h>    /* exception_enter(), ...       */
+#include <linux/unistd.h>
+#include <linux/compiler.h>
 
 #include <asm/traps.h>                 /* dotraplinkage, ...           */
 #include <asm/pgalloc.h>               /* pgd_*(), ...                 */
 #include <asm/kmemcheck.h>             /* kmemcheck_*(), ...           */
 #include <asm/fixmap.h>                        /* VSYSCALL_ADDR                */
 #include <asm/vsyscall.h>              /* emulate_vsyscall             */
+#include <asm/tlbflush.h>
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+#include <asm/stacktrace.h>
+#endif
 
 #define CREATE_TRACE_POINTS
 #include <asm/trace/exceptions.h>
@@ -59,7 +66,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
        int ret = 0;
 
        /* kprobe_running() needs smp_processor_id() */
-       if (kprobes_built_in() && !user_mode_vm(regs)) {
+       if (kprobes_built_in() && !user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 14))
                        ret = 1;
@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
                return !instr_lo || (instr_lo>>1) == 1;
        case 0x00:
                /* Prefetch instruction is 0x0F0D or 0x0F18 */
-               if (probe_kernel_address(instr, opcode))
+               if (user_mode(regs)) {
+                       if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
+                               return 0;
+               } else if (probe_kernel_address(instr, opcode))
                        return 0;
 
                *prefetch = (instr_lo == 0xF) &&
@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
        while (instr < max_instr) {
                unsigned char opcode;
 
-               if (probe_kernel_address(instr, opcode))
+               if (user_mode(regs)) {
+                       if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
+                               break;
+               } else if (probe_kernel_address(instr, opcode))
                        break;
 
                instr++;
@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
        force_sig_info(si_signo, &info, tsk);
 }
 
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
+#endif
+
+#ifdef CONFIG_PAX_EMUTRAMP
+static int pax_handle_fetch_fault(struct pt_regs *regs);
+#endif
+
+#ifdef CONFIG_PAX_PAGEEXEC
+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+
+       pgd = pgd_offset(mm, address);
+       if (!pgd_present(*pgd))
+               return NULL;
+       pud = pud_offset(pgd, address);
+       if (!pud_present(*pud))
+               return NULL;
+       pmd = pmd_offset(pud, address);
+       if (!pmd_present(*pmd))
+               return NULL;
+       return pmd;
+}
+#endif
+
 DEFINE_SPINLOCK(pgd_lock);
 LIST_HEAD(pgd_list);
 
@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
        for (address = VMALLOC_START & PMD_MASK;
             address >= TASK_SIZE && address < FIXADDR_TOP;
             address += PMD_SIZE) {
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+               unsigned long cpu;
+#else
                struct page *page;
+#endif
 
                spin_lock(&pgd_lock);
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+               for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
+                       pgd_t *pgd = get_cpu_pgd(cpu, user);
+                       pmd_t *ret;
+
+                       ret = vmalloc_sync_one(pgd, address);
+                       if (!ret)
+                               break;
+                       pgd = get_cpu_pgd(cpu, kernel);
+#else
                list_for_each_entry(page, &pgd_list, lru) {
+                       pgd_t *pgd;
                        spinlock_t *pgt_lock;
                        pmd_t *ret;
 
@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 
                        spin_lock(pgt_lock);
-                       ret = vmalloc_sync_one(page_address(page), address);
+                       pgd = page_address(page);
+#endif
+
+                       ret = vmalloc_sync_one(pgd, address);
+
+#ifndef CONFIG_PAX_PER_CPU_PGD
                        spin_unlock(pgt_lock);
+#endif
 
                        if (!ret)
                                break;
@@ -281,6 +345,12 @@ static noinline int vmalloc_fault(unsigned long address)
         * an interrupt in the middle of a task switch..
         */
        pgd_paddr = read_cr3();
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+       BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
+       vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
+#endif
+
        pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
        if (!pmd_k)
                return -1;
@@ -377,11 +447,25 @@ static noinline int vmalloc_fault(unsigned long address)
         * happen within a race in page table update. In the later
         * case just flush:
         */
-       pgd = pgd_offset(current->active_mm, address);
+
        pgd_ref = pgd_offset_k(address);
        if (pgd_none(*pgd_ref))
                return -1;
 
+#ifdef CONFIG_PAX_PER_CPU_PGD
+       BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
+       pgd = pgd_offset_cpu(smp_processor_id(), user, address);
+       if (pgd_none(*pgd)) {
+               set_pgd(pgd, *pgd_ref);
+               arch_flush_lazy_mmu_mode();
+       } else {
+               BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+       }
+       pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
+#else
+       pgd = pgd_offset(current->active_mm, address);
+#endif
+
        if (pgd_none(*pgd)) {
                set_pgd(pgd, *pgd_ref);
                arch_flush_lazy_mmu_mode();
@@ -548,7 +632,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
 static int is_errata100(struct pt_regs *regs, unsigned long address)
 {
 #ifdef CONFIG_X86_64
-       if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
+       if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
                return 1;
 #endif
        return 0;
@@ -575,9 +659,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
 }
 
 static const char nx_warning[] = KERN_CRIT
-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
 static const char smep_warning[] = KERN_CRIT
-"unable to execute userspace code (SMEP?) (uid: %d)\n";
+"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
 
 static void
 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
@@ -586,7 +670,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
        if (!oops_may_print())
                return;
 
-       if (error_code & PF_INSTR) {
+       if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
                unsigned int level;
                pgd_t *pgd;
                pte_t *pte;
@@ -597,12 +681,24 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
                pte = lookup_address_in_pgd(pgd, address, &level);
 
                if (pte && pte_present(*pte) && !pte_exec(*pte))
-                       printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
+                       printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
                if (pte && pte_present(*pte) && pte_exec(*pte) &&
                                (pgd_flags(*pgd) & _PAGE_USER) &&
                                (read_cr4() & X86_CR4_SMEP))
-                       printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
+                       printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
+       }
+
+#ifdef CONFIG_PAX_KERNEXEC
+       if (init_mm.start_code <= address && address < init_mm.end_code) {
+               if (current->signal->curr_ip)
+                       printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
+                                       &current->signal->curr_ip, current->comm, task_pid_nr(current),
+                                       from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
+               else
+                       printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
+                                       from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
        }
+#endif
 
        printk(KERN_ALERT "BUG: unable to handle kernel ");
        if (address < PAGE_SIZE)
@@ -782,6 +878,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                                return;
                }
 #endif
+
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+               if (pax_is_fetch_fault(regs, error_code, address)) {
+
+#ifdef CONFIG_PAX_EMUTRAMP
+                       switch (pax_handle_fetch_fault(regs)) {
+                       case 2:
+                               return;
+                       }
+#endif
+
+                       pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
+                       do_group_exit(SIGKILL);
+               }
+#endif
+
                /* Kernel addresses are always protection faults: */
                if (address >= TASK_SIZE)
                        error_code |= PF_PROT;
@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
        if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
                printk(KERN_ERR
        "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
-                       tsk->comm, tsk->pid, address);
+                       tsk->comm, task_pid_nr(tsk), address);
                code = BUS_MCEERR_AR;
        }
 #endif
@@ -916,6 +1028,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
        return 1;
 }
 
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
+{
+       pte_t *pte;
+       pmd_t *pmd;
+       spinlock_t *ptl;
+       unsigned char pte_mask;
+
+       if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
+           !(mm->pax_flags & MF_PAX_PAGEEXEC))
+               return 0;
+
+       /* PaX: it's our fault, let's handle it if we can */
+
+       /* PaX: take a look at read faults before acquiring any locks */
+       if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
+               /* instruction fetch attempt from a protected page in user mode */
+               up_read(&mm->mmap_sem);
+
+#ifdef CONFIG_PAX_EMUTRAMP
+               switch (pax_handle_fetch_fault(regs)) {
+               case 2:
+                       return 1;
+               }
+#endif
+
+               pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
+               do_group_exit(SIGKILL);
+       }
+
+       pmd = pax_get_pmd(mm, address);
+       if (unlikely(!pmd))
+               return 0;
+
+       pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+       if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
+               pte_unmap_unlock(pte, ptl);
+               return 0;
+       }
+
+       if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
+               /* write attempt to a protected page in user mode */
+               pte_unmap_unlock(pte, ptl);
+               return 0;
+       }
+
+#ifdef CONFIG_SMP
+       if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
+#else
+       if (likely(address > get_limit(regs->cs)))
+#endif
+       {
+               set_pte(pte, pte_mkread(*pte));
+               __flush_tlb_one(address);
+               pte_unmap_unlock(pte, ptl);
+               up_read(&mm->mmap_sem);
+               return 1;
+       }
+
+       pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
+
+       /*
+        * PaX: fill DTLB with user rights and retry
+        */
+       __asm__ __volatile__ (
+               "orb %2,(%1)\n"
+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
+/*
+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
+ * page fault when examined during a TLB load attempt. this is true not only
+ * for PTEs holding a non-present entry but also present entries that will
+ * raise a page fault (such as those set up by PaX, or the copy-on-write
+ * mechanism). in effect it means that we do *not* need to flush the TLBs
+ * for our target pages since their PTEs are simply not in the TLBs at all.
+
+ * the best thing in omitting it is that we gain around 15-20% speed in the
+ * fast path of the page fault handler and can get rid of tracing since we
+ * can no longer flush unintended entries.
+ */
+               "invlpg (%0)\n"
+#endif
+               __copyuser_seg"testb $0,(%0)\n"
+               "xorb %3,(%1)\n"
+               :
+               : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
+               : "memory", "cc");
+       pte_unmap_unlock(pte, ptl);
+       up_read(&mm->mmap_sem);
+       return 1;
+}
+#endif
+
 /*
  * Handle a spurious fault caused by a stale TLB entry.
  *
@@ -1001,6 +1206,9 @@ int show_unhandled_signals = 1;
 static inline int
 access_error(unsigned long error_code, struct vm_area_struct *vma)
 {
+       if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
+               return 1;
+
        if (error_code & PF_WRITE) {
                /* write, present and write, not present: */
                if (unlikely(!(vma->vm_flags & VM_WRITE)))
@@ -1035,7 +1243,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
        if (error_code & PF_USER)
                return false;
 
-       if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
+       if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
                return false;
 
        return true;
@@ -1063,6 +1271,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
        tsk = current;
        mm = tsk->mm;
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+       if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
+               if (!search_exception_tables(regs->ip)) {
+                       printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
+                       bad_area_nosemaphore(regs, error_code, address);
+                       return;
+               }
+               if (address < pax_user_shadow_base) {
+                       printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
+                       printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
+                       show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
+               } else
+                       address -= pax_user_shadow_base;
+       }
+#endif
+
        /*
         * Detect and handle instructions that would cause a page fault for
         * both a tracked kernel page and a userspace page.
@@ -1140,7 +1364,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
         * User-mode registers count as a user access even for any
         * potential system fault or CPU buglet:
         */
-       if (user_mode_vm(regs)) {
+       if (user_mode(regs)) {
                local_irq_enable();
                error_code |= PF_USER;
                flags |= FAULT_FLAG_USER;
@@ -1187,6 +1411,11 @@ retry:
                might_sleep();
        }
 
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
+       if (pax_handle_pageexec_fault(regs, mm, address, error_code))
+               return;
+#endif
+
        vma = find_vma(mm, address);
        if (unlikely(!vma)) {
                bad_area(regs, error_code, address);
@@ -1198,18 +1427,24 @@ retry:
                bad_area(regs, error_code, address);
                return;
        }
-       if (error_code & PF_USER) {
-               /*
-                * Accessing the stack below %sp is always a bug.
-                * The large cushion allows instructions like enter
-                * and pusha to work. ("enter $65535, $31" pushes
-                * 32 pointers and then decrements %sp by 65535.)
-                */
-               if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
-                       bad_area(regs, error_code, address);
-                       return;
-               }
+       /*
+        * Accessing the stack below %sp is always a bug.
+        * The large cushion allows instructions like enter
+        * and pusha to work. ("enter $65535, $31" pushes
+        * 32 pointers and then decrements %sp by 65535.)
+        */
+       if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
+               bad_area(regs, error_code, address);
+               return;
+       }
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
+               bad_area(regs, error_code, address);
+               return;
        }
+#endif
+
        if (unlikely(expand_stack(vma, address))) {
                bad_area(regs, error_code, address);
                return;
@@ -1329,3 +1564,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
 }
 NOKPROBE_SYMBOL(trace_do_page_fault);
 #endif /* CONFIG_TRACING */
+
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
+{
+       struct mm_struct *mm = current->mm;
+       unsigned long ip = regs->ip;
+
+       if (v8086_mode(regs))
+               ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
+
+#ifdef CONFIG_PAX_PAGEEXEC
+       if (mm->pax_flags & MF_PAX_PAGEEXEC) {
+               if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
+                       return true;
+               if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
+                       return true;
+               return false;
+       }
+#endif
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (mm->pax_flags & MF_PAX_SEGMEXEC) {
+       if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
+                       return true;
+               return false;
+       }
+#endif
+
+       return false;
+}
+#endif
+
+#ifdef CONFIG_PAX_EMUTRAMP
+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
+{
+       int err;
+
+       do { /* PaX: libffi trampoline emulation */
+               unsigned char mov, jmp;
+               unsigned int addr1, addr2;
+
+#ifdef CONFIG_X86_64
+               if ((regs->ip + 9) >> 32)
+                       break;
+#endif
+
+               err = get_user(mov, (unsigned char __user *)regs->ip);
+               err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
+               err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
+               err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
+
+               if (err)
+                       break;
+
+               if (mov == 0xB8 && jmp == 0xE9) {
+                       regs->ax = addr1;
+                       regs->ip = (unsigned int)(regs->ip + addr2 + 10);
+                       return 2;
+               }
+       } while (0);
+
+       do { /* PaX: gcc trampoline emulation #1 */
+               unsigned char mov1, mov2;
+               unsigned short jmp;
+               unsigned int addr1, addr2;
+
+#ifdef CONFIG_X86_64
+               if ((regs->ip + 11) >> 32)
+                       break;
+#endif
+
+               err = get_user(mov1, (unsigned char __user *)regs->ip);
+               err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
+               err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
+               err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
+               err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
+
+               if (err)
+                       break;
+
+               if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
+                       regs->cx = addr1;
+                       regs->ax = addr2;
+                       regs->ip = addr2;
+                       return 2;
+               }
+       } while (0);
+
+       do { /* PaX: gcc trampoline emulation #2 */
+               unsigned char mov, jmp;
+               unsigned int addr1, addr2;
+
+#ifdef CONFIG_X86_64
+               if ((regs->ip + 9) >> 32)
+                       break;
+#endif
+
+               err = get_user(mov, (unsigned char __user *)regs->ip);
+               err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
+               err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
+               err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
+
+               if (err)
+                       break;
+
+               if (mov == 0xB9 && jmp == 0xE9) {
+                       regs->cx = addr1;
+                       regs->ip = (unsigned int)(regs->ip + addr2 + 10);
+                       return 2;
+               }
+       } while (0);
+
+       return 1; /* PaX in action */
+}
+
+#ifdef CONFIG_X86_64
+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
+{
+       int err;
+
+       do { /* PaX: libffi trampoline emulation */
+               unsigned short mov1, mov2, jmp1;
+               unsigned char stcclc, jmp2;
+               unsigned long addr1, addr2;
+
+               err = get_user(mov1, (unsigned short __user *)regs->ip);
+               err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
+               err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
+               err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
+               err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
+               err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
+               err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
+
+               if (err)
+                       break;
+
+               if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
+                       regs->r11 = addr1;
+                       regs->r10 = addr2;
+                       if (stcclc == 0xF8)
+                               regs->flags &= ~X86_EFLAGS_CF;
+                       else
+                               regs->flags |= X86_EFLAGS_CF;
+                       regs->ip = addr1;
+                       return 2;
+               }
+       } while (0);
+
+       do { /* PaX: gcc trampoline emulation #1 */
+               unsigned short mov1, mov2, jmp1;
+               unsigned char jmp2;
+               unsigned int addr1;
+               unsigned long addr2;
+
+               err = get_user(mov1, (unsigned short __user *)regs->ip);
+               err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
+               err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
+               err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
+               err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
+               err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
+
+               if (err)
+                       break;
+
+               if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
+                       regs->r11 = addr1;
+                       regs->r10 = addr2;
+                       regs->ip = addr1;
+                       return 2;
+               }
+       } while (0);
+
+       do { /* PaX: gcc trampoline emulation #2 */
+               unsigned short mov1, mov2, jmp1;
+               unsigned char jmp2;
+               unsigned long addr1, addr2;
+
+               err = get_user(mov1, (unsigned short __user *)regs->ip);
+               err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
+               err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
+               err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
+               err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
+               err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
+
+               if (err)
+                       break;
+
+               if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
+                       regs->r11 = addr1;
+                       regs->r10 = addr2;
+                       regs->ip = addr1;
+                       return 2;
+               }
+       } while (0);
+
+       return 1; /* PaX in action */
+}
+#endif
+
+/*
+ * PaX: decide what to do with offenders (regs->ip = fault address)
+ *
+ * returns 1 when task should be killed
+ *         2 when gcc trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+       if (v8086_mode(regs))
+               return 1;
+
+       if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
+               return 1;
+
+#ifdef CONFIG_X86_32
+       return pax_handle_fetch_fault_32(regs);
+#else
+       if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
+               return pax_handle_fetch_fault_32(regs);
+       else
+               return pax_handle_fetch_fault_64(regs);
+#endif
+}
+#endif
+
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+       long i;
+
+       printk(KERN_ERR "PAX: bytes at PC: ");
+       for (i = 0; i < 20; i++) {
+               unsigned char c;
+               if (get_user(c, (unsigned char __force_user *)pc+i))
+                       printk(KERN_CONT "?? ");
+               else
+                       printk(KERN_CONT "%02x ", c);
+       }
+       printk("\n");
+
+       printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
+       for (i = -1; i < 80 / (long)sizeof(long); i++) {
+               unsigned long c;
+               if (get_user(c, (unsigned long __force_user *)sp+i)) {
+#ifdef CONFIG_X86_32
+                       printk(KERN_CONT "???????? ");
+#else
+                       if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
+                               printk(KERN_CONT "???????? ???????? ");
+                       else
+                               printk(KERN_CONT "???????????????? ");
+#endif
+               } else {
+#ifdef CONFIG_X86_64
+                       if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
+                               printk(KERN_CONT "%08x ", (unsigned int)c);
+                               printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
+                       } else
+#endif
+                               printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
+               }
+       }
+       printk("\n");
+}
+#endif
+
+/**
+ * probe_kernel_write(): safely attempt to write to a location
+ * @dst: address to write to
+ * @src: pointer to the data that shall be written
+ * @size: size of the data chunk
+ *
+ * Safely write to address @dst from the buffer at @src.  If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
+{
+       long ret;
+       mm_segment_t old_fs = get_fs();
+
+       set_fs(KERNEL_DS);
+       pagefault_disable();
+       pax_open_kernel();
+       ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
+       pax_close_kernel();
+       pagefault_enable();
+       set_fs(old_fs);
+
+       return ret ? -EFAULT : 0;
+}
index 224b14235e967b62052c81eec48b2daf335f94fc..c2c94234aacae615938e1702ae41e43e46e98749 100644 (file)
@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
        addr = start;
        len = (unsigned long) nr_pages << PAGE_SHIFT;
        end = start + len;
-       if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+       if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
                                        (void __user *)start, len)))
                return 0;
 
@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                goto slow_irqon;
 #endif
 
+       if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
+                                       (void __user *)start, len)))
+               return 0;
+
        /*
         * XXX: batch / limit 'nr', to avoid large irq off latency
         * needs some instrumenting to determine the common sizes used by
index 4500142bc4aa46429cb2be41a7ee3407426f6155..53a363c83226bd6c3d75d69a11ac61fcac5efd25 100644 (file)
@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte-idx)));
+
+       pax_open_kernel();
        set_pte(kmap_pte-idx, mk_pte(page, prot));
+       pax_close_kernel();
+
        arch_flush_lazy_mmu_mode();
 
        return (void *)vaddr;
index 006cc914994b737138967b22ebbeafb7e1b9443c..bf05a83f34defd2ed710e33a4187b4fc4017b857 100644 (file)
@@ -86,23 +86,24 @@ int pud_huge(pud_t pud)
 #ifdef CONFIG_HUGETLB_PAGE
 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
-               unsigned long pgoff, unsigned long flags)
+               unsigned long pgoff, unsigned long flags, unsigned long offset)
 {
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;
-
+       
        info.flags = 0;
        info.length = len;
        info.low_limit = current->mm->mmap_legacy_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
+       info.threadstack_offset = offset;
        return vm_unmapped_area(&info);
 }
 
 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
-               unsigned long pgoff, unsigned long flags)
+               unsigned long pgoff, unsigned long flags, unsigned long offset)
 {
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;
@@ -114,6 +115,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
        info.high_limit = current->mm->mmap_base;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
+       info.threadstack_offset = offset;
        addr = vm_unmapped_area(&info);
 
        /*
@@ -126,6 +128,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_PAX_RANDMMAP
+               if (current->mm->pax_flags & MF_PAX_RANDMMAP)
+                       info.low_limit += current->mm->delta_mmap;
+#endif
+
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }
@@ -140,10 +148,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
+       unsigned long pax_task_size = TASK_SIZE;
+       unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
 
        if (len & ~huge_page_mask(h))
                return -EINVAL;
-       if (len > TASK_SIZE)
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (mm->pax_flags & MF_PAX_SEGMEXEC)
+               pax_task_size = SEGMEXEC_TASK_SIZE;
+#endif
+
+       pax_task_size -= PAGE_SIZE;
+
+       if (len > pax_task_size)
                return -ENOMEM;
 
        if (flags & MAP_FIXED) {
@@ -152,19 +170,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                return addr;
        }
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+               if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
-                               pgoff, flags);
+                               pgoff, flags, offset);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
-                               pgoff, flags);
+                               pgoff, flags, offset);
 }
 #endif /* CONFIG_HUGETLB_PAGE */
 
index 079c3b6a3ff181277a7cb4270895f27d9a1d6f8b..706902371828af3843e2356c2851827401aabb1a 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/swap.h>
 #include <linux/memblock.h>
 #include <linux/bootmem.h>     /* for max_low_pfn */
+#include <linux/tboot.h>
 
 #include <asm/cacheflush.h>
 #include <asm/e820.h>
@@ -17,6 +18,8 @@
 #include <asm/proto.h>
 #include <asm/dma.h>           /* for MAX_DMA_PFN */
 #include <asm/microcode.h>
+#include <asm/desc.h>
+#include <asm/bios_ebda.h>
 
 /*
  * We need to define the tracepoints somewhere, and tlb.c
@@ -596,7 +599,18 @@ void __init init_mem_mapping(void)
        early_ioremap_page_table_range_init();
 #endif
 
+#ifdef CONFIG_PAX_PER_CPU_PGD
+       clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
+                       swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+                       KERNEL_PGD_PTRS);
+       clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
+                       swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+                       KERNEL_PGD_PTRS);
+       load_cr3(get_cpu_pgd(0, kernel));
+#else
        load_cr3(swapper_pg_dir);
+#endif
+
        __flush_tlb_all();
 
        early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
@@ -612,10 +626,40 @@ void __init init_mem_mapping(void)
  * Access has to be given to non-kernel-ram areas as well, these contain the PCI
  * mmio resources as well as potential bios/acpi data regions.
  */
+
+#ifdef CONFIG_GRKERNSEC_KMEM
+static unsigned int ebda_start __read_only;
+static unsigned int ebda_end __read_only;
+#endif
+
 int devmem_is_allowed(unsigned long pagenr)
 {
-       if (pagenr < 256)
+#ifdef CONFIG_GRKERNSEC_KMEM
+       /* allow BDA */
+       if (!pagenr)
+               return 1;
+       /* allow EBDA */
+       if (pagenr >= ebda_start && pagenr < ebda_end)
                return 1;
+       /* if tboot is in use, allow access to its hardcoded serial log range */
+       if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
+               return 1;
+#else
+       if (!pagenr)
+               return 1;
+#ifdef CONFIG_VM86
+       if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
+               return 1;
+#endif
+#endif
+
+       if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
+               return 1;
+#ifdef CONFIG_GRKERNSEC_KMEM
+       /* throw out everything else below 1MB */
+       if (pagenr <= 256)
+               return 0;
+#endif
        if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
                return 0;
        if (!page_is_ram(pagenr))
@@ -661,8 +705,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 #endif
 }
 
+#ifdef CONFIG_GRKERNSEC_KMEM
+static inline void gr_init_ebda(void)
+{
+       unsigned int ebda_addr;
+       unsigned int ebda_size = 0;
+
+       ebda_addr = get_bios_ebda();
+       if (ebda_addr) {
+               ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
+               ebda_size <<= 10;
+       }
+       if (ebda_addr && ebda_size) {
+               ebda_start = ebda_addr >> PAGE_SHIFT;
+               ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
+       } else {
+               ebda_start = 0x9f000 >> PAGE_SHIFT;
+               ebda_end = 0xa0000 >> PAGE_SHIFT;
+       }
+}
+#else
+static inline void gr_init_ebda(void) { }
+#endif
+
 void free_initmem(void)
 {
+#ifdef CONFIG_PAX_KERNEXEC
+#ifdef CONFIG_X86_32
+       /* PaX: limit KERNEL_CS to actual size */
+       unsigned long addr, limit;
+       struct desc_struct d;
+       int cpu;
+#else
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       unsigned long addr, end;
+#endif
+#endif
+
+       gr_init_ebda();
+
+#ifdef CONFIG_PAX_KERNEXEC
+#ifdef CONFIG_X86_32
+       limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
+       limit = (limit - 1UL) >> PAGE_SHIFT;
+
+       memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
+       for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+               pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
+               write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
+               write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
+       }
+
+       /* PaX: make KERNEL_CS read-only */
+       addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
+       if (!paravirt_enabled())
+               set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
+/*
+               for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
+                       pgd = pgd_offset_k(addr);
+                       pud = pud_offset(pgd, addr);
+                       pmd = pmd_offset(pud, addr);
+                       set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
+               }
+*/
+#ifdef CONFIG_X86_PAE
+       set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
+/*
+       for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
+               pgd = pgd_offset_k(addr);
+               pud = pud_offset(pgd, addr);
+               pmd = pmd_offset(pud, addr);
+       set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
+       }
+*/
+#endif
+
+#ifdef CONFIG_MODULES
+       set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
+#endif
+
+#else
+       /* PaX: make kernel code/rodata read-only, rest non-executable */
+       for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
+               pgd = pgd_offset_k(addr);
+               pud = pud_offset(pgd, addr);
+               pmd = pmd_offset(pud, addr);
+               if (!pmd_present(*pmd))
+                       continue;
+               if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
+                       set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
+               else
+                       set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
+       }
+
+       addr = (unsigned long)__va(__pa(__START_KERNEL_map));
+       end = addr + KERNEL_IMAGE_SIZE;
+       for (; addr < end; addr += PMD_SIZE) {
+       pgd = pgd_offset_k(addr);
+               pud = pud_offset(pgd, addr);
+               pmd = pmd_offset(pud, addr);
+               if (!pmd_present(*pmd))
+                       continue;
+               if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
+                       set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
+       }
+#endif
+
+       flush_tlb_all();
+#endif
+
        free_init_pages("unused kernel",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
index c8140e12816a51f77702b273289222071ecb2350..59257fccf6fe20e013bd6dd9ae5dbc11497e84e0 100644 (file)
@@ -61,33 +61,6 @@ static noinline int do_test_wp_bit(void);
 
 bool __read_mostly __vmalloc_start_set = false;
 
-/*
- * Creates a middle page table and puts a pointer to it in the
- * given global directory entry. This only returns the gd entry
- * in non-PAE compilation mode, since the middle layer is folded.
- */
-static pmd_t * __init one_md_table_init(pgd_t *pgd)
-{
-       pud_t *pud;
-       pmd_t *pmd_table;
-
-#ifdef CONFIG_X86_PAE
-       if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
-               pmd_table = (pmd_t *)alloc_low_page();
-               paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
-               set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
-               pud = pud_offset(pgd, 0);
-               BUG_ON(pmd_table != pmd_offset(pud, 0));
-
-               return pmd_table;
-       }
-#endif
-       pud = pud_offset(pgd, 0);
-       pmd_table = pmd_offset(pud, 0);
-
-       return pmd_table;
-}
-
 /*
  * Create a page table and place a pointer to it in a middle page
  * directory entry:
@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
                pte_t *page_table = (pte_t *)alloc_low_page();
 
                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+               set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
+#else
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
+#endif
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }
 
        return pte_offset_kernel(pmd, 0);
 }
 
+static pmd_t * __init one_md_table_init(pgd_t *pgd)
+{
+       pud_t *pud;
+       pmd_t *pmd_table;
+
+       pud = pud_offset(pgd, 0);
+       pmd_table = pmd_offset(pud, 0);
+
+       return pmd_table;
+}
+
 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
 {
        int pgd_idx = pgd_index(vaddr);
@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
+       pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;
        unsigned long count = page_table_range_init_count(start, end);
@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
        pgd = pgd_base + pgd_idx;
 
        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
-               pmd = one_md_table_init(pgd);
-               pmd = pmd + pmd_index(vaddr);
+               pud = pud_offset(pgd, vaddr);
+               pmd = pmd_offset(pud, vaddr);
+
+#ifdef CONFIG_X86_PAE
+               paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
+#endif
+
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                                                        pmd++, pmd_idx++) {
                        pte = page_table_kmap_check(one_page_table_init(pmd),
@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
        }
 }
 
-static inline int is_kernel_text(unsigned long addr)
+static inline int is_kernel_text(unsigned long start, unsigned long end)
 {
-       if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
-               return 1;
-       return 0;
+       if ((start >= ktla_ktva((unsigned long)_etext) ||
+            end <= ktla_ktva((unsigned long)_stext)) &&
+           (start >= ktla_ktva((unsigned long)_einittext) ||
+            end <= ktla_ktva((unsigned long)_sinittext)) &&
+
+#ifdef CONFIG_ACPI_SLEEP
+           (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
+#endif
+
+           (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
+               return 0;
+       return 1;
 }
 
 /*
@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
        unsigned long last_map_addr = end;
        unsigned long start_pfn, end_pfn;
        pgd_t *pgd_base = swapper_pg_dir;
-       int pgd_idx, pmd_idx, pte_ofs;
+       unsigned int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
+       pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m, pages_4k;
@@ -291,8 +295,13 @@ repeat:
        pfn = start_pfn;
        pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
-       for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
-               pmd = one_md_table_init(pgd);
+       for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
+               pud = pud_offset(pgd, 0);
+               pmd = pmd_offset(pud, 0);
+
+#ifdef CONFIG_X86_PAE
+               paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
+#endif
 
                if (pfn >= end_pfn)
                        continue;
@@ -304,14 +313,13 @@ repeat:
 #endif
                for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                     pmd++, pmd_idx++) {
-                       unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
+                       unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
 
                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         */
                        if (use_pse) {
-                               unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;
                                /*
                                 * first pass will use the same initial
@@ -322,11 +330,7 @@ repeat:
                                                 _PAGE_PSE);
 
                                pfn &= PMD_MASK >> PAGE_SHIFT;
-                               addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
-                                       PAGE_OFFSET + PAGE_SIZE-1;
-
-                               if (is_kernel_text(addr) ||
-                                   is_kernel_text(addr2))
+                               if (is_kernel_text(address, address + PMD_SIZE))
                                        prot = PAGE_KERNEL_LARGE_EXEC;
 
                                pages_2m++;
@@ -343,7 +347,7 @@ repeat:
                        pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                        pte += pte_ofs;
                        for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
-                            pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
+                            pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;
                                /*
                                 * first pass will use the same initial
@@ -351,7 +355,7 @@ repeat:
                                 */
                                pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
 
-                               if (is_kernel_text(addr))
+                               if (is_kernel_text(address, address + PAGE_SIZE))
                                        prot = PAGE_KERNEL_EXEC;
 
                                pages_4k++;
@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
 
                pud = pud_offset(pgd, va);
                pmd = pmd_offset(pud, va);
-               if (!pmd_present(*pmd))
+               if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
                        break;
 
                /* should not be large page here */
@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
 
 static void __init pagetable_init(void)
 {
-       pgd_t *pgd_base = swapper_pg_dir;
-
-       permanent_kmaps_init(pgd_base);
+       permanent_kmaps_init(swapper_pg_dir);
 }
 
-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL);
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
 /* user-defined highmem size */
@@ -787,10 +789,10 @@ void __init mem_init(void)
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,
 
-               (unsigned long)&_etext, (unsigned long)&_edata,
-               ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
+               (unsigned long)&_sdata, (unsigned long)&_edata,
+               ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
 
-               (unsigned long)&_text, (unsigned long)&_etext,
+               ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
 
        /*
@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
        if (!kernel_set_to_readonly)
                return;
 
+       start = ktla_ktva(start);
        pr_debug("Set kernel text: %lx - %lx for read write\n",
                 start, start+size);
 
@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
        if (!kernel_set_to_readonly)
                return;
 
+       start = ktla_ktva(start);
        pr_debug("Set kernel text: %lx - %lx for read only\n",
                 start, start+size);
 
@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;
 
+       start = ktla_ktva(start);
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);
index 30eb05ae7061624f7faee0077ce41bddb4991ef7..ae671ac414230fa336d2c707e53ecceb56cd6d7f 100644 (file)
@@ -150,7 +150,7 @@ early_param("gbpages", parse_direct_gbpages_on);
  * around without checking the pgd every time.
  */
 
-pteval_t __supported_pte_mask __read_mostly = ~0;
+pteval_t __supported_pte_mask __read_only = ~_PAGE_NX;
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
 int force_personality32;
@@ -183,7 +183,12 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
 
        for (address = start; address <= end; address += PGDIR_SIZE) {
                const pgd_t *pgd_ref = pgd_offset_k(address);
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+               unsigned long cpu;
+#else
                struct page *page;
+#endif
 
                /*
                 * When it is called after memory hot remove, pgd_none()
@@ -194,6 +199,25 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
                        continue;
 
                spin_lock(&pgd_lock);
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+               for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
+                       pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
+
+                       if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
+                               BUG_ON(pgd_page_vaddr(*pgd)
+                                      != pgd_page_vaddr(*pgd_ref));
+
+                       if (removed) {
+                               if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
+                                       pgd_clear(pgd);
+                       } else {
+                               if (pgd_none(*pgd))
+                                       set_pgd(pgd, *pgd_ref);
+                       }
+
+                       pgd = pgd_offset_cpu(cpu, kernel, address);
+#else
                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        spinlock_t *pgt_lock;
@@ -202,6 +226,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
                        /* the pgt_lock only for Xen */
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
                        spin_lock(pgt_lock);
+#endif
 
                        if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
                                BUG_ON(pgd_page_vaddr(*pgd)
@@ -215,7 +240,10 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
                                        set_pgd(pgd, *pgd_ref);
                        }
 
+#ifndef CONFIG_PAX_PER_CPU_PGD
                        spin_unlock(pgt_lock);
+#endif
+
                }
                spin_unlock(&pgd_lock);
        }
@@ -248,7 +276,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
 {
        if (pgd_none(*pgd)) {
                pud_t *pud = (pud_t *)spp_getpage();
-               pgd_populate(&init_mm, pgd, pud);
+               pgd_populate_kernel(&init_mm, pgd, pud);
                if (pud != pud_offset(pgd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
                               pud, pud_offset(pgd, 0));
@@ -260,7 +288,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
 {
        if (pud_none(*pud)) {
                pmd_t *pmd = (pmd_t *) spp_getpage();
-               pud_populate(&init_mm, pud, pmd);
+               pud_populate_kernel(&init_mm, pud, pmd);
                if (pmd != pmd_offset(pud, 0))
                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
                               pmd, pmd_offset(pud, 0));
@@ -289,7 +317,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
        pmd = fill_pmd(pud, vaddr);
        pte = fill_pte(pmd, vaddr);
 
+       pax_open_kernel();
        set_pte(pte, new_pte);
+       pax_close_kernel();
 
        /*
         * It's enough to flush this one mapping.
@@ -351,14 +381,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
                pgd = pgd_offset_k((unsigned long)__va(phys));
                if (pgd_none(*pgd)) {
                        pud = (pud_t *) spp_getpage();
-                       set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
-                                               _PAGE_USER));
+                       set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
                }
                pud = pud_offset(pgd, (unsigned long)__va(phys));
                if (pud_none(*pud)) {
                        pmd = (pmd_t *) spp_getpage();
-                       set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
-                                               _PAGE_USER));
+                       set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
                }
                pmd = pmd_offset(pud, phys);
                BUG_ON(!pmd_none(*pmd));
@@ -599,7 +627,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
                                              prot);
 
                spin_lock(&init_mm.page_table_lock);
-               pud_populate(&init_mm, pud, pmd);
+               pud_populate_kernel(&init_mm, pud, pmd);
                spin_unlock(&init_mm.page_table_lock);
        }
        __flush_tlb_all();
@@ -640,7 +668,7 @@ kernel_physical_mapping_init(unsigned long start,
                                                 page_size_mask);
 
                spin_lock(&init_mm.page_table_lock);
-               pgd_populate(&init_mm, pgd, pud);
+               pgd_populate_kernel(&init_mm, pgd, pud);
                spin_unlock(&init_mm.page_table_lock);
                pgd_changed = true;
        }
index 9ca35fc60cfeaa1a8c461f76956cd22d587226a7..4b2b7b75e5f96dc650c90ae1efb61a9431abb159 100644 (file)
@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+
+       pax_open_kernel();
        set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
+       pax_close_kernel();
+
        arch_flush_lazy_mmu_mode();
 
        return (void *)vaddr;
index fdf617c00e2fecad500e4a58c5fc71fc801a105d..b9e85bc2901afb2dd1fecb95e1dd0e3120c2c4e6 100644 (file)
@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
        unsigned long i;
 
        for (i = 0; i < nr_pages; ++i)
-               if (pfn_valid(start_pfn + i) &&
-                   !PageReserved(pfn_to_page(start_pfn + i)))
+               if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
+                   !PageReserved(pfn_to_page(start_pfn + i))))
                        return 1;
 
        WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
@@ -283,7 +283,7 @@ EXPORT_SYMBOL(ioremap_prot);
  *
  * Caller must ensure there is only one unmapping for the same pointer.
  */
-void iounmap(volatile void __iomem *addr)
+void iounmap(const volatile void __iomem *addr)
 {
        struct vm_struct *p, *o;
 
@@ -332,30 +332,29 @@ EXPORT_SYMBOL(iounmap);
  */
 void *xlate_dev_mem_ptr(phys_addr_t phys)
 {
-       void *addr;
-       unsigned long start = phys & PAGE_MASK;
-
        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
-       if (page_is_ram(start >> PAGE_SHIFT))
+       if (page_is_ram(phys >> PAGE_SHIFT))
+#ifdef CONFIG_HIGHMEM
+       if ((phys >> PAGE_SHIFT) < max_low_pfn)
+#endif
                return __va(phys);
 
-       addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
-       if (addr)
-               addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
-
-       return addr;
+       return (void __force *)ioremap_cache(phys, PAGE_SIZE);
 }
 
 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
 {
        if (page_is_ram(phys >> PAGE_SHIFT))
+#ifdef CONFIG_HIGHMEM
+       if ((phys >> PAGE_SHIFT) < max_low_pfn)
+#endif
                return;
 
        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
        return;
 }
 
-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
 
 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
 {
@@ -391,8 +390,7 @@ void __init early_ioremap_init(void)
        early_ioremap_setup();
 
        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
-       memset(bm_pte, 0, sizeof(bm_pte));
-       pmd_populate_kernel(&init_mm, pmd, bm_pte);
+       pmd_populate_user(&init_mm, pmd, bm_pte);
 
        /*
         * The boot-ioremap range spans multiple pmds, for which
index b4f2e7e9e90785534e10d017a94e787aa5e8fc03..96c9c3e43f764804005fd3d7277eaf241d60173b 100644 (file)
@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
         * memory (e.g. tracked pages)? For now, we need this to avoid
         * invoking kmemcheck for PnP BIOS calls.
         */
-       if (regs->flags & X86_VM_MASK)
+       if (v8086_mode(regs))
                return false;
-       if (regs->cs != __KERNEL_CS)
+       if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
                return false;
 
        pte = kmemcheck_pte_lookup(address);
index df4552bd239e03b4a02e6505454e41420d530461..12c129c11c1f2eddd02fee374705aecf01cbc1c2 100644 (file)
@@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
  * Leave an at least ~128 MB hole with possible stack randomization.
  */
 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
-#define MAX_GAP (TASK_SIZE/6*5)
+#define MAX_GAP (pax_task_size/6*5)
 
 static int mmap_is_legacy(void)
 {
@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
        return rnd << PAGE_SHIFT;
 }
 
-static unsigned long mmap_base(void)
+static unsigned long mmap_base(struct mm_struct *mm)
 {
        unsigned long gap = rlimit(RLIMIT_STACK);
+       unsigned long pax_task_size = TASK_SIZE;
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (mm->pax_flags & MF_PAX_SEGMEXEC)
+               pax_task_size = SEGMEXEC_TASK_SIZE;
+#endif
 
        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;
 
-       return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
+       return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
 }
 
 /*
  * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
  * does, but not when emulating X86_32
  */
-static unsigned long mmap_legacy_base(void)
+static unsigned long mmap_legacy_base(struct mm_struct *mm)
 {
-       if (mmap_is_ia32())
+       if (mmap_is_ia32()) {
+
+#ifdef CONFIG_PAX_SEGMEXEC
+               if (mm->pax_flags & MF_PAX_SEGMEXEC)
+                       return SEGMEXEC_TASK_UNMAPPED_BASE;
+               else
+#endif
+
                return TASK_UNMAPPED_BASE;
-       else
+       else
                return TASK_UNMAPPED_BASE + mmap_rnd();
 }
 
@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
  */
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
-       mm->mmap_legacy_base = mmap_legacy_base();
-       mm->mmap_base = mmap_base();
+       mm->mmap_legacy_base = mmap_legacy_base(mm);
+       mm->mmap_base = mmap_base(mm);
+
+#ifdef CONFIG_PAX_RANDMMAP
+       if (mm->pax_flags & MF_PAX_RANDMMAP) {
+               mm->mmap_legacy_base += mm->delta_mmap;
+               mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
+       }
+#endif
 
        if (mmap_is_legacy()) {
                mm->mmap_base = mm->mmap_legacy_base;
index 0057a7accfb17467600eac646a7c489a9deb7be3..95c7edd62e8096c847ba66acdf3b660783297a2c 100644 (file)
@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
                break;
        default:
                {
-                       unsigned char *ip = (unsigned char *)instptr;
+                       unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
                        my_trace->opcode = MMIO_UNKNOWN_OP;
                        my_trace->width = 0;
                        my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
                                                        void __iomem *addr)
 {
-       static atomic_t next_id;
+       static atomic_unchecked_t next_id;
        struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
        /* These are page-unaligned. */
        struct mmiotrace_map map = {
@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
                        .private = trace
                },
                .phys = offset,
-               .id = atomic_inc_return(&next_id)
+               .id = atomic_inc_return_unchecked(&next_id)
        };
        map.map_id = trace->id;
 
@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
        ioremap_trace_core(offset, size, addr);
 }
 
-static void iounmap_trace_core(volatile void __iomem *addr)
+static void iounmap_trace_core(const volatile void __iomem *addr)
 {
        struct mmiotrace_map map = {
                .phys = 0,
@@ -328,7 +328,7 @@ not_enabled:
        }
 }
 
-void mmiotrace_iounmap(volatile void __iomem *addr)
+void mmiotrace_iounmap(const volatile void __iomem *addr)
 {
        might_sleep();
        if (is_enabled()) /* recheck and proper locking in *_core() */
index 1a883705a12a8a12410914be93b2ee65807cc423..3f598b56daeaf8b9af6d14ce48b23b4b3e832fc7 100644 (file)
@@ -499,7 +499,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
        }
 }
 
-static int __init numa_register_memblks(struct numa_meminfo *mi)
+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
 {
        unsigned long uninitialized_var(pfn_align);
        int i, nid;
index 536ea2fb6e335677df559390520c3312976dcb81..f42c293c26bda959c2b67b326e7b7bcdff6fc766 100644 (file)
@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
         */
 #ifdef CONFIG_PCI_BIOS
        if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
-               pgprot_val(forbidden) |= _PAGE_NX;
+               pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
 #endif
 
        /*
@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
         * Does not cover __inittext since that is gone later on. On
         * 64bit we do not enforce !NX on the low mapping
         */
-       if (within(address, (unsigned long)_text, (unsigned long)_etext))
-               pgprot_val(forbidden) |= _PAGE_NX;
+       if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
+               pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
 
+#ifdef CONFIG_DEBUG_RODATA
        /*
         * The .rodata section needs to be read-only. Using the pfn
         * catches all aliases.
@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
        if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
                   __pa_symbol(__end_rodata) >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_RW;
+#endif
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
        /*
@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
        }
 #endif
 
+#ifdef CONFIG_PAX_KERNEXEC
+       if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
+               pgprot_val(forbidden) |= _PAGE_RW;
+               pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
+       }
+#endif
+
        prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
 
        return prot;
@@ -440,23 +449,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 {
        /* change init_mm */
+       pax_open_kernel();
        set_pte_atomic(kpte, pte);
+
 #ifdef CONFIG_X86_32
        if (!SHARED_KERNEL_PMD) {
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+               unsigned long cpu;
+#else
                struct page *page;
+#endif
 
+#ifdef CONFIG_PAX_PER_CPU_PGD
+               for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
+                       pgd_t *pgd = get_cpu_pgd(cpu, kernel);
+#else
                list_for_each_entry(page, &pgd_list, lru) {
-                       pgd_t *pgd;
+                       pgd_t *pgd = (pgd_t *)page_address(page);
+#endif
+
                        pud_t *pud;
                        pmd_t *pmd;
 
-                       pgd = (pgd_t *)page_address(page) + pgd_index(address);
+                       pgd += pgd_index(address);
                        pud = pud_offset(pgd, address);
                        pmd = pmd_offset(pud, address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
        }
 #endif
+       pax_close_kernel();
 }
 
 static int
index 7ac68698406c3b35e5ce0b0e98c73c5441e869a3..c0ba541104ee32989c94f2e48b7ed0fd1fb344ed 100644 (file)
@@ -89,7 +89,7 @@ static inline enum page_cache_mode get_page_memtype(struct page *pg)
        unsigned long pg_flags = pg->flags & _PGMT_MASK;
 
        if (pg_flags == _PGMT_DEFAULT)
-               return -1;
+               return _PAGE_CACHE_MODE_NUM;
        else if (pg_flags == _PGMT_WC)
                return _PAGE_CACHE_MODE_WC;
        else if (pg_flags == _PGMT_UC_MINUS)
@@ -346,7 +346,7 @@ static int reserve_ram_pages_type(u64 start, u64 end,
 
                page = pfn_to_page(pfn);
                type = get_page_memtype(page);
-               if (type != -1) {
+               if (type != _PAGE_CACHE_MODE_NUM) {
                        pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
                                start, end - 1, type, req_type);
                        if (new_type)
@@ -498,7 +498,7 @@ int free_memtype(u64 start, u64 end)
 
        if (!entry) {
                printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
-                      current->comm, current->pid, start, end - 1);
+                       current->comm, task_pid_nr(current), start, end - 1);
                return -EINVAL;
        }
 
@@ -532,10 +532,10 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
                page = pfn_to_page(paddr >> PAGE_SHIFT);
                rettype = get_page_memtype(page);
                /*
-                * -1 from get_page_memtype() implies RAM page is in its
+                * _PAGE_CACHE_MODE_NUM from get_page_memtype() implies RAM page is in its
                 * default state and not reserved, and hence of type WB
                 */
-               if (rettype == -1)
+               if (rettype == _PAGE_CACHE_MODE_NUM)
                        rettype = _PAGE_CACHE_MODE_WB;
 
                return rettype;
@@ -628,8 +628,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 
        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
-                       printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
-                               current->comm, from, to - 1);
+                       printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
+                               current->comm, from, to - 1, cursor);
                        return 0;
                }
                cursor += PAGE_SIZE;
@@ -700,7 +700,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
        if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
                printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
                        "for [mem %#010Lx-%#010Lx]\n",
-                       current->comm, current->pid,
+                       current->comm, task_pid_nr(current),
                        cattr_name(pcm),
                        base, (unsigned long long)(base + size-1));
                return -EINVAL;
@@ -735,7 +735,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                pcm = lookup_memtype(paddr);
                if (want_pcm != pcm) {
                        printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
-                               current->comm, current->pid,
+                               current->comm, task_pid_nr(current),
                                cattr_name(want_pcm),
                                (unsigned long long)paddr,
                                (unsigned long long)(paddr + size - 1),
@@ -757,7 +757,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                        free_memtype(paddr, paddr + size);
                        printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
                                " for [mem %#010Lx-%#010Lx], got %s\n",
-                               current->comm, current->pid,
+                               current->comm, task_pid_nr(current),
                                cattr_name(want_pcm),
                                (unsigned long long)paddr,
                                (unsigned long long)(paddr + size - 1),
index 6582adcc8bd935b96df0c428308255fa262710ea..fcc5d0bec53400e60df041e8978a236f5fb3c61e 100644 (file)
@@ -161,7 +161,7 @@ success:
 
 failure:
        printk(KERN_INFO "%s:%d conflicting memory types "
-               "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
+               "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
                end, cattr_name(found_type), cattr_name(match->type));
        return -EBUSY;
 }
index 9f0614daea85fecfd62f7d8e35ebc68aaa3cedad..92ae64a88614f1f38e96298bd54f0efced490ee7 100644 (file)
@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
        int i;
        enum reason_type rv = OTHERS;
 
-       p = (unsigned char *)ins_addr;
+       p = (unsigned char *)ktla_ktva(ins_addr);
        p += skip_prefix(p, &prf);
        p += get_opcode(p, &opcode);
 
@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
        struct prefix_bits prf;
        int i;
 
-       p = (unsigned char *)ins_addr;
+       p = (unsigned char *)ktla_ktva(ins_addr);
        p += skip_prefix(p, &prf);
        p += get_opcode(p, &opcode);
 
@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
        struct prefix_bits prf;
        int i;
 
-       p = (unsigned char *)ins_addr;
+       p = (unsigned char *)ktla_ktva(ins_addr);
        p += skip_prefix(p, &prf);
        p += get_opcode(p, &opcode);
 
@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
        struct prefix_bits prf;
        int i;
 
-       p = (unsigned char *)ins_addr;
+       p = (unsigned char *)ktla_ktva(ins_addr);
        p += skip_prefix(p, &prf);
        p += get_opcode(p, &opcode);
        for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
        struct prefix_bits prf;
        int i;
 
-       p = (unsigned char *)ins_addr;
+       p = (unsigned char *)ktla_ktva(ins_addr);
        p += skip_prefix(p, &prf);
        p += get_opcode(p, &opcode);
        for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
index 6fb6927f9e76f3dc0b868ceaeca05fe79f09e5a6..4fc13c094a6599092402c4c38212a604747d6a30 100644 (file)
@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
        list_del(&page->lru);
 }
 
-#define UNSHARED_PTRS_PER_PGD                          \
-       (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
 
+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
+{
+       unsigned int count = USER_PGD_PTRS;
+
+       if (!pax_user_shadow_base)
+               return;
+
+       while (count--)
+               *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
+}
+#endif
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
+{
+       unsigned int count = USER_PGD_PTRS;
+
+       while (count--) {
+               pgd_t pgd;
+
+#ifdef CONFIG_X86_64
+               pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
+#else
+               pgd = *src++;
+#endif
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+               pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
+#endif
 
+               *dst++ = pgd;
+       }
+
+}
+#endif
+
+#ifdef CONFIG_X86_64
+#define pxd_t                          pud_t
+#define pyd_t                          pgd_t
+#define paravirt_release_pxd(pfn)      paravirt_release_pud(pfn)
+#define pgtable_pxd_page_ctor(page)    true
+#define pgtable_pxd_page_dtor(page)
+#define pxd_free(mm, pud)              pud_free((mm), (pud))
+#define pyd_populate(mm, pgd, pud)     pgd_populate((mm), (pgd), (pud))
+#define pyd_offset(mm, address)                pgd_offset((mm), (address))
+#define PYD_SIZE                       PGDIR_SIZE
+#else
+#define pxd_t                          pmd_t
+#define pyd_t                          pud_t
+#define paravirt_release_pxd(pfn)      paravirt_release_pmd(pfn)
+#define pgtable_pxd_page_ctor(page)    pgtable_pmd_page_ctor(page)
+#define pgtable_pxd_page_dtor(page)    pgtable_pmd_page_dtor(page)
+#define pxd_free(mm, pud)              pmd_free((mm), (pud))
+#define pyd_populate(mm, pgd, pud)     pud_populate((mm), (pgd), (pud))
+#define pyd_offset(mm, address)                pud_offset((mm), (address))
+#define PYD_SIZE                       PUD_SIZE
+#endif
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
+static inline void pgd_dtor(pgd_t *pgd) {}
+#else
 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
 {
        BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
        pgd_list_del(pgd);
        spin_unlock(&pgd_lock);
 }
+#endif
 
 /*
  * List of all pgd's needed for non-PAE so it can invalidate entries
@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
  * -- nyc
  */
 
-#ifdef CONFIG_X86_PAE
+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
 /*
  * In PAE mode, we need to do a cr3 reload (=tlb flush) when
  * updating the top-level pagetable entries to guarantee the
@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
  * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
  * and initialize the kernel pmds here.
  */
-#define PREALLOCATED_PMDS      UNSHARED_PTRS_PER_PGD
+#define PREALLOCATED_PXDS      (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
 
 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
 {
@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
         */
        flush_tlb_mm(mm);
 }
+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
+#define PREALLOCATED_PXDS      USER_PGD_PTRS
 #else  /* !CONFIG_X86_PAE */
 
 /* No need to prepopulate any pagetable entries in non-PAE modes. */
-#define PREALLOCATED_PMDS      0
+#define PREALLOCATED_PXDS      0
 
 #endif /* CONFIG_X86_PAE */
 
-static void free_pmds(pmd_t *pmds[])
+static void free_pxds(pxd_t *pxds[])
 {
        int i;
 
-       for(i = 0; i < PREALLOCATED_PMDS; i++)
-               if (pmds[i]) {
-                       pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
-                       free_page((unsigned long)pmds[i]);
+       for(i = 0; i < PREALLOCATED_PXDS; i++)
+               if (pxds[i]) {
+                       pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
+                       free_page((unsigned long)pxds[i]);
                }
 }
 
-static int preallocate_pmds(pmd_t *pmds[])
+static int preallocate_pxds(pxd_t *pxds[])
 {
        int i;
        bool failed = false;
 
-       for(i = 0; i < PREALLOCATED_PMDS; i++) {
-               pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
-               if (!pmd)
+       for(i = 0; i < PREALLOCATED_PXDS; i++) {
+               pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
+               if (!pxd)
                        failed = true;
-               if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
-                       free_page((unsigned long)pmd);
-                       pmd = NULL;
+               if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
+                       free_page((unsigned long)pxd);
+                       pxd = NULL;
                        failed = true;
                }
-               pmds[i] = pmd;
+               pxds[i] = pxd;
        }
 
        if (failed) {
-               free_pmds(pmds);
+               free_pxds(pxds);
                return -ENOMEM;
        }
 
@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
  * preallocate which never got a corresponding vma will need to be
  * freed manually.
  */
-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
 {
        int i;
 
-       for(i = 0; i < PREALLOCATED_PMDS; i++) {
+       for(i = 0; i < PREALLOCATED_PXDS; i++) {
                pgd_t pgd = pgdp[i];
 
                if (pgd_val(pgd) != 0) {
-                       pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
+                       pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
 
-                       pgdp[i] = native_make_pgd(0);
+                       set_pgd(pgdp + i, native_make_pgd(0));
 
-                       paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
-                       pmd_free(mm, pmd);
+                       paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
+                       pxd_free(mm, pxd);
                }
        }
 }
 
-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
 {
-       pud_t *pud;
+       pyd_t *pyd;
        int i;
 
-       if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
+       if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
                return;
 
-       pud = pud_offset(pgd, 0);
-
-       for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
-               pmd_t *pmd = pmds[i];
+#ifdef CONFIG_X86_64
+       pyd = pyd_offset(mm, 0L);
+#else
+       pyd = pyd_offset(pgd, 0L);
+#endif
 
+       for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
+               pxd_t *pxd = pxds[i];
                if (i >= KERNEL_PGD_BOUNDARY)
-                       memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
-                              sizeof(pmd_t) * PTRS_PER_PMD);
+                       memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
+                              sizeof(pxd_t) * PTRS_PER_PMD);
 
-               pud_populate(mm, pud, pmd);
+               pyd_populate(mm, pyd, pxd);
        }
 }
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
        pgd_t *pgd;
-       pmd_t *pmds[PREALLOCATED_PMDS];
+       pxd_t *pxds[PREALLOCATED_PXDS];
 
        pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
 
@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 
        mm->pgd = pgd;
 
-       if (preallocate_pmds(pmds) != 0)
+       if (preallocate_pxds(pxds) != 0)
                goto out_free_pgd;
 
        if (paravirt_pgd_alloc(mm) != 0)
-               goto out_free_pmds;
+               goto out_free_pxds;
 
        /*
         * Make sure that pre-populating the pmds is atomic with
@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
        spin_lock(&pgd_lock);
 
        pgd_ctor(mm, pgd);
-       pgd_prepopulate_pmd(mm, pgd, pmds);
+       pgd_prepopulate_pxd(mm, pgd, pxds);
 
        spin_unlock(&pgd_lock);
 
        return pgd;
 
-out_free_pmds:
-       free_pmds(pmds);
+out_free_pxds:
+       free_pxds(pxds);
 out_free_pgd:
        free_page((unsigned long)pgd);
 out:
@@ -313,7 +380,7 @@ out:
 
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-       pgd_mop_up_pmds(mm, pgd);
+       pgd_mop_up_pxds(mm, pgd);
        pgd_dtor(pgd);
        paravirt_pgd_free(mm, pgd);
        free_page((unsigned long)pgd);
index 75cc0978d45d7d7acc43d65615b766c78df9ab1a..79a097f6ff064ef9bad5931e8e879488ed95492a 100644 (file)
@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
+
+       pax_open_kernel();
        if (pte_val(pteval))
                set_pte_at(&init_mm, vaddr, pte, pteval);
        else
                pte_clear(&init_mm, vaddr, pte);
+       pax_close_kernel();
 
        /*
         * It's enough to flush this one mapping.
index e666cbbb926191e18995e0cc30c955a726bf462b..61788c455c5ab90065958bdd83b8f5ade3543df0 100644 (file)
@@ -10,7 +10,7 @@
 #ifdef CONFIG_X86_64
 
 #ifdef CONFIG_DEBUG_VIRTUAL
-unsigned long __phys_addr(unsigned long x)
+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
 {
        unsigned long y = x - __START_KERNEL_map;
 
@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
 #else
 
 #ifdef CONFIG_DEBUG_VIRTUAL
-unsigned long __phys_addr(unsigned long x)
+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
 {
        unsigned long phys_addr = x - PAGE_OFFSET;
        /* VMALLOC_* aren't constants  */
index 90555bf60aa45dce625c0171e4783e6014529490..f5f1828c19eca76ce93f29eb9eff135bcffb2cf2 100644 (file)
@@ -5,8 +5,10 @@
 #include <asm/pgtable.h>
 #include <asm/proto.h>
 
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 static int disable_nx;
 
+#ifndef CONFIG_PAX_PAGEEXEC
 /*
  * noexec = on|off
  *
@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
        return 0;
 }
 early_param("noexec", noexec_setup);
+#endif
+
+#endif
 
 void x86_configure_nx(void)
 {
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
        if (cpu_has_nx && !disable_nx)
                __supported_pte_mask |= _PAGE_NX;
        else
+#endif
                __supported_pte_mask &= ~_PAGE_NX;
 }
 
index ee61c36d64f84dd944873ec0acf80b4f8ad06da7..e6fedeb838c1c332af385a8e2e8f58d7dc82ec11 100644 (file)
@@ -48,7 +48,11 @@ void leave_mm(int cpu)
                BUG();
        if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
                cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
+
+#ifndef CONFIG_PAX_PER_CPU_PGD
                load_cr3(swapper_pg_dir);
+#endif
+
                /*
                 * This gets called in the idle path where RCU
                 * functions differently.  Tracing normally
index 6440221ced0d4925d3fee4a0c11b424a6b696f2d..f84b5c7ddedcc60181252086b6b8b5169b9e13a6 100644 (file)
@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
+#include <asm/alternative-asm.h>
 
 /*
  * Calling convention :
@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
        jle     bpf_slow_path_word
        mov     (SKBDATA,%rsi),%eax
        bswap   %eax                    /* ntohl() */
+       pax_force_retaddr
        ret
 
 sk_load_half:
@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
        jle     bpf_slow_path_half
        movzwl  (SKBDATA,%rsi),%eax
        rol     $8,%ax                  # ntohs()
+       pax_force_retaddr
        ret
 
 sk_load_byte:
@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
        cmp     %esi,%r9d   /* if (offset >= hlen) goto bpf_slow_path_byte */
        jle     bpf_slow_path_byte
        movzbl  (SKBDATA,%rsi),%eax
+       pax_force_retaddr
        ret
 
 /* rsi contains offset and can be scratched */
@@ -90,6 +94,7 @@ bpf_slow_path_word:
        js      bpf_error
        mov     - MAX_BPF_STACK + 32(%rbp),%eax
        bswap   %eax
+       pax_force_retaddr
        ret
 
 bpf_slow_path_half:
@@ -98,12 +103,14 @@ bpf_slow_path_half:
        mov     - MAX_BPF_STACK + 32(%rbp),%ax
        rol     $8,%ax
        movzwl  %ax,%eax
+       pax_force_retaddr
        ret
 
 bpf_slow_path_byte:
        bpf_slow_path_common(1)
        js      bpf_error
        movzbl  - MAX_BPF_STACK + 32(%rbp),%eax
+       pax_force_retaddr
        ret
 
 #define sk_negative_common(SIZE)                               \
@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
        sk_negative_common(4)
        mov     (%rax), %eax
        bswap   %eax
+       pax_force_retaddr
        ret
 
 bpf_slow_path_half_neg:
@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
        mov     (%rax),%ax
        rol     $8,%ax
        movzwl  %ax,%eax
+       pax_force_retaddr
        ret
 
 bpf_slow_path_byte_neg:
@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
        .globl  sk_load_byte_negative_offset
        sk_negative_common(1)
        movzbl  (%rax), %eax
+       pax_force_retaddr
        ret
 
 bpf_error:
@@ -156,4 +166,5 @@ bpf_error:
        mov     - MAX_BPF_STACK + 16(%rbp),%r14
        mov     - MAX_BPF_STACK + 24(%rbp),%r15
        leaveq
+       pax_force_retaddr
        ret
index 987514396c1e443376bffe70e050b72f86509102..00f665656d886f5a41127a8cd5bf17e882598579 100644 (file)
 #include <linux/if_vlan.h>
 #include <asm/cacheflush.h>
 
+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
+int bpf_jit_enable __read_only;
+#else
 int bpf_jit_enable __read_mostly;
+#endif
 
 /*
  * assembly code in arch/x86/net/bpf_jit.S
@@ -174,7 +178,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
 static void jit_fill_hole(void *area, unsigned int size)
 {
        /* fill whole space with int3 instructions */
+       pax_open_kernel();
        memset(area, 0xcc, size);
+       pax_close_kernel();
 }
 
 struct jit_context {
@@ -896,7 +902,9 @@ common_load:
                                pr_err("bpf_jit_compile fatal error\n");
                                return -EFAULT;
                        }
+                       pax_open_kernel();
                        memcpy(image + proglen, temp, ilen);
+                       pax_close_kernel();
                }
                proglen += ilen;
                addrs[i] = proglen;
@@ -968,7 +976,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 
        if (image) {
                bpf_flush_icache(header, image + proglen);
-               set_memory_ro((unsigned long)header, header->pages);
                prog->bpf_func = (void *)image;
                prog->jited = true;
        }
@@ -981,12 +988,8 @@ void bpf_jit_free(struct bpf_prog *fp)
        unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
        struct bpf_binary_header *header = (void *)addr;
 
-       if (!fp->jited)
-               goto free_filter;
+       if (fp->jited)
+               bpf_jit_binary_free(header);
 
-       set_memory_rw(addr, header->pages);
-       bpf_jit_binary_free(header);
-
-free_filter:
        bpf_prog_unlock_free(fp);
 }
index 5d04be5efb6401b1b7512f4ca519a24a016b20d2..2beeaa2d2047e8264f104338c62ba40214b89f6e 100644 (file)
@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
        struct stack_frame_ia32 *fp;
        unsigned long bytes;
 
-       bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
+       bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
        if (bytes != 0)
                return NULL;
 
-       fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
+       fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
 
        oprofile_add_trace(bufhead[0].return_address);
 
@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
        struct stack_frame bufhead[2];
        unsigned long bytes;
 
-       bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
+       bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
        if (bytes != 0)
                return NULL;
 
@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
 {
        struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
 
-       if (!user_mode_vm(regs)) {
+       if (!user_mode(regs)) {
                unsigned long stack = kernel_stack_pointer(regs);
                if (depth)
                        dump_trace(NULL, regs, (unsigned long *)stack, 0,
index 1d2e6392f5faaf6f450cdfa598b9b2a734064962..f6ef82a1ef7187c7a7c1450f25257c7bff46441e 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/nmi.h>
 #include <asm/msr.h>
 #include <asm/apic.h>
+#include <asm/pgtable.h>
 
 #include "op_counter.h"
 #include "op_x86_model.h"
@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
        if (ret)
                return ret;
 
-       if (!model->num_virt_counters)
-               model->num_virt_counters = model->num_counters;
+       if (!model->num_virt_counters) {
+               pax_open_kernel();
+               *(unsigned int *)&model->num_virt_counters = model->num_counters;
+               pax_close_kernel();
+       }
 
        mux_init(ops);
 
index 50d86c0e9ba4973b707de5917cfae78692fd6f68..79853183e6ab386510ef0848ce35b388fb776153 100644 (file)
@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
                num_counters = AMD64_NUM_COUNTERS;
        }
 
-       op_amd_spec.num_counters = num_counters;
-       op_amd_spec.num_controls = num_counters;
-       op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
+       pax_open_kernel();
+       *(unsigned int *)&op_amd_spec.num_counters = num_counters;
+       *(unsigned int *)&op_amd_spec.num_controls = num_counters;
+       *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
+       pax_close_kernel();
 
        return 0;
 }
index d90528ea541206b57f3048e191d4340ee070b40b..0127e2b6584d1258e91db48ba4bcc0b78c9722d7 100644 (file)
@@ -19,6 +19,7 @@
 #include <asm/msr.h>
 #include <asm/apic.h>
 #include <asm/nmi.h>
+#include <asm/pgtable.h>
 
 #include "op_x86_model.h"
 #include "op_counter.h"
@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
 
        num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
 
-       op_arch_perfmon_spec.num_counters = num_counters;
-       op_arch_perfmon_spec.num_controls = num_counters;
+       pax_open_kernel();
+       *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
+       *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
+       pax_close_kernel();
 }
 
 static int arch_perfmon_init(struct oprofile_operations *ignore)
index 71e8a67337e23bfa0675a0c86aa768dea588baf1..6a313bb0bd1fcff28aff41804a5fc6e75c42824a 100644 (file)
@@ -52,7 +52,7 @@ struct op_x86_model_spec {
        void            (*switch_ctrl)(struct op_x86_model_spec const *model,
                                       struct op_msrs const * const msrs);
 #endif
-};
+} __do_const;
 
 struct op_counter_config;
 
index 44b9271580b5b0532bddf121af554cc0ec951779..4c5a988cda9e4a0de77f4b1877eec36c451e2d8f 100644 (file)
@@ -258,7 +258,7 @@ int __init intel_mid_pci_init(void)
        pci_mmcfg_late_init();
        pcibios_enable_irq = intel_mid_pci_irq_enable;
        pcibios_disable_irq = intel_mid_pci_irq_disable;
-       pci_root_ops = intel_mid_pci_ops;
+       memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
        pci_soc_mode = 1;
        /* Continue with standard init */
        return 1;
index 5dc6ca5e174131d2c7208ea1ed86739ef4532d22..25c03f559f1fe5e916549cc853640c3bb0dae5bd 100644 (file)
@@ -51,7 +51,7 @@ struct irq_router {
 struct irq_router_handler {
        u16 vendor;
        int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
-};
+} __do_const;
 
 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
        return 0;
 }
 
-static __initdata struct irq_router_handler pirq_routers[] = {
+static __initconst const struct irq_router_handler pirq_routers[] = {
        { PCI_VENDOR_ID_INTEL, intel_router_probe },
        { PCI_VENDOR_ID_AL, ali_router_probe },
        { PCI_VENDOR_ID_ITE, ite_router_probe },
@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
 static void __init pirq_find_router(struct irq_router *r)
 {
        struct irq_routing_table *rt = pirq_table;
-       struct irq_router_handler *h;
+       const struct irq_router_handler *h;
 
 #ifdef CONFIG_PCI_BIOS
        if (!rt->signature) {
@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
        return 0;
 }
 
-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
        {
                .callback = fix_broken_hp_bios_irq9,
                .ident = "HP Pavilion N5400 Series Laptop",
index 9b83b9051ae7bb78daf009e1d6c95927c74f2877..4112152d3c5286c0c97db977c85049d506f47565 100644 (file)
@@ -79,7 +79,7 @@ union bios32 {
 static struct {
        unsigned long address;
        unsigned short segment;
-} bios32_indirect __initdata = { 0, __KERNEL_CS };
+} bios32_indirect __initconst = { 0, __PCIBIOS_CS };
 
 /*
  * Returns the entry point for the given service, NULL on error
@@ -92,37 +92,80 @@ static unsigned long __init bios32_service(unsigned long service)
        unsigned long length;           /* %ecx */
        unsigned long entry;            /* %edx */
        unsigned long flags;
+       struct desc_struct d, *gdt;
 
        local_irq_save(flags);
-       __asm__("lcall *(%%edi); cld"
+
+       gdt = get_cpu_gdt_table(smp_processor_id());
+
+       pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
+       write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
+       pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
+       write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
+
+       __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
                : "=a" (return_code),
                  "=b" (address),
                  "=c" (length),
                  "=d" (entry)
                : "0" (service),
                  "1" (0),
-                 "D" (&bios32_indirect));
+                 "D" (&bios32_indirect),
+                 "r"(__PCIBIOS_DS)
+               : "memory");
+
+       pax_open_kernel();
+       gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
+       gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
+       gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
+       gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
+       pax_close_kernel();
+
        local_irq_restore(flags);
 
        switch (return_code) {
-               case 0:
-                       return address + entry;
-               case 0x80:      /* Not present */
-                       printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
-                       return 0;
-               default: /* Shouldn't happen */
-                       printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
-                               service, return_code);
+       case 0: {
+               int cpu;
+               unsigned char flags;
+
+               printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
+               if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
+                       printk(KERN_WARNING "bios32_service: not valid\n");
                        return 0;
+               }
+               address = address + PAGE_OFFSET;
+               length += 16UL; /* some BIOSs underreport this... */
+               flags = 4;
+               if (length >= 64*1024*1024) {
+                       length >>= PAGE_SHIFT;
+                       flags |= 8;
+               }
+
+               for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+                       gdt = get_cpu_gdt_table(cpu);
+                       pack_descriptor(&d, address, length, 0x9b, flags);
+                       write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
+                       pack_descriptor(&d, address, length, 0x93, flags);
+                       write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
+               }
+               return entry;
+       }
+       case 0x80:      /* Not present */
+               printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
+               return 0;
+       default: /* Shouldn't happen */
+               printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
+                       service, return_code);
+               return 0;
        }
 }
 
 static struct {
        unsigned long address;
        unsigned short segment;
-} pci_indirect = { 0, __KERNEL_CS };
+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
 
-static int pci_bios_present;
+static int pci_bios_present __read_only;
 
 static int __init check_pcibios(void)
 {
@@ -131,11 +174,13 @@ static int __init check_pcibios(void)
        unsigned long flags, pcibios_entry;
 
        if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
-               pci_indirect.address = pcibios_entry + PAGE_OFFSET;
+               pci_indirect.address = pcibios_entry;
 
                local_irq_save(flags);
-               __asm__(
-                       "lcall *(%%edi); cld\n\t"
+               __asm__("movw %w6, %%ds\n\t"
+                       "lcall *%%ss:(%%edi); cld\n\t"
+                       "push %%ss\n\t"
+                       "pop %%ds\n\t"
                        "jc 1f\n\t"
                        "xor %%ah, %%ah\n"
                        "1:"
@@ -144,7 +189,8 @@ static int __init check_pcibios(void)
                          "=b" (ebx),
                          "=c" (ecx)
                        : "1" (PCIBIOS_PCI_BIOS_PRESENT),
-                         "D" (&pci_indirect)
+                         "D" (&pci_indirect),
+                         "r" (__PCIBIOS_DS)
                        : "memory");
                local_irq_restore(flags);
 
@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
 
        switch (len) {
        case 1:
-               __asm__("lcall *(%%esi); cld\n\t"
+               __asm__("movw %w6, %%ds\n\t"
+                       "lcall *%%ss:(%%esi); cld\n\t"
+                       "push %%ss\n\t"
+                       "pop %%ds\n\t"
                        "jc 1f\n\t"
                        "xor %%ah, %%ah\n"
                        "1:"
@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
                        : "1" (PCIBIOS_READ_CONFIG_BYTE),
                          "b" (bx),
                          "D" ((long)reg),
-                         "S" (&pci_indirect));
+                         "S" (&pci_indirect),
+                         "r" (__PCIBIOS_DS));
                /*
                 * Zero-extend the result beyond 8 bits, do not trust the
                 * BIOS having done it:
@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
                *value &= 0xff;
                break;
        case 2:
-               __asm__("lcall *(%%esi); cld\n\t"
+               __asm__("movw %w6, %%ds\n\t"
+                       "lcall *%%ss:(%%esi); cld\n\t"
+                       "push %%ss\n\t"
+                       "pop %%ds\n\t"
                        "jc 1f\n\t"
                        "xor %%ah, %%ah\n"
                        "1:"
@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
                        : "1" (PCIBIOS_READ_CONFIG_WORD),
                          "b" (bx),
                          "D" ((long)reg),
-                         "S" (&pci_indirect));
+                         "S" (&pci_indirect),
+                         "r" (__PCIBIOS_DS));
                /*
                 * Zero-extend the result beyond 16 bits, do not trust the
                 * BIOS having done it:
@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
                *value &= 0xffff;
                break;
        case 4:
-               __asm__("lcall *(%%esi); cld\n\t"
+               __asm__("movw %w6, %%ds\n\t"
+                       "lcall *%%ss:(%%esi); cld\n\t"
+                       "push %%ss\n\t"
+                       "pop %%ds\n\t"
                        "jc 1f\n\t"
                        "xor %%ah, %%ah\n"
                        "1:"
@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
                        : "1" (PCIBIOS_READ_CONFIG_DWORD),
                          "b" (bx),
                          "D" ((long)reg),
-                         "S" (&pci_indirect));
+                         "S" (&pci_indirect),
+                         "r" (__PCIBIOS_DS));
                break;
        }
 
@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
 
        switch (len) {
        case 1:
-               __asm__("lcall *(%%esi); cld\n\t"
+               __asm__("movw %w6, %%ds\n\t"
+                       "lcall *%%ss:(%%esi); cld\n\t"
+                       "push %%ss\n\t"
+                       "pop %%ds\n\t"
                        "jc 1f\n\t"
                        "xor %%ah, %%ah\n"
                        "1:"
@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
                          "c" (value),
                          "b" (bx),
                          "D" ((long)reg),
-                         "S" (&pci_indirect));
+                         "S" (&pci_indirect),
+                         "r" (__PCIBIOS_DS));
                break;
        case 2:
-               __asm__("lcall *(%%esi); cld\n\t"
+               __asm__("movw %w6, %%ds\n\t"
+                       "lcall *%%ss:(%%esi); cld\n\t"
+                       "push %%ss\n\t"
+                       "pop %%ds\n\t"
                        "jc 1f\n\t"
                        "xor %%ah, %%ah\n"
                        "1:"
@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
                          "c" (value),
                          "b" (bx),
                          "D" ((long)reg),
-                         "S" (&pci_indirect));
+                         "S" (&pci_indirect),
+                         "r" (__PCIBIOS_DS));
                break;
        case 4:
-               __asm__("lcall *(%%esi); cld\n\t"
+               __asm__("movw %w6, %%ds\n\t"
+                       "lcall *%%ss:(%%esi); cld\n\t"
+                       "push %%ss\n\t"
+                       "pop %%ds\n\t"
                        "jc 1f\n\t"
                        "xor %%ah, %%ah\n"
                        "1:"
@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
                          "c" (value),
                          "b" (bx),
                          "D" ((long)reg),
-                         "S" (&pci_indirect));
+                         "S" (&pci_indirect),
+                         "r" (__PCIBIOS_DS));
                break;
        }
 
@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
 
        DBG("PCI: Fetching IRQ routing table... ");
        __asm__("push %%es\n\t"
+               "movw %w8, %%ds\n\t"
                "push %%ds\n\t"
                "pop  %%es\n\t"
-               "lcall *(%%esi); cld\n\t"
+               "lcall *%%ss:(%%esi); cld\n\t"
                "pop %%es\n\t"
+               "push %%ss\n\t"
+               "pop %%ds\n"
                "jc 1f\n\t"
                "xor %%ah, %%ah\n"
                "1:"
@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
                  "1" (0),
                  "D" ((long) &opt),
                  "S" (&pci_indirect),
-                 "m" (opt)
+                 "m" (opt),
+                 "r" (__PCIBIOS_DS)
                : "memory");
        DBG("OK  ret=%d, size=%d, map=%x\n", ret, opt.size, map);
        if (ret & 0xff00)
@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
 {
        int ret;
 
-       __asm__("lcall *(%%esi); cld\n\t"
+       __asm__("movw %w5, %%ds\n\t"
+               "lcall *%%ss:(%%esi); cld\n\t"
+               "push %%ss\n\t"
+               "pop %%ds\n"
                "jc 1f\n\t"
                "xor %%ah, %%ah\n"
                "1:"
@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
                : "0" (PCIBIOS_SET_PCI_HW_INT),
                  "b" ((dev->bus->number << 8) | dev->devfn),
                  "c" ((irq << 8) | (pin + 10)),
-                 "S" (&pci_indirect));
+                 "S" (&pci_indirect),
+                 "r" (__PCIBIOS_DS));
        return !(ret & 0xff00);
 }
 EXPORT_SYMBOL(pcibios_set_irq_routing);
index 40e7cda529365133b50bab8745d9995c47557d53..c7e6672fd4bbf5b67e90b0dc306d9f7b086bef89 100644 (file)
@@ -61,11 +61,22 @@ void __init efi_call_phys_prolog(void)
 {
        struct desc_ptr gdt_descr;
 
+#ifdef CONFIG_PAX_KERNEXEC
+       struct desc_struct d;
+#endif
+
        local_irq_save(efi_rt_eflags);
 
        load_cr3(initial_page_table);
        __flush_tlb_all();
 
+#ifdef CONFIG_PAX_KERNEXEC
+       pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
+       write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
+       pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
+       write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
+#endif
+
        gdt_descr.address = __pa(get_cpu_gdt_table(0));
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);
@@ -75,11 +86,24 @@ void __init efi_call_phys_epilog(void)
 {
        struct desc_ptr gdt_descr;
 
+#ifdef CONFIG_PAX_KERNEXEC
+       struct desc_struct d;
+
+       memset(&d, 0, sizeof d);
+       write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
+       write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
+#endif
+
        gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);
 
+#ifdef CONFIG_PAX_PER_CPU_PGD
+       load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
+#else
        load_cr3(swapper_pg_dir);
+#endif
+
        __flush_tlb_all();
 
        local_irq_restore(efi_rt_eflags);
index 17e80d829df0391536ea49379108d1383a39c46f..9fa6e41d0fb84a7d44018ef150072045f8ffcf91 100644 (file)
@@ -98,6 +98,11 @@ void __init efi_call_phys_prolog(void)
                vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
                set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
        }
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+       load_cr3(swapper_pg_dir);
+#endif
+
        __flush_tlb_all();
 }
 
@@ -115,6 +120,11 @@ void __init efi_call_phys_epilog(void)
        for (pgd = 0; pgd < n_pgds; pgd++)
                set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
        kfree(save_pgd);
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+       load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
+#endif
+
        __flush_tlb_all();
        local_irq_restore(efi_flags);
        early_code_mapping_set_exec(0);
@@ -145,8 +155,23 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
        unsigned npages;
        pgd_t *pgd;
 
-       if (efi_enabled(EFI_OLD_MEMMAP))
+       if (efi_enabled(EFI_OLD_MEMMAP)) {
+               /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
+                * able to execute the EFI services.
+                */
+               if (__supported_pte_mask & _PAGE_NX) {
+                       unsigned long addr = (unsigned long) __va(0);
+                       pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) &  ~_PAGE_NX);
+
+                       pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
+#ifdef CONFIG_PAX_PER_CPU_PGD
+                       set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
+#endif
+                       set_pgd(pgd_offset_k(addr), pe);
+               }
+
                return 0;
+       }
 
        efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
        pgd = __va(efi_scratch.efi_pgt);
index 040192b50d0209b46b64e2607b0e41fe2bb0123b..7d3300f766a12c9f4d437059c043ce3927668a6b 100644 (file)
@@ -6,7 +6,9 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/init.h>
 #include <asm/page_types.h>
+#include <asm/segment.h>
 
 /*
  * efi_call_phys(void *, ...) is a function with variable parameters.
@@ -20,7 +22,7 @@
  * service functions will comply with gcc calling convention, too.
  */
 
-.text
+__INIT
 ENTRY(efi_call_phys)
        /*
         * 0. The function can only be called in Linux kernel. So CS has been
@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
         * The mapping of lower virtual memory has been created in prolog and
         * epilog.
         */
-       movl    $1f, %edx
-       subl    $__PAGE_OFFSET, %edx
-       jmp     *%edx
+#ifdef CONFIG_PAX_KERNEXEC
+       movl    $(__KERNEXEC_EFI_DS), %edx
+       mov     %edx, %ds
+       mov     %edx, %es
+       mov     %edx, %ss
+       addl    $2f,(1f)
+       ljmp    *(1f)
+
+__INITDATA
+1:     .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
+.previous
+
+2:
+       subl    $2b,(1b)
+#else
+       jmp     1f-__PAGE_OFFSET
 1:
+#endif
 
        /*
         * 2. Now on the top of stack is the return
@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
         * parameter 2, ..., param n. To make things easy, we save the return
         * address of efi_call_phys in a global variable.
         */
-       popl    %edx
-       movl    %edx, saved_return_addr
-       /* get the function pointer into ECX*/
-       popl    %ecx
-       movl    %ecx, efi_rt_function_ptr
-       movl    $2f, %edx
-       subl    $__PAGE_OFFSET, %edx
-       pushl   %edx
+       popl    (saved_return_addr)
+       popl    (efi_rt_function_ptr)
 
        /*
         * 3. Clear PG bit in %CR0.
@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
        /*
         * 5. Call the physical function.
         */
-       jmp     *%ecx
+       call    *(efi_rt_function_ptr-__PAGE_OFFSET)
 
-2:
        /*
         * 6. After EFI runtime service returns, control will return to
         * following instruction. We'd better readjust stack pointer first.
@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
        movl    %cr0, %edx
        orl     $0x80000000, %edx
        movl    %edx, %cr0
-       jmp     1f
-1:
+
        /*
         * 8. Now restore the virtual mode from flat mode by
         * adding EIP with PAGE_OFFSET.
         */
-       movl    $1f, %edx
-       jmp     *%edx
+#ifdef CONFIG_PAX_KERNEXEC
+       movl    $(__KERNEL_DS), %edx
+       mov     %edx, %ds
+       mov     %edx, %es
+       mov     %edx, %ss
+       ljmp    $(__KERNEL_CS),$1f
+#else
+       jmp     1f+__PAGE_OFFSET
+#endif
 1:
 
        /*
         * 9. Balance the stack. And because EAX contain the return value,
         * we'd better not clobber it.
         */
-       leal    efi_rt_function_ptr, %edx
-       movl    (%edx), %ecx
-       pushl   %ecx
+       pushl   (efi_rt_function_ptr)
 
        /*
-        * 10. Push the saved return address onto the stack and return.
+        * 10. Return to the saved return address.
         */
-       leal    saved_return_addr, %edx
-       movl    (%edx), %ecx
-       pushl   %ecx
-       ret
+       jmpl    *(saved_return_addr)
 ENDPROC(efi_call_phys)
 .previous
 
-.data
+__INITDATA
 saved_return_addr:
        .long 0
 efi_rt_function_ptr:
index 86d0f9e08dd95eb1023d5ac7ec4fb006aafb72c9..6d499f49074c2f6dc6245a06b827a77cd0f02c5b 100644 (file)
@@ -11,6 +11,7 @@
 #include <asm/msr.h>
 #include <asm/processor-flags.h>
 #include <asm/page_types.h>
+#include <asm/alternative-asm.h>
 
 #define SAVE_XMM                       \
        mov %rsp, %rax;                 \
@@ -88,6 +89,7 @@ ENTRY(efi_call)
        RESTORE_PGT
        addq $48, %rsp
        RESTORE_XMM
+       pax_force_retaddr 0, 1
        ret
 ENDPROC(efi_call)
 
index 1bbedc4b0f88d46bee5000779c4ef5aa4e4d0411..eb795b51b9db05e6f153c95b48a4e9624ca6ff5b 100644 (file)
@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
 {
 };
 
-static void intel_mid_reboot(void)
+static void __noreturn intel_mid_reboot(void)
 {
        intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
+       BUG();
 }
 
 static unsigned long __init intel_mid_calibrate_tsc(void)
index 3c1c3866d82b683334951d9151dcbfd522189388..59a68ed4cabc965751c00a45a621cb030c93991b 100644 (file)
@@ -13,6 +13,6 @@
 /* For every CPU addition a new get_<cpuname>_ops interface needs
  * to be added.
  */
-extern void *get_penwell_ops(void);
-extern void *get_cloverview_ops(void);
-extern void *get_tangier_ops(void);
+extern const void *get_penwell_ops(void);
+extern const void *get_cloverview_ops(void);
+extern const void *get_tangier_ops(void);
index 23381d2174ae1d4d2795ebf9a568ff7e43cf8876..8ddc10ee292ec7cb35392af2bbbe4e7cf6e415ad 100644 (file)
@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
        pm_power_off = mfld_power_off;
 }
 
-void *get_penwell_ops(void)
+const void *get_penwell_ops(void)
 {
        return &penwell_ops;
 }
 
-void *get_cloverview_ops(void)
+const void *get_cloverview_ops(void)
 {
        return &penwell_ops;
 }
index aaca91753d3267b94a98af40f7b66f8b5cbbb21c..66eadbc09ce1db7f3dd221aec84c6a10e738a8e3 100644 (file)
@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
        .arch_setup = tangier_arch_setup,
 };
 
-void *get_tangier_ops(void)
+const void *get_tangier_ops(void)
 {
        return &tangier_ops;
 }
index d6ee9298692006115edb0c8e7124e7e24d4f9edc..3637cb584c4a50a0cb51ff1416587ae685151499 100644 (file)
@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
        return res;
 }
 
-static struct of_pdt_ops prom_olpc_ops __initdata = {
+static struct of_pdt_ops prom_olpc_ops __initconst = {
        .nextprop = olpc_dt_nextprop,
        .getproplen = olpc_dt_getproplen,
        .getproperty = olpc_dt_getproperty,
index 6ec7910f59bfe2e0b831eedd772b36a5c5cebc35..ecdbb113e3a079acaadc0bbd3d623758ab880493 100644 (file)
@@ -137,11 +137,8 @@ static void do_fpu_end(void)
 static void fix_processor_context(void)
 {
        int cpu = smp_processor_id();
-       struct tss_struct *t = &per_cpu(init_tss, cpu);
-#ifdef CONFIG_X86_64
-       struct desc_struct *desc = get_cpu_gdt_table(cpu);
-       tss_desc tss;
-#endif
+       struct tss_struct *t = init_tss + cpu;
+
        set_tss_desc(cpu, t);   /*
                                 * This just modifies memory; should not be
                                 * necessary. But... This is necessary, because
@@ -150,10 +147,6 @@ static void fix_processor_context(void)
                                 */
 
 #ifdef CONFIG_X86_64
-       memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
-       tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
-       write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
-
        syscall_init();                         /* This sets MSR_*STAR and related */
 #endif
        load_TR_desc();                         /* This does ltr */
index bad628a620c4ef7647986bf22f63402b7d451be2..a102610e28eba80c66424e015850050e9d3645eb 100644 (file)
@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
                __va(real_mode_header->trampoline_header);
 
 #ifdef CONFIG_X86_32
-       trampoline_header->start = __pa_symbol(startup_32_smp);
+       trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
+
+#ifdef CONFIG_PAX_KERNEXEC
+       trampoline_header->start -= LOAD_PHYSICAL_ADDR;
+#endif
+
+       trampoline_header->boot_cs = __BOOT_CS;
        trampoline_header->gdt_limit = __BOOT_DS + 7;
        trampoline_header->gdt_base = __pa_symbol(boot_gdt);
 #else
@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
        *trampoline_cr4_features = read_cr4();
 
        trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
-       trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
+       trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
        trampoline_pgd[511] = init_level4_pgt[511].pgd;
 #endif
 }
index 7c0d7be176a5843932cb3d8b84ebae04e5ac4827..d24dc887f8b245293c5eb844385305a5151053d5 100644 (file)
@@ -67,5 +67,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
 
 KBUILD_CFLAGS  := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
                   -I$(srctree)/arch/x86/boot
+ifdef CONSTIFY_PLUGIN
+KBUILD_CFLAGS  += -fplugin-arg-constify_plugin-no-constify
+endif
 KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
index a28221d94e69eb60fc18aab9c5997ed52a7a763f..93c40f1b8fdc96bfd08a73813a83bf980327e280 100644 (file)
@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
 #endif
        /* APM/BIOS reboot */
        .long   pa_machine_real_restart_asm
-#ifdef CONFIG_X86_64
+#ifdef CONFIG_X86_32
+       .long   __KERNEL_CS
+#else
        .long   __KERNEL32_CS
 #endif
 END(real_mode_header)
index 48ddd76bc4c3d202fe77740bda68a91edd8b1ee2..c26749f66f4c78242c841c7f569b691cc11082c1 100644 (file)
 #include <asm/page_types.h>
 #include "realmode.h"
 
+#ifdef CONFIG_PAX_KERNEXEC
+#define ta(X) (X)
+#else
+#define ta(X) (pa_ ## X)
+#endif
+
        .text
        .code16
 
@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
 
        cli                     # We should be safe anyway
 
-       movl    tr_start, %eax  # where we need to go
-
        movl    $0xA5A5A5A5, trampoline_status
                                # write marker for master knows we're running
 
@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
        movw    $1, %dx                 # protected mode (PE) bit
        lmsw    %dx                     # into protected mode
 
-       ljmpl   $__BOOT_CS, $pa_startup_32
+       ljmpl *(trampoline_header)
 
        .section ".text32","ax"
        .code32
@@ -66,7 +70,7 @@ ENTRY(startup_32)                     # note: also used from wakeup_asm.S
        .balign 8
 GLOBAL(trampoline_header)
        tr_start:               .space  4
-       tr_gdt_pad:             .space  2
+       tr_boot_cs:             .space  2
        tr_gdt:                 .space  6
 END(trampoline_header)
        
index dac7b20d2f9de40f0244f623e8560c304394c178..72dbaca0d0da01c0e05db449e8f4aee6c555e053 100644 (file)
@@ -93,6 +93,7 @@ ENTRY(startup_32)
        movl    %edx, %gs
 
        movl    pa_tr_cr4, %eax
+       andl    $~X86_CR4_PCIDE, %eax
        movl    %eax, %cr4              # Enable PAE mode
 
        # Setup trampoline 4 level pagetables
@@ -106,7 +107,7 @@ ENTRY(startup_32)
        wrmsr
 
        # Enable paging and in turn activate Long Mode
-       movl    $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
+       movl    $(X86_CR0_PG | X86_CR0_PE), %eax
        movl    %eax, %cr0
 
        /*
index 9e7e14797a72dda0b85d4c70afca61fdb2fdf92e..25a4158e3ae37f92444c591b593014d19d1d22c0 100644 (file)
@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
        lgdtl   pmode_gdt
 
        /* This really couldn't... */
-       movl    pmode_entry, %eax
        movl    pmode_cr0, %ecx
        movl    %ecx, %cr0
-       ljmpl   $__KERNEL_CS, $pa_startup_32
-       /* -> jmp *%eax in trampoline_32.S */
+
+       ljmpl   *pmode_entry
 #else
        jmp     trampoline_start
 #endif
index 604a37efd4d5fd10ae54a1455a98e608a963f6d7..e49702a20f24c2dd9c14a3eb6a430e5dea89bdc1 100644 (file)
@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
 
 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
 
-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
 hostprogs-y    += relocs
 relocs-objs     := relocs_32.o relocs_64.o relocs_common.o
 PHONY += relocs
index 0c2fae8d929df154ff0324994df5f5341f2d4c52..88036b7798a96bd2aedda9435fc6d55480c8a6c5 100644 (file)
@@ -1,5 +1,7 @@
 /* This is included from relocs_32/64.c */
 
+#include "../../../include/generated/autoconf.h"
+
 #define ElfW(type)             _ElfW(ELF_BITS, type)
 #define _ElfW(bits, type)      __ElfW(bits, type)
 #define __ElfW(bits, type)     Elf##bits##_##type
@@ -11,6 +13,7 @@
 #define Elf_Sym                        ElfW(Sym)
 
 static Elf_Ehdr ehdr;
+static Elf_Phdr *phdr;
 
 struct relocs {
        uint32_t        *offset;
@@ -386,9 +389,39 @@ static void read_ehdr(FILE *fp)
        }
 }
 
+static void read_phdrs(FILE *fp)
+{
+       unsigned int i;
+
+       phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
+       if (!phdr) {
+               die("Unable to allocate %d program headers\n",
+                   ehdr.e_phnum);
+       }
+       if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
+               die("Seek to %d failed: %s\n",
+                       ehdr.e_phoff, strerror(errno));
+       }
+       if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
+               die("Cannot read ELF program headers: %s\n",
+                       strerror(errno));
+       }
+       for(i = 0; i < ehdr.e_phnum; i++) {
+               phdr[i].p_type      = elf_word_to_cpu(phdr[i].p_type);
+               phdr[i].p_offset    = elf_off_to_cpu(phdr[i].p_offset);
+               phdr[i].p_vaddr     = elf_addr_to_cpu(phdr[i].p_vaddr);
+               phdr[i].p_paddr     = elf_addr_to_cpu(phdr[i].p_paddr);
+               phdr[i].p_filesz    = elf_word_to_cpu(phdr[i].p_filesz);
+               phdr[i].p_memsz     = elf_word_to_cpu(phdr[i].p_memsz);
+               phdr[i].p_flags     = elf_word_to_cpu(phdr[i].p_flags);
+               phdr[i].p_align     = elf_word_to_cpu(phdr[i].p_align);
+       }
+
+}
+
 static void read_shdrs(FILE *fp)
 {
-       int i;
+       unsigned int i;
        Elf_Shdr shdr;
 
        secs = calloc(ehdr.e_shnum, sizeof(struct section));
@@ -423,7 +456,7 @@ static void read_shdrs(FILE *fp)
 
 static void read_strtabs(FILE *fp)
 {
-       int i;
+       unsigned int i;
        for (i = 0; i < ehdr.e_shnum; i++) {
                struct section *sec = &secs[i];
                if (sec->shdr.sh_type != SHT_STRTAB) {
@@ -448,7 +481,7 @@ static void read_strtabs(FILE *fp)
 
 static void read_symtabs(FILE *fp)
 {
-       int i,j;
+       unsigned int i,j;
        for (i = 0; i < ehdr.e_shnum; i++) {
                struct section *sec = &secs[i];
                if (sec->shdr.sh_type != SHT_SYMTAB) {
@@ -479,9 +512,11 @@ static void read_symtabs(FILE *fp)
 }
 
 
-static void read_relocs(FILE *fp)
+static void read_relocs(FILE *fp, int use_real_mode)
 {
-       int i,j;
+       unsigned int i,j;
+       uint32_t base;
+
        for (i = 0; i < ehdr.e_shnum; i++) {
                struct section *sec = &secs[i];
                if (sec->shdr.sh_type != SHT_REL_TYPE) {
@@ -501,9 +536,22 @@ static void read_relocs(FILE *fp)
                        die("Cannot read symbol table: %s\n",
                                strerror(errno));
                }
+               base = 0;
+
+#ifdef CONFIG_X86_32
+               for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
+                       if (phdr[j].p_type != PT_LOAD )
+                               continue;
+                       if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
+                               continue;
+                       base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
+                       break;
+               }
+#endif
+
                for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
                        Elf_Rel *rel = &sec->reltab[j];
-                       rel->r_offset = elf_addr_to_cpu(rel->r_offset);
+                       rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
                        rel->r_info   = elf_xword_to_cpu(rel->r_info);
 #if (SHT_REL_TYPE == SHT_RELA)
                        rel->r_addend = elf_xword_to_cpu(rel->r_addend);
@@ -515,7 +563,7 @@ static void read_relocs(FILE *fp)
 
 static void print_absolute_symbols(void)
 {
-       int i;
+       unsigned int i;
        const char *format;
 
        if (ELF_BITS == 64)
@@ -528,7 +576,7 @@ static void print_absolute_symbols(void)
        for (i = 0; i < ehdr.e_shnum; i++) {
                struct section *sec = &secs[i];
                char *sym_strtab;
-               int j;
+               unsigned int j;
 
                if (sec->shdr.sh_type != SHT_SYMTAB) {
                        continue;
@@ -555,7 +603,7 @@ static void print_absolute_symbols(void)
 
 static void print_absolute_relocs(void)
 {
-       int i, printed = 0;
+       unsigned int i, printed = 0;
        const char *format;
 
        if (ELF_BITS == 64)
@@ -568,7 +616,7 @@ static void print_absolute_relocs(void)
                struct section *sec_applies, *sec_symtab;
                char *sym_strtab;
                Elf_Sym *sh_symtab;
-               int j;
+               unsigned int j;
                if (sec->shdr.sh_type != SHT_REL_TYPE) {
                        continue;
                }
@@ -645,13 +693,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
                        Elf_Sym *sym, const char *symname))
 {
-       int i;
+       unsigned int i;
        /* Walk through the relocations */
        for (i = 0; i < ehdr.e_shnum; i++) {
                char *sym_strtab;
                Elf_Sym *sh_symtab;
                struct section *sec_applies, *sec_symtab;
-               int j;
+               unsigned int j;
                struct section *sec = &secs[i];
 
                if (sec->shdr.sh_type != SHT_REL_TYPE) {
@@ -830,6 +878,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
 {
        unsigned r_type = ELF32_R_TYPE(rel->r_info);
        int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
+       char *sym_strtab = sec->link->link->strtab;
+
+       /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
+       if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
+               return 0;
+
+#ifdef CONFIG_PAX_KERNEXEC
+       /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
+       if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
+               return 0;
+       if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
+               return 0;
+       if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
+               return 0;
+       if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
+               return 0;
+#endif
 
        switch (r_type) {
        case R_386_NONE:
@@ -968,7 +1033,7 @@ static int write32_as_text(uint32_t v, FILE *f)
 
 static void emit_relocs(int as_text, int use_real_mode)
 {
-       int i;
+       unsigned int i;
        int (*write_reloc)(uint32_t, FILE *) = write32;
        int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
                        const char *symname);
@@ -1078,10 +1143,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
 {
        regex_init(use_real_mode);
        read_ehdr(fp);
+       read_phdrs(fp);
        read_shdrs(fp);
        read_strtabs(fp);
        read_symtabs(fp);
-       read_relocs(fp);
+       read_relocs(fp, use_real_mode);
        if (ELF_BITS == 64)
                percpu_init();
        if (show_absolute_syms) {
index f40281e5d6a27430e5aa018dba8861bcf4a208b6..92728c9f172bdd105be195b57bfd062c48eba3e9 100644 (file)
@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
-       gate_vma.vm_page_prot = __P101;
+       gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
 
        return 0;
 }
index 80ffa5b9982dd68cfe5537a40c06d7b467bf665b..a33bd15102d1d1f797e6365885f846a44dc09da1 100644 (file)
@@ -260,7 +260,7 @@ out:
        if (unlikely(task == current &&
                     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
                printk(KERN_ERR "get_tls_entry: task with pid %d got here "
-                               "without flushed TLS.", current->pid);
+                               "without flushed TLS.", task_pid_nr(current));
        }
 
        return 0;
index 5a4affe025e81e39df53f40b750c2234baf52844..9e2d5220011b90c555af78801f669bf0de61c16b 100644 (file)
@@ -174,7 +174,7 @@ quiet_cmd_vdso = VDSO    $@
                       -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
                 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 
-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
        $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
 GCOV_PROFILE := n
 
index 0224987556ce80bd606063b56ef124b0857a3f44..c7d65a5db15058f06f6f8a7ffec224d04df442ea 100644 (file)
@@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
        unsigned long load_size = -1;  /* Work around bogus warning */
        unsigned long mapping_size;
        ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
-       int i;
+       unsigned int i;
        unsigned long j;
        ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
                *alt_sec = NULL;
index e904c270573bf58054d5fd83f2b9c77b0ee81347..b9eaa036c5a2f1af6d6a023d4700d80d7f55dac5 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/cpufeature.h>
 #include <asm/processor.h>
 #include <asm/vdso.h>
+#include <asm/mman.h>
 
 #ifdef CONFIG_COMPAT_VDSO
 #define VDSO_DEFAULT   0
index 1c9f750c38592c7278c95d7f8dbe6e1a88835c0c..cfddb1a587a25b244caac25211a578a9c0d81d19 100644 (file)
 #include <asm/page.h>
 #include <asm/hpet.h>
 #include <asm/desc.h>
-
-#if defined(CONFIG_X86_64)
-unsigned int __read_mostly vdso64_enabled = 1;
-#endif
+#include <asm/mman.h>
 
 void __init init_vdso_image(const struct vdso_image *image)
 {
@@ -101,6 +98,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
                .pages = no_pages,
        };
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (mm->pax_flags & MF_PAX_RANDMMAP)
+               calculate_addr = false;
+#endif
+
        if (calculate_addr) {
                addr = vdso_addr(current->mm->start_stack,
                                 image->size - image->sym_vvar_start);
@@ -111,14 +113,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
        down_write(&mm->mmap_sem);
 
        addr = get_unmapped_area(NULL, addr,
-                                image->size - image->sym_vvar_start, 0, 0);
+                                image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }
 
        text_start = addr - image->sym_vvar_start;
-       current->mm->context.vdso = (void __user *)text_start;
+       mm->context.vdso = text_start;
 
        /*
         * MAYWRITE to allow gdb to COW and set breakpoints
@@ -163,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
                        hpet_address >> PAGE_SHIFT,
                        PAGE_SIZE,
                        pgprot_noncached(PAGE_READONLY));
-
-               if (ret)
-                       goto up_fail;
        }
 #endif
 
 up_fail:
        if (ret)
-               current->mm->context.vdso = NULL;
+               current->mm->context.vdso = 0;
 
        up_write(&mm->mmap_sem);
        return ret;
@@ -191,8 +190,8 @@ static int load_vdso32(void)
 
        if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
                current_thread_info()->sysenter_return =
-                       current->mm->context.vdso +
-                       selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
+                       (void __force_user *)(current->mm->context.vdso +
+                       selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
 
        return 0;
 }
@@ -201,9 +200,6 @@ static int load_vdso32(void)
 #ifdef CONFIG_X86_64
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
-       if (!vdso64_enabled)
-               return 0;
-
        return map_vdso(&vdso_image_64, true);
 }
 
@@ -212,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
                                       int uses_interp)
 {
 #ifdef CONFIG_X86_X32_ABI
-       if (test_thread_flag(TIF_X32)) {
-               if (!vdso64_enabled)
-                       return 0;
-
+       if (test_thread_flag(TIF_X32))
                return map_vdso(&vdso_image_x32, true);
-       }
 #endif
 
        return load_vdso32();
@@ -230,15 +222,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 }
 #endif
 
-#ifdef CONFIG_X86_64
-static __init int vdso_setup(char *s)
-{
-       vdso64_enabled = simple_strtoul(s, NULL, 0);
-       return 0;
-}
-__setup("vdso=", vdso_setup);
-#endif
-
 #ifdef CONFIG_X86_64
 static void vgetcpu_cpu_init(void *arg)
 {
index e88fda867a33b198bc356aded57d59f48fcfb4ee..76ce7ce4f442f934954d96b47ba5c4bc05b29219 100644 (file)
@@ -9,6 +9,7 @@ config XEN
        select XEN_HAVE_PVMMU
        depends on X86_64 || (X86_32 && X86_PAE)
        depends on X86_TSC
+       depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
        help
          This is the Linux Xen port.  Enabling this will allow the
          kernel to boot in a paravirtualized environment under the
index 78a881b7fc415e16f50f16e4381e968370f4fd9f..9994bbba8886685ece771517e35ac8a00eaffef5 100644 (file)
@@ -125,8 +125,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
 
 struct shared_info xen_dummy_shared_info;
 
-void *xen_initial_gdt;
-
 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
 __read_mostly int xen_have_vector_callback;
 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
@@ -544,8 +542,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
 {
        unsigned long va = dtr->address;
        unsigned int size = dtr->size + 1;
-       unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
-       unsigned long frames[pages];
+       unsigned long frames[65536 / PAGE_SIZE];
        int f;
 
        /*
@@ -593,8 +590,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
 {
        unsigned long va = dtr->address;
        unsigned int size = dtr->size + 1;
-       unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
-       unsigned long frames[pages];
+       unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
        int f;
 
        /*
@@ -602,7 +598,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
         * 8-byte entries, or 16 4k pages..
         */
 
-       BUG_ON(size > 65536);
+       BUG_ON(size > GDT_SIZE);
        BUG_ON(va & ~PAGE_MASK);
 
        for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
@@ -991,7 +987,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
         return 0;
 }
 
-static void set_xen_basic_apic_ops(void)
+static void __init set_xen_basic_apic_ops(void)
 {
        apic->read = xen_apic_read;
        apic->write = xen_apic_write;
@@ -1291,30 +1287,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
 #endif
 };
 
-static void xen_reboot(int reason)
+static __noreturn void xen_reboot(int reason)
 {
        struct sched_shutdown r = { .reason = reason };
 
-       if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
-               BUG();
+       HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
+       BUG();
 }
 
-static void xen_restart(char *msg)
+static __noreturn void xen_restart(char *msg)
 {
        xen_reboot(SHUTDOWN_reboot);
 }
 
-static void xen_emergency_restart(void)
+static __noreturn void xen_emergency_restart(void)
 {
        xen_reboot(SHUTDOWN_reboot);
 }
 
-static void xen_machine_halt(void)
+static __noreturn void xen_machine_halt(void)
 {
        xen_reboot(SHUTDOWN_poweroff);
 }
 
-static void xen_machine_power_off(void)
+static __noreturn void xen_machine_power_off(void)
 {
        if (pm_power_off)
                pm_power_off();
@@ -1467,8 +1463,11 @@ static void __ref xen_setup_gdt(int cpu)
        pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
        pv_cpu_ops.load_gdt = xen_load_gdt_boot;
 
-       setup_stack_canary_segment(0);
-       switch_to_new_gdt(0);
+       setup_stack_canary_segment(cpu);
+#ifdef CONFIG_X86_64
+       load_percpu_segment(cpu);
+#endif
+       switch_to_new_gdt(cpu);
 
        pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
        pv_cpu_ops.load_gdt = xen_load_gdt;
@@ -1583,7 +1582,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
        __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
 
        /* Work out if we support NX */
-       x86_configure_nx();
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+       if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
+           (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
+               unsigned l, h;
+
+               __supported_pte_mask |= _PAGE_NX;
+               rdmsr(MSR_EFER, l, h);
+               l |= EFER_NX;
+               wrmsr(MSR_EFER, l, h);
+       }
+#endif
 
        /* Get mfn list */
        xen_build_dynamic_phys_to_machine();
@@ -1611,13 +1620,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
 
        machine_ops = xen_machine_ops;
 
-       /*
-        * The only reliable way to retain the initial address of the
-        * percpu gdt_page is to remember it here, so we can go and
-        * mark it RW later, when the initial percpu area is freed.
-        */
-       xen_initial_gdt = &per_cpu(gdt_page, 0);
-
        xen_smp_init();
 
 #ifdef CONFIG_ACPI_NUMA
index 5c1f9ace7ae7cd65798650e71c1cb57ad4807643..0e15f5cffef597ac86b9aea7299dce4cde2452db 100644 (file)
@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
        return val;
 }
 
-static pteval_t pte_pfn_to_mfn(pteval_t val)
+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
 {
        if (val & _PAGE_PRESENT) {
                unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
@@ -1836,7 +1836,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
                 * L3_k[511] -> level2_fixmap_pgt */
                convert_pfn_mfn(level3_kernel_pgt);
 
+               convert_pfn_mfn(level3_vmalloc_start_pgt);
+               convert_pfn_mfn(level3_vmalloc_end_pgt);
+               convert_pfn_mfn(level3_vmemmap_pgt);
                /* L3_k[511][506] -> level1_fixmap_pgt */
+               /* L3_k[511][507] -> level1_vsyscall_pgt */
                convert_pfn_mfn(level2_fixmap_pgt);
        }
        /* We get [511][511] and have Xen's version of level2_kernel_pgt */
@@ -1861,11 +1865,16 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
                set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
                set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
                set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
+               set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
+               set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
+               set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
                set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
                set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
+               set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
                set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
                set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
                set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
+               set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
 
                /* Pin down new L4 */
                pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
@@ -2049,6 +2058,7 @@ static void __init xen_post_allocator_init(void)
        pv_mmu_ops.set_pud = xen_set_pud;
 #if PAGETABLE_LEVELS == 4
        pv_mmu_ops.set_pgd = xen_set_pgd;
+       pv_mmu_ops.set_pgd_batched = xen_set_pgd;
 #endif
 
        /* This will work as long as patching hasn't happened yet
@@ -2127,6 +2137,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
        .pud_val = PV_CALLEE_SAVE(xen_pud_val),
        .make_pud = PV_CALLEE_SAVE(xen_make_pud),
        .set_pgd = xen_set_pgd_hyper,
+       .set_pgd_batched = xen_set_pgd_hyper,
 
        .alloc_pud = xen_alloc_pmd_init,
        .release_pud = xen_release_pmd_init,
index 4c071aeb8417bb419a45632c7d9f9a5c96bf3e76..00e70498d1ae89990b69e941c33cd0583d7e278f 100644 (file)
@@ -288,17 +288,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
 
        if (xen_pv_domain()) {
                if (!xen_feature(XENFEAT_writable_page_tables))
-                       /* We've switched to the "real" per-cpu gdt, so make
-                        * sure the old memory can be recycled. */
-                       make_lowmem_page_readwrite(xen_initial_gdt);
-
 #ifdef CONFIG_X86_32
                /*
                 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
                 * expects __USER_DS
                 */
-               loadsegment(ds, __USER_DS);
-               loadsegment(es, __USER_DS);
+               loadsegment(ds, __KERNEL_DS);
+               loadsegment(es, __KERNEL_DS);
 #endif
 
                xen_filter_cpu_maps();
@@ -379,7 +375,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 #ifdef CONFIG_X86_32
        /* Note: PVH is not yet supported on x86_32. */
        ctxt->user_regs.fs = __KERNEL_PERCPU;
-       ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
+       savesegment(gs, ctxt->user_regs.gs);
 #endif
        memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
 
@@ -387,8 +383,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
                ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
                ctxt->flags = VGCF_IN_KERNEL;
                ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
-               ctxt->user_regs.ds = __USER_DS;
-               ctxt->user_regs.es = __USER_DS;
+               ctxt->user_regs.ds = __KERNEL_DS;
+               ctxt->user_regs.es = __KERNEL_DS;
                ctxt->user_regs.ss = __KERNEL_DS;
 
                xen_copy_trap_info(ctxt->trap_ctxt);
@@ -446,14 +442,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
        int rc;
 
        per_cpu(current_task, cpu) = idle;
+       per_cpu(current_tinfo, cpu) = &idle->tinfo;
 #ifdef CONFIG_X86_32
        irq_ctx_init(cpu);
 #else
        clear_tsk_thread_flag(idle, TIF_FORK);
 #endif
-       per_cpu(kernel_stack, cpu) =
-               (unsigned long)task_stack_page(idle) -
-               KERNEL_STACK_OFFSET + THREAD_SIZE;
+       per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
 
        xen_setup_runstate_info(cpu);
        xen_setup_timer(cpu);
@@ -732,7 +727,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
 
 void __init xen_smp_init(void)
 {
-       smp_ops = xen_smp_ops;
+       memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
        xen_fill_possible_map();
 }
 
index fd92a64d748e1ca1a3838fe3c26f977f41a8b984..1f7264171a26750510075ae25ead06ba86236f39 100644 (file)
@@ -99,7 +99,7 @@ ENTRY(xen_iret)
        pushw %fs
        movl $(__KERNEL_PERCPU), %eax
        movl %eax, %fs
-       movl %fs:xen_vcpu, %eax
+       mov PER_CPU_VAR(xen_vcpu), %eax
        POP_FS
 #else
        movl %ss:xen_vcpu, %eax
index 674b222544b78dde6c3b737a767500e5f19627b3..f1f5dc1029657df65c161f3c7dfcb4d97d7d62de 100644 (file)
@@ -39,6 +39,17 @@ ENTRY(startup_xen)
 #ifdef CONFIG_X86_32
        mov %esi,xen_start_info
        mov $init_thread_union+THREAD_SIZE,%esp
+#ifdef CONFIG_SMP
+       movl $cpu_gdt_table,%edi
+       movl $__per_cpu_load,%eax
+       movw %ax,__KERNEL_PERCPU + 2(%edi)
+       rorl $16,%eax
+       movb %al,__KERNEL_PERCPU + 4(%edi)
+       movb %ah,__KERNEL_PERCPU + 7(%edi)
+       movl $__per_cpu_end - 1,%eax
+       subl $__per_cpu_start,%eax
+       movw %ax,__KERNEL_PERCPU + 0(%edi)
+#endif
 #else
        mov %rsi,xen_start_info
        mov $init_thread_union+THREAD_SIZE,%rsp
index 5686bd9d58ccc7051720c218cb9fda63bd9b73f8..0c8b6ee320e6c995b0174a54ad6808d4a5a85394 100644 (file)
@@ -10,8 +10,6 @@
 extern const char xen_hypervisor_callback[];
 extern const char xen_failsafe_callback[];
 
-extern void *xen_initial_gdt;
-
 struct trap_info;
 void xen_copy_trap_info(struct trap_info *traps);
 
index 525bd3d9015495b7ec0af3548c805b4101381f18..ef888b1c4572c54231575c088fd4aeddddf6b25d 100644 (file)
   ----------------------------------------------------------------------*/
 
 #define XCHAL_ICACHE_LINESIZE          32      /* I-cache line size in bytes */
-#define XCHAL_DCACHE_LINESIZE          32      /* D-cache line size in bytes */
 #define XCHAL_ICACHE_LINEWIDTH         5       /* log2(I line size in bytes) */
 #define XCHAL_DCACHE_LINEWIDTH         5       /* log2(D line size in bytes) */
+#define XCHAL_DCACHE_LINESIZE          (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH)   /* D-cache line size in bytes */
 
 #define XCHAL_ICACHE_SIZE              16384   /* I-cache size in bytes or 0 */
 #define XCHAL_DCACHE_SIZE              16384   /* D-cache size in bytes or 0 */
index 2f337605c7445761ddca9685ea8c8f60f5b9019c..835e50a881f95e3fce39762a205eef3c6a097299 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef _XTENSA_CORE_H
 #define _XTENSA_CORE_H
 
+#include <linux/const.h>
 
 /****************************************************************************
            Parameters Useful for Any Code, USER or PRIVILEGED
   ----------------------------------------------------------------------*/
 
 #define XCHAL_ICACHE_LINESIZE          16      /* I-cache line size in bytes */
-#define XCHAL_DCACHE_LINESIZE          16      /* D-cache line size in bytes */
 #define XCHAL_ICACHE_LINEWIDTH         4       /* log2(I line size in bytes) */
 #define XCHAL_DCACHE_LINEWIDTH         4       /* log2(D line size in bytes) */
+#define XCHAL_DCACHE_LINESIZE          (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
 
 #define XCHAL_ICACHE_SIZE              8192    /* I-cache size in bytes or 0 */
 #define XCHAL_DCACHE_SIZE              8192    /* D-cache size in bytes or 0 */
index 471d7382c7d17b7f8929b827b217650a93d67e84..bd3da0de437a519da01d3a19320c32b36c4759c2 100644 (file)
@@ -1169,7 +1169,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
                /*
                 * Overflow, abort
                 */
-               if (end < start)
+               if (end < start || end - start > INT_MAX - nr_pages)
                        return ERR_PTR(-EINVAL);
 
                nr_pages += end - start;
@@ -1303,7 +1303,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
                /*
                 * Overflow, abort
                 */
-               if (end < start)
+               if (end < start || end - start > INT_MAX - nr_pages)
                        return ERR_PTR(-EINVAL);
 
                nr_pages += end - start;
@@ -1565,7 +1565,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
        const int read = bio_data_dir(bio) == READ;
        struct bio_map_data *bmd = bio->bi_private;
        int i;
-       char *p = bmd->sgvecs[0].iov_base;
+       char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
 
        bio_for_each_segment_all(bvec, bio, i) {
                char *addr = page_address(bvec->bv_page);
index 0736729d64941cf95cb7bbb7cc13016aede5af83..2ec3b48ee7ad00aa38f7baaf08c2b756ee31248d 100644 (file)
@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
 }
 EXPORT_SYMBOL(blk_iopoll_complete);
 
-static void blk_iopoll_softirq(struct softirq_action *h)
+static __latent_entropy void blk_iopoll_softirq(void)
 {
        struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
        int rearm = 0, budget = blk_iopoll_budget;
index f890d4345b0cb63f9faa88e70d466a3cec3e6b3f..97b04826f9f458ec200ba86aef28ecb55efafe37 100644 (file)
@@ -300,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
        if (!len || !kbuf)
                return -EINVAL;
 
-       do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
+       do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
        if (do_copy)
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
index 53b1737e978d584878f3dae13352e17a8aed1f06..08177d2e7c6ba0cb0dc35efe509cf6e5bfa2bbc9 100644 (file)
@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
  * Softirq action handler - move entries to local list and loop over them
  * while passing them to the queue registered handler.
  */
-static void blk_done_softirq(struct softirq_action *h)
+static __latent_entropy void blk_done_softirq(void)
 {
        struct list_head *cpu_list, local_list;
 
index 276e869e686cbbdb39112f19b163e353f0ddc3ef..6fe4c61a518c1c098c043b468c201140eaf553ab 100644 (file)
@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
                                struct sg_io_v4 *hdr, struct bsg_device *bd,
                                fmode_t has_write_perm)
 {
+       unsigned char tmpcmd[sizeof(rq->__cmd)];
+       unsigned char *cmdptr;
+
        if (hdr->request_len > BLK_MAX_CDB) {
                rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
                if (!rq->cmd)
                        return -ENOMEM;
-       }
+               cmdptr = rq->cmd;
+       } else
+               cmdptr = tmpcmd;
 
-       if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
+       if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
                           hdr->request_len))
                return -EFAULT;
 
+       if (cmdptr != rq->cmd)
+               memcpy(rq->cmd, cmdptr, hdr->request_len);
+
        if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
                if (blk_verify_command(rq->cmd, has_write_perm))
                        return -EPERM;
index f678c733df404189bdff2a87a01225c6aa124816..f35aa18c13b6e431e382f0a2c0f531b6b25475f5 100644 (file)
@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
        cgc = compat_alloc_user_space(sizeof(*cgc));
        cgc32 = compat_ptr(arg);
 
-       if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
+       if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
            get_user(data, &cgc32->buffer) ||
            put_user(compat_ptr(data), &cgc->buffer) ||
            copy_in_user(&cgc->buflen, &cgc32->buflen,
@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
                err |= __get_user(f->spec1, &uf->spec1);
                err |= __get_user(f->fmt_gap, &uf->fmt_gap);
                err |= __get_user(name, &uf->name);
-               f->name = compat_ptr(name);
+               f->name = (void __force_kernel *)compat_ptr(name);
                if (err) {
                        err = -EFAULT;
                        goto out;
index 0a536dc05f3b559d6d04c1e819d65290f96f7c35..b8f7aca52f0c19c3f11ad233192b9f7083bb5d3c 100644 (file)
@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
 
 /*
  * Register device numbers dev..(dev+range-1)
- * range must be nonzero
+ * Noop if @range is zero.
  * The hash chain is sorted on range, so that subranges can override.
  */
 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
                         struct kobject *(*probe)(dev_t, int *, void *),
                         int (*lock)(dev_t, void *), void *data)
 {
-       kobj_map(bdev_map, devt, range, module, probe, lock, data);
+       if (range)
+               kobj_map(bdev_map, devt, range, module, probe, lock, data);
 }
 
 EXPORT_SYMBOL(blk_register_region);
 
+/* undo blk_register_region(), noop if @range is zero */
 void blk_unregister_region(dev_t devt, unsigned long range)
 {
-       kobj_unmap(bdev_map, devt, range);
+       if (range)
+               kobj_unmap(bdev_map, devt, range);
 }
 
 EXPORT_SYMBOL(blk_unregister_region);
index 56d08fd75b1a9511152eb120b6439f4afe01a00a..2e07090734abd934f2aa8693a28495dfa116a8d2 100644 (file)
@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
        if (!gpt)
                return NULL;
 
-       count = le32_to_cpu(gpt->num_partition_entries) *
-                le32_to_cpu(gpt->sizeof_partition_entry);
-       if (!count)
+       if (!le32_to_cpu(gpt->num_partition_entries))
                return NULL;
-       pte = kmalloc(count, GFP_KERNEL);
+       pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
        if (!pte)
                return NULL;
 
+       count = le32_to_cpu(gpt->num_partition_entries) *
+                le32_to_cpu(gpt->sizeof_partition_entry);
        if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
                        (u8 *) pte, count) < count) {
                kfree(pte);
index 28163fad3c5d8800661e2b2c8868300a99e162c2..07190a06c7816415b3821e35c34a677a25833124 100644 (file)
@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
        return put_user(0, p);
 }
 
-static int sg_get_timeout(struct request_queue *q)
+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
 {
        return jiffies_to_clock_t(q->sg_timeout);
 }
@@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
                             struct sg_io_hdr *hdr, fmode_t mode)
 {
-       if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
+       unsigned char tmpcmd[sizeof(rq->__cmd)];
+       unsigned char *cmdptr;
+
+       if (rq->cmd != rq->__cmd)
+               cmdptr = rq->cmd;
+       else
+               cmdptr = tmpcmd;
+
+       if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
                return -EFAULT;
+
+       if (cmdptr != rq->cmd)
+               memcpy(rq->cmd, cmdptr, hdr->cmd_len);
+
        if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
                return -EPERM;
 
@@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
        int err;
        unsigned int in_len, out_len, bytes, opcode, cmdlen;
        char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
+       unsigned char tmpcmd[sizeof(rq->__cmd)];
+       unsigned char *cmdptr;
 
        if (!sic)
                return -EINVAL;
@@ -469,9 +483,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
         */
        err = -EFAULT;
        rq->cmd_len = cmdlen;
-       if (copy_from_user(rq->cmd, sic->data, cmdlen))
+
+       if (rq->cmd != rq->__cmd)
+               cmdptr = rq->cmd;
+       else
+               cmdptr = tmpcmd;
+
+       if (copy_from_user(cmdptr, sic->data, cmdlen))
                goto error;
 
+       if (rq->cmd != cmdptr)
+               memcpy(rq->cmd, cmdptr, cmdlen);
+
        if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
                goto error;
 
index 650afac10fd78e5ba51a28d7161ee71bcd6f3175..f3307defa5c0ebb9666e9d363e86009a1dccf39c 100644 (file)
@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
 
 struct cryptd_blkcipher_request_ctx {
        crypto_completion_t complete;
-};
+} __no_const;
 
 struct cryptd_hash_ctx {
        struct crypto_shash *child;
@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
 
 struct cryptd_aead_request_ctx {
        crypto_completion_t complete;
-};
+} __no_const;
 
 static void cryptd_queue_worker(struct work_struct *work);
 
index c305d4112735cd3b7b18b477299b534b8c0e1b77..a96de79498eec0b1f3191d324f3842581965b711 100644 (file)
@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
        int ret;
 
        pinst->kobj.kset = pcrypt_kset;
-       ret = kobject_add(&pinst->kobj, NULL, name);
+       ret = kobject_add(&pinst->kobj, NULL, "%s", name);
        if (!ret)
                kobject_uevent(&pinst->kobj, KOBJ_ADD);
 
index 6921c7f3d208f290ed0a1db3a0ce72181bf6537f..78e1af7aeebcd5c12683980ab3da41aea6a719d9 100644 (file)
@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
 
 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
-       {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
-        acpi_hw_extended_sleep},
-       {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
-        acpi_hw_extended_wake_prep},
-       {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
+       {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
+        .extended_function = acpi_hw_extended_sleep},
+       {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
+        .extended_function = acpi_hw_extended_wake_prep},
+       {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
+        .extended_function = acpi_hw_extended_wake}
 };
 
 /*
index 16129c78b4891f0d9d9e2b9035748ad643cabb6a..8b675cd2bd88766fbdf6078f17049364e1a07bfa 100644 (file)
@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
 struct apei_exec_ins_type {
        u32 flags;
        apei_exec_ins_func_t run;
-};
+} __do_const;
 
 struct apei_exec_context {
        u32 ip;
index e82d0976a5d079bc3aa6b6f956d91d943a685f3b..0c855c1b3bc61452f4231af87f1a5fd86f1cc3e7 100644 (file)
@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
                                 const struct acpi_hest_generic *generic,
                                 const struct acpi_hest_generic_status *estatus)
 {
-       static atomic_t seqno;
+       static atomic_unchecked_t seqno;
        unsigned int curr_seqno;
        char pfx_seq[64];
 
@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
                else
                        pfx = KERN_ERR;
        }
-       curr_seqno = atomic_inc_return(&seqno);
+       curr_seqno = atomic_inc_return_unchecked(&seqno);
        snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
        printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
               pfx_seq, generic->header.source_id);
index a83e3c62c5a9d04e57a0fc644598d6e41d42c290..c3d617f8180fe2cfedadd9094f7dc7fc237c766f 100644 (file)
@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
        if (!bgrt_image)
                return -ENODEV;
 
-       bin_attr_image.private = bgrt_image;
-       bin_attr_image.size = bgrt_image_size;
+       pax_open_kernel();
+       *(void **)&bin_attr_image.private = bgrt_image;
+       *(size_t *)&bin_attr_image.size = bgrt_image_size;
+       pax_close_kernel();
 
        bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
        if (!bgrt_kobj)
index 9b693d54c743edbca512fc7b37086a809de9ccd7..8953d5417381894b7eeef83219bf8bcfd4364640 100644 (file)
@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
        u32 is_critical_error;
 };
 
-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
 
 /*
  * POLICY: If *anything* doesn't work, put it on the blacklist.
@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
        return 0;
 }
 
-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
        {
        .callback = dmi_disable_osi_vista,
        .ident = "Fujitsu Siemens",
index c68e72414a67a9b00231b095335945d5cdd2f31e..e86300825f417137fb72e16772c6a07cb88fd1a0 100644 (file)
@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
        struct acpi_table_header table;
        acpi_status status;
 
+#ifdef CONFIG_GRKERNSEC_KMEM
+       return -EPERM;
+#endif
+
        if (!(*ppos)) {
                /* parse the table header to get the table length */
                if (count <= sizeof(struct acpi_table_header))
index c0d44d394ca39c63f87f212f0345d0c05d9acdc4..5ad8f9a8279d58ae4a26013c5151471e7ff5924b 100644 (file)
@@ -1025,6 +1025,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
 
 #endif /* CONFIG_PM_SLEEP */
 
+static void acpi_dev_pm_detach(struct device *dev, bool power_off);
+
 static struct dev_pm_domain acpi_general_pm_domain = {
        .ops = {
 #ifdef CONFIG_PM
@@ -1043,6 +1045,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
 #endif
 #endif
        },
+       .detach = acpi_dev_pm_detach
 };
 
 /**
@@ -1112,7 +1115,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
                acpi_device_wakeup(adev, ACPI_STATE_S0, false);
        }
 
-       dev->pm_domain->detach = acpi_dev_pm_detach;
        return 0;
 }
 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
index 87b704e41877daa488eec962f545f32cf0d079fe..2d1d0c160869c32553b0b1b6e7a254f11bae85c8 100644 (file)
@@ -952,7 +952,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
 {
        int i, count = CPUIDLE_DRIVER_STATE_START;
        struct acpi_processor_cx *cx;
-       struct cpuidle_state *state;
+       cpuidle_state_no_const *state;
        struct cpuidle_driver *drv = &acpi_idle_driver;
 
        if (!pr->flags.power_setup_done)
index 13e577c80201bb1ce4341f9b99007e28d1a0a261..cef11ee2db55ed1c8c48de28ecb1701f46777b2e 100644 (file)
@@ -423,11 +423,11 @@ static u32 num_counters;
 static struct attribute **all_attrs;
 static u32 acpi_gpe_count;
 
-static struct attribute_group interrupt_stats_attr_group = {
+static attribute_group_no_const interrupt_stats_attr_group = {
        .name = "interrupts",
 };
 
-static struct kobj_attribute *counter_attrs;
+static kobj_attribute_no_const *counter_attrs;
 
 static void delete_gpe_attr_array(void)
 {
index 61a9c07e0dff5b277dba35cfa135bac449f9ce84..ea98fa1711319cf61c64ed2a7a459af95fc2c534 100644 (file)
@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
 }
 EXPORT_SYMBOL_GPL(ahci_kick_engine);
 
-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
                                struct ata_taskfile *tf, int is_cmd, u16 flags,
                                unsigned long timeout_msec)
 {
index d1a05f9bb91f239b6b24f115b6bade4694152698..eb70e10e92e54b306620f016c18a35c4b97e039a 100644 (file)
@@ -99,7 +99,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
 static void ata_dev_xfermask(struct ata_device *dev);
 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
 
-atomic_t ata_print_id = ATOMIC_INIT(0);
+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
 
 struct ata_force_param {
        const char      *name;
@@ -4831,7 +4831,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
        struct ata_port *ap;
        unsigned int tag;
 
-       WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+       BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
        ap = qc->ap;
 
        qc->flags = 0;
@@ -4847,7 +4847,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
        struct ata_port *ap;
        struct ata_link *link;
 
-       WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+       BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
        WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
        ap = qc->ap;
        link = qc->dev->link;
@@ -5951,6 +5951,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
                return;
 
        spin_lock(&lock);
+       pax_open_kernel();
 
        for (cur = ops->inherits; cur; cur = cur->inherits) {
                void **inherit = (void **)cur;
@@ -5964,8 +5965,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
                if (IS_ERR(*pp))
                        *pp = NULL;
 
-       ops->inherits = NULL;
+       *(struct ata_port_operations **)&ops->inherits = NULL;
 
+       pax_close_kernel();
        spin_unlock(&lock);
 }
 
@@ -6161,7 +6163,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 
        /* give ports names and add SCSI hosts */
        for (i = 0; i < host->n_ports; i++) {
-               host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
+               host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
                host->ports[i]->local_port_no = i + 1;
        }
 
index 6abd17a85b1369d4515302090c6eb57a2d1d088b..9961bf7810a2316e59c5e52588d13d8e6f3998af 100644 (file)
@@ -4169,7 +4169,7 @@ int ata_sas_port_init(struct ata_port *ap)
 
        if (rc)
                return rc;
-       ap->print_id = atomic_inc_return(&ata_print_id);
+       ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
        return 0;
 }
 EXPORT_SYMBOL_GPL(ata_sas_port_init);
index 5f4e0cca56ec5fdc2cf539e3052bd9e2d51bca76..ff2c347f7982388409b094b909d178705543ea57 100644 (file)
@@ -53,7 +53,7 @@ enum {
        ATA_DNXFER_QUIET        = (1 << 31),
 };
 
-extern atomic_t ata_print_id;
+extern atomic_unchecked_t ata_print_id;
 extern int atapi_passthru16;
 extern int libata_fua;
 extern int libata_noacpi;
index a9b0c820f2ebc7028cb933b7b021e42b3c6a7930..207d97db09cce7b3b137ae3b7780d46911db3f8c 100644 (file)
@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
        /* Handle platform specific quirks */
        if (quirk) {
                if (quirk & CF_BROKEN_PIO) {
-                       ap->ops->set_piomode = NULL;
+                       pax_open_kernel();
+                       *(void **)&ap->ops->set_piomode = NULL;
+                       pax_close_kernel();
                        ap->pio_mask = 0;
                }
                if (quirk & CF_BROKEN_MWDMA)
index f9b983ae68777544f5cc986f3e0e4e3b1f074179..887b9d89f2d05613169282515a4200be959117eb 100644 (file)
@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
                vcc->pop(vcc, skb);
        else
                dev_kfree_skb_any(skb);
-       atomic_inc(&vcc->stats->tx);
+       atomic_inc_unchecked(&vcc->stats->tx);
 
        return 0;
 }
index f1a9198dfe5a4966cbe7f433b357005501eb002a..f466a4a9b57952061fab81e9e9999feb2fb37c36 100644 (file)
@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
   PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
   
   // VC layer stats
-  atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
+  atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
   
   // free the descriptor
   kfree (tx_descr);
@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
          dump_skb ("<<<", vc, skb);
          
          // VC layer stats
-         atomic_inc(&atm_vcc->stats->rx);
+         atomic_inc_unchecked(&atm_vcc->stats->rx);
          __net_timestamp(skb);
          // end of our responsibility
          atm_vcc->push (atm_vcc, skb);
@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
       } else {
        PRINTK (KERN_INFO, "dropped over-size frame");
        // should we count this?
-       atomic_inc(&atm_vcc->stats->rx_drop);
+       atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
       }
       
     } else {
@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
   }
   
   if (check_area (skb->data, skb->len)) {
-    atomic_inc(&atm_vcc->stats->tx_err);
+    atomic_inc_unchecked(&atm_vcc->stats->tx_err);
     return -ENOMEM; // ?
   }
   
index 480fa6ffbc090a48ae32496ef788c8e63d92e6be..947067c9d6ea668ce4fa3baae1d84c1b9fd5edaf 100644 (file)
@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
                if (vcc->pop) vcc->pop(vcc,skb);
                else dev_kfree_skb(skb);
                if (dev_data) return 0;
-               atomic_inc(&vcc->stats->tx_err);
+               atomic_inc_unchecked(&vcc->stats->tx_err);
                return -ENOLINK;
        }
        size = skb->len+sizeof(struct atmtcp_hdr);
@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
        if (!new_skb) {
                if (vcc->pop) vcc->pop(vcc,skb);
                else dev_kfree_skb(skb);
-               atomic_inc(&vcc->stats->tx_err);
+               atomic_inc_unchecked(&vcc->stats->tx_err);
                return -ENOBUFS;
        }
        hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
        if (vcc->pop) vcc->pop(vcc,skb);
        else dev_kfree_skb(skb);
        out_vcc->push(out_vcc,new_skb);
-       atomic_inc(&vcc->stats->tx);
-       atomic_inc(&out_vcc->stats->rx);
+       atomic_inc_unchecked(&vcc->stats->tx);
+       atomic_inc_unchecked(&out_vcc->stats->rx);
        return 0;
 }
 
@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
        read_unlock(&vcc_sklist_lock);
        if (!out_vcc) {
                result = -EUNATCH;
-               atomic_inc(&vcc->stats->tx_err);
+               atomic_inc_unchecked(&vcc->stats->tx_err);
                goto done;
        }
        skb_pull(skb,sizeof(struct atmtcp_hdr));
@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
        __net_timestamp(new_skb);
        skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
        out_vcc->push(out_vcc,new_skb);
-       atomic_inc(&vcc->stats->tx);
-       atomic_inc(&out_vcc->stats->rx);
+       atomic_inc_unchecked(&vcc->stats->tx);
+       atomic_inc_unchecked(&out_vcc->stats->rx);
 done:
        if (vcc->pop) vcc->pop(vcc,skb);
        else dev_kfree_skb(skb);
index c7fab3ee14eef1e28af86e69a253925d30f491ea..68d0965e36600d40becda445502158423a323fba 100644 (file)
@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
                DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
                    vcc->dev->number);
                length = 0;
-               atomic_inc(&vcc->stats->rx_err);
+               atomic_inc_unchecked(&vcc->stats->rx_err);
        }
        else {
                length = ATM_CELL_SIZE-1; /* no HEC */
@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
                            size);
                }
                eff = length = 0;
-               atomic_inc(&vcc->stats->rx_err);
+               atomic_inc_unchecked(&vcc->stats->rx_err);
        }
        else {
                size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
                            "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
                            vcc->dev->number,vcc->vci,length,size << 2,descr);
                        length = eff = 0;
-                       atomic_inc(&vcc->stats->rx_err);
+                       atomic_inc_unchecked(&vcc->stats->rx_err);
                }
        }
        skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
@@ -770,7 +770,7 @@ rx_dequeued++;
                        vcc->push(vcc,skb);
                        pushed++;
                }
-               atomic_inc(&vcc->stats->rx);
+               atomic_inc_unchecked(&vcc->stats->rx);
        }
        wake_up(&eni_dev->rx_wait);
 }
@@ -1230,7 +1230,7 @@ static void dequeue_tx(struct atm_dev *dev)
                    PCI_DMA_TODEVICE);
                if (vcc->pop) vcc->pop(vcc,skb);
                else dev_kfree_skb_irq(skb);
-               atomic_inc(&vcc->stats->tx);
+               atomic_inc_unchecked(&vcc->stats->tx);
                wake_up(&eni_dev->tx_wait);
 dma_complete++;
        }
index 82f2ae0d7cc488a337772aaf8d7a19b3373bcd94..f205c026dedaf57ee4d27d917bae71b78d795504 100644 (file)
@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
                                }
                        }
 
-                       atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
+                       atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
 
                        fs_dprintk (FS_DEBUG_TXMEM, "i");
                        fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
 #endif
                                skb_put (skb, qe->p1 & 0xffff); 
                                ATM_SKB(skb)->vcc = atm_vcc;
-                               atomic_inc(&atm_vcc->stats->rx);
+                               atomic_inc_unchecked(&atm_vcc->stats->rx);
                                __net_timestamp(skb);
                                fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
                                atm_vcc->push (atm_vcc, skb);
@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
                                kfree (pe);
                        }
                        if (atm_vcc)
-                               atomic_inc(&atm_vcc->stats->rx_drop);
+                               atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
                        break;
                case 0x1f: /*  Reassembly abort: no buffers. */
                        /* Silently increment error counter. */
                        if (atm_vcc)
-                               atomic_inc(&atm_vcc->stats->rx_drop);
+                               atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
                        break;
                default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
                        printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n", 
index d5d9eafbbfcf1f0851b6db889ccb4edf8eebfadd..65c0d534937cd90e4b42970aadcf8dedd5edf35c 100644 (file)
@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
 #endif
                /* check error condition */
                if (*entry->status & STATUS_ERROR)
-                   atomic_inc(&vcc->stats->tx_err);
+                   atomic_inc_unchecked(&vcc->stats->tx_err);
                else
-                   atomic_inc(&vcc->stats->tx);
+                   atomic_inc_unchecked(&vcc->stats->tx);
            }
        }
 
@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
     if (skb == NULL) {
        DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
 
-       atomic_inc(&vcc->stats->rx_drop);
+       atomic_inc_unchecked(&vcc->stats->rx_drop);
        return -ENOMEM;
     } 
 
@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
 
        dev_kfree_skb_any(skb);
 
-       atomic_inc(&vcc->stats->rx_drop);
+       atomic_inc_unchecked(&vcc->stats->rx_drop);
        return -ENOMEM;
     }
 
     ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
 
     vcc->push(vcc, skb);
-    atomic_inc(&vcc->stats->rx);
+    atomic_inc_unchecked(&vcc->stats->rx);
 
     ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
 
@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
                DPRINTK(2, "damaged PDU on %d.%d.%d\n",
                        fore200e->atm_dev->number,
                        entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
-               atomic_inc(&vcc->stats->rx_err);
+               atomic_inc_unchecked(&vcc->stats->rx_err);
            }
        }
 
@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
                goto retry_here;
            }
 
-           atomic_inc(&vcc->stats->tx_err);
+           atomic_inc_unchecked(&vcc->stats->tx_err);
 
            fore200e->tx_sat++;
            DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
index c39702bc279d44f747ae24ae07958d2899ac9bcd..785b73b3e8665b7aeef61d5fd6b170e219ec8823 100644 (file)
@@ -1689,7 +1689,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
 
                if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
                        hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
-                               atomic_inc(&vcc->stats->rx_drop);
+                               atomic_inc_unchecked(&vcc->stats->rx_drop);
                        goto return_host_buffers;
                }
 
@@ -1716,7 +1716,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
                                RBRQ_LEN_ERR(he_dev->rbrq_head)
                                                        ? "LEN_ERR" : "",
                                                        vcc->vpi, vcc->vci);
-                       atomic_inc(&vcc->stats->rx_err);
+                       atomic_inc_unchecked(&vcc->stats->rx_err);
                        goto return_host_buffers;
                }
 
@@ -1768,7 +1768,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
                vcc->push(vcc, skb);
                spin_lock(&he_dev->global_lock);
 
-               atomic_inc(&vcc->stats->rx);
+               atomic_inc_unchecked(&vcc->stats->rx);
 
 return_host_buffers:
                ++pdus_assembled;
@@ -2094,7 +2094,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
                                        tpd->vcc->pop(tpd->vcc, tpd->skb);
                                else
                                        dev_kfree_skb_any(tpd->skb);
-                               atomic_inc(&tpd->vcc->stats->tx_err);
+                               atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
                        }
                        pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
                        return;
@@ -2506,7 +2506,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
                        vcc->pop(vcc, skb);
                else
                        dev_kfree_skb_any(skb);
-               atomic_inc(&vcc->stats->tx_err);
+               atomic_inc_unchecked(&vcc->stats->tx_err);
                return -EINVAL;
        }
 
@@ -2517,7 +2517,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
                        vcc->pop(vcc, skb);
                else
                        dev_kfree_skb_any(skb);
-               atomic_inc(&vcc->stats->tx_err);
+               atomic_inc_unchecked(&vcc->stats->tx_err);
                return -EINVAL;
        }
 #endif
@@ -2529,7 +2529,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
                        vcc->pop(vcc, skb);
                else
                        dev_kfree_skb_any(skb);
-               atomic_inc(&vcc->stats->tx_err);
+               atomic_inc_unchecked(&vcc->stats->tx_err);
                spin_unlock_irqrestore(&he_dev->global_lock, flags);
                return -ENOMEM;
        }
@@ -2571,7 +2571,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
                                        vcc->pop(vcc, skb);
                                else
                                        dev_kfree_skb_any(skb);
-                               atomic_inc(&vcc->stats->tx_err);
+                               atomic_inc_unchecked(&vcc->stats->tx_err);
                                spin_unlock_irqrestore(&he_dev->global_lock, flags);
                                return -ENOMEM;
                        }
@@ -2602,7 +2602,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
        __enqueue_tpd(he_dev, tpd, cid);
        spin_unlock_irqrestore(&he_dev->global_lock, flags);
 
-       atomic_inc(&vcc->stats->tx);
+       atomic_inc_unchecked(&vcc->stats->tx);
 
        return 0;
 }
index 1dc0519333f291ab859b58739e762b2aa5ae5834..1aadaf7612e6c10793815551a53da3a079d17459 100644 (file)
@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
        {
          struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
          // VC layer stats
-         atomic_inc(&vcc->stats->rx);
+         atomic_inc_unchecked(&vcc->stats->rx);
          __net_timestamp(skb);
          // end of our responsibility
          vcc->push (vcc, skb);
@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
        dev->tx_iovec = NULL;
        
        // VC layer stats
-       atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
+       atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
        
        // free the skb
        hrz_kfree_skb (skb);
index 2b24ed0567281fe46f94e305b75859bfbd91ea75..b3d6acc31a811cd1d2ceef85a3f6f6ca26d0fda5 100644 (file)
@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
                else
                        dev_kfree_skb(skb);
 
-               atomic_inc(&vcc->stats->tx);
+               atomic_inc_unchecked(&vcc->stats->tx);
        }
 
        atomic_dec(&scq->used);
@@ -1072,13 +1072,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
                        if ((sb = dev_alloc_skb(64)) == NULL) {
                                printk("%s: Can't allocate buffers for aal0.\n",
                                       card->name);
-                               atomic_add(i, &vcc->stats->rx_drop);
+                               atomic_add_unchecked(i, &vcc->stats->rx_drop);
                                break;
                        }
                        if (!atm_charge(vcc, sb->truesize)) {
                                RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
                                         card->name);
-                               atomic_add(i - 1, &vcc->stats->rx_drop);
+                               atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
                                dev_kfree_skb(sb);
                                break;
                        }
@@ -1095,7 +1095,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
                        ATM_SKB(sb)->vcc = vcc;
                        __net_timestamp(sb);
                        vcc->push(vcc, sb);
-                       atomic_inc(&vcc->stats->rx);
+                       atomic_inc_unchecked(&vcc->stats->rx);
 
                        cell += ATM_CELL_PAYLOAD;
                }
@@ -1132,13 +1132,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
                                 "(CDC: %08x)\n",
                                 card->name, len, rpp->len, readl(SAR_REG_CDC));
                        recycle_rx_pool_skb(card, rpp);
-                       atomic_inc(&vcc->stats->rx_err);
+                       atomic_inc_unchecked(&vcc->stats->rx_err);
                        return;
                }
                if (stat & SAR_RSQE_CRC) {
                        RXPRINTK("%s: AAL5 CRC error.\n", card->name);
                        recycle_rx_pool_skb(card, rpp);
-                       atomic_inc(&vcc->stats->rx_err);
+                       atomic_inc_unchecked(&vcc->stats->rx_err);
                        return;
                }
                if (skb_queue_len(&rpp->queue) > 1) {
@@ -1149,7 +1149,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
                                RXPRINTK("%s: Can't alloc RX skb.\n",
                                         card->name);
                                recycle_rx_pool_skb(card, rpp);
-                               atomic_inc(&vcc->stats->rx_err);
+                               atomic_inc_unchecked(&vcc->stats->rx_err);
                                return;
                        }
                        if (!atm_charge(vcc, skb->truesize)) {
@@ -1168,7 +1168,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
                        __net_timestamp(skb);
 
                        vcc->push(vcc, skb);
-                       atomic_inc(&vcc->stats->rx);
+                       atomic_inc_unchecked(&vcc->stats->rx);
 
                        return;
                }
@@ -1190,7 +1190,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
                __net_timestamp(skb);
 
                vcc->push(vcc, skb);
-               atomic_inc(&vcc->stats->rx);
+               atomic_inc_unchecked(&vcc->stats->rx);
 
                if (skb->truesize > SAR_FB_SIZE_3)
                        add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
@@ -1301,14 +1301,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
                if (vcc->qos.aal != ATM_AAL0) {
                        RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
                                card->name, vpi, vci);
-                       atomic_inc(&vcc->stats->rx_drop);
+                       atomic_inc_unchecked(&vcc->stats->rx_drop);
                        goto drop;
                }
        
                if ((sb = dev_alloc_skb(64)) == NULL) {
                        printk("%s: Can't allocate buffers for AAL0.\n",
                               card->name);
-                       atomic_inc(&vcc->stats->rx_err);
+                       atomic_inc_unchecked(&vcc->stats->rx_err);
                        goto drop;
                }
 
@@ -1327,7 +1327,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
                ATM_SKB(sb)->vcc = vcc;
                __net_timestamp(sb);
                vcc->push(vcc, sb);
-               atomic_inc(&vcc->stats->rx);
+               atomic_inc_unchecked(&vcc->stats->rx);
 
 drop:
                skb_pull(queue, 64);
@@ -1952,13 +1952,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
 
        if (vc == NULL) {
                printk("%s: NULL connection in send().\n", card->name);
-               atomic_inc(&vcc->stats->tx_err);
+               atomic_inc_unchecked(&vcc->stats->tx_err);
                dev_kfree_skb(skb);
                return -EINVAL;
        }
        if (!test_bit(VCF_TX, &vc->flags)) {
                printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
-               atomic_inc(&vcc->stats->tx_err);
+               atomic_inc_unchecked(&vcc->stats->tx_err);
                dev_kfree_skb(skb);
                return -EINVAL;
        }
@@ -1970,14 +1970,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
                break;
        default:
                printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
-               atomic_inc(&vcc->stats->tx_err);
+               atomic_inc_unchecked(&vcc->stats->tx_err);
                dev_kfree_skb(skb);
                return -EINVAL;
        }
 
        if (skb_shinfo(skb)->nr_frags != 0) {
                printk("%s: No scatter-gather yet.\n", card->name);
-               atomic_inc(&vcc->stats->tx_err);
+               atomic_inc_unchecked(&vcc->stats->tx_err);
                dev_kfree_skb(skb);
                return -EINVAL;
        }
@@ -1985,7 +1985,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
 
        err = queue_skb(card, vc, skb, oam);
        if (err) {
-               atomic_inc(&vcc->stats->tx_err);
+               atomic_inc_unchecked(&vcc->stats->tx_err);
                dev_kfree_skb(skb);
                return err;
        }
@@ -2008,7 +2008,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
        skb = dev_alloc_skb(64);
        if (!skb) {
                printk("%s: Out of memory in send_oam().\n", card->name);
-               atomic_inc(&vcc->stats->tx_err);
+               atomic_inc_unchecked(&vcc->stats->tx_err);
                return -ENOMEM;
        }
        atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
index 4217f29a85e0473b9686e428cf11c75599b13b41..88f547aa40041d3cdfbe421b64233e5396f8520a 100644 (file)
@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
        status = (u_short) (buf_desc_ptr->desc_mode);  
        if (status & (RX_CER | RX_PTE | RX_OFL))  
        {  
-                atomic_inc(&vcc->stats->rx_err);
+                atomic_inc_unchecked(&vcc->stats->rx_err);
                IF_ERR(printk("IA: bad packet, dropping it");)  
                 if (status & RX_CER) { 
                     IF_ERR(printk(" cause: packet CRC error\n");)
@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
        len = dma_addr - buf_addr;  
         if (len > iadev->rx_buf_sz) {
            printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
-           atomic_inc(&vcc->stats->rx_err);
+           atomic_inc_unchecked(&vcc->stats->rx_err);
           goto out_free_desc;
         }
                  
@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
           ia_vcc = INPH_IA_VCC(vcc);
           if (ia_vcc == NULL)
           {
-             atomic_inc(&vcc->stats->rx_err);
+             atomic_inc_unchecked(&vcc->stats->rx_err);
              atm_return(vcc, skb->truesize);
              dev_kfree_skb_any(skb);
              goto INCR_DLE;
@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
           if ((length > iadev->rx_buf_sz) || (length > 
                               (skb->len - sizeof(struct cpcs_trailer))))
           {
-             atomic_inc(&vcc->stats->rx_err);
+             atomic_inc_unchecked(&vcc->stats->rx_err);
              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
                                                             length, skb->len);)
              atm_return(vcc, skb->truesize);
@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
 
          IF_RX(printk("rx_dle_intr: skb push");)  
          vcc->push(vcc,skb);  
-         atomic_inc(&vcc->stats->rx);
+         atomic_inc_unchecked(&vcc->stats->rx);
           iadev->rx_pkt_cnt++;
       }  
 INCR_DLE:
@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
          {
              struct k_sonet_stats *stats;
              stats = &PRIV(_ia_dev[board])->sonet_stats;
-             printk("section_bip: %d\n", atomic_read(&stats->section_bip));
-             printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
-             printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
-             printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
-             printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
-             printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
-             printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
-             printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
-             printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
+             printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
+             printk("line_bip   : %d\n", atomic_read_unchecked(&stats->line_bip));
+             printk("path_bip   : %d\n", atomic_read_unchecked(&stats->path_bip));
+             printk("line_febe  : %d\n", atomic_read_unchecked(&stats->line_febe));
+             printk("path_febe  : %d\n", atomic_read_unchecked(&stats->path_febe));
+             printk("corr_hcs   : %d\n", atomic_read_unchecked(&stats->corr_hcs));
+             printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
+             printk("tx_cells   : %d\n", atomic_read_unchecked(&stats->tx_cells));
+             printk("rx_cells   : %d\n", atomic_read_unchecked(&stats->rx_cells));
          }
             ia_cmds.status = 0;
             break;
@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
        if ((desc == 0) || (desc > iadev->num_tx_desc))  
        {  
                IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) 
-                atomic_inc(&vcc->stats->tx);
+                atomic_inc_unchecked(&vcc->stats->tx);
                if (vcc->pop)   
                    vcc->pop(vcc, skb);   
                else  
@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
         ATM_DESC(skb) = vcc->vci;
         skb_queue_tail(&iadev->tx_dma_q, skb);
 
-        atomic_inc(&vcc->stats->tx);
+        atomic_inc_unchecked(&vcc->stats->tx);
         iadev->tx_pkt_cnt++;
        /* Increment transaction counter */  
        writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
         
 #if 0        
         /* add flow control logic */ 
-        if (atomic_read(&vcc->stats->tx) % 20 == 0) {
+        if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
           if (iavcc->vc_desc_cnt > 10) {
              vcc->tx_quota =  vcc->tx_quota * 3 / 4;
             printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
index 93eaf8d944926a706c3fe5b5bf1baac6a49c5c7c..b4ca7da49857a5a100f06c86d7c70a504eca1ae5 100644 (file)
@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
        vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
        lanai_endtx(lanai, lvcc);
        lanai_free_skb(lvcc->tx.atmvcc, skb);
-       atomic_inc(&lvcc->tx.atmvcc->stats->tx);
+       atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
 }
 
 /* Try to fill the buffer - don't call unless there is backlog */
@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
        ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
        __net_timestamp(skb);
        lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
-       atomic_inc(&lvcc->rx.atmvcc->stats->rx);
+       atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
     out:
        lvcc->rx.buf.ptr = end;
        cardvcc_write(lvcc, endptr, vcc_rxreadptr);
@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
                DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
                    "vcc %d\n", lanai->number, (unsigned int) s, vci);
                lanai->stats.service_rxnotaal5++;
-               atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
+               atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
                return 0;
        }
        if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
                int bytes;
                read_unlock(&vcc_sklist_lock);
                DPRINTK("got trashed rx pdu on vci %d\n", vci);
-               atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
+               atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
                lvcc->stats.x.aal5.service_trash++;
                bytes = (SERVICE_GET_END(s) * 16) -
                    (((unsigned long) lvcc->rx.buf.ptr) -
@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
        }
        if (s & SERVICE_STREAM) {
                read_unlock(&vcc_sklist_lock);
-               atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
+               atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
                lvcc->stats.x.aal5.service_stream++;
                printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
                    "PDU on VCI %d!\n", lanai->number, vci);
@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
                return 0;
        }
        DPRINTK("got rx crc error on vci %d\n", vci);
-       atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
+       atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
        lvcc->stats.x.aal5.service_rxcrc++;
        lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
        cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
index 9988ac98b6d83ee58fa2774f1c4ced4559dcee74..7c52585bba3515d307f453de51904bf60427ff3a 100644 (file)
@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
        if ((vc = (vc_map *) vcc->dev_data) == NULL) {
                printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
                       card->index);
-               atomic_inc(&vcc->stats->tx_err);
+               atomic_inc_unchecked(&vcc->stats->tx_err);
                dev_kfree_skb_any(skb);
                return -EINVAL;
        }
@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
        if (!vc->tx) {
                printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
                       card->index);
-               atomic_inc(&vcc->stats->tx_err);
+               atomic_inc_unchecked(&vcc->stats->tx_err);
                dev_kfree_skb_any(skb);
                return -EINVAL;
        }
@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
        if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
                printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
                       card->index);
-               atomic_inc(&vcc->stats->tx_err);
+               atomic_inc_unchecked(&vcc->stats->tx_err);
                dev_kfree_skb_any(skb);
                return -EINVAL;
        }
 
        if (skb_shinfo(skb)->nr_frags != 0) {
                printk("nicstar%d: No scatter-gather yet.\n", card->index);
-               atomic_inc(&vcc->stats->tx_err);
+               atomic_inc_unchecked(&vcc->stats->tx_err);
                dev_kfree_skb_any(skb);
                return -EINVAL;
        }
@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
        }
 
        if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
-               atomic_inc(&vcc->stats->tx_err);
+               atomic_inc_unchecked(&vcc->stats->tx_err);
                dev_kfree_skb_any(skb);
                return -EIO;
        }
-       atomic_inc(&vcc->stats->tx);
+       atomic_inc_unchecked(&vcc->stats->tx);
 
        return 0;
 }
@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
                                printk
                                    ("nicstar%d: Can't allocate buffers for aal0.\n",
                                     card->index);
-                               atomic_add(i, &vcc->stats->rx_drop);
+                               atomic_add_unchecked(i, &vcc->stats->rx_drop);
                                break;
                        }
                        if (!atm_charge(vcc, sb->truesize)) {
                                RXPRINTK
                                    ("nicstar%d: atm_charge() dropped aal0 packets.\n",
                                     card->index);
-                               atomic_add(i - 1, &vcc->stats->rx_drop);        /* already increased by 1 */
+                               atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);      /* already increased by 1 */
                                dev_kfree_skb_any(sb);
                                break;
                        }
@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
                        ATM_SKB(sb)->vcc = vcc;
                        __net_timestamp(sb);
                        vcc->push(vcc, sb);
-                       atomic_inc(&vcc->stats->rx);
+                       atomic_inc_unchecked(&vcc->stats->rx);
                        cell += ATM_CELL_PAYLOAD;
                }
 
@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
                        if (iovb == NULL) {
                                printk("nicstar%d: Out of iovec buffers.\n",
                                       card->index);
-                               atomic_inc(&vcc->stats->rx_drop);
+                               atomic_inc_unchecked(&vcc->stats->rx_drop);
                                recycle_rx_buf(card, skb);
                                return;
                        }
@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
                   small or large buffer itself. */
        } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
                printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
-               atomic_inc(&vcc->stats->rx_err);
+               atomic_inc_unchecked(&vcc->stats->rx_err);
                recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
                                      NS_MAX_IOVECS);
                NS_PRV_IOVCNT(iovb) = 0;
@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
                            ("nicstar%d: Expected a small buffer, and this is not one.\n",
                             card->index);
                        which_list(card, skb);
-                       atomic_inc(&vcc->stats->rx_err);
+                       atomic_inc_unchecked(&vcc->stats->rx_err);
                        recycle_rx_buf(card, skb);
                        vc->rx_iov = NULL;
                        recycle_iov_buf(card, iovb);
@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
                            ("nicstar%d: Expected a large buffer, and this is not one.\n",
                             card->index);
                        which_list(card, skb);
-                       atomic_inc(&vcc->stats->rx_err);
+                       atomic_inc_unchecked(&vcc->stats->rx_err);
                        recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
                                              NS_PRV_IOVCNT(iovb));
                        vc->rx_iov = NULL;
@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
                                printk(" - PDU size mismatch.\n");
                        else
                                printk(".\n");
-                       atomic_inc(&vcc->stats->rx_err);
+                       atomic_inc_unchecked(&vcc->stats->rx_err);
                        recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
                                              NS_PRV_IOVCNT(iovb));
                        vc->rx_iov = NULL;
@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
                        /* skb points to a small buffer */
                        if (!atm_charge(vcc, skb->truesize)) {
                                push_rxbufs(card, skb);
-                               atomic_inc(&vcc->stats->rx_drop);
+                               atomic_inc_unchecked(&vcc->stats->rx_drop);
                        } else {
                                skb_put(skb, len);
                                dequeue_sm_buf(card, skb);
@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
                                ATM_SKB(skb)->vcc = vcc;
                                __net_timestamp(skb);
                                vcc->push(vcc, skb);
-                               atomic_inc(&vcc->stats->rx);
+                               atomic_inc_unchecked(&vcc->stats->rx);
                        }
                } else if (NS_PRV_IOVCNT(iovb) == 2) {  /* One small plus one large buffer */
                        struct sk_buff *sb;
@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
                        if (len <= NS_SMBUFSIZE) {
                                if (!atm_charge(vcc, sb->truesize)) {
                                        push_rxbufs(card, sb);
-                                       atomic_inc(&vcc->stats->rx_drop);
+                                       atomic_inc_unchecked(&vcc->stats->rx_drop);
                                } else {
                                        skb_put(sb, len);
                                        dequeue_sm_buf(card, sb);
@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
                                        ATM_SKB(sb)->vcc = vcc;
                                        __net_timestamp(sb);
                                        vcc->push(vcc, sb);
-                                       atomic_inc(&vcc->stats->rx);
+                                       atomic_inc_unchecked(&vcc->stats->rx);
                                }
 
                                push_rxbufs(card, skb);
@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 
                                if (!atm_charge(vcc, skb->truesize)) {
                                        push_rxbufs(card, skb);
-                                       atomic_inc(&vcc->stats->rx_drop);
+                                       atomic_inc_unchecked(&vcc->stats->rx_drop);
                                } else {
                                        dequeue_lg_buf(card, skb);
 #ifdef NS_USE_DESTRUCTORS
@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
                                        ATM_SKB(skb)->vcc = vcc;
                                        __net_timestamp(skb);
                                        vcc->push(vcc, skb);
-                                       atomic_inc(&vcc->stats->rx);
+                                       atomic_inc_unchecked(&vcc->stats->rx);
                                }
 
                                push_rxbufs(card, sb);
@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
                                        printk
                                            ("nicstar%d: Out of huge buffers.\n",
                                             card->index);
-                                       atomic_inc(&vcc->stats->rx_drop);
+                                       atomic_inc_unchecked(&vcc->stats->rx_drop);
                                        recycle_iovec_rx_bufs(card,
                                                              (struct iovec *)
                                                              iovb->data,
@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
                                        card->hbpool.count++;
                                } else
                                        dev_kfree_skb_any(hb);
-                               atomic_inc(&vcc->stats->rx_drop);
+                               atomic_inc_unchecked(&vcc->stats->rx_drop);
                        } else {
                                /* Copy the small buffer to the huge buffer */
                                sb = (struct sk_buff *)iov->iov_base;
@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 #endif /* NS_USE_DESTRUCTORS */
                                __net_timestamp(hb);
                                vcc->push(vcc, hb);
-                               atomic_inc(&vcc->stats->rx);
+                               atomic_inc_unchecked(&vcc->stats->rx);
                        }
                }
 
index 21b0bc6a9c969ea677630a827f69c45545a9e78a..b5f40ba04314634a50b64b9847d10301131af80d 100644 (file)
@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
                                }
                                atm_charge(vcc, skb->truesize);
                                vcc->push(vcc, skb);
-                               atomic_inc(&vcc->stats->rx);
+                               atomic_inc_unchecked(&vcc->stats->rx);
                                break;
 
                        case PKT_STATUS:
@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
                        vcc = SKB_CB(oldskb)->vcc;
 
                        if (vcc) {
-                               atomic_inc(&vcc->stats->tx);
+                               atomic_inc_unchecked(&vcc->stats->tx);
                                solos_pop(vcc, oldskb);
                        } else {
                                dev_kfree_skb_irq(oldskb);
index 02159345566c3d018d01d50f8309fc4bd680a1d1..ce9f5b153445f6331a1caed63c8c313afa3843e6 100644 (file)
@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
 
 
 #define ADD_LIMITED(s,v) \
-    atomic_add((v),&stats->s); \
-    if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
+    atomic_add_unchecked((v),&stats->s); \
+    if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
 
 
 static void suni_hz(unsigned long from_timer)
index 5120a96b3a894f4a62735c26caacbbf6c8a21ae3..e2572bdba19ca72601a646b94ce3af21194e3089 100644 (file)
@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
        struct sonet_stats tmp;
        int error = 0;
 
-       atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
+       atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
        sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
        if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
        if (zero && !error) {
@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
 
 
 #define ADD_LIMITED(s,v) \
-    { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
-    if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
-       atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
+    { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
+    if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
+       atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
 
 
 static void stat_event(struct atm_dev *dev)
@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
                if (reason & uPD98402_INT_PFM) stat_event(dev);
                if (reason & uPD98402_INT_PCO) {
                        (void) GET(PCOCR); /* clear interrupt cause */
-                       atomic_add(GET(HECCT),
+                       atomic_add_unchecked(GET(HECCT),
                            &PRIV(dev)->sonet_stats.uncorr_hcs);
                }
                if ((reason & uPD98402_INT_RFO) && 
@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
        PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
          uPD98402_INT_LOS),PIMR); /* enable them */
        (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
-       atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
-       atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
-       atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
+       atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
+       atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
+       atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
        return 0;
 }
 
index 969c3c29000c3622094068d940b854d251393673..9b729560b76f68f1885ecc2339a2de61d15f3dc8 100644 (file)
@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
                }
                if (!size) {
                        dev_kfree_skb_irq(skb);
-                       if (vcc) atomic_inc(&vcc->stats->rx_err);
+                       if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
                        continue;
                }
                if (!atm_charge(vcc,skb->truesize)) {
@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
                skb->len = size;
                ATM_SKB(skb)->vcc = vcc;
                vcc->push(vcc,skb);
-               atomic_inc(&vcc->stats->rx);
+               atomic_inc_unchecked(&vcc->stats->rx);
        }
        zout(pos & 0xffff,MTA(mbx));
 #if 0 /* probably a stupid idea */
@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
                        skb_queue_head(&zatm_vcc->backlog,skb);
                        break;
                }
-       atomic_inc(&vcc->stats->tx);
+       atomic_inc_unchecked(&vcc->stats->tx);
        wake_up(&zatm_vcc->tx_wait);
 }
 
index 876bae5ade3393464bd7b2250af8f1ed29bf5bc8..89787857f0b332bea15a63aa5b37a1bc6f1a1b50 100644 (file)
@@ -1126,7 +1126,7 @@ int subsys_interface_register(struct subsys_interface *sif)
                return -EINVAL;
 
        mutex_lock(&subsys->p->mutex);
-       list_add_tail(&sif->node, &subsys->p->interfaces);
+       pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
        if (sif->add_dev) {
                subsys_dev_iter_init(&iter, subsys, NULL, NULL);
                while ((dev = subsys_dev_iter_next(&iter)))
@@ -1151,7 +1151,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
        subsys = sif->subsys;
 
        mutex_lock(&subsys->p->mutex);
-       list_del_init(&sif->node);
+       pax_list_del_init((struct list_head *)&sif->node);
        if (sif->remove_dev) {
                subsys_dev_iter_init(&iter, subsys, NULL, NULL);
                while ((dev = subsys_dev_iter_next(&iter)))
index 25798db14553265e899257d8e13e53fc356e81bb..15f130e155aa75b95ff869aaa061b935a41788a4 100644 (file)
@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
        if (!thread)
                return 0;
 
-       err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
+       err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
        if (err)
                printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
        else
@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
        *err = sys_unshare(CLONE_NEWNS);
        if (*err)
                goto out;
-       *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
+       *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
        if (*err)
                goto out;
-       sys_chdir("/.."); /* will traverse into overmounted root */
-       sys_chroot(".");
+       sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
+       sys_chroot((char __force_user *)".");
        complete(&setup_done);
        while (1) {
                spin_lock(&req_lock);
index a3b82e9c7f200283c1c4a6c67ef7028707dd9abc..f90a8ce4956d9f004d80e05fefa59bc473ac1471 100644 (file)
@@ -614,7 +614,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
 struct node_attr {
        struct device_attribute attr;
        enum node_states state;
-};
+} __do_const;
 
 static ssize_t show_node_state(struct device *dev,
                               struct device_attribute *attr, char *buf)
index 0d8780c04a5e4d7c409b2ad1aa2d7e4c0c4f424b..0b5df3fc888f58341208aec03495dd3cfbcbd5db 100644 (file)
@@ -1725,7 +1725,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
 {
        struct cpuidle_driver *cpuidle_drv;
        struct gpd_cpuidle_data *cpuidle_data;
-       struct cpuidle_state *idle_state;
+       cpuidle_state_no_const *idle_state;
        int ret = 0;
 
        if (IS_ERR_OR_NULL(genpd) || state < 0)
@@ -1793,7 +1793,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
 {
        struct gpd_cpuidle_data *cpuidle_data;
-       struct cpuidle_state *idle_state;
+       cpuidle_state_no_const *idle_state;
        int ret = 0;
 
        if (IS_ERR_OR_NULL(genpd))
@@ -2222,7 +2222,10 @@ int genpd_dev_pm_attach(struct device *dev)
                return ret;
        }
 
-       dev->pm_domain->detach = genpd_dev_pm_detach;
+       pax_open_kernel();
+       *(void **)&dev->pm_domain->detach = genpd_dev_pm_detach;
+       pax_close_kernel();
+
        pm_genpd_poweron(pd);
 
        return 0;
index d2be3f9c211cbba39d2c685eb481c54ac2ed3a23..0a3167a5033cf7a63a43d64c99f23ba2a2e1a127 100644 (file)
@@ -181,7 +181,7 @@ static ssize_t rtpm_status_show(struct device *dev,
                        return -EIO;
                }
        }
-       return sprintf(buf, p);
+       return sprintf(buf, "%s", p);
 }
 
 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
index c2744b30d5d92e9dde512e492cf9fdf44f21b5ef..08fac198816da7125009e4e7cf9657cba26ac0d5 100644 (file)
@@ -32,14 +32,14 @@ static bool pm_abort_suspend __read_mostly;
  * They need to be modified together atomically, so it's better to use one
  * atomic variable to hold them both.
  */
-static atomic_t combined_event_count = ATOMIC_INIT(0);
+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
 
 #define IN_PROGRESS_BITS       (sizeof(int) * 4)
 #define MAX_IN_PROGRESS                ((1 << IN_PROGRESS_BITS) - 1)
 
 static void split_counters(unsigned int *cnt, unsigned int *inpr)
 {
-       unsigned int comb = atomic_read(&combined_event_count);
+       unsigned int comb = atomic_read_unchecked(&combined_event_count);
 
        *cnt = (comb >> IN_PROGRESS_BITS);
        *inpr = comb & MAX_IN_PROGRESS;
@@ -404,7 +404,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
                ws->start_prevent_time = ws->last_time;
 
        /* Increment the counter of events in progress. */
-       cec = atomic_inc_return(&combined_event_count);
+       cec = atomic_inc_return_unchecked(&combined_event_count);
 
        trace_wakeup_source_activate(ws->name, cec);
 }
@@ -530,7 +530,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
         * Increment the counter of registered wakeup events and decrement the
         * couter of wakeup events in progress simultaneously.
         */
-       cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
+       cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
        trace_wakeup_source_deactivate(ws->name, cec);
 
        split_counters(&cnt, &inpr);
index 8d98a329f6ea63a2daf179bb3f15e5307c6a0d13..61d31652400ee8ce675c16d2f52b5f0145c72f6a 100644 (file)
@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
 void register_syscore_ops(struct syscore_ops *ops)
 {
        mutex_lock(&syscore_ops_lock);
-       list_add_tail(&ops->node, &syscore_ops_list);
+       pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
        mutex_unlock(&syscore_ops_lock);
 }
 EXPORT_SYMBOL_GPL(register_syscore_ops);
@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
 void unregister_syscore_ops(struct syscore_ops *ops)
 {
        mutex_lock(&syscore_ops_lock);
-       list_del(&ops->node);
+       pax_list_del((struct list_head *)&ops->node);
        mutex_unlock(&syscore_ops_lock);
 }
 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
index ff20f192b0f67a77fc9e38092c457429e215f8a7..018f1daffff21aedf4eaad5d998b506fbd37120c 100644 (file)
@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
        while (!list_empty(&h->reqQ)) {
                c = list_entry(h->reqQ.next, CommandList_struct, list);
                /* can't do anything if fifo is full */
-               if ((h->access.fifo_full(h))) {
+               if ((h->access->fifo_full(h))) {
                        dev_warn(&h->pdev->dev, "fifo full\n");
                        break;
                }
@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
                h->Qdepth--;
 
                /* Tell the controller execute command */
-               h->access.submit_command(h, c);
+               h->access->submit_command(h, c);
 
                /* Put job onto the completed Q */
                addQ(&h->cmpQ, c);
@@ -3444,17 +3444,17 @@ startio:
 
 static inline unsigned long get_next_completion(ctlr_info_t *h)
 {
-       return h->access.command_completed(h);
+       return h->access->command_completed(h);
 }
 
 static inline int interrupt_pending(ctlr_info_t *h)
 {
-       return h->access.intr_pending(h);
+       return h->access->intr_pending(h);
 }
 
 static inline long interrupt_not_for_us(ctlr_info_t *h)
 {
-       return ((h->access.intr_pending(h) == 0) ||
+       return ((h->access->intr_pending(h) == 0) ||
                (h->interrupts_enabled == 0));
 }
 
@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
        u32 a;
 
        if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
-               return h->access.command_completed(h);
+               return h->access->command_completed(h);
 
        if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
                a = *(h->reply_pool_head); /* Next cmd in ring buffer */
@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
                trans_support & CFGTBL_Trans_use_short_tags);
 
        /* Change the access methods to the performant access methods */
-       h->access = SA5_performant_access;
+       h->access = &SA5_performant_access;
        h->transMethod = CFGTBL_Trans_Performant;
 
        return;
@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
        if (prod_index < 0)
                return -ENODEV;
        h->product_name = products[prod_index].product_name;
-       h->access = *(products[prod_index].access);
+       h->access = products[prod_index].access;
 
        if (cciss_board_disabled(h)) {
                dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
        }
 
        /* make sure the board interrupts are off */
-       h->access.set_intr_mask(h, CCISS_INTR_OFF);
+       h->access->set_intr_mask(h, CCISS_INTR_OFF);
        rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
        if (rc)
                goto clean2;
@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
                 * fake ones to scoop up any residual completions.
                 */
                spin_lock_irqsave(&h->lock, flags);
-               h->access.set_intr_mask(h, CCISS_INTR_OFF);
+               h->access->set_intr_mask(h, CCISS_INTR_OFF);
                spin_unlock_irqrestore(&h->lock, flags);
                free_irq(h->intr[h->intr_mode], h);
                rc = cciss_request_irq(h, cciss_msix_discard_completions,
@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
                dev_info(&h->pdev->dev, "Board READY.\n");
                dev_info(&h->pdev->dev,
                        "Waiting for stale completions to drain.\n");
-               h->access.set_intr_mask(h, CCISS_INTR_ON);
+               h->access->set_intr_mask(h, CCISS_INTR_ON);
                msleep(10000);
-               h->access.set_intr_mask(h, CCISS_INTR_OFF);
+               h->access->set_intr_mask(h, CCISS_INTR_OFF);
 
                rc = controller_reset_failed(h->cfgtable);
                if (rc)
@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
        cciss_scsi_setup(h);
 
        /* Turn the interrupts on so we can service requests */
-       h->access.set_intr_mask(h, CCISS_INTR_ON);
+       h->access->set_intr_mask(h, CCISS_INTR_ON);
 
        /* Get the firmware version */
        inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
        kfree(flush_buf);
        if (return_code != IO_OK)
                dev_warn(&h->pdev->dev, "Error flushing cache\n");
-       h->access.set_intr_mask(h, CCISS_INTR_OFF);
+       h->access->set_intr_mask(h, CCISS_INTR_OFF);
        free_irq(h->intr[h->intr_mode], h);
 }
 
index 7fda30e4a2416195696b6a4eebd560d74fca6917..2f279464f75d12cb94ac5677146303903fd1a380 100644 (file)
@@ -101,7 +101,7 @@ struct ctlr_info
        /* information about each logical volume */
        drive_info_struct *drv[CISS_MAX_LUN];
 
-       struct access_method access;
+       struct access_method *access;
 
        /* queue and queue Info */ 
        struct list_head reqQ;
@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
 }
 
 static struct access_method SA5_access = {
-       SA5_submit_command,
-       SA5_intr_mask,
-       SA5_fifo_full,
-       SA5_intr_pending,
-       SA5_completed,
+       .submit_command = SA5_submit_command,
+       .set_intr_mask = SA5_intr_mask,
+       .fifo_full = SA5_fifo_full,
+       .intr_pending = SA5_intr_pending,
+       .command_completed = SA5_completed,
 };
 
 static struct access_method SA5B_access = {
-        SA5_submit_command,
-        SA5B_intr_mask,
-        SA5_fifo_full,
-        SA5B_intr_pending,
-        SA5_completed,
+       .submit_command = SA5_submit_command,
+       .set_intr_mask = SA5B_intr_mask,
+       .fifo_full = SA5_fifo_full,
+       .intr_pending = SA5B_intr_pending,
+       .command_completed = SA5_completed,
 };
 
 static struct access_method SA5_performant_access = {
-       SA5_submit_command,
-       SA5_performant_intr_mask,
-       SA5_fifo_full,
-       SA5_performant_intr_pending,
-       SA5_performant_completed,
+       .submit_command = SA5_submit_command,
+       .set_intr_mask = SA5_performant_intr_mask,
+       .fifo_full = SA5_fifo_full,
+       .intr_pending = SA5_performant_intr_pending,
+       .command_completed = SA5_performant_completed,
 };
 
 struct board_type {
index 2b944038453681ef15ba61f41e1cfa3a9e885fbe..fd6ad1f4d5f1bd6aeeaa44c9b64c192dc6b248b7 100644 (file)
@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
        if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
                goto Enomem4;
        }
-       hba[i]->access.set_intr_mask(hba[i], 0);
+       hba[i]->access->set_intr_mask(hba[i], 0);
        if (request_irq(hba[i]->intr, do_ida_intr,
                IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
        {
@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
        add_timer(&hba[i]->timer);
 
        /* Enable IRQ now that spinlock and rate limit timer are set up */
-       hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
+       hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
 
        for(j=0; j<NWD; j++) {
                struct gendisk *disk = ida_gendisk[i][j];
@@ -694,7 +694,7 @@ DBGINFO(
        for(i=0; i<NR_PRODUCTS; i++) {
                if (board_id == products[i].board_id) {
                        c->product_name = products[i].product_name;
-                       c->access = *(products[i].access);
+                       c->access = products[i].access;
                        break;
                }
        }
@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
                hba[ctlr]->intr = intr;
                sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
                hba[ctlr]->product_name = products[j].product_name;
-               hba[ctlr]->access = *(products[j].access);
+               hba[ctlr]->access = products[j].access;
                hba[ctlr]->ctlr = ctlr;
                hba[ctlr]->board_id = board_id;
                hba[ctlr]->pci_dev = NULL; /* not PCI */
@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
 
        while((c = h->reqQ) != NULL) {
                /* Can't do anything if we're busy */
-               if (h->access.fifo_full(h) == 0)
+               if (h->access->fifo_full(h) == 0)
                        return;
 
                /* Get the first entry from the request Q */
@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
                h->Qdepth--;
        
                /* Tell the controller to do our bidding */
-               h->access.submit_command(h, c);
+               h->access->submit_command(h, c);
 
                /* Get onto the completion Q */
                addQ(&h->cmpQ, c);
@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
        unsigned long flags;
        __u32 a,a1;
 
-       istat = h->access.intr_pending(h);
+       istat = h->access->intr_pending(h);
        /* Is this interrupt for us? */
        if (istat == 0)
                return IRQ_NONE;
@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
         */
        spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
        if (istat & FIFO_NOT_EMPTY) {
-               while((a = h->access.command_completed(h))) {
+               while((a = h->access->command_completed(h))) {
                        a1 = a; a &= ~3;
                        if ((c = h->cmpQ) == NULL)
                        {  
@@ -1448,11 +1448,11 @@ static int sendcmd(
        /*
         * Disable interrupt
         */
-       info_p->access.set_intr_mask(info_p, 0);
+       info_p->access->set_intr_mask(info_p, 0);
        /* Make sure there is room in the command FIFO */
        /* Actually it should be completely empty at this time. */
        for (i = 200000; i > 0; i--) {
-               temp = info_p->access.fifo_full(info_p);
+               temp = info_p->access->fifo_full(info_p);
                if (temp != 0) {
                        break;
                }
@@ -1465,7 +1465,7 @@ DBG(
        /*
         * Send the cmd
         */
-       info_p->access.submit_command(info_p, c);
+       info_p->access->submit_command(info_p, c);
        complete = pollcomplete(ctlr);
        
        pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr, 
@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
         * we check the new geometry.  Then turn interrupts back on when
         * we're done.
         */
-       host->access.set_intr_mask(host, 0);
+       host->access->set_intr_mask(host, 0);
        getgeometry(ctlr);
-       host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
+       host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
 
        for(i=0; i<NWD; i++) {
                struct gendisk *disk = ida_gendisk[ctlr][i];
@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
        /* Wait (up to 2 seconds) for a command to complete */
 
        for (i = 200000; i > 0; i--) {
-               done = hba[ctlr]->access.command_completed(hba[ctlr]);
+               done = hba[ctlr]->access->command_completed(hba[ctlr]);
                if (done == 0) {
                        udelay(10);     /* a short fixed delay */
                } else
index be73e9d579c5923954647987d4ec28a0e68e0499..7fbf140cdb9e83cffff0847a74c64c752132857e 100644 (file)
@@ -99,7 +99,7 @@ struct ctlr_info {
        drv_info_t      drv[NWD];
        struct proc_dir_entry *proc;
 
-       struct access_method access;
+       struct access_method *access;
 
        cmdlist_t *reqQ;
        cmdlist_t *cmpQ;
index 434c77dcc99e00c9da129278db2043117f7040d6..6d3219acf147d3df2da1d6c1c0eff6a156eef752 100644 (file)
@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
                submit_bio(rw, bio);
                /* this should not count as user activity and cause the
                 * resync to throttle -- see drbd_rs_should_slow_down(). */
-               atomic_add(len >> 9, &device->rs_sect_ev);
+               atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
        }
 }
 
index b905e9888b888eb4b72fd10bae06eb06950e760b..0812ed85b1949d38ed2e15aeaed9b5c54347d6aa 100644 (file)
@@ -385,7 +385,7 @@ struct drbd_epoch {
        struct drbd_connection *connection;
        struct list_head list;
        unsigned int barrier_nr;
-       atomic_t epoch_size; /* increased on every request added. */
+       atomic_unchecked_t epoch_size; /* increased on every request added. */
        atomic_t active;     /* increased on every req. added, and dec on every finished. */
        unsigned long flags;
 };
@@ -946,7 +946,7 @@ struct drbd_device {
        unsigned int al_tr_number;
        int al_tr_cycle;
        wait_queue_head_t seq_wait;
-       atomic_t packet_seq;
+       atomic_unchecked_t packet_seq;
        unsigned int peer_seq;
        spinlock_t peer_seq_lock;
        unsigned long comm_bm_set; /* communicated number of set bits. */
@@ -955,8 +955,8 @@ struct drbd_device {
        struct mutex own_state_mutex;
        struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
        char congestion_reason;  /* Why we where congested... */
-       atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
-       atomic_t rs_sect_ev; /* for submitted resync data rate, both */
+       atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
+       atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
        int rs_last_sect_ev; /* counter to compare with */
        int rs_last_events;  /* counter of read or write "events" (unit sectors)
                              * on the lower level device when we last looked. */
index 1fc83427199c9429e3a3f9cbd5f49cb6c6e98d86..7e7742bc9f768244f35d749003a1ade5ad4b4d9a 100644 (file)
@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
        p->sector = sector;
        p->block_id = block_id;
        p->blksize = blksize;
-       p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
+       p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
        return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
 }
 
@@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
                return -EIO;
        p->sector = cpu_to_be64(req->i.sector);
        p->block_id = (unsigned long)req;
-       p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
+       p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
        dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
        if (device->state.conn >= C_SYNC_SOURCE &&
            device->state.conn <= C_PAUSED_SYNC_T)
@@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
        atomic_set(&device->unacked_cnt, 0);
        atomic_set(&device->local_cnt, 0);
        atomic_set(&device->pp_in_use_by_net, 0);
-       atomic_set(&device->rs_sect_in, 0);
-       atomic_set(&device->rs_sect_ev, 0);
+       atomic_set_unchecked(&device->rs_sect_in, 0);
+       atomic_set_unchecked(&device->rs_sect_ev, 0);
        atomic_set(&device->ap_in_flight, 0);
        atomic_set(&device->md_io.in_use, 0);
 
@@ -2684,8 +2684,8 @@ void drbd_destroy_connection(struct kref *kref)
        struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
        struct drbd_resource *resource = connection->resource;
 
-       if (atomic_read(&connection->current_epoch->epoch_size) !=  0)
-               drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
+       if (atomic_read_unchecked(&connection->current_epoch->epoch_size) !=  0)
+               drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
        kfree(connection->current_epoch);
 
        idr_destroy(&connection->peer_devices);
index 74df8cfad414f7242a1354c4c34fd4d8273d74ca..e41fc24f93ac9f3ab9a56817f72839f6d45c29df 100644 (file)
@@ -3637,13 +3637,13 @@ finish:
 
 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
 {
-       static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
+       static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
        struct sk_buff *msg;
        struct drbd_genlmsghdr *d_out;
        unsigned seq;
        int err = -ENOMEM;
 
-       seq = atomic_inc_return(&drbd_genl_seq);
+       seq = atomic_inc_return_unchecked(&drbd_genl_seq);
        msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
        if (!msg)
                goto failed;
index d169b4a7926700187cc643f1ef29a6cd22aeaed3..481463f9133fe54fe3977ac5ccd6ef5974220858 100644 (file)
@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
        struct drbd_device *device = peer_device->device;
        int err;
 
-       atomic_set(&device->packet_seq, 0);
+       atomic_set_unchecked(&device->packet_seq, 0);
        device->peer_seq = 0;
 
        device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
        do {
                next_epoch = NULL;
 
-               epoch_size = atomic_read(&epoch->epoch_size);
+               epoch_size = atomic_read_unchecked(&epoch->epoch_size);
 
                switch (ev & ~EV_CLEANUP) {
                case EV_PUT:
@@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
                                        rv = FE_DESTROYED;
                        } else {
                                epoch->flags = 0;
-                               atomic_set(&epoch->epoch_size, 0);
+                               atomic_set_unchecked(&epoch->epoch_size, 0);
                                /* atomic_set(&epoch->active, 0); is already zero */
                                if (rv == FE_STILL_LIVE)
                                        rv = FE_RECYCLED;
@@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
                conn_wait_active_ee_empty(connection);
                drbd_flush(connection);
 
-               if (atomic_read(&connection->current_epoch->epoch_size)) {
+               if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
                        epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
                        if (epoch)
                                break;
@@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
        }
 
        epoch->flags = 0;
-       atomic_set(&epoch->epoch_size, 0);
+       atomic_set_unchecked(&epoch->epoch_size, 0);
        atomic_set(&epoch->active, 0);
 
        spin_lock(&connection->epoch_lock);
-       if (atomic_read(&connection->current_epoch->epoch_size)) {
+       if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
                list_add(&epoch->list, &connection->current_epoch->list);
                connection->current_epoch = epoch;
                connection->epochs++;
@@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
        list_add_tail(&peer_req->w.list, &device->sync_ee);
        spin_unlock_irq(&device->resource->req_lock);
 
-       atomic_add(pi->size >> 9, &device->rs_sect_ev);
+       atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
        if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
                return 0;
 
@@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
                drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
        }
 
-       atomic_add(pi->size >> 9, &device->rs_sect_in);
+       atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
 
        return err;
 }
@@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
 
                err = wait_for_and_update_peer_seq(peer_device, peer_seq);
                drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
-               atomic_inc(&connection->current_epoch->epoch_size);
+               atomic_inc_unchecked(&connection->current_epoch->epoch_size);
                err2 = drbd_drain_block(peer_device, pi->size);
                if (!err)
                        err = err2;
@@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
 
        spin_lock(&connection->epoch_lock);
        peer_req->epoch = connection->current_epoch;
-       atomic_inc(&peer_req->epoch->epoch_size);
+       atomic_inc_unchecked(&peer_req->epoch->epoch_size);
        atomic_inc(&peer_req->epoch->active);
        spin_unlock(&connection->epoch_lock);
 
@@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
 
        curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
                      (int)part_stat_read(&disk->part0, sectors[1]) -
-                       atomic_read(&device->rs_sect_ev);
+                       atomic_read_unchecked(&device->rs_sect_ev);
 
        if (atomic_read(&device->ap_actlog_cnt)
            || curr_events - device->rs_last_events > 64) {
@@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
                        device->use_csums = true;
                } else if (pi->cmd == P_OV_REPLY) {
                        /* track progress, we may need to throttle */
-                       atomic_add(size >> 9, &device->rs_sect_in);
+                       atomic_add_unchecked(size >> 9, &device->rs_sect_in);
                        peer_req->w.cb = w_e_end_ov_reply;
                        dec_rs_pending(device);
                        /* drbd_rs_begin_io done when we sent this request,
@@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
                goto out_free_e;
 
 submit_for_resync:
-       atomic_add(size >> 9, &device->rs_sect_ev);
+       atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
 
 submit:
        update_receiver_timing_details(connection, drbd_submit_peer_request);
@@ -4564,7 +4564,7 @@ struct data_cmd {
        int expect_payload;
        size_t pkt_size;
        int (*fn)(struct drbd_connection *, struct packet_info *);
-};
+} __do_const;
 
 static struct data_cmd drbd_cmd_handler[] = {
        [P_DATA]            = { 1, sizeof(struct p_data), receive_Data },
@@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
        if (!list_empty(&connection->current_epoch->list))
                drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
        /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
-       atomic_set(&connection->current_epoch->epoch_size, 0);
+       atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
        connection->send.seen_any_write_yet = false;
 
        drbd_info(connection, "Connection closed\n");
@@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
                put_ldev(device);
        }
        dec_rs_pending(device);
-       atomic_add(blksize >> 9, &device->rs_sect_in);
+       atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
 
        return 0;
 }
@@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
 struct asender_cmd {
        size_t pkt_size;
        int (*fn)(struct drbd_connection *connection, struct packet_info *);
-};
+} __do_const;
 
 static struct asender_cmd asender_tbl[] = {
        [P_PING]            = { 0, got_Ping },
index d0fae55d871d699a67f0a9ad3036f71d2265131c..44690969b8da9357852d3f90683fc1e67d69e29b 100644 (file)
@@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
        list_add_tail(&peer_req->w.list, &device->read_ee);
        spin_unlock_irq(&device->resource->req_lock);
 
-       atomic_add(size >> 9, &device->rs_sect_ev);
+       atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
        if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
                return 0;
 
@@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
        unsigned int sect_in;  /* Number of sectors that came in since the last turn */
        int number, mxb;
 
-       sect_in = atomic_xchg(&device->rs_sect_in, 0);
+       sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
        device->rs_in_flight -= sect_in;
 
        rcu_read_lock();
@@ -1595,8 +1595,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
        struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
        struct fifo_buffer *plan;
 
-       atomic_set(&device->rs_sect_in, 0);
-       atomic_set(&device->rs_sect_ev, 0);
+       atomic_set_unchecked(&device->rs_sect_in, 0);
+       atomic_set_unchecked(&device->rs_sect_ev, 0);
        device->rs_in_flight = 0;
        device->rs_last_events =
                (int)part_stat_read(&disk->part0, sectors[0]) +
index 6cb1beb47c25d1d2a7db113ca9f173a9ef8b68d3..bf490f7596765d05f9f09c28e3fd312cee32cead 100644 (file)
@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
 
        file_start_write(file);
        set_fs(get_ds());
-       bw = file->f_op->write(file, buf, len, &pos);
+       bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
        set_fs(old_fs);
        file_end_write(file);
        if (likely(bw == len))
index d826bf3e62c8621e8572ca9eabb7951d42c33eb7..8eb406cde11265ca5822084882c4ad2899fa9725 100644 (file)
@@ -76,7 +76,6 @@ static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
 static wait_queue_head_t nvme_kthread_wait;
-static struct notifier_block nvme_nb;
 
 static void nvme_reset_failed_dev(struct work_struct *ws);
 static int nvme_process_cq(struct nvme_queue *nvmeq);
@@ -2955,7 +2954,6 @@ static int __init nvme_init(void)
 static void __exit nvme_exit(void)
 {
        pci_unregister_driver(&nvme_driver);
-       unregister_hotcpu_notifier(&nvme_nb);
        unregister_blkdev(nvme_major, "nvme");
        destroy_workqueue(nvme_workq);
        BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
index 09e628dafd9d829eadd9abb68c6956d182a582f8..7607aaa75e2d92f581ee3c893b7cfa2c10827212 100644 (file)
@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
 
 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
 {
-       return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
+       return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
 }
 
 /*
@@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
                return -EROFS;
        }
        pd->settings.fp = ti.fp;
-       pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
+       pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
 
        if (ti.nwa_v) {
                pd->nwa = be32_to_cpu(ti.next_writable);
index 8a86b62466f7ce72b54853b283e03fd495df8083..f54c87e86e17a9eaa0406d9f3f8b4df5792d6f17 100644 (file)
@@ -63,7 +63,7 @@
  * If the counter is already at its maximum value returns
  * -EINVAL without updating it.
  */
-static int atomic_inc_return_safe(atomic_t *v)
+static int __intentional_overflow(-1) atomic_inc_return_safe(atomic_t *v)
 {
        unsigned int counter;
 
index e5565fbaeb30a7115190989e094f9e7f578ecf8d..71be10b4492ff18aaa00745a64da51b72e331112 100644 (file)
@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
 }
 
 static struct access_method smart4_access = {
-       smart4_submit_command,
-       smart4_intr_mask,
-       smart4_fifo_full,
-       smart4_intr_pending,
-       smart4_completed,
+       .submit_command = smart4_submit_command,
+       .set_intr_mask = smart4_intr_mask,
+       .fifo_full = smart4_fifo_full,
+       .intr_pending = smart4_intr_pending,
+       .command_completed = smart4_completed,
 };
 
 /*
@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
 }
 
 static struct access_method smart2_access = {
-       smart2_submit_command,
-       smart2_intr_mask,
-       smart2_fifo_full,
-       smart2_intr_pending,
-       smart2_completed,
+       .submit_command = smart2_submit_command,
+       .set_intr_mask = smart2_intr_mask,
+       .fifo_full = smart2_fifo_full,
+       .intr_pending = smart2_intr_pending,
+       .command_completed = smart2_completed,
 };
 
 /*
@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
 }
 
 static struct access_method smart2e_access = {
-       smart2e_submit_command,
-       smart2e_intr_mask,
-       smart2e_fifo_full,
-       smart2e_intr_pending,
-       smart2e_completed,
+       .submit_command = smart2e_submit_command,
+       .set_intr_mask = smart2e_intr_mask,
+       .fifo_full = smart2e_fifo_full,
+       .intr_pending = smart2e_intr_pending,
+       .command_completed = smart2e_completed,
 };
 
 /*
@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
 }
 
 static struct access_method smart1_access = {
-       smart1_submit_command,
-       smart1_intr_mask,
-       smart1_fifo_full,
-       smart1_intr_pending,
-       smart1_completed,
+       .submit_command = smart1_submit_command,
+       .set_intr_mask = smart1_intr_mask,
+       .fifo_full = smart1_fifo_full,
+       .intr_pending = smart1_intr_pending,
+       .command_completed = smart1_completed,
 };
index 55c135b7757a9df84745f05bf27870365cbddd73..9f8d60cbda240f9dd692b942186b8f36e2a75d71 100644 (file)
@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 
 static int bt_ti_probe(struct platform_device *pdev)
 {
-       static struct ti_st *hst;
+       struct ti_st *hst;
        struct hci_dev *hdev;
        int err;
 
index 5d28a45d2960c6a40945b134c755ff03a1117c76..a538f90fb5be08af9ba59a98076450975120fd3e 100644 (file)
@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
        ENSURE(reset, CDC_RESET);
        ENSURE(generic_packet, CDC_GENERIC_PACKET);
        cdi->mc_flags = 0;
-       cdo->n_minors = 0;
        cdi->options = CDO_USE_FFLAGS;
 
        if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
        else
                cdi->cdda_method = CDDA_OLD;
 
-       if (!cdo->generic_packet)
-               cdo->generic_packet = cdrom_dummy_generic_packet;
+       if (!cdo->generic_packet) {
+               pax_open_kernel();
+               *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
+               pax_close_kernel();
+       }
 
        cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
        mutex_lock(&cdrom_mutex);
@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
        if (cdi->exit)
                cdi->exit(cdi);
 
-       cdi->ops->n_minors--;
        cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
 }
 
@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
         */
        nr = nframes;
        do {
-               cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
+               cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
                if (cgc.buffer)
                        break;
 
@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
        struct cdrom_device_info *cdi;
        int ret;
 
-       ret = scnprintf(info + *pos, max_size - *pos, header);
+       ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
        if (!ret)
                return 1;
 
index 584bc3126403d58955a915db2117fcd1d16de5a2..e64a12cb4251ee41726c9981efd748f15f83a958 100644 (file)
@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
        .audio_ioctl            = gdrom_audio_ioctl,
        .capability             = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
                                  CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
-       .n_minors               = 1,
 };
 
 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
index efefd12a0f7bd97b2d6a3970eb068250bce257c2..4f1d494598ea48151ab5f931246f0f77e04e09ce 100644 (file)
@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
 
 config DEVKMEM
        bool "/dev/kmem virtual device support"
-       default y
+       default n
+       depends on !GRKERNSEC_KMEM
        help
          Say Y here if you want to support the /dev/kmem device. The
          /dev/kmem device is rarely used, but can be used for certain
@@ -577,6 +578,7 @@ config DEVPORT
        bool
        depends on !M68K
        depends on ISA || PCI
+       depends on !GRKERNSEC_KMEM
        default y
 
 source "drivers/s390/char/Kconfig"
index a48e05b31593b448d789b4f4e4e83ba484e7d340..6bac831733c86ae379b4d0b525149d0d79390b62 100644 (file)
@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
                        return -ENOMEM;
                }
 
-               if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
+               if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
                                   sizeof(*usegment) * ureserve.seg_count)) {
                        kfree(usegment);
                        kfree(ksegment);
index 09f17eb734863fd0dfc6d69fc6f7716906391424..8531d2f06db81f5d39c902beb41d8c6b4a7a522c 100644 (file)
@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
        if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
                return -EFAULT;
 
-       if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
+       if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
                return -EFAULT;
 
        client = agp_find_client_by_pid(reserve.pid);
@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
                if (segment == NULL)
                        return -ENOMEM;
 
-               if (copy_from_user(segment, (void __user *) reserve.seg_list,
+               if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
                                   sizeof(struct agp_segment) * reserve.seg_count)) {
                        kfree(segment);
                        return -EFAULT;
index 4f943759d37699a1ec364c290c58594a543bffce..413694ecfd85b606b2c4f646e7fe80f94c9b5192 100644 (file)
@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
        switch (cmd) {
 
        case RTC_PLL_GET:
+           memset(&pll, 0, sizeof(pll));
            if (get_rtc_pll(&pll))
                    return -EINVAL;
            else
index d5d4cd82b9f7dac435af267806ff541777f3ebdb..22d561dbf68de2e984bdc3aeb56f72fa9baaea3c 100644 (file)
@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
 }
 
 static int
-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
                  struct hpet_info *info)
 {
        struct hpet_timer __iomem *timer;
index 6b65fa4e0c5586895df2b26ee499c9e5ad4d8b2c..8ebbc996874de20d7a1b53d0c289a71836f90626 100644 (file)
@@ -436,7 +436,7 @@ struct ipmi_smi {
        struct proc_dir_entry *proc_dir;
        char                  proc_dir_name[10];
 
-       atomic_t stats[IPMI_NUM_STATS];
+       atomic_unchecked_t stats[IPMI_NUM_STATS];
 
        /*
         * run_to_completion duplicate of smb_info, smi_info
@@ -468,9 +468,9 @@ static LIST_HEAD(smi_watchers);
 static DEFINE_MUTEX(smi_watchers_mutex);
 
 #define ipmi_inc_stat(intf, stat) \
-       atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
+       atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
 #define ipmi_get_stat(intf, stat) \
-       ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
+       ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
 
 static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI",
                                   "ACPI", "SMBIOS", "PCI",
@@ -2837,7 +2837,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
        INIT_LIST_HEAD(&intf->cmd_rcvrs);
        init_waitqueue_head(&intf->waitq);
        for (i = 0; i < IPMI_NUM_STATS; i++)
-               atomic_set(&intf->stats[i], 0);
+               atomic_set_unchecked(&intf->stats[i], 0);
 
        intf->proc_dir = NULL;
 
index 967b73aa4e66d31481d6ae9488ab567530661f76..946e94c526c846d461831f1820ab13ba76ca7bb2 100644 (file)
@@ -284,7 +284,7 @@ struct smi_info {
        unsigned char slave_addr;
 
        /* Counters and things for the proc filesystem. */
-       atomic_t stats[SI_NUM_STATS];
+       atomic_unchecked_t stats[SI_NUM_STATS];
 
        struct task_struct *thread;
 
@@ -293,9 +293,9 @@ struct smi_info {
 };
 
 #define smi_inc_stat(smi, stat) \
-       atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
+       atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
 #define smi_get_stat(smi, stat) \
-       ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
+       ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
 
 #define SI_MAX_PARMS 4
 
@@ -3412,7 +3412,7 @@ static int try_smi_init(struct smi_info *new_smi)
        atomic_set(&new_smi->req_events, 0);
        new_smi->run_to_completion = false;
        for (i = 0; i < SI_NUM_STATS; i++)
-               atomic_set(&new_smi->stats[i], 0);
+               atomic_set_unchecked(&new_smi->stats[i], 0);
 
        new_smi->interrupt_disabled = true;
        atomic_set(&new_smi->need_watch, 0);
index 4c58333b42570d233ccfaa83dac3c67f69e73b49..d5cca271ee1673d30911cab3152b909434a35f26 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/raw.h>
 #include <linux/tty.h>
 #include <linux/capability.h>
+#include <linux/security.h>
 #include <linux/ptrace.h>
 #include <linux/device.h>
 #include <linux/highmem.h>
 
 #define DEVPORT_MINOR  4
 
+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
+extern const struct file_operations grsec_fops;
+#endif
+
 static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
 {
@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 
        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
+#ifdef CONFIG_GRKERNSEC_KMEM
+                       gr_handle_mem_readwrite(from, to);
+#else
                        printk(KERN_INFO
                "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                                current->comm, from, to);
+#endif
                        return 0;
                }
                cursor += PAGE_SIZE;
@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
        }
        return 1;
 }
+#elif defined(CONFIG_GRKERNSEC_KMEM)
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+       return 0;
+}
 #else
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
@@ -124,7 +138,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
 #endif
 
        while (count > 0) {
-               unsigned long remaining;
+               unsigned long remaining = 0;
+               char *temp;
 
                sz = size_inside_page(p, count);
 
@@ -140,7 +155,24 @@ static ssize_t read_mem(struct file *file, char __user *buf,
                if (!ptr)
                        return -EFAULT;
 
-               remaining = copy_to_user(buf, ptr, sz);
+#ifdef CONFIG_PAX_USERCOPY
+               temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
+               if (!temp) {
+                       unxlate_dev_mem_ptr(p, ptr);
+                       return -ENOMEM;
+               }
+               remaining = probe_kernel_read(temp, ptr, sz);
+#else
+               temp = ptr;
+#endif
+
+               if (!remaining)
+                       remaining = copy_to_user(buf, temp, sz);
+
+#ifdef CONFIG_PAX_USERCOPY
+               kfree(temp);
+#endif
+
                unxlate_dev_mem_ptr(p, ptr);
                if (remaining)
                        return -EFAULT;
@@ -372,9 +404,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
 {
        unsigned long p = *ppos;
-       ssize_t low_count, read, sz;
+       ssize_t low_count, read, sz, err = 0;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
-       int err = 0;
 
        read = 0;
        if (p < (unsigned long) high_memory) {
@@ -396,6 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
                }
 #endif
                while (low_count > 0) {
+                       char *temp;
+
                        sz = size_inside_page(p, low_count);
 
                        /*
@@ -405,7 +438,23 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
                         */
                        kbuf = xlate_dev_kmem_ptr((void *)p);
 
-                       if (copy_to_user(buf, kbuf, sz))
+#ifdef CONFIG_PAX_USERCOPY
+                       temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
+                       if (!temp)
+                               return -ENOMEM;
+                       err = probe_kernel_read(temp, kbuf, sz);
+#else
+                       temp = kbuf;
+#endif
+
+                       if (!err)
+                               err = copy_to_user(buf, temp, sz);
+
+#ifdef CONFIG_PAX_USERCOPY
+                       kfree(temp);
+#endif
+
+                       if (err)
                                return -EFAULT;
                        buf += sz;
                        p += sz;
@@ -800,6 +849,9 @@ static const struct memdev {
 #ifdef CONFIG_PRINTK
        [11] = { "kmsg", 0644, &kmsg_fops, NULL },
 #endif
+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
+       [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
+#endif
 };
 
 static int memory_open(struct inode *inode, struct file *filp)
@@ -871,7 +923,7 @@ static int __init chr_dev_init(void)
                        continue;
 
                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
-                             NULL, devlist[minor].name);
+                             NULL, "%s", devlist[minor].name);
        }
 
        return tty_init();
index 9df78e2cc45d2951fc77815975cbeeb6633aea3a..01ba9ae7c0b173ebb3e0120f73fcdbddf2c84980 100644 (file)
@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
 
        spin_unlock_irq(&rtc_lock);
 
-       if (copy_to_user(buf, contents, tmp - contents))
+       if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
                return -EFAULT;
 
        *ppos = i;
index 0ea9986059affee9d7bc65b0040c02acdae751d8..e7b07e45d8beb0abe22c0d2a9a12e6a0ab44d99a 100644 (file)
@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
-                        __FILE__, __LINE__, info->device_name, port->count);
+                        __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
 
        if (tty_port_close_start(port, tty, filp) == 0)
                goto cleanup;
@@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
 cleanup:
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
-                       tty->driver->name, port->count);
+                       tty->driver->name, atomic_read(&port->count));
 }
 
 /* Wait until the transmitter is empty.
@@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
-                        __FILE__, __LINE__, tty->driver->name, port->count);
+                        __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
 
        /* If port is closing, signal caller to try again */
        if (port->flags & ASYNC_CLOSING){
@@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
                goto cleanup;
        }
        spin_lock(&port->lock);
-       port->count++;
+       atomic_inc(&port->count);
        spin_unlock(&port->lock);
        spin_unlock_irqrestore(&info->netlock, flags);
 
-       if (port->count == 1) {
+       if (atomic_read(&port->count) == 1) {
                /* 1st open on this device, init hardware */
                retval = startup(info, tty);
                if (retval < 0)
@@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
        unsigned short new_crctype;
 
        /* return error if TTY interface open */
-       if (info->port.count)
+       if (atomic_read(&info->port.count))
                return -EBUSY;
 
        switch (encoding)
@@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
 
        /* arbitrate between network and tty opens */
        spin_lock_irqsave(&info->netlock, flags);
-       if (info->port.count != 0 || info->netcount != 0) {
+       if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
                printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
                spin_unlock_irqrestore(&info->netlock, flags);
                return -EBUSY;
@@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
 
        /* return error if TTY interface open */
-       if (info->port.count)
+       if (atomic_read(&info->port.count))
                return -EBUSY;
 
        if (cmd != SIOCWANDEV)
index 9cd6968e2f924bf7eb5c545c4298445651d5665d..6416f00219f3d23d0b2c07daf60ae2f6031dfb7d 100644 (file)
 /*
  * To allow fractional bits to be tracked, the entropy_count field is
  * denominated in units of 1/8th bits.
- *
- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
- * credit_entropy_bits() needs to be 64 bits wide.
  */
 #define ENTROPY_SHIFT 3
 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
@@ -439,9 +436,9 @@ struct entropy_store {
 };
 
 static void push_to_pool(struct work_struct *work);
-static __u32 input_pool_data[INPUT_POOL_WORDS];
-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
 
 static struct entropy_store input_pool = {
        .poolinfo = &poolinfo_table[0],
@@ -635,7 +632,7 @@ retry:
                /* The +2 corresponds to the /4 in the denominator */
 
                do {
-                       unsigned int anfrac = min(pnfrac, pool_size/2);
+                       u64 anfrac = min(pnfrac, pool_size/2);
                        unsigned int add =
                                ((pool_size - entropy_count)*anfrac*3) >> s;
 
@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
 
                extract_buf(r, tmp);
                i = min_t(int, nbytes, EXTRACT_SIZE);
-               if (copy_to_user(buf, tmp, i)) {
+               if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
                        ret = -EFAULT;
                        break;
                }
@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
 static int proc_do_uuid(struct ctl_table *table, int write,
                        void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       struct ctl_table fake_table;
+       ctl_table_no_const fake_table;
        unsigned char buf[64], tmp_uuid[16], *uuid;
 
        uuid = table->data;
@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
 static int proc_do_entropy(struct ctl_table *table, int write,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       struct ctl_table fake_table;
+       ctl_table_no_const fake_table;
        int entropy_count;
 
        entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
index e496daefe9e02fa553e4cdb2a6d77399e4bf4dee..b793e7d11f4352c541d4de6efa06fd324c295a85 100644 (file)
@@ -54,6 +54,7 @@
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
+#include <asm/local.h>
 
 #include <linux/sonypi.h>
 
@@ -490,7 +491,7 @@ static struct sonypi_device {
        spinlock_t fifo_lock;
        wait_queue_head_t fifo_proc_list;
        struct fasync_struct *fifo_async;
-       int open_count;
+       local_t open_count;
        int model;
        struct input_dev *input_jog_dev;
        struct input_dev *input_key_dev;
@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
 static int sonypi_misc_release(struct inode *inode, struct file *file)
 {
        mutex_lock(&sonypi_device.lock);
-       sonypi_device.open_count--;
+       local_dec(&sonypi_device.open_count);
        mutex_unlock(&sonypi_device.lock);
        return 0;
 }
@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
 {
        mutex_lock(&sonypi_device.lock);
        /* Flush input queue on first open */
-       if (!sonypi_device.open_count)
+       if (!local_read(&sonypi_device.open_count))
                kfifo_reset(&sonypi_device.fifo);
-       sonypi_device.open_count++;
+       local_inc(&sonypi_device.open_count);
        mutex_unlock(&sonypi_device.lock);
 
        return 0;
index 565a9478cb94082030a9fe5ce40e277baafb6e1b..dcdc06e5cbf487b79a09ea33fb493f765bcfdbcf 100644 (file)
@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
        virt = acpi_os_map_iomem(start, len);
        if (!virt) {
                kfree(log->bios_event_log);
+               log->bios_event_log = NULL;
                printk("%s: ERROR - Unable to map memory\n", __func__);
                return -EIO;
        }
 
-       memcpy_fromio(log->bios_event_log, virt, len);
+       memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
 
        acpi_os_unmap_iomem(virt, len);
        return 0;
index 3a56a131586c824f97e2cc2310c496a91378fd3d..f8cbd25ba33f86f46b680b2bf11e7b9f5354015b 100644 (file)
@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
        event = addr;
 
        if ((event->event_type == 0 && event->event_size == 0) ||
-           ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
+           (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
                return NULL;
 
        return addr;
@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
                return NULL;
 
        if ((event->event_type == 0 && event->event_size == 0) ||
-           ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
+           (event->event_size >= limit - v - sizeof(struct tcpa_event)))
                return NULL;
 
        (*pos)++;
@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
        int i;
 
        for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
-               seq_putc(m, data[i]);
+               if (!seq_putc(m, data[i]))
+                       return -EFAULT;
 
        return 0;
 }
index de03df9dd7c9614e290a7c60ddee6a1564c0f5f2..0a309a9d1949cca37465e3f04713cdb8b2b2d93a 100644 (file)
@@ -684,7 +684,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
        if (to_user) {
                ssize_t ret;
 
-               ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
+               ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
                if (ret)
                        return -EFAULT;
        } else {
@@ -788,7 +788,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
        if (!port_has_data(port) && !port->host_connected)
                return 0;
 
-       return fill_readbuf(port, ubuf, count, true);
+       return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
 }
 
 static int wait_port_writable(struct port *port, bool nonblock)
index 4386697236a78dc23aea66d0c4792873d558f71e..754ceca8b2093a0ba5cc1f159bdf69c1061e803b 100644 (file)
@@ -192,7 +192,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
        struct clk *clk;
        struct clk_init_data init;
        struct clk_composite *composite;
-       struct clk_ops *clk_composite_ops;
+       clk_ops_no_const *clk_composite_ops;
 
        composite = kzalloc(sizeof(*composite), GFP_KERNEL);
        if (!composite) {
index dd3a78c64795f27c7e171f004f9bb0462f44579a..386d49ca7d8dfc7aaf70eef4a3cd65beb87aca3a 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/mfd/syscon.h>
 #include <linux/of.h>
 #include <linux/regmap.h>
+#include <asm/pgtable.h>
 
 #include "clk.h"
 
@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
        return 0;
 }
 
-static struct clk_ops gateclk_ops = {
+static clk_ops_no_const gateclk_ops __read_only = {
        .prepare = socfpga_clk_prepare,
        .recalc_rate = socfpga_clk_recalc_rate,
        .get_parent = socfpga_clk_get_parent,
@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
                socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
                socfpga_clk->hw.bit_idx = clk_gate[1];
 
-               gateclk_ops.enable = clk_gate_ops.enable;
-               gateclk_ops.disable = clk_gate_ops.disable;
+               pax_open_kernel();
+               *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
+               *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
+               pax_close_kernel();
        }
 
        rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
index de6da957a09d6ebe82f416370c84a7dc50acea8e..c98278b9c808019713f1ac4cb5bd37852d85086b 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <asm/pgtable.h>
 
 #include "clk.h"
 
@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
                        CLK_MGR_PLL_CLK_SRC_MASK;
 }
 
-static struct clk_ops clk_pll_ops = {
+static clk_ops_no_const clk_pll_ops __read_only = {
        .recalc_rate = clk_pll_recalc_rate,
        .get_parent = clk_pll_get_parent,
 };
@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
        pll_clk->hw.hw.init = &init;
 
        pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
-       clk_pll_ops.enable = clk_gate_ops.enable;
-       clk_pll_ops.disable = clk_gate_ops.disable;
+       pax_open_kernel();
+       *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
+       *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
+       pax_close_kernel();
 
        clk = clk_register(NULL, &pll_clk->hw.hw);
        if (WARN_ON(IS_ERR(clk))) {
index b0c18ed8d83f707d000213e458dba613e4ffaf96..1713a80e95322d087e1b58ada0d28a5f270b8dd8 100644 (file)
@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
        data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
        per_cpu(acfreq_data, cpu) = data;
 
-       if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
-               acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
+       if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
+               pax_open_kernel();
+               *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
+               pax_close_kernel();
+       }
 
        result = acpi_processor_register_performance(data->acpi_data, cpu);
        if (result)
@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
-               acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
+               pax_open_kernel();
+               *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
+               pax_close_kernel();
                break;
        default:
                break;
@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
                if (!msrs)
                        return;
 
-               acpi_cpufreq_driver.boost_supported = true;
-               acpi_cpufreq_driver.boost_enabled = boost_state(0);
+               pax_open_kernel();
+               *(bool *)&acpi_cpufreq_driver.boost_supported = true;
+               *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
+               pax_close_kernel();
 
                cpu_notifier_register_begin();
 
index fde97d6e31d6d9749698aaf91bfae821f1a72f9f..3631ecad74fdfd2881f2c76fe6121982de20f3e1 100644 (file)
@@ -393,7 +393,9 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
        if (!IS_ERR(cpu_reg))
                regulator_put(cpu_reg);
 
-       dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
+       pax_open_kernel();
+       *(void **)&dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
+       pax_close_kernel();
 
        ret = cpufreq_register_driver(&dt_cpufreq_driver);
        if (ret)
index 7030c409be24bd4ed3343c17b10db8678b8bb214..3a97de696103b22f321adb13fa36e00fa7210a63 100644 (file)
@@ -2135,7 +2135,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
        }
 
        mutex_lock(&cpufreq_governor_mutex);
-       list_del(&governor->governor_list);
+       pax_list_del(&governor->governor_list);
        mutex_unlock(&cpufreq_governor_mutex);
        return;
 }
@@ -2351,7 +2351,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
        return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata cpufreq_cpu_notifier = {
+static struct notifier_block cpufreq_cpu_notifier = {
        .notifier_call = cpufreq_cpu_callback,
 };
 
@@ -2391,13 +2391,17 @@ int cpufreq_boost_trigger_state(int state)
                return 0;
 
        write_lock_irqsave(&cpufreq_driver_lock, flags);
-       cpufreq_driver->boost_enabled = state;
+       pax_open_kernel();
+       *(bool *)&cpufreq_driver->boost_enabled = state;
+       pax_close_kernel();
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
        ret = cpufreq_driver->set_boost(state);
        if (ret) {
                write_lock_irqsave(&cpufreq_driver_lock, flags);
-               cpufreq_driver->boost_enabled = !state;
+               pax_open_kernel();
+               *(bool *)&cpufreq_driver->boost_enabled = !state;
+               pax_close_kernel();
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
                pr_err("%s: Cannot %s BOOST\n",
@@ -2454,8 +2458,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 
        pr_debug("trying to register driver %s\n", driver_data->name);
 
-       if (driver_data->setpolicy)
-               driver_data->flags |= CPUFREQ_CONST_LOOPS;
+       if (driver_data->setpolicy) {
+               pax_open_kernel();
+               *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
+               pax_close_kernel();
+       }
 
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        if (cpufreq_driver) {
@@ -2470,8 +2477,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
                 * Check if driver provides function to enable boost -
                 * if not, use cpufreq_boost_set_sw as default
                 */
-               if (!cpufreq_driver->set_boost)
-                       cpufreq_driver->set_boost = cpufreq_boost_set_sw;
+               if (!cpufreq_driver->set_boost) {
+                       pax_open_kernel();
+                       *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
+                       pax_close_kernel();
+               }
 
                ret = cpufreq_sysfs_create_file(&boost.attr);
                if (ret) {
index 1b44496b2d2b3548bab6c1aff39a5c4f25c17428..b80ff5ee6804b319b1d7b5338d8267009e5a1746 100644 (file)
@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
        struct dbs_data *dbs_data;
        struct od_cpu_dbs_info_s *od_dbs_info = NULL;
        struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
-       struct od_ops *od_ops = NULL;
+       const struct od_ops *od_ops = NULL;
        struct od_dbs_tuners *od_tuners = NULL;
        struct cs_dbs_tuners *cs_tuners = NULL;
        struct cpu_dbs_common_info *cpu_cdbs;
@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
                if ((cdata->governor == GOV_CONSERVATIVE) &&
                                (!policy->governor->initialized)) {
-                       struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
+                       const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
 
                        cpufreq_register_notifier(cs_ops->notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
                        if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
                                (policy->governor->initialized == 1)) {
-                               struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
+                               const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
 
                                cpufreq_unregister_notifier(cs_ops->notifier_block,
                                                CPUFREQ_TRANSITION_NOTIFIER);
index cc401d147e727615c8255a951a08065ae954fb23..819734041ee0c543ed04fb0af0e0018bb341b8f0 100644 (file)
@@ -212,7 +212,7 @@ struct common_dbs_data {
        void (*exit)(struct dbs_data *dbs_data);
 
        /* Governor specific ops, see below */
-       void *gov_ops;
+       const void *gov_ops;
 };
 
 /* Governor Per policy data */
@@ -232,7 +232,7 @@ struct od_ops {
        unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
                        unsigned int freq_next, unsigned int relation);
        void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
-};
+} __no_const;
 
 struct cs_ops {
        struct notifier_block *notifier_block;
index ad3f38fd3eb9feefa12212362e1312884974d745..8f086cd408392aa5d39ad9c443fb0230b8e9a150 100644 (file)
@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
 
 define_get_cpu_dbs_routines(od_cpu_dbs_info);
 
-static struct od_ops od_ops = {
+static struct od_ops od_ops __read_only = {
        .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
        .powersave_bias_target = generic_powersave_bias_target,
        .freq_increase = dbs_freq_increase,
@@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
                (struct cpufreq_policy *, unsigned int, unsigned int),
                unsigned int powersave_bias)
 {
-       od_ops.powersave_bias_target = f;
+       pax_open_kernel();
+       *(void **)&od_ops.powersave_bias_target = f;
+       pax_close_kernel();
        od_set_powersave_bias(powersave_bias);
 }
 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
 
 void od_unregister_powersave_bias_handler(void)
 {
-       od_ops.powersave_bias_target = generic_powersave_bias_target;
+       pax_open_kernel();
+       *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
+       pax_close_kernel();
        od_set_powersave_bias(0);
 }
 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
index 742eefba12c2101d8bc8e035958d7bb59596ef93..e2fcfc8b65f3968fca04e24f5ba79088b2905ef5 100644 (file)
@@ -133,10 +133,10 @@ struct pstate_funcs {
 struct cpu_defaults {
        struct pstate_adjust_policy pid_policy;
        struct pstate_funcs funcs;
-};
+} __do_const;
 
 static struct pstate_adjust_policy pid_params;
-static struct pstate_funcs pstate_funcs;
+static struct pstate_funcs *pstate_funcs;
 static int hwp_active;
 
 struct perf_limits {
@@ -653,18 +653,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
 
        cpu->pstate.current_pstate = pstate;
 
-       pstate_funcs.set(cpu, pstate);
+       pstate_funcs->set(cpu, pstate);
 }
 
 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 {
-       cpu->pstate.min_pstate = pstate_funcs.get_min();
-       cpu->pstate.max_pstate = pstate_funcs.get_max();
-       cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
-       cpu->pstate.scaling = pstate_funcs.get_scaling();
+       cpu->pstate.min_pstate = pstate_funcs->get_min();
+       cpu->pstate.max_pstate = pstate_funcs->get_max();
+       cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
+       cpu->pstate.scaling = pstate_funcs->get_scaling();
 
-       if (pstate_funcs.get_vid)
-               pstate_funcs.get_vid(cpu);
+       if (pstate_funcs->get_vid)
+               pstate_funcs->get_vid(cpu);
        intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
 }
 
@@ -988,9 +988,9 @@ static int intel_pstate_msrs_not_valid(void)
        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);
 
-       if (!pstate_funcs.get_max() ||
-           !pstate_funcs.get_min() ||
-           !pstate_funcs.get_turbo())
+       if (!pstate_funcs->get_max() ||
+           !pstate_funcs->get_min() ||
+           !pstate_funcs->get_turbo())
                return -ENODEV;
 
        rdmsrl(MSR_IA32_APERF, tmp);
@@ -1004,7 +1004,7 @@ static int intel_pstate_msrs_not_valid(void)
        return 0;
 }
 
-static void copy_pid_params(struct pstate_adjust_policy *policy)
+static void copy_pid_params(const struct pstate_adjust_policy *policy)
 {
        pid_params.sample_rate_ms = policy->sample_rate_ms;
        pid_params.p_gain_pct = policy->p_gain_pct;
@@ -1016,12 +1016,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
 
 static void copy_cpu_funcs(struct pstate_funcs *funcs)
 {
-       pstate_funcs.get_max   = funcs->get_max;
-       pstate_funcs.get_min   = funcs->get_min;
-       pstate_funcs.get_turbo = funcs->get_turbo;
-       pstate_funcs.get_scaling = funcs->get_scaling;
-       pstate_funcs.set       = funcs->set;
-       pstate_funcs.get_vid   = funcs->get_vid;
+       pstate_funcs = funcs;
 }
 
 #if IS_ENABLED(CONFIG_ACPI)
index 529cfd92158fa6f7e0e9cfb79d88307c83c57ac3..0e28fffbe81d217d53461135a079559ea30bff79 100644 (file)
@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
                case 0x0F: /* Core Duo */
                case 0x16: /* Celeron Core */
                case 0x1C: /* Atom */
-                       p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+                       pax_open_kernel();
+                       *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+                       pax_close_kernel();
                        return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
                case 0x0D: /* Pentium M (Dothan) */
-                       p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+                       pax_open_kernel();
+                       *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+                       pax_close_kernel();
                        /* fall through */
                case 0x09: /* Pentium M (Banias) */
                        return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
 
        /* on P-4s, the TSC runs with constant frequency independent whether
         * throttling is active or not. */
-       p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+       pax_open_kernel();
+       *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+       pax_close_kernel();
 
        if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
                printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
index 9bb42ba50efaf90d9b773fa1767422dd3c1f7c98..b01b4a2ddf623f0747e5f01ec01e257af534f0bc 100644 (file)
 #include <asm/head.h>
 #include <asm/timer.h>
 
-static struct cpufreq_driver *cpufreq_us3_driver;
-
 struct us3_freq_percpu_info {
        struct cpufreq_frequency_table table[4];
 };
 
 /* Indexed by cpu number. */
-static struct us3_freq_percpu_info *us3_freq_table;
+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
 
 /* UltraSPARC-III has three dividers: 1, 2, and 32.  These are controlled
  * in the Safari config register.
@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
 
 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
 {
-       if (cpufreq_us3_driver)
-               us3_freq_target(policy, 0);
+       us3_freq_target(policy, 0);
 
        return 0;
 }
 
+static int __init us3_freq_init(void);
+static void __exit us3_freq_exit(void);
+
+static struct cpufreq_driver cpufreq_us3_driver = {
+       .init           = us3_freq_cpu_init,
+       .verify         = cpufreq_generic_frequency_table_verify,
+       .target_index   = us3_freq_target,
+       .get            = us3_freq_get,
+       .exit           = us3_freq_cpu_exit,
+       .name           = "UltraSPARC-III",
+
+};
+
 static int __init us3_freq_init(void)
 {
        unsigned long manuf, impl, ver;
-       int ret;
 
        if (tlb_type != cheetah && tlb_type != cheetah_plus)
                return -ENODEV;
@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
            (impl == CHEETAH_IMPL ||
             impl == CHEETAH_PLUS_IMPL ||
             impl == JAGUAR_IMPL ||
-            impl == PANTHER_IMPL)) {
-               struct cpufreq_driver *driver;
-
-               ret = -ENOMEM;
-               driver = kzalloc(sizeof(*driver), GFP_KERNEL);
-               if (!driver)
-                       goto err_out;
-
-               us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
-                       GFP_KERNEL);
-               if (!us3_freq_table)
-                       goto err_out;
-
-               driver->init = us3_freq_cpu_init;
-               driver->verify = cpufreq_generic_frequency_table_verify;
-               driver->target_index = us3_freq_target;
-               driver->get = us3_freq_get;
-               driver->exit = us3_freq_cpu_exit;
-               strcpy(driver->name, "UltraSPARC-III");
-
-               cpufreq_us3_driver = driver;
-               ret = cpufreq_register_driver(driver);
-               if (ret)
-                       goto err_out;
-
-               return 0;
-
-err_out:
-               if (driver) {
-                       kfree(driver);
-                       cpufreq_us3_driver = NULL;
-               }
-               kfree(us3_freq_table);
-               us3_freq_table = NULL;
-               return ret;
-       }
+            impl == PANTHER_IMPL))
+               return cpufreq_register_driver(&cpufreq_us3_driver);
 
        return -ENODEV;
 }
 
 static void __exit us3_freq_exit(void)
 {
-       if (cpufreq_us3_driver) {
-               cpufreq_unregister_driver(cpufreq_us3_driver);
-               kfree(cpufreq_us3_driver);
-               cpufreq_us3_driver = NULL;
-               kfree(us3_freq_table);
-               us3_freq_table = NULL;
-       }
+       cpufreq_unregister_driver(&cpufreq_us3_driver);
 }
 
 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
index 7d4a31571608524e75cfa71c35c5336dea485efa..21bb8860ab031455bac26a2aeaab288941997390 100644 (file)
@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
            !cpu_has(cpu, X86_FEATURE_EST))
                return -ENODEV;
 
-       if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
-               centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
+       if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
+               pax_open_kernel();
+               *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
+               pax_close_kernel();
+       }
 
        if (policy->cpu != 0)
                return -ENODEV;
index 2697e87d5b34ff9ae520f1130220afb4d0876dff..c32476c53c9495851b9b0e545065eff88af3417b 100644 (file)
@@ -194,7 +194,7 @@ static int poll_idle(struct cpuidle_device *dev,
 
 static void poll_idle_init(struct cpuidle_driver *drv)
 {
-       struct cpuidle_state *state = &drv->states[0];
+       cpuidle_state_no_const *state = &drv->states[0];
 
        snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
        snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
index fb9f511cca23724b4da463714bd49a8e53fba588..213e6cc1baddbca153e6cec282f4edf6a5473a69 100644 (file)
@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
        mutex_lock(&cpuidle_lock);
        if (__cpuidle_find_governor(gov->name) == NULL) {
                ret = 0;
-               list_add_tail(&gov->governor_list, &cpuidle_governors);
+               pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
                if (!cpuidle_curr_governor ||
                    cpuidle_curr_governor->rating < gov->rating)
                        cpuidle_switch_governor(gov);
index 97c5903b4606cf0fa02aecec826cbed605796234..023ad2315b0c22cfb34a14b68eff52eba8921fca 100644 (file)
@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
        NULL
 };
 
-static struct attribute_group cpuidle_attr_group = {
+static attribute_group_no_const cpuidle_attr_group = {
        .attrs = cpuidle_default_attrs,
        .name = "cpuidle",
 };
index 8d2a7728434d05cd06250e2c6fb2d74a3336bc88..33826c97a29a6fb7f7ca4152f8eed6eee95f7371 100644 (file)
@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
 MODULE_PARM_DESC(hifn_pll_ref,
                 "PLL reference clock (pci[freq] or ext[freq], default ext)");
 
-static atomic_t hifn_dev_number;
+static atomic_unchecked_t hifn_dev_number;
 
 #define ACRYPTO_OP_DECRYPT     0
 #define ACRYPTO_OP_ENCRYPT     1
@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto err_out_disable_pci_device;
 
        snprintf(name, sizeof(name), "hifn%d",
-                       atomic_inc_return(&hifn_dev_number)-1);
+                       atomic_inc_return_unchecked(&hifn_dev_number)-1);
 
        err = pci_request_regions(pdev, name);
        if (err)
index 30b538d8cc90a5cb5e6baa88172c5711aa804b93..1610d7517b91e9df3daf8e555101771282a97116 100644 (file)
@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
                goto err_out;
        }
 
-       list_add(&governor->node, &devfreq_governor_list);
+       pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
 
        list_for_each_entry(devfreq, &devfreq_list, node) {
                int ret = 0;
@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
                }
        }
 
-       list_del(&governor->node);
+       pax_list_del((struct list_head *)&governor->node);
 err_out:
        mutex_unlock(&devfreq_list_lock);
 
index 3a2adb131d462597c2ee07fe7819de75e4ef239e..b3be9a3b0f17e14378fbc0abdedb47c951afa6e6 100644 (file)
@@ -228,8 +228,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
                schan->slave_id = -EINVAL;
        }
 
-       schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
-                             sdev->desc_size, GFP_KERNEL);
+       schan->desc = kcalloc(sdev->desc_size,
+                             NR_DESCS_PER_CHANNEL, GFP_KERNEL);
        if (!schan->desc) {
                ret = -ENOMEM;
                goto edescalloc;
index aec8a84784a469d70f5958db4e1126790d21806b..7b45a1fde5f79a35feaade18e8ee358ed3a5c3f4 100644 (file)
@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
        return ret;
 }
 
-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
+static struct notifier_block sh_dmae_nmi_notifier = {
        .notifier_call  = sh_dmae_nmi_handler,
 
        /* Run before NMI debug handler and KGDB */
index 592af5f0cf391d292e05fd5c3a94b5589c7cb684..bb1d58314ff0dd82f32b2b96865f9a3bbf585240 100644 (file)
@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
  */
 int edac_device_alloc_index(void)
 {
-       static atomic_t device_indexes = ATOMIC_INIT(0);
+       static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
 
-       return atomic_inc_return(&device_indexes) - 1;
+       return atomic_inc_return_unchecked(&device_indexes) - 1;
 }
 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
 
index 670d2829c54768b9484fe5d982b4634e5b407665..6675f4d19d4be4de79d7e4b76e6818257c1c5ad6 100644 (file)
@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
 struct dev_ch_attribute {
        struct device_attribute attr;
        int channel;
-};
+} __do_const;
 
 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
        struct dev_ch_attribute dev_attr_legacy_##_name = \
@@ -1011,14 +1011,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
        }
 
        if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
+               pax_open_kernel();
                if (mci->get_sdram_scrub_rate) {
-                       dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
-                       dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
+                       *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
+                       *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
                }
                if (mci->set_sdram_scrub_rate) {
-                       dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
-                       dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
+                       *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
+                       *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
                }
+               pax_close_kernel();
                err = device_create_file(&mci->dev,
                                         &dev_attr_sdram_scrub_rate);
                if (err) {
index 2cf44b4db80c8beac0a5575cf00d1ed4cca32c4d..6dd2dc7b086c625459aeeccad51e23469e4d0b2e 100644 (file)
@@ -29,7 +29,7 @@
 
 static DEFINE_MUTEX(edac_pci_ctls_mutex);
 static LIST_HEAD(edac_pci_list);
-static atomic_t pci_indexes = ATOMIC_INIT(0);
+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
 
 /*
  * edac_pci_alloc_ctl_info
@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
  */
 int edac_pci_alloc_index(void)
 {
-       return atomic_inc_return(&pci_indexes) - 1;
+       return atomic_inc_return_unchecked(&pci_indexes) - 1;
 }
 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
 
index 24d877f6e57751b07123771c2f9e7b898c17cee6..4e30133f4d91a5c8a882b9879cafa73151ac0d66 100644 (file)
@@ -23,8 +23,8 @@ static int edac_pci_log_pe = 1;               /* log PCI parity errors */
 static int edac_pci_log_npe = 1;       /* log PCI non-parity error errors */
 static int edac_pci_poll_msec = 1000;  /* one second workq period */
 
-static atomic_t pci_parity_count = ATOMIC_INIT(0);
-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
 
 static struct kobject *edac_pci_top_main_kobj;
 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
@@ -232,7 +232,7 @@ struct edac_pci_dev_attribute {
        void *value;
         ssize_t(*show) (void *, char *);
         ssize_t(*store) (void *, const char *, size_t);
-};
+} __do_const;
 
 /* Set of show/store abstract level functions for PCI Parity object */
 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
@@ -576,7 +576,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
                        edac_printk(KERN_CRIT, EDAC_PCI,
                                "Signaled System Error on %s\n",
                                pci_name(dev));
-                       atomic_inc(&pci_nonparity_count);
+                       atomic_inc_unchecked(&pci_nonparity_count);
                }
 
                if (status & (PCI_STATUS_PARITY)) {
@@ -584,7 +584,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
                                "Master Data Parity Error on %s\n",
                                pci_name(dev));
 
-                       atomic_inc(&pci_parity_count);
+                       atomic_inc_unchecked(&pci_parity_count);
                }
 
                if (status & (PCI_STATUS_DETECTED_PARITY)) {
@@ -592,7 +592,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
                                "Detected Parity Error on %s\n",
                                pci_name(dev));
 
-                       atomic_inc(&pci_parity_count);
+                       atomic_inc_unchecked(&pci_parity_count);
                }
        }
 
@@ -615,7 +615,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
                                edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
                                        "Signaled System Error on %s\n",
                                        pci_name(dev));
-                               atomic_inc(&pci_nonparity_count);
+                               atomic_inc_unchecked(&pci_nonparity_count);
                        }
 
                        if (status & (PCI_STATUS_PARITY)) {
@@ -623,7 +623,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
                                        "Master Data Parity Error on "
                                        "%s\n", pci_name(dev));
 
-                               atomic_inc(&pci_parity_count);
+                               atomic_inc_unchecked(&pci_parity_count);
                        }
 
                        if (status & (PCI_STATUS_DETECTED_PARITY)) {
@@ -631,7 +631,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
                                        "Detected Parity Error on %s\n",
                                        pci_name(dev));
 
-                               atomic_inc(&pci_parity_count);
+                               atomic_inc_unchecked(&pci_parity_count);
                        }
                }
        }
@@ -669,7 +669,7 @@ void edac_pci_do_parity_check(void)
        if (!check_pci_errors)
                return;
 
-       before_count = atomic_read(&pci_parity_count);
+       before_count = atomic_read_unchecked(&pci_parity_count);
 
        /* scan all PCI devices looking for a Parity Error on devices and
         * bridges.
@@ -681,7 +681,7 @@ void edac_pci_do_parity_check(void)
        /* Only if operator has selected panic on PCI Error */
        if (edac_pci_get_panic_on_pe()) {
                /* If the count is different 'after' from 'before' */
-               if (before_count != atomic_read(&pci_parity_count))
+               if (before_count != atomic_read_unchecked(&pci_parity_count))
                        panic("EDAC: PCI Parity Error");
        }
 }
index c2359a1ea6b300443f750624fd4cb6beb295d05c..8bd119dd2f6e80d1d827d55ea3c943ad54c51fbe 100644 (file)
@@ -74,7 +74,7 @@ struct amd_decoder_ops {
        bool (*mc0_mce)(u16, u8);
        bool (*mc1_mce)(u16, u8);
        bool (*mc2_mce)(u16, u8);
-};
+} __no_const;
 
 void amd_report_gart_errors(bool);
 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
index 57ea7f464178708576c5809886d50568e5c71dae..af06b76341a81350a28b742ab967456539f3800b 100644 (file)
@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
                        const struct fw_card_driver *driver,
                        struct device *device)
 {
-       static atomic_t index = ATOMIC_INIT(-1);
+       static atomic_unchecked_t index = ATOMIC_INIT(-1);
 
-       card->index = atomic_inc_return(&index);
+       card->index = atomic_inc_return_unchecked(&index);
        card->driver = driver;
        card->device = device;
        card->current_tlabel = 0;
@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
 
 void fw_core_remove_card(struct fw_card *card)
 {
-       struct fw_card_driver dummy_driver = dummy_driver_template;
+       fw_card_driver_no_const dummy_driver = dummy_driver_template;
 
        card->driver->update_phy_reg(card, 4,
                                     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
index f9e3aee6a2113031a8df272eacdf024c504ba27e..269dbdb05e0f5fd668991de88830c7bbaea4090a 100644 (file)
@@ -256,7 +256,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
 struct config_rom_attribute {
        struct device_attribute attr;
        u32 key;
-};
+} __do_const;
 
 static ssize_t show_immediate(struct device *dev,
                              struct device_attribute *dattr, char *buf)
index eb6935c8ad9449bb1c557ac8c596940fc2919096..3cc2bfa6d6d479bcf2469e09b3e3293b6e8af96b 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/timer.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
+#include <linux/sched.h>
 
 #include <asm/byteorder.h>
 
index e1480ff683d281f95320918712e566f6cec2e6b7..1a429bdcd1123dd7fd9a739b014ba3da689f6de9 100644 (file)
@@ -111,6 +111,7 @@ struct fw_card_driver {
 
        int (*stop_iso)(struct fw_iso_context *ctx);
 };
+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
 
 void fw_card_initialize(struct fw_card *card,
                const struct fw_card_driver *driver, struct device *device);
index aff9018d06588d7b37cf6f836cb62d12d0597bf0..fc87dede093d3c104b01f49a44ce703c7abbb4f8 100644 (file)
@@ -2054,10 +2054,12 @@ static void bus_reset_work(struct work_struct *work)
                          be32_to_cpu(ohci->next_header));
        }
 
+#ifndef CONFIG_GRKERNSEC
        if (param_remote_dma) {
                reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
                reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
        }
+#endif
 
        spin_unlock_irq(&ohci->lock);
 
@@ -2589,8 +2591,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
        unsigned long flags;
        int n, ret = 0;
 
+#ifndef CONFIG_GRKERNSEC
        if (param_remote_dma)
                return 0;
+#endif
 
        /*
         * FIXME:  Make sure this bitmask is cleared when we clear the busReset
index 94a58a082b9930a2acc05192273c6941ef003ef6..f5eba42e56c22d6d6679063200cd6b66f3fabe2d 100644 (file)
@@ -16,7 +16,7 @@
 struct dmi_device_attribute{
        struct device_attribute dev_attr;
        int field;
-};
+} __do_const;
 #define to_dmi_dev_attr(_dev_attr) \
        container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
 
index 69fac068669fde566f41013cefbdf48db023466c..820f0c9a8cf07959c70f24932eb6a37c5d140df3 100644 (file)
@@ -901,7 +901,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
        if (buf == NULL)
                return -1;
 
-       dmi_table(buf, dmi_len, dmi_num, decode, private_data);
+       dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
 
        dmi_unmap(buf);
        return 0;
index 4fd9961d552e8a0c12604f1cfef83f2e15bea66d..52d60cef39b76d9856cf772449750a282c1a3590 100644 (file)
@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
  */
 u64 cper_next_record_id(void)
 {
-       static atomic64_t seq;
+       static atomic64_unchecked_t seq;
 
-       if (!atomic64_read(&seq))
-               atomic64_set(&seq, ((u64)get_seconds()) << 32);
+       if (!atomic64_read_unchecked(&seq))
+               atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
 
-       return atomic64_inc_return(&seq);
+       return atomic64_inc_return_unchecked(&seq);
 }
 EXPORT_SYMBOL_GPL(cper_next_record_id);
 
index 9035c1b74d5839471445facd9100b8522c2e9dd8..aff45f8ad7980a7de3caba1a0d571cf4381d0d67 100644 (file)
@@ -151,14 +151,16 @@ static struct attribute_group efi_subsys_attr_group = {
 };
 
 static struct efivars generic_efivars;
-static struct efivar_operations generic_ops;
+static efivar_operations_no_const generic_ops __read_only;
 
 static int generic_ops_register(void)
 {
-       generic_ops.get_variable = efi.get_variable;
-       generic_ops.set_variable = efi.set_variable;
-       generic_ops.get_next_variable = efi.get_next_variable;
-       generic_ops.query_variable_store = efi_query_variable_store;
+       pax_open_kernel();
+       *(void **)&generic_ops.get_variable = efi.get_variable;
+       *(void **)&generic_ops.set_variable = efi.set_variable;
+       *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
+       *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
+       pax_close_kernel();
 
        return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
 }
index f256ecd8a176483ba4b9919c55c0a6c1ad2c0aef..387dcb18a653247b70245fded6f52528848f2ab1 100644 (file)
@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
 static int
 create_efivars_bin_attributes(void)
 {
-       struct bin_attribute *attr;
+       bin_attribute_no_const *attr;
        int error;
 
        /* new_var */
index 2f569aaed4c754d361b04c3f09da717db35471e3..c95f4fb481fc206c8b245b57404c3c659a4ce484 100644 (file)
@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
        if (!found_memconsole())
                return -ENODEV;
 
-       memconsole_bin_attr.size = memconsole_length;
+       pax_open_kernel();
+       *(size_t *)&memconsole_bin_attr.size = memconsole_length;
+       pax_close_kernel();
+
        return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
 }
 
index 3cfcfc620c8e4bab5938c9ebe2bf9c818481d63b..09d6f1174eb9ce6f6acea0ca216373a6cbddfdd9 100644 (file)
@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
        struct em_gio_priv *p;
        struct resource *io[2], *irq[2];
        struct gpio_chip *gpio_chip;
-       struct irq_chip *irq_chip;
+       irq_chip_no_const *irq_chip;
        const char *name = dev_name(&pdev->dev);
        int ret;
 
index 7818cd1453ae88cd7d806a1664479836b20fb9e4..1be40e50c047b235202c4549c456b64c159f2fef 100644 (file)
@@ -94,7 +94,7 @@ struct ichx_desc {
         * this option allows driver caching written output values
         */
        bool use_outlvl_cache;
-};
+} __do_const;
 
 static struct {
        spinlock_t lock;
index f476ae2eb0b3c8610e54377cf7e3010079e916bf..05e1bddc108bc575d688176a262652e7ef020996 100644 (file)
@@ -1188,7 +1188,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
        const struct omap_gpio_platform_data *pdata;
        struct resource *res;
        struct gpio_bank *bank;
-       struct irq_chip *irqc;
+       irq_chip_no_const *irqc;
        int ret;
 
        match = of_match_device(of_match_ptr(omap_gpio_match), dev);
index 584484e3f1e3cd1dfe4ef6c99d2f6b9748174019..e26ebd6affbd6d15489e1f58d8d2d329695f3cd1 100644 (file)
@@ -366,7 +366,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
        struct gpio_rcar_priv *p;
        struct resource *io, *irq;
        struct gpio_chip *gpio_chip;
-       struct irq_chip *irq_chip;
+       irq_chip_no_const *irq_chip;
        struct device *dev = &pdev->dev;
        const char *name = dev_name(dev);
        int ret;
index c1caa459c02da527f580f6b8445fdf4df28ce383..f0f97d2c32c9f9a27b9575d5dbf328c64e133a26 100644 (file)
@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
        printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
               maskl, pendl, maskh, pendh);
 
-       atomic_inc(&irq_err_count);
+       atomic_inc_unchecked(&irq_err_count);
 
        return -EINVAL;
 }
index 568aa2b6bdb019e9285372d731d8a9e9bcbed1f7..d1204d8492cf6f7ecbbdc075781d9bfb88a72858 100644 (file)
@@ -554,8 +554,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
        }
 
        if (gpiochip->irqchip) {
-               gpiochip->irqchip->irq_request_resources = NULL;
-               gpiochip->irqchip->irq_release_resources = NULL;
+               pax_open_kernel();
+               *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
+               *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
+               pax_close_kernel();
                gpiochip->irqchip = NULL;
        }
 }
@@ -621,8 +623,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
                gpiochip->irqchip = NULL;
                return -EINVAL;
        }
-       irqchip->irq_request_resources = gpiochip_irq_reqres;
-       irqchip->irq_release_resources = gpiochip_irq_relres;
+
+       pax_open_kernel();
+       *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
+       *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
+       pax_close_kernel();
 
        /*
         * Prepare the mapping since the irqchip shall be orthogonal to
index 5213da499d39febae587057ad9b38701e787db32..7ef736edb23b73cf63fa8d025e3e7eb900ae3f54 100644 (file)
@@ -3961,7 +3961,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
                                        goto done;
                                }
 
-                               if (copy_to_user(&enum_ptr[copied].name,
+                               if (copy_to_user(enum_ptr[copied].name,
                                                 &prop_enum->name, DRM_PROP_NAME_LEN)) {
                                        ret = -EFAULT;
                                        goto done;
index 4f41377b0b80963f9ce84b3492412d025924652b..ee33f401b179d9d7c5697e7674072de8befe9d9e 100644 (file)
@@ -444,7 +444,7 @@ void drm_unplug_dev(struct drm_device *dev)
 
        drm_device_set_unplugged(dev);
 
-       if (dev->open_count == 0) {
+       if (local_read(&dev->open_count) == 0) {
                drm_put_dev(dev);
        }
        mutex_unlock(&drm_global_mutex);
index 0b9514b6cd640793b7f73e8737bf310cb28b23ee..6acd17424c748f142a9de2e945a4a5fac462934f 100644 (file)
@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
                return PTR_ERR(minor);
 
        dev = minor->dev;
-       if (!dev->open_count++)
+       if (local_inc_return(&dev->open_count) == 1)
                need_setup = 1;
 
        /* share address_space across all char-devs of a single device */
@@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
        return 0;
 
 err_undo:
-       dev->open_count--;
+       local_dec(&dev->open_count);
        drm_minor_release(minor);
        return retcode;
 }
@@ -376,7 +376,7 @@ int drm_release(struct inode *inode, struct file *filp)
 
        mutex_lock(&drm_global_mutex);
 
-       DRM_DEBUG("open_count = %d\n", dev->open_count);
+       DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
 
        mutex_lock(&dev->struct_mutex);
        list_del(&file_priv->lhead);
@@ -389,10 +389,10 @@ int drm_release(struct inode *inode, struct file *filp)
         * Begin inline drm_release
         */
 
-       DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+       DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
                  task_pid_nr(current),
                  (long)old_encode_dev(file_priv->minor->kdev->devt),
-                 dev->open_count);
+                 local_read(&dev->open_count));
 
        /* Release any auth tokens that might point to this file_priv,
           (do that under the drm_global_mutex) */
@@ -465,7 +465,7 @@ int drm_release(struct inode *inode, struct file *filp)
         * End inline drm_release
         */
 
-       if (!--dev->open_count) {
+       if (local_dec_and_test(&dev->open_count)) {
                retcode = drm_lastclose(dev);
                if (drm_device_is_unplugged(dev))
                        drm_put_dev(dev);
index 3d2e91c4d78e1c1003ac9521250dc4ca41c9b6ae..d31c4c9e82d3beb0fb8268293ad2d6d243906950 100644 (file)
@@ -36,7 +36,7 @@
 struct drm_global_item {
        struct mutex mutex;
        void *object;
-       int refcount;
+       atomic_t refcount;
 };
 
 static struct drm_global_item glob[DRM_GLOBAL_NUM];
@@ -49,7 +49,7 @@ void drm_global_init(void)
                struct drm_global_item *item = &glob[i];
                mutex_init(&item->mutex);
                item->object = NULL;
-               item->refcount = 0;
+               atomic_set(&item->refcount, 0);
        }
 }
 
@@ -59,7 +59,7 @@ void drm_global_release(void)
        for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
                struct drm_global_item *item = &glob[i];
                BUG_ON(item->object != NULL);
-               BUG_ON(item->refcount != 0);
+               BUG_ON(atomic_read(&item->refcount) != 0);
        }
 }
 
@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
        struct drm_global_item *item = &glob[ref->global_type];
 
        mutex_lock(&item->mutex);
-       if (item->refcount == 0) {
+       if (atomic_read(&item->refcount) == 0) {
                item->object = kzalloc(ref->size, GFP_KERNEL);
                if (unlikely(item->object == NULL)) {
                        ret = -ENOMEM;
@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
                        goto out_err;
 
        }
-       ++item->refcount;
+       atomic_inc(&item->refcount);
        ref->object = item->object;
        mutex_unlock(&item->mutex);
        return 0;
@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
        struct drm_global_item *item = &glob[ref->global_type];
 
        mutex_lock(&item->mutex);
-       BUG_ON(item->refcount == 0);
+       BUG_ON(atomic_read(&item->refcount) == 0);
        BUG_ON(ref->object != item->object);
-       if (--item->refcount == 0) {
+       if (atomic_dec_and_test(&item->refcount)) {
                ref->release(ref);
                item->object = NULL;
        }
index 51efebd434f302d61134c2eb384935ef4c1b2521..2b70935c9a916c11507b5e4a33d376b4b77aa678 100644 (file)
@@ -76,10 +76,13 @@ int drm_vm_info(struct seq_file *m, void *data)
        struct drm_local_map *map;
        struct drm_map_list *r_list;
 
-       /* Hardcoded from _DRM_FRAME_BUFFER,
-          _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
-          _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
-       const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
+       static const char * const types[] = {
+               [_DRM_FRAME_BUFFER] = "FB",
+               [_DRM_REGISTERS] = "REG",
+               [_DRM_SHM] = "SHM",
+               [_DRM_AGP] = "AGP",
+               [_DRM_SCATTER_GATHER] = "SG",
+               [_DRM_CONSISTENT] = "PCI"};
        const char *type;
        int i;
 
@@ -90,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
                map = r_list->map;
                if (!map)
                        continue;
-               if (map->type < 0 || map->type > 5)
+               if (map->type >= ARRAY_SIZE(types))
                        type = "??";
                else
                        type = types[map->type];
index 2f4c4343dfa32f5a3f2c81e83a67f9409a31e11f..dd12cd2d4b35f420131d872ac92ce885c209ed40 100644 (file)
@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
        request = compat_alloc_user_space(nbytes);
        if (!access_ok(VERIFY_WRITE, request, nbytes))
                return -EFAULT;
-       list = (struct drm_buf_desc *) (request + 1);
+       list = (struct drm_buf_desc __user *) (request + 1);
 
        if (__put_user(count, &request->count)
            || __put_user(list, &request->list))
@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
        request = compat_alloc_user_space(nbytes);
        if (!access_ok(VERIFY_WRITE, request, nbytes))
                return -EFAULT;
-       list = (struct drm_buf_pub *) (request + 1);
+       list = (struct drm_buf_pub __user *) (request + 1);
 
        if (__put_user(count, &request->count)
            || __put_user(list, &request->list))
@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
        return 0;
 }
 
-drm_ioctl_compat_t *drm_compat_ioctls[] = {
+drm_ioctl_compat_t drm_compat_ioctls[] = {
        [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
        [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
        [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
        unsigned int nr = DRM_IOCTL_NR(cmd);
-       drm_ioctl_compat_t *fn;
        int ret;
 
        /* Assume that ioctls without an explicit compat routine will just
@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        if (nr >= ARRAY_SIZE(drm_compat_ioctls))
                return drm_ioctl(filp, cmd, arg);
 
-       fn = drm_compat_ioctls[nr];
-
-       if (fn != NULL)
-               ret = (*fn) (filp, cmd, arg);
+       if (drm_compat_ioctls[nr] != NULL)
+               ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
        else
                ret = drm_ioctl(filp, cmd, arg);
 
index 00587a1e3c83c0dc23c05a55a7d37ea10f891847..57a65ca10980476e0e7b42d27994be2ea047a651 100644 (file)
@@ -642,7 +642,7 @@ long drm_ioctl(struct file *filp,
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev;
        const struct drm_ioctl_desc *ioctl = NULL;
-       drm_ioctl_t *func;
+       drm_ioctl_no_const_t func;
        unsigned int nr = DRM_IOCTL_NR(cmd);
        int retcode = -EINVAL;
        char stack_kdata[128];
index 93ec5dc4e7d3034743bf03f32923f8c3db932abc..82acbaf2d4f7462291b3fdf370a1539205255e70 100644 (file)
@@ -110,8 +110,8 @@ typedef struct drm_i810_private {
        int page_flipping;
 
        wait_queue_head_t irq_queue;
-       atomic_t irq_received;
-       atomic_t irq_emitted;
+       atomic_unchecked_t irq_received;
+       atomic_unchecked_t irq_emitted;
 
        int front_offset;
 } drm_i810_private_t;
index ecee3bcc8772907850a536166851624ab4795800..ad5ae67436162b4dfe429c8c82b172b208eb7489 100644 (file)
@@ -356,7 +356,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
         * locking inversion with the driver load path. And the access here is
         * completely racy anyway. So don't bother with locking for now.
         */
-       return dev->open_count == 0;
+       return local_read(&dev->open_count) == 0;
 }
 
 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
index 11738316394af9b16669155dbbec74fde0f22223..7dfb389d4599dd859c5c53c88def34524ce7ca37 100644 (file)
@@ -863,12 +863,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
 static int
 validate_exec_list(struct drm_device *dev,
                   struct drm_i915_gem_exec_object2 *exec,
-                  int count)
+                  unsigned int count)
 {
        unsigned relocs_total = 0;
        unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
        unsigned invalid_flags;
-       int i;
+       unsigned int i;
 
        invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
        if (USES_FULL_PPGTT(dev))
index 176de6322e4d0039482422bc1210ff89a1af769b..1ef9ac7409902a6abf49ee9feed812a2168fcc4e 100644 (file)
@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
                         (unsigned long)request);
 }
 
-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
+static drm_ioctl_compat_t i915_compat_ioctls[] = {
        [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
        [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
        [DRM_I915_GETPARAM] = compat_i915_getparam,
@@ -201,18 +201,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
        unsigned int nr = DRM_IOCTL_NR(cmd);
-       drm_ioctl_compat_t *fn = NULL;
        int ret;
 
        if (nr < DRM_COMMAND_BASE)
                return drm_compat_ioctl(filp, cmd, arg);
 
-       if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
-               fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
-
-       if (fn != NULL)
+       if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) {
+               drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
                ret = (*fn) (filp, cmd, arg);
-       else
+       } else
                ret = drm_ioctl(filp, cmd, arg);
 
        return ret;
index 30d4eb300be07f64bcc48e540a52b6911e0ba7f5..92f2dc8060906d00cac8c00c79a10f57f031ca72 100644 (file)
@@ -12935,13 +12935,13 @@ struct intel_quirk {
        int subsystem_vendor;
        int subsystem_device;
        void (*hook)(struct drm_device *dev);
-};
+} __do_const;
 
 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
 struct intel_dmi_quirk {
        void (*hook)(struct drm_device *dev);
        const struct dmi_system_id (*dmi_id_list)[];
-};
+} __do_const;
 
 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
 {
@@ -12949,18 +12949,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
        return 1;
 }
 
-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+static const struct dmi_system_id intel_dmi_quirks_table[] = {
        {
-               .dmi_id_list = &(const struct dmi_system_id[]) {
-                       {
-                               .callback = intel_dmi_reverse_brightness,
-                               .ident = "NCR Corporation",
-                               .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
-                                           DMI_MATCH(DMI_PRODUCT_NAME, ""),
-                               },
-                       },
-                       { }  /* terminating entry */
+               .callback = intel_dmi_reverse_brightness,
+               .ident = "NCR Corporation",
+               .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+                           DMI_MATCH(DMI_PRODUCT_NAME, ""),
                },
+       },
+       { }  /* terminating entry */
+};
+
+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+       {
+               .dmi_id_list = &intel_dmi_quirks_table,
                .hook = quirk_invert_brightness,
        },
 };
index b250130debc87ebd138e8bac964cbadfba776b5d..98df2a4d3463b006872ce265cb5fa7bcc9794cbd 100644 (file)
@@ -356,7 +356,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
        if (imxdrm->pipes >= MAX_CRTC)
                return -EINVAL;
 
-       if (imxdrm->drm->open_count)
+       if (local_read(&imxdrm->drm->open_count))
                return -EBUSY;
 
        imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
index b4a2014917e525109eaccdd5cd2237e6eb82915d..219ab78f9f6f31388c822fa20b8e7cf148274966 100644 (file)
@@ -122,9 +122,9 @@ typedef struct drm_mga_private {
        u32 clear_cmd;
        u32 maccess;
 
-       atomic_t vbl_received;          /**< Number of vblanks received. */
+       atomic_unchecked_t vbl_received;          /**< Number of vblanks received. */
        wait_queue_head_t fence_queue;
-       atomic_t last_fence_retired;
+       atomic_unchecked_t last_fence_retired;
        u32 next_fence_to_post;
 
        unsigned int fb_cpp;
index 729bfd56b55f49d9bd71a501f6717659a2f47af1..ead88234851f4cd0f9d583e06d6c652ca959dd50 100644 (file)
@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
        return 0;
 }
 
-drm_ioctl_compat_t *mga_compat_ioctls[] = {
+drm_ioctl_compat_t mga_compat_ioctls[] = {
        [DRM_MGA_INIT] = compat_mga_init,
        [DRM_MGA_GETPARAM] = compat_mga_getparam,
        [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
        unsigned int nr = DRM_IOCTL_NR(cmd);
-       drm_ioctl_compat_t *fn = NULL;
        int ret;
 
        if (nr < DRM_COMMAND_BASE)
                return drm_compat_ioctl(filp, cmd, arg);
 
-       if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
-               fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
-
-       if (fn != NULL)
+       if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) {
+               drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
                ret = (*fn) (filp, cmd, arg);
-       else
+       } else
                ret = drm_ioctl(filp, cmd, arg);
 
        return ret;
index 1b071b8ff9dccec81e1d93376d0b6b09bc58521b..de8601a560fb867441262c1987e73f966549dab0 100644 (file)
@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
        if (crtc != 0)
                return 0;
 
-       return atomic_read(&dev_priv->vbl_received);
+       return atomic_read_unchecked(&dev_priv->vbl_received);
 }
 
 
@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
        /* VBLANK interrupt */
        if (status & MGA_VLINEPEN) {
                MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
-               atomic_inc(&dev_priv->vbl_received);
+               atomic_inc_unchecked(&dev_priv->vbl_received);
                drm_handle_vblank(dev, 0);
                handled = 1;
        }
@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
                if ((prim_start & ~0x03) != (prim_end & ~0x03))
                        MGA_WRITE(MGA_PRIMEND, prim_end);
 
-               atomic_inc(&dev_priv->last_fence_retired);
+               atomic_inc_unchecked(&dev_priv->last_fence_retired);
                wake_up(&dev_priv->fence_queue);
                handled = 1;
        }
@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
         * using fences.
         */
        DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
-                   (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
+                   (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
                      - *sequence) <= (1 << 23)));
 
        *sequence = cur_fence;
index 7df6acc8bb3413ad847db56c5b234764dbd7124e..84bbe52ce2cb123d0d120bbf3b6f95ee8e177cd9 100644 (file)
@@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
 struct bit_table {
        const char id;
        int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
-};
+} __no_const;
 
 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
 
index 8ae36f265fb8867b6092a48fcde8b06450148b40..1147a3087cbe2e3490446713ac8011a6242451ef 100644 (file)
@@ -121,7 +121,6 @@ struct nouveau_drm {
                struct drm_global_reference mem_global_ref;
                struct ttm_bo_global_ref bo_global_ref;
                struct ttm_bo_device bdev;
-               atomic_t validate_sequence;
                int (*move)(struct nouveau_channel *,
                            struct ttm_buffer_object *,
                            struct ttm_mem_reg *, struct ttm_mem_reg *);
index 462679a8fec5783735a5cabd61058a79e212999c..88e32a794279af6edb16d4371a420b486079bd26 100644 (file)
@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
                         unsigned long arg)
 {
        unsigned int nr = DRM_IOCTL_NR(cmd);
-       drm_ioctl_compat_t *fn = NULL;
+       drm_ioctl_compat_t fn = NULL;
        int ret;
 
        if (nr < DRM_COMMAND_BASE)
index 3d1cfcb96b6bfd9e7dfa50e282d6e3761009f7e1..05427004e2f321c33009e83ee674f69a4866d9fb 100644 (file)
@@ -127,11 +127,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
 }
 
 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
-       nouveau_vram_manager_init,
-       nouveau_vram_manager_fini,
-       nouveau_vram_manager_new,
-       nouveau_vram_manager_del,
-       nouveau_vram_manager_debug
+       .init = nouveau_vram_manager_init,
+       .takedown = nouveau_vram_manager_fini,
+       .get_node = nouveau_vram_manager_new,
+       .put_node = nouveau_vram_manager_del,
+       .debug = nouveau_vram_manager_debug
 };
 
 static int
@@ -195,11 +195,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
 }
 
 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
-       nouveau_gart_manager_init,
-       nouveau_gart_manager_fini,
-       nouveau_gart_manager_new,
-       nouveau_gart_manager_del,
-       nouveau_gart_manager_debug
+       .init = nouveau_gart_manager_init,
+       .takedown = nouveau_gart_manager_fini,
+       .get_node = nouveau_gart_manager_new,
+       .put_node = nouveau_gart_manager_del,
+       .debug = nouveau_gart_manager_debug
 };
 
 /*XXX*/
@@ -268,11 +268,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
 }
 
 const struct ttm_mem_type_manager_func nv04_gart_manager = {
-       nv04_gart_manager_init,
-       nv04_gart_manager_fini,
-       nv04_gart_manager_new,
-       nv04_gart_manager_del,
-       nv04_gart_manager_debug
+       .init = nv04_gart_manager_init,
+       .takedown = nv04_gart_manager_fini,
+       .get_node = nv04_gart_manager_new,
+       .put_node = nv04_gart_manager_del,
+       .debug = nv04_gart_manager_debug
 };
 
 int
index c7592ec8ecb8e430a49e836e05d64914dd2693ac..dd45ebcbd60a3f8609e84c95416159ec043133bd 100644 (file)
@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
         * locking inversion with the driver load path. And the access here is
         * completely racy anyway. So don't bother with locking for now.
         */
-       return dev->open_count == 0;
+       return local_read(&dev->open_count) == 0;
 }
 
 static const struct vga_switcheroo_client_ops
index 97823644d34743a285d025c810a5168ed8c8f142..89bd954bd2549f9348f7e6e663c245fcc850f971 100644 (file)
@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
        int ret;
 
        mutex_lock(&qdev->async_io_mutex);
-       irq_num = atomic_read(&qdev->irq_received_io_cmd);
+       irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
        if (qdev->last_sent_io_cmd > irq_num) {
                if (intr)
                        ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
-                                                              atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+                                                              atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
                else
                        ret = wait_event_timeout(qdev->io_cmd_event,
-                                                atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+                                                atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
                /* 0 is timeout, just bail the "hw" has gone away */
                if (ret <= 0)
                        goto out;
-               irq_num = atomic_read(&qdev->irq_received_io_cmd);
+               irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
        }
        outb(val, addr);
        qdev->last_sent_io_cmd = irq_num + 1;
        if (intr)
                ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
-                                                      atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+                                                      atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
        else
                ret = wait_event_timeout(qdev->io_cmd_event,
-                                        atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+                                        atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
 out:
        if (ret > 0)
                ret = 0;
index 6911b8c444928fdaa04f2f2d82b1180def5aba09..89d6867078d67691a31bf7e971f6c25b37fa178f 100644 (file)
@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct qxl_device *qdev = node->minor->dev->dev_private;
 
-       seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
-       seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
-       seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
-       seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
+       seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
+       seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
+       seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
+       seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
        seq_printf(m, "%d\n", qdev->irq_received_error);
        return 0;
 }
index 7c6cafe21f5f5fd2b354c8d34494fff51fc84d3b..460f542aaea1fe293ff7eb2039a413b621ed9015 100644 (file)
@@ -290,10 +290,10 @@ struct qxl_device {
        unsigned int last_sent_io_cmd;
 
        /* interrupt handling */
-       atomic_t irq_received;
-       atomic_t irq_received_display;
-       atomic_t irq_received_cursor;
-       atomic_t irq_received_io_cmd;
+       atomic_unchecked_t irq_received;
+       atomic_unchecked_t irq_received_display;
+       atomic_unchecked_t irq_received_cursor;
+       atomic_unchecked_t irq_received_io_cmd;
        unsigned irq_received_error;
        wait_queue_head_t display_event;
        wait_queue_head_t cursor_event;
index b110883f8253dc272498abb64b4c408146519930..dd06418c1848bd393e39a948c7ce06847bbb4b71 100644 (file)
@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
 
        /* TODO copy slow path code from i915 */
        fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
-       unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
+       unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
 
        {
                struct qxl_drawable *draw = fb_cmd;
@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
                struct drm_qxl_reloc reloc;
 
                if (copy_from_user(&reloc,
-                                      &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
+                                      &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
                                       sizeof(reloc))) {
                        ret = -EFAULT;
                        goto out_free_bos;
@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
 
        for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
 
-               struct drm_qxl_command *commands =
-                       (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
+               struct drm_qxl_command __user *commands =
+                       (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
 
-               if (copy_from_user(&user_cmd, &commands[cmd_num],
+               if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
                                       sizeof(user_cmd)))
                        return -EFAULT;
 
index 0bf1e20c6e44cee16a4c11b18b7903713e4707a9..42a73107a57b5b91039bbdf15222b8e9882063a9 100644 (file)
@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
        if (!pending)
                return IRQ_NONE;
 
-       atomic_inc(&qdev->irq_received);
+       atomic_inc_unchecked(&qdev->irq_received);
 
        if (pending & QXL_INTERRUPT_DISPLAY) {
-               atomic_inc(&qdev->irq_received_display);
+               atomic_inc_unchecked(&qdev->irq_received_display);
                wake_up_all(&qdev->display_event);
                qxl_queue_garbage_collect(qdev, false);
        }
        if (pending & QXL_INTERRUPT_CURSOR) {
-               atomic_inc(&qdev->irq_received_cursor);
+               atomic_inc_unchecked(&qdev->irq_received_cursor);
                wake_up_all(&qdev->cursor_event);
        }
        if (pending & QXL_INTERRUPT_IO_CMD) {
-               atomic_inc(&qdev->irq_received_io_cmd);
+               atomic_inc_unchecked(&qdev->irq_received_io_cmd);
                wake_up_all(&qdev->io_cmd_event);
        }
        if (pending & QXL_INTERRUPT_ERROR) {
@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
        init_waitqueue_head(&qdev->io_cmd_event);
        INIT_WORK(&qdev->client_monitors_config_work,
                  qxl_client_monitors_config_work_func);
-       atomic_set(&qdev->irq_received, 0);
-       atomic_set(&qdev->irq_received_display, 0);
-       atomic_set(&qdev->irq_received_cursor, 0);
-       atomic_set(&qdev->irq_received_io_cmd, 0);
+       atomic_set_unchecked(&qdev->irq_received, 0);
+       atomic_set_unchecked(&qdev->irq_received_display, 0);
+       atomic_set_unchecked(&qdev->irq_received_cursor, 0);
+       atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
        qdev->irq_received_error = 0;
        ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
        qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
index 0cbc4c9871643eb5da5cf210fc8b301dee912feb..0e4668640a1a92007ec2da9fc3667da322b816ba 100644 (file)
@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
        }
 }
 
-static struct vm_operations_struct qxl_ttm_vm_ops;
+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
 static const struct vm_operations_struct *ttm_vm_ops;
 
 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
                return r;
        if (unlikely(ttm_vm_ops == NULL)) {
                ttm_vm_ops = vma->vm_ops;
+               pax_open_kernel();
                qxl_ttm_vm_ops = *ttm_vm_ops;
                qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
+               pax_close_kernel();
        }
        vma->vm_ops = &qxl_ttm_vm_ops;
        return 0;
@@ -464,25 +466,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
 {
 #if defined(CONFIG_DEBUG_FS)
-       static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
-       static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
-       unsigned i;
-
-       for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
-               if (i == 0)
-                       sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
-               else
-                       sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
-               qxl_mem_types_list[i].name = qxl_mem_types_names[i];
-               qxl_mem_types_list[i].show = &qxl_mm_dump_table;
-               qxl_mem_types_list[i].driver_features = 0;
-               if (i == 0)
-                       qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
-               else
-                       qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
+       static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
+               {
+                       .name = "qxl_mem_mm",
+                       .show = &qxl_mm_dump_table,
+               },
+               {
+                       .name = "qxl_surf_mm",
+                       .show = &qxl_mm_dump_table,
+               }
+       };
 
-       }
-       return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
+       pax_open_kernel();
+       *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
+       *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
+       pax_close_kernel();
+
+       return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
 #else
        return 0;
 #endif
index 2c45ac9c1dc3afc7d956387c8acd337797edacc9..5d740f84618cda02c095bbaffd0473d4bc0af22f 100644 (file)
@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
 
        /* GH: Simple idle check.
         */
-       atomic_set(&dev_priv->idle_count, 0);
+       atomic_set_unchecked(&dev_priv->idle_count, 0);
 
        /* We don't support anything other than bus-mastering ring mode,
         * but the ring can be in either AGP or PCI space for the ring
index 723e5d6f10a4b96ed1a7db6f5f71f6c51d6a1f3c..102dbaf65a2c952ab8733ff61132ebb6380359d1 100644 (file)
@@ -93,14 +93,14 @@ typedef struct drm_r128_private {
        int is_pci;
        unsigned long cce_buffers_offset;
 
-       atomic_t idle_count;
+       atomic_unchecked_t idle_count;
 
        int page_flipping;
        int current_page;
        u32 crtc_offset;
        u32 crtc_offset_cntl;
 
-       atomic_t vbl_received;
+       atomic_unchecked_t vbl_received;
 
        u32 color_fmt;
        unsigned int front_offset;
index 663f38c63ba6bd334d4732de3e80cdbf89ae711b..c689495c53bbdc191d251cfe17d0b6859fc8612a 100644 (file)
@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
        return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
 }
 
-drm_ioctl_compat_t *r128_compat_ioctls[] = {
+drm_ioctl_compat_t r128_compat_ioctls[] = {
        [DRM_R128_INIT] = compat_r128_init,
        [DRM_R128_DEPTH] = compat_r128_depth,
        [DRM_R128_STIPPLE] = compat_r128_stipple,
@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
        unsigned int nr = DRM_IOCTL_NR(cmd);
-       drm_ioctl_compat_t *fn = NULL;
        int ret;
 
        if (nr < DRM_COMMAND_BASE)
                return drm_compat_ioctl(filp, cmd, arg);
 
-       if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
-               fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
-
-       if (fn != NULL)
+       if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls)) {
+               drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
                ret = (*fn) (filp, cmd, arg);
-       else
+       } else
                ret = drm_ioctl(filp, cmd, arg);
 
        return ret;
index c2ae496babb7374da8381c688f15883ebc533bc0..30b5993fe480e4c6a53ab120026ca26d44954988 100644 (file)
@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
        if (crtc != 0)
                return 0;
 
-       return atomic_read(&dev_priv->vbl_received);
+       return atomic_read_unchecked(&dev_priv->vbl_received);
 }
 
 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
        /* VBLANK interrupt */
        if (status & R128_CRTC_VBLANK_INT) {
                R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
-               atomic_inc(&dev_priv->vbl_received);
+               atomic_inc_unchecked(&dev_priv->vbl_received);
                drm_handle_vblank(dev, 0);
                return IRQ_HANDLED;
        }
index 8fd2d9f58f770a3de4b7fc2b840b1e788c5aafdb..18c9660f841122f915f0b18638afced1821032b1 100644 (file)
@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
 
 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
 {
-       if (atomic_read(&dev_priv->idle_count) == 0)
+       if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
                r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
        else
-               atomic_set(&dev_priv->idle_count, 0);
+               atomic_set_unchecked(&dev_priv->idle_count, 0);
 }
 
 #endif
index b928c17bdeed98d78286ef82bc97ddcab2a7b287..e5d9400163c09db1698698e041ac3eb95e23730d 100644 (file)
@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
        regex_t mask_rex;
        regmatch_t match[4];
        char buf[1024];
-       size_t end;
+       long end;
        int len;
        int done = 0;
        int r;
        unsigned o;
        struct offset *offset;
        char last_reg_s[10];
-       int last_reg;
+       unsigned long last_reg;
 
        if (regcomp
            (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
index bd7519fdd3f431cbce8c2bc6bd3e588e525be5cd..e1c2cd95192fcb44ca82ad0d44190238716d5149 100644 (file)
@@ -1247,7 +1247,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
         * locking inversion with the driver load path. And the access here is
         * completely racy anyway. So don't bother with locking for now.
         */
-       return dev->open_count == 0;
+       return local_read(&dev->open_count) == 0;
 }
 
 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
index 46bd3938282ca84a6a19338f476daee049575ce7..6ae4719527fc042eb3b913a227ddce14a6b14845 100644 (file)
@@ -264,7 +264,7 @@ typedef struct drm_radeon_private {
 
        /* SW interrupt */
        wait_queue_head_t swi_queue;
-       atomic_t swi_emitted;
+       atomic_unchecked_t swi_emitted;
        int vblank_crtc;
        uint32_t irq_enable_reg;
        uint32_t r500_disp_irq_reg;
index 0b98ea1345792986fa064f878e40aaa13388c7de..0881827c25f1b98c37123b0d280492cdfbe3e9d9 100644 (file)
@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
        request = compat_alloc_user_space(sizeof(*request));
        if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
            || __put_user(req32.param, &request->param)
-           || __put_user((void __user *)(unsigned long)req32.value,
+           || __put_user((unsigned long)req32.value,
                          &request->value))
                return -EFAULT;
 
@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
 #define compat_radeon_cp_setparam NULL
 #endif /* X86_64 || IA64 */
 
-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
        [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
        [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
        [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
        unsigned int nr = DRM_IOCTL_NR(cmd);
-       drm_ioctl_compat_t *fn = NULL;
        int ret;
 
        if (nr < DRM_COMMAND_BASE)
                return drm_compat_ioctl(filp, cmd, arg);
 
-       if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
-               fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
-
-       if (fn != NULL)
+       if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) {
+               drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
                ret = (*fn) (filp, cmd, arg);
-       else
+       } else
                ret = drm_ioctl(filp, cmd, arg);
 
        return ret;
index 244b19bab2e72406648ef1b2eae8ef7965bfcf01..c19226ddf861541d30e62e3551942b0cbb0f0698 100644 (file)
@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
        unsigned int ret;
        RING_LOCALS;
 
-       atomic_inc(&dev_priv->swi_emitted);
-       ret = atomic_read(&dev_priv->swi_emitted);
+       atomic_inc_unchecked(&dev_priv->swi_emitted);
+       ret = atomic_read_unchecked(&dev_priv->swi_emitted);
 
        BEGIN_RING(4);
        OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
        drm_radeon_private_t *dev_priv =
            (drm_radeon_private_t *) dev->dev_private;
 
-       atomic_set(&dev_priv->swi_emitted, 0);
+       atomic_set_unchecked(&dev_priv->swi_emitted, 0);
        init_waitqueue_head(&dev_priv->swi_queue);
 
        dev->max_vblank_count = 0x001fffff;
index 15aee723db77ec171a5b8a32797d75e3d49e1469..cda326e7704e9a158e81de1f93b6669e5753c5e5 100644 (file)
@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
        if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
                sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
 
-       if (copy_from_user(&depth_boxes, clear->depth_boxes,
+       if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
                               sarea_priv->nbox * sizeof(depth_boxes[0])))
                return -EFAULT;
 
@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
        drm_radeon_getparam_t *param = data;
-       int value;
+       int value = 0;
 
        DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
 
index d02aa1d0f5885408c877056bd4ac1ab0e1ed6f12..ca19e2ce09f475dabc19f69e2fcb2a0da79b28e8 100644 (file)
@@ -959,7 +959,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
        man->size = size >> PAGE_SHIFT;
 }
 
-static struct vm_operations_struct radeon_ttm_vm_ops;
+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
 static const struct vm_operations_struct *ttm_vm_ops = NULL;
 
 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -1000,8 +1000,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
        }
        if (unlikely(ttm_vm_ops == NULL)) {
                ttm_vm_ops = vma->vm_ops;
+               pax_open_kernel();
                radeon_ttm_vm_ops = *ttm_vm_ops;
                radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
+               pax_close_kernel();
        }
        vma->vm_ops = &radeon_ttm_vm_ops;
        return 0;
index 978993fa3a360ef426b6dc48a3c287c3eda0cf48..e36e50eb5de60271ddd6be68c3450993289abc03 100644 (file)
@@ -1416,7 +1416,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
        }
 
        for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
-               dc->debugfs_files[i].data = dc;
+               *(void **)&dc->debugfs_files[i].data = dc;
 
        err = drm_debugfs_create_files(dc->debugfs_files,
                                       ARRAY_SIZE(debugfs_files),
index 33f67fd601c6076670496617d51366050f327238..55ee9761e3628187f58e9478d6b8fe099b4d207e 100644 (file)
@@ -39,7 +39,7 @@ struct tegra_dsi {
        struct clk *clk_lp;
        struct clk *clk;
 
-       struct drm_info_list *debugfs_files;
+       drm_info_list_no_const *debugfs_files;
        struct drm_minor *minor;
        struct dentry *debugfs;
 
index ffe26547328df8a352f8dca6e832912084cc7388..03c7b1cbb01f2929575e82d64ed1271bf95e6992 100644 (file)
@@ -60,7 +60,7 @@ struct tegra_hdmi {
        bool stereo;
        bool dvi;
 
-       struct drm_info_list *debugfs_files;
+       drm_info_list_no_const *debugfs_files;
        struct drm_minor *minor;
        struct dentry *debugfs;
 };
index aa0bd054d3e95c148f63072a39f82059ba2b366c..aea6a01500e11a67e944e2c1aa03f5844d454835 100644 (file)
@@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
 }
 
 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
-       ttm_bo_man_init,
-       ttm_bo_man_takedown,
-       ttm_bo_man_get_node,
-       ttm_bo_man_put_node,
-       ttm_bo_man_debug
+       .init = ttm_bo_man_init,
+       .takedown = ttm_bo_man_takedown,
+       .get_node = ttm_bo_man_get_node,
+       .put_node = ttm_bo_man_put_node,
+       .debug = ttm_bo_man_debug
 };
 EXPORT_SYMBOL(ttm_bo_manager_func);
index a1803fbcc898a9f347b696ac454442592bc22004..c53f6b02e16653dc143c1f2bda0e7b4c11e4b4fd 100644 (file)
@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
        zone->glob = glob;
        glob->zone_kernel = zone;
        ret = kobject_init_and_add(
-               &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
+               &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
        if (unlikely(ret != 0)) {
                kobject_put(&zone->kobj);
                return ret;
@@ -348,7 +348,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
        zone->glob = glob;
        glob->zone_dma32 = zone;
        ret = kobject_init_and_add(
-               &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
+               &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
        if (unlikely(ret != 0)) {
                kobject_put(&zone->kobj);
                return ret;
index 025c429050c06c4079f5b471128e499463abc753..314062fd5e8f7f6751b6502e61b829ee913e0acc 100644 (file)
@@ -54,7 +54,7 @@
 
 #define NUM_PAGES_TO_ALLOC             (PAGE_SIZE/sizeof(struct page *))
 #define SMALL_ALLOCATION               16
-#define FREE_ALL_PAGES                 (~0U)
+#define FREE_ALL_PAGES                 (~0UL)
 /* times are in msecs */
 #define PAGE_FREE_INTERVAL             1000
 
@@ -299,15 +299,14 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
  * @free_all: If set to true will free all pages in pool
  * @use_static: Safe to use static buffer
  **/
-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
+static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free,
                              bool use_static)
 {
        static struct page *static_buf[NUM_PAGES_TO_ALLOC];
        unsigned long irq_flags;
        struct page *p;
        struct page **pages_to_free;
-       unsigned freed_pages = 0,
-                npages_to_free = nr_free;
+       unsigned long freed_pages = 0, npages_to_free = nr_free;
 
        if (NUM_PAGES_TO_ALLOC < nr_free)
                npages_to_free = NUM_PAGES_TO_ALLOC;
@@ -371,7 +370,8 @@ restart:
                __list_del(&p->lru, &pool->list);
 
                ttm_pool_update_free_locked(pool, freed_pages);
-               nr_free -= freed_pages;
+               if (likely(nr_free != FREE_ALL_PAGES))
+                       nr_free -= freed_pages;
        }
 
        spin_unlock_irqrestore(&pool->lock, irq_flags);
@@ -399,7 +399,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        unsigned i;
        unsigned pool_offset;
        struct ttm_page_pool *pool;
-       int shrink_pages = sc->nr_to_scan;
+       unsigned long shrink_pages = sc->nr_to_scan;
        unsigned long freed = 0;
 
        if (!mutex_trylock(&lock))
@@ -407,7 +407,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        pool_offset = ++start_pool % NUM_POOLS;
        /* select start pool in round robin fashion */
        for (i = 0; i < NUM_POOLS; ++i) {
-               unsigned nr_free = shrink_pages;
+               unsigned long nr_free = shrink_pages;
                if (shrink_pages == 0)
                        break;
                pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
@@ -673,7 +673,7 @@ out:
 }
 
 /* Put all pages in pages list to correct pool to wait for reuse */
-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
+static void ttm_put_pages(struct page **pages, unsigned long npages, int flags,
                          enum ttm_caching_state cstate)
 {
        unsigned long irq_flags;
@@ -728,7 +728,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
        struct list_head plist;
        struct page *p = NULL;
        gfp_t gfp_flags = GFP_USER;
-       unsigned count;
+       unsigned long count;
        int r;
 
        /* set zero flag for page allocation if required */
index 01e1d27eb078396cd97dc2f3a51cbda8dec95a9f..aaa018a50d5ad38659b9155b5030948b05575389 100644 (file)
@@ -56,7 +56,7 @@
 
 #define NUM_PAGES_TO_ALLOC             (PAGE_SIZE/sizeof(struct page *))
 #define SMALL_ALLOCATION               4
-#define FREE_ALL_PAGES                 (~0U)
+#define FREE_ALL_PAGES                 (~0UL)
 /* times are in msecs */
 #define IS_UNDEFINED                   (0)
 #define IS_WC                          (1<<1)
@@ -413,7 +413,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
  * @nr_free: If set to true will free all pages in pool
  * @use_static: Safe to use static buffer
  **/
-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
+static unsigned long ttm_dma_page_pool_free(struct dma_pool *pool, unsigned long nr_free,
                                       bool use_static)
 {
        static struct page *static_buf[NUM_PAGES_TO_ALLOC];
@@ -421,8 +421,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
        struct dma_page *dma_p, *tmp;
        struct page **pages_to_free;
        struct list_head d_pages;
-       unsigned freed_pages = 0,
-                npages_to_free = nr_free;
+       unsigned long freed_pages = 0, npages_to_free = nr_free;
 
        if (NUM_PAGES_TO_ALLOC < nr_free)
                npages_to_free = NUM_PAGES_TO_ALLOC;
@@ -499,7 +498,8 @@ restart:
        /* remove range of pages from the pool */
        if (freed_pages) {
                ttm_pool_update_free_locked(pool, freed_pages);
-               nr_free -= freed_pages;
+               if (likely(nr_free != FREE_ALL_PAGES))
+                       nr_free -= freed_pages;
        }
 
        spin_unlock_irqrestore(&pool->lock, irq_flags);
@@ -936,7 +936,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
        struct dma_page *d_page, *next;
        enum pool_type type;
        bool is_cached = false;
-       unsigned count = 0, i, npages = 0;
+       unsigned long count = 0, i, npages = 0;
        unsigned long irq_flags;
 
        type = ttm_to_type(ttm->page_flags, ttm->caching_state);
@@ -1012,7 +1012,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        static unsigned start_pool;
        unsigned idx = 0;
        unsigned pool_offset;
-       unsigned shrink_pages = sc->nr_to_scan;
+       unsigned long shrink_pages = sc->nr_to_scan;
        struct device_pools *p;
        unsigned long freed = 0;
 
@@ -1025,7 +1025,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
                goto out;
        pool_offset = ++start_pool % _manager->npools;
        list_for_each_entry(p, &_manager->pools, pools) {
-               unsigned nr_free;
+               unsigned long nr_free;
 
                if (!p->dev)
                        continue;
@@ -1039,7 +1039,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
                shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
                freed += nr_free - shrink_pages;
 
-               pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
+               pr_debug("%s: (%s:%d) Asked to shrink %lu, have %lu more to go\n",
                         p->pool->dev_name, p->pool->name, current->pid,
                         nr_free, shrink_pages);
        }
index 8cbcb4589bd34db8fd35e8186eed1700dd9ba8cb..a4d9cf7153e913866fcc0e84f05decdae6f5fb99 100644 (file)
@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
                fb_deferred_io_cleanup(info);
                kfree(info->fbdefio);
                info->fbdefio = NULL;
-               info->fbops->fb_mmap = udl_fb_mmap;
        }
 
        pr_warn("released /dev/fb%d user=%d count=%d\n",
index ef8c500b4a006e4f63d848b6e5c43d81959c5439..01030c8ffba348af02c00703f86f1044c58422a6 100644 (file)
@@ -53,7 +53,7 @@ typedef struct drm_via_ring_buffer {
 typedef uint32_t maskarray_t[5];
 
 typedef struct drm_via_irq {
-       atomic_t irq_received;
+       atomic_unchecked_t irq_received;
        uint32_t pending_mask;
        uint32_t enable_mask;
        wait_queue_head_t irq_queue;
@@ -77,7 +77,7 @@ typedef struct drm_via_private {
        struct timeval last_vblank;
        int last_vblank_valid;
        unsigned usec_per_vblank;
-       atomic_t vbl_received;
+       atomic_unchecked_t vbl_received;
        drm_via_state_t hc_state;
        char pci_buf[VIA_PCI_BUF_SIZE];
        const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
index 1319433816d3cd4743c7eadbcf073d8703c6476b..a993b0cce020bf1a664d696d4b9fd811b5a99cb0 100644 (file)
@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
        if (crtc != 0)
                return 0;
 
-       return atomic_read(&dev_priv->vbl_received);
+       return atomic_read_unchecked(&dev_priv->vbl_received);
 }
 
 irqreturn_t via_driver_irq_handler(int irq, void *arg)
@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
 
        status = VIA_READ(VIA_REG_INTERRUPT);
        if (status & VIA_IRQ_VBLANK_PENDING) {
-               atomic_inc(&dev_priv->vbl_received);
-               if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
+               atomic_inc_unchecked(&dev_priv->vbl_received);
+               if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
                        do_gettimeofday(&cur_vblank);
                        if (dev_priv->last_vblank_valid) {
                                dev_priv->usec_per_vblank =
@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
                        dev_priv->last_vblank = cur_vblank;
                        dev_priv->last_vblank_valid = 1;
                }
-               if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
+               if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
                        DRM_DEBUG("US per vblank is: %u\n",
                                  dev_priv->usec_per_vblank);
                }
@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
 
        for (i = 0; i < dev_priv->num_irqs; ++i) {
                if (status & cur_irq->pending_mask) {
-                       atomic_inc(&cur_irq->irq_received);
+                       atomic_inc_unchecked(&cur_irq->irq_received);
                        wake_up(&cur_irq->irq_queue);
                        handled = 1;
                        if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
                DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
                            ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
                             masks[irq][4]));
-               cur_irq_sequence = atomic_read(&cur_irq->irq_received);
+               cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
        } else {
                DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
                            (((cur_irq_sequence =
-                              atomic_read(&cur_irq->irq_received)) -
+                              atomic_read_unchecked(&cur_irq->irq_received)) -
                              *sequence) <= (1 << 23)));
        }
        *sequence = cur_irq_sequence;
@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
                }
 
                for (i = 0; i < dev_priv->num_irqs; ++i) {
-                       atomic_set(&cur_irq->irq_received, 0);
+                       atomic_set_unchecked(&cur_irq->irq_received, 0);
                        cur_irq->enable_mask = dev_priv->irq_masks[i][0];
                        cur_irq->pending_mask = dev_priv->irq_masks[i][1];
                        init_waitqueue_head(&cur_irq->irq_queue);
@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
        switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
        case VIA_IRQ_RELATIVE:
                irqwait->request.sequence +=
-                       atomic_read(&cur_irq->irq_received);
+                       atomic_read_unchecked(&cur_irq->irq_received);
                irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
        case VIA_IRQ_ABSOLUTE:
                break;
index d26a6daa9719a23542cb8c575691f1d63851dba4..5fa41ed1a1843308c5ff07f0fc6d7c1d7e1a0af5 100644 (file)
@@ -447,7 +447,7 @@ struct vmw_private {
         * Fencing and IRQs.
         */
 
-       atomic_t marker_seq;
+       atomic_unchecked_t marker_seq;
        wait_queue_head_t fence_queue;
        wait_queue_head_t fifo_queue;
        spinlock_t waiter_lock;
index 39f2b03888e7e5b7beb107cd0a32aa0345a328be..d1b0a6433823bed06d0f6a5c13fcdd9c0c4c0513 100644 (file)
@@ -152,7 +152,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
                 (unsigned int) min,
                 (unsigned int) fifo->capabilities);
 
-       atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+       atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
        iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
        vmw_marker_queue_init(&fifo->marker_queue);
        return vmw_fifo_send_fence(dev_priv, &dummy);
@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
                                if (reserveable)
                                        iowrite32(bytes, fifo_mem +
                                                  SVGA_FIFO_RESERVED);
-                               return fifo_mem + (next_cmd >> 2);
+                               return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
                        } else {
                                need_bounce = true;
                        }
@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 
        fm = vmw_fifo_reserve(dev_priv, bytes);
        if (unlikely(fm == NULL)) {
-               *seqno = atomic_read(&dev_priv->marker_seq);
+               *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
                ret = -ENOMEM;
                (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
                                        false, 3*HZ);
@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
        }
 
        do {
-               *seqno = atomic_add_return(1, &dev_priv->marker_seq);
+               *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
        } while (*seqno == 0);
 
        if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
index 170b61be1e4ece3069a9f9c20e7c90d1f9558ae9..fec7348cea2cbed0c79f10b89af773cbbc1400cf 100644 (file)
@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
 }
 
 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
-       vmw_gmrid_man_init,
-       vmw_gmrid_man_takedown,
-       vmw_gmrid_man_get_node,
-       vmw_gmrid_man_put_node,
-       vmw_gmrid_man_debug
+       .init = vmw_gmrid_man_init,
+       .takedown = vmw_gmrid_man_takedown,
+       .get_node = vmw_gmrid_man_get_node,
+       .put_node = vmw_gmrid_man_put_node,
+       .debug = vmw_gmrid_man_debug
 };
index 69c8ce23123c96af22c44011ff2b8fcdab837584..cacb0ab04cde31e37ac3171396bc3ab9d8e6dd5c 100644 (file)
@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
        int ret;
 
        num_clips = arg->num_clips;
-       clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+       clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
 
        if (unlikely(num_clips == 0))
                return 0;
@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
        int ret;
 
        num_clips = arg->num_clips;
-       clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+       clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
 
        if (unlikely(num_clips == 0))
                return 0;
index 9fe9827ee499c177e50735d1acbf84d13eb606f0..0aa2fc0120e948dfdf2031377718001022f08edd 100644 (file)
@@ -102,7 +102,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
         * emitted. Then the fence is stale and signaled.
         */
 
-       ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
+       ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
               > VMW_FENCE_WRAP);
 
        return ret;
@@ -133,7 +133,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 
        if (fifo_idle)
                down_read(&fifo_state->rwsem);
-       signal_seq = atomic_read(&dev_priv->marker_seq);
+       signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
        ret = 0;
 
        for (;;) {
index efd1ffd68185a77e07626dd2fc2385434f4f9194..0ae13ca2d36e195087f7ccb96e26679949026ad1 100644 (file)
@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
        while (!vmw_lag_lt(queue, us)) {
                spin_lock(&queue->lock);
                if (list_empty(&queue->head))
-                       seqno = atomic_read(&dev_priv->marker_seq);
+                       seqno = atomic_read_unchecked(&dev_priv->marker_seq);
                else {
                        marker = list_first_entry(&queue->head,
                                                 struct vmw_marker, head);
index 37ac7b5dbd066025c90009d6b44a1c08fffcc800..d52a5c90f40b3b8718b551cdcdfbf47eceb1234a 100644 (file)
@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
 
 /* this version is for the case where the power switch is separate
    to the device being powered down. */
-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
 {
        /* copy over all the bus versions */
        if (dev->bus && dev->bus->pm) {
@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
        return ret;
 }
 
-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
 {
        /* copy over all the bus versions */
        if (dev->bus && dev->bus->pm) {
index 8b638792cb43c426c2e4fffb0bb594e76617f554..a5a5e72a62d85542a3a47cf8f81ab1de94f6a979 100644 (file)
@@ -2508,7 +2508,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
 
 int hid_add_device(struct hid_device *hdev)
 {
-       static atomic_t id = ATOMIC_INIT(0);
+       static atomic_unchecked_t id = ATOMIC_INIT(0);
        int ret;
 
        if (WARN_ON(hdev->status & HID_STAT_ADDED))
@@ -2551,7 +2551,7 @@ int hid_add_device(struct hid_device *hdev)
        /* XXX hack, any other cleaner solution after the driver core
         * is converted to allow more than 20 bytes as the device name? */
        dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
-                    hdev->vendor, hdev->product, atomic_inc_return(&id));
+                    hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
 
        hid_debug_register(hdev, dev_name(&hdev->dev));
        ret = device_add(&hdev->dev);
index 5bc6d80d5be79f465f3cbbb686c471db6162eb63..e47b55a13fcee6619b50fc14130687980c0ba862 100644 (file)
@@ -853,6 +853,12 @@ static int logi_dj_dj_event(struct hid_device *hdev,
         * case we forward it to the correct hid device (via hid_input_report()
         * ) and return 1 so hid-core does not anything else with it.
         */
+       if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
+           (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
+               dev_err(&hdev->dev, "%s: invalid device index:%d\n",
+                               __func__, dj_report->device_index);
+               return false;
+       }
 
        if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
            (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
index c13fb5bd79e8f461212c14e4febc3717e1077cb9..55a380241f051b14f24d1b056f7a3413af84f24e 100644 (file)
@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
        else if (size == 0)
                return -EIO;
 
-       if (copy_to_user(u, buf, size))
+       if (size > sizeof(buf) || copy_to_user(u, buf, size))
                return -EFAULT;
 
        *off += size;
index 433f72a1c0062be9b13dff78b6885d37a80a8738..2926005c48a3cd3e79a04cde324a366d98345720 100644 (file)
@@ -366,8 +366,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
        unsigned long flags;
        int ret = 0;
 
-       next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
-       atomic_inc(&vmbus_connection.next_gpadl_handle);
+       next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
+       atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
 
        ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
        if (ret)
index 3e4235c7a47fd30697b4c6d1e9b37b3bf55b7e6b..877d0e52aaf412973b600e5909c7d97c2f6c44ea 100644 (file)
@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
        u64 output_address = (output) ? virt_to_phys(output) : 0;
        u32 output_address_hi = output_address >> 32;
        u32 output_address_lo = output_address & 0xFFFFFFFF;
-       void *hypercall_page = hv_context.hypercall_page;
+       void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
 
        __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
                              "=a"(hv_status_lo) : "d" (control_hi),
@@ -156,7 +156,7 @@ int hv_init(void)
        /* See if the hypercall page is already set */
        rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
 
-       virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
+       virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
 
        if (!virtaddr)
                goto cleanup;
index b958ded8ac7e9ffef85dbc3b8547746a5cf26367..b2452bbfd9b69493df450831ec51fee26c0e98c7 100644 (file)
@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
 
 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
-static atomic_t trans_id = ATOMIC_INIT(0);
+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
 
 static int dm_ring_size = (5 * PAGE_SIZE);
 
@@ -893,7 +893,7 @@ static void hot_add_req(struct work_struct *dummy)
                pr_info("Memory hot add failed\n");
 
        dm->state = DM_INITIALIZED;
-       resp.hdr.trans_id = atomic_inc_return(&trans_id);
+       resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
        vmbus_sendpacket(dm->dev->channel, &resp,
                        sizeof(struct dm_hot_add_response),
                        (unsigned long)NULL,
@@ -973,7 +973,7 @@ static void post_status(struct hv_dynmem_device *dm)
        memset(&status, 0, sizeof(struct dm_status));
        status.hdr.type = DM_STATUS_REPORT;
        status.hdr.size = sizeof(struct dm_status);
-       status.hdr.trans_id = atomic_inc_return(&trans_id);
+       status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
 
        /*
         * The host expects the guest to report free memory.
@@ -993,7 +993,7 @@ static void post_status(struct hv_dynmem_device *dm)
         * send the status. This can happen if we were interrupted
         * after we picked our transaction ID.
         */
-       if (status.hdr.trans_id != atomic_read(&trans_id))
+       if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
                return;
 
        /*
@@ -1133,7 +1133,7 @@ static void balloon_up(struct work_struct *dummy)
                 */
 
                do {
-                       bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
+                       bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
                        ret = vmbus_sendpacket(dm_device.dev->channel,
                                                bl_resp,
                                                bl_resp->hdr.size,
@@ -1179,7 +1179,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
 
        memset(&resp, 0, sizeof(struct dm_unballoon_response));
        resp.hdr.type = DM_UNBALLOON_RESPONSE;
-       resp.hdr.trans_id = atomic_inc_return(&trans_id);
+       resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
        resp.hdr.size = sizeof(struct dm_unballoon_response);
 
        vmbus_sendpacket(dm_device.dev->channel, &resp,
@@ -1243,7 +1243,7 @@ static void version_resp(struct hv_dynmem_device *dm,
        memset(&version_req, 0, sizeof(struct dm_version_request));
        version_req.hdr.type = DM_VERSION_REQUEST;
        version_req.hdr.size = sizeof(struct dm_version_request);
-       version_req.hdr.trans_id = atomic_inc_return(&trans_id);
+       version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
        version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
        version_req.is_last_attempt = 1;
 
@@ -1413,7 +1413,7 @@ static int balloon_probe(struct hv_device *dev,
        memset(&version_req, 0, sizeof(struct dm_version_request));
        version_req.hdr.type = DM_VERSION_REQUEST;
        version_req.hdr.size = sizeof(struct dm_version_request);
-       version_req.hdr.trans_id = atomic_inc_return(&trans_id);
+       version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
        version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
        version_req.is_last_attempt = 0;
 
@@ -1444,7 +1444,7 @@ static int balloon_probe(struct hv_device *dev,
        memset(&cap_msg, 0, sizeof(struct dm_capabilities));
        cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
        cap_msg.hdr.size = sizeof(struct dm_capabilities);
-       cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
+       cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
 
        cap_msg.caps.cap_bits.balloon = 1;
        cap_msg.caps.cap_bits.hot_add = 1;
index c386d8dc7223a2103ca2904ffc0a90ef026c156d..d6004c498034ee5be34be108a9d894f1edfb58c0 100644 (file)
@@ -611,7 +611,7 @@ enum vmbus_connect_state {
 struct vmbus_connection {
        enum vmbus_connect_state conn_state;
 
-       atomic_t next_gpadl_handle;
+       atomic_unchecked_t next_gpadl_handle;
 
        /*
         * Represents channel interrupts. Each bit position represents a
index 4d6b26979fbd54e457dfbcea4bfc9a6a26ec846c..2e23b863b59644b64e313db8baa3bf0e09e91112 100644 (file)
@@ -807,10 +807,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
 {
        int ret = 0;
 
-       static atomic_t device_num = ATOMIC_INIT(0);
+       static atomic_unchecked_t device_num = ATOMIC_INIT(0);
 
        dev_set_name(&child_device_obj->device, "vmbus_0_%d",
-                    atomic_inc_return(&device_num));
+                    atomic_inc_return_unchecked(&device_num));
 
        child_device_obj->device.bus = &hv_bus;
        child_device_obj->device.parent = &hv_acpi_dev->dev;
index 579bdf93be433b54a23fa8827badd7ace8dbb762..75118b53a597254f1d5172d218009efef32ceb3b 100644 (file)
@@ -116,7 +116,7 @@ struct sensor_template {
                       struct device_attribute *devattr,
                       const char *buf, size_t count);
        int index;
-};
+} __do_const;
 
 /* Averaging interval */
 static int update_avg_interval(struct acpi_power_meter_resource *resource)
@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
                          struct sensor_template *attrs)
 {
        struct device *dev = &resource->acpi_dev->dev;
-       struct sensor_device_attribute *sensors =
+       sensor_device_attribute_no_const *sensors =
                &resource->sensors[resource->num_sensors];
        int res = 0;
 
index 0af63da6b603d05afb5feb23dcf02270905e3470..05a183a4e159e03c39b838b6136ef3212206ae04 100644 (file)
@@ -1105,7 +1105,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
 {
        struct applesmc_node_group *grp;
        struct applesmc_dev_attr *node;
-       struct attribute *attr;
+       attribute_no_const *attr;
        int ret, i;
 
        for (grp = groups; grp->format; grp++) {
index cccef87963e050afb99181d63d3ed5dcf401d5a1..06ce8ec4d69939fbdec68c549da119b89c8d47b4 100644 (file)
@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
 struct atk_sensor_data {
        struct list_head list;
        struct atk_data *data;
-       struct device_attribute label_attr;
-       struct device_attribute input_attr;
-       struct device_attribute limit1_attr;
-       struct device_attribute limit2_attr;
+       device_attribute_no_const label_attr;
+       device_attribute_no_const input_attr;
+       device_attribute_no_const limit1_attr;
+       device_attribute_no_const limit2_attr;
        char label_attr_name[ATTR_NAME_SIZE];
        char input_attr_name[ATTR_NAME_SIZE];
        char limit1_attr_name[ATTR_NAME_SIZE];
@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
 static struct device_attribute atk_name_attr =
                __ATTR(name, 0444, atk_name_show, NULL);
 
-static void atk_init_attribute(struct device_attribute *attr, char *name,
+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
                sysfs_show_func show)
 {
        sysfs_attr_init(&attr->attr);
index 5b7fec824f10a0eafc8109d7e9632789d6017e69..05c957a2ceadbaeac82a19cf4b59c390190dff7d 100644 (file)
@@ -783,7 +783,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
        return NOTIFY_OK;
 }
 
-static struct notifier_block coretemp_cpu_notifier __refdata = {
+static struct notifier_block coretemp_cpu_notifier = {
        .notifier_call = coretemp_cpu_callback,
 };
 
index 7a8a6fbf11ff9618c482ad4dfe67da7804c65b35..015c1fd7653e40dd89a4dccf5c3f4a7c49e4efb2 100644 (file)
@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
                                struct aem_rw_sensor_template *rw)
 {
        struct device *dev = &data->pdev->dev;
-       struct sensor_device_attribute *sensors = data->sensors;
+       sensor_device_attribute_no_const *sensors = data->sensors;
        int err;
 
        /* Set up read-only sensors */
index 17ae2eb26ce21604edf9a1d9df4e24abe01980d6..21b71ddcd2e556b997cd0a9dba73d25c9002571c 100644 (file)
@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct iio_hwmon_state *st;
-       struct sensor_device_attribute *a;
+       sensor_device_attribute_no_const *a;
        int ret, i;
        int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
        enum iio_chan_type type;
index f3830db02d4637675cebbe7b6b5e185492571a1e..9f4d6d58e6acc96f7ba5cc933c72792dcb2f05bc 100644 (file)
@@ -397,11 +397,11 @@ static struct attribute_group *
 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
                          int repeat)
 {
-       struct sensor_device_attribute_2 *a2;
-       struct sensor_device_attribute *a;
+       sensor_device_attribute_2_no_const *a2;
+       sensor_device_attribute_no_const *a;
        struct sensor_device_template **t;
        struct sensor_device_attr_u *su;
-       struct attribute_group *group;
+       attribute_group_no_const *group;
        struct attribute **attrs;
        int i, j, count;
 
index 1be41177b620f5179f254f0e8936a74bfb79fe94..88ae1e111754580e122e7ca875b42cf8ea3284fe 100644 (file)
@@ -952,10 +952,10 @@ static struct attribute_group *
 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
                          int repeat)
 {
-       struct attribute_group *group;
+       attribute_group_no_const *group;
        struct sensor_device_attr_u *su;
-       struct sensor_device_attribute *a;
-       struct sensor_device_attribute_2 *a2;
+       sensor_device_attribute_no_const *a;
+       sensor_device_attribute_2_no_const *a2;
        struct attribute **attrs;
        struct sensor_device_template **t;
        int i, count;
index f2e47c7dd808becd444512c74dc5738c9e544955..45d79412de35e23176671fb0c3a0ca28af780a44 100644 (file)
@@ -816,7 +816,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
        return 0;
 }
 
-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
                                const char *name,
                                umode_t mode,
                                ssize_t (*show)(struct device *dev,
@@ -833,7 +833,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
        dev_attr->store = store;
 }
 
-static void pmbus_attr_init(struct sensor_device_attribute *a,
+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
                            const char *name,
                            umode_t mode,
                            ssize_t (*show)(struct device *dev,
@@ -855,7 +855,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
                             u16 reg, u8 mask)
 {
        struct pmbus_boolean *boolean;
-       struct sensor_device_attribute *a;
+       sensor_device_attribute_no_const *a;
 
        boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
        if (!boolean)
@@ -880,7 +880,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
                                             bool update, bool readonly)
 {
        struct pmbus_sensor *sensor;
-       struct device_attribute *a;
+       device_attribute_no_const *a;
 
        sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
        if (!sensor)
@@ -911,7 +911,7 @@ static int pmbus_add_label(struct pmbus_data *data,
                           const char *lstring, int index)
 {
        struct pmbus_label *label;
-       struct device_attribute *a;
+       device_attribute_no_const *a;
 
        label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
        if (!label)
index d4f0935daaa11edfb8e56c5d58edc2f32eff570a..7420593e499fccf285b2f78064bddc02a483fd6c 100644 (file)
@@ -169,7 +169,7 @@ struct sht15_data {
        int                             supply_uv;
        bool                            supply_uv_valid;
        struct work_struct              update_supply_work;
-       atomic_t                        interrupt_handled;
+       atomic_unchecked_t              interrupt_handled;
 };
 
 /**
@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
        ret = gpio_direction_input(data->pdata->gpio_data);
        if (ret)
                return ret;
-       atomic_set(&data->interrupt_handled, 0);
+       atomic_set_unchecked(&data->interrupt_handled, 0);
 
        enable_irq(gpio_to_irq(data->pdata->gpio_data));
        if (gpio_get_value(data->pdata->gpio_data) == 0) {
                disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
                /* Only relevant if the interrupt hasn't occurred. */
-               if (!atomic_read(&data->interrupt_handled))
+               if (!atomic_read_unchecked(&data->interrupt_handled))
                        schedule_work(&data->read_work);
        }
        ret = wait_event_timeout(data->wait_queue,
@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
 
        /* First disable the interrupt */
        disable_irq_nosync(irq);
-       atomic_inc(&data->interrupt_handled);
+       atomic_inc_unchecked(&data->interrupt_handled);
        /* Then schedule a reading work struct */
        if (data->state != SHT15_READING_NOTHING)
                schedule_work(&data->read_work);
@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
                 * If not, then start the interrupt again - care here as could
                 * have gone low in meantime so verify it hasn't!
                 */
-               atomic_set(&data->interrupt_handled, 0);
+               atomic_set_unchecked(&data->interrupt_handled, 0);
                enable_irq(gpio_to_irq(data->pdata->gpio_data));
                /* If still not occurred or another handler was scheduled */
                if (gpio_get_value(data->pdata->gpio_data)
-                   || atomic_read(&data->interrupt_handled))
+                   || atomic_read_unchecked(&data->interrupt_handled))
                        return;
        }
 
index ac91c07e3f906869858a4a359d157ebf89aa7b36..8e6966306eb3a1ecc2870c25b0a3ec3ade66548d 100644 (file)
@@ -295,7 +295,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
        return NOTIFY_OK;
 }
 
-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
+static struct notifier_block via_cputemp_cpu_notifier = {
        .notifier_call = via_cputemp_cpu_callback,
 };
 
index 65e324054970b51aded8091d8f8b5a212fd49bc4..e6c511d8acf94ceea17cb10874617a3fa814bb4f 100644 (file)
@@ -39,7 +39,7 @@
 extern struct i2c_adapter amd756_smbus;
 
 static struct i2c_adapter *s4882_adapter;
-static struct i2c_algorithm *s4882_algo;
+static i2c_algorithm_no_const *s4882_algo;
 
 /* Wrapper access functions for multiplexed SMBus */
 static DEFINE_MUTEX(amd756_lock);
index b19a310bf9b3e822121cfcf48c1a01856fcabe67..d6eece09744d7c3d21e6c1e2e6e30661670ebe45 100644 (file)
@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
 /* usb layer */
 
 /* Send command to device, and get response. */
-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
 {
        int ret = 0;
        int actual;
index 88eda09e73c0b31509427a784c5219655a49c2fb..cf40434e4789a73352de2641d00e8fceb962b897 100644 (file)
@@ -37,7 +37,7 @@
 extern struct i2c_adapter *nforce2_smbus;
 
 static struct i2c_adapter *s4985_adapter;
-static struct i2c_algorithm *s4985_algo;
+static i2c_algorithm_no_const *s4985_algo;
 
 /* Wrapper access functions for multiplexed SMBus */
 static DEFINE_MUTEX(nforce2_lock);
index 71c7a3975b6287927c4bb666f433cf2787e20a54..71dd3e06cbec691495a7d430b4ca89c68851d631 100644 (file)
@@ -272,7 +272,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
                        break;
                }
 
-               data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
+               data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
                rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
                if (IS_ERR(rdwr_pa[i].buf)) {
                        res = PTR_ERR(rdwr_pa[i].buf);
index 0b510bafd90e2904d989d4f8ce06b89f50ea1f76..4fbb5085b2f9d1b35add6a4768310c8fdf0f0f7f 100644 (file)
@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
                alignment = queue_dma_alignment(q) | q->dma_pad_mask;
                if ((unsigned long)buf & alignment
                    || blk_rq_bytes(rq) & q->dma_pad_mask
-                   || object_is_on_stack(buf))
+                   || object_starts_on_stack(buf))
                        drive->dma = 0;
        }
 }
index af3e76d652ba0a51f9fdd777079edf9c30304b99..96dfe5e39b15ce54d6762d0146cdb4ff12b00ceb 100644 (file)
@@ -555,7 +555,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
 }
 
 static
-int __iio_device_attr_init(struct device_attribute *dev_attr,
+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
                           const char *postfix,
                           struct iio_chan_spec const *chan,
                           ssize_t (*readfunc)(struct device *dev,
index e28a494e2a3a0f72b41af479b269262c6472cb77..f7c267115b49804d71a91e287787bf46da0750c1 100644 (file)
@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
 
 struct cm_counter_group {
        struct kobject obj;
-       atomic_long_t counter[CM_ATTR_COUNT];
+       atomic_long_unchecked_t counter[CM_ATTR_COUNT];
 };
 
 struct cm_counter_attribute {
@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
        struct ib_mad_send_buf *msg = NULL;
        int ret;
 
-       atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+       atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
                        counter[CM_REQ_COUNTER]);
 
        /* Quick state check to discard duplicate REQs. */
@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
        if (!cm_id_priv)
                return;
 
-       atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+       atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
                        counter[CM_REP_COUNTER]);
        ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
        if (ret)
@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
        if (cm_id_priv->id.state != IB_CM_REP_SENT &&
            cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
                spin_unlock_irq(&cm_id_priv->lock);
-               atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+               atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
                                counter[CM_RTU_COUNTER]);
                goto out;
        }
@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
        cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
                                   dreq_msg->local_comm_id);
        if (!cm_id_priv) {
-               atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+               atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
                                counter[CM_DREQ_COUNTER]);
                cm_issue_drep(work->port, work->mad_recv_wc);
                return -EINVAL;
@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
        case IB_CM_MRA_REP_RCVD:
                break;
        case IB_CM_TIMEWAIT:
-               atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+               atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
                                counter[CM_DREQ_COUNTER]);
                if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
                        goto unlock;
@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
                        cm_free_msg(msg);
                goto deref;
        case IB_CM_DREQ_RCVD:
-               atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+               atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
                                counter[CM_DREQ_COUNTER]);
                goto unlock;
        default:
@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
                    ib_modify_mad(cm_id_priv->av.port->mad_agent,
                                  cm_id_priv->msg, timeout)) {
                        if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
-                               atomic_long_inc(&work->port->
+                               atomic_long_inc_unchecked(&work->port->
                                                counter_group[CM_RECV_DUPLICATES].
                                                counter[CM_MRA_COUNTER]);
                        goto out;
@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
                break;
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_MRA_REP_RCVD:
-               atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+               atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
                                counter[CM_MRA_COUNTER]);
                /* fall through */
        default:
@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
        case IB_CM_LAP_IDLE:
                break;
        case IB_CM_MRA_LAP_SENT:
-               atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+               atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
                                counter[CM_LAP_COUNTER]);
                if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
                        goto unlock;
@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
                        cm_free_msg(msg);
                goto deref;
        case IB_CM_LAP_RCVD:
-               atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+               atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
                                counter[CM_LAP_COUNTER]);
                goto unlock;
        default:
@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
        cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
        if (cur_cm_id_priv) {
                spin_unlock_irq(&cm.lock);
-               atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+               atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
                                counter[CM_SIDR_REQ_COUNTER]);
                goto out; /* Duplicate message. */
        }
@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
        if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
                msg->retries = 1;
 
-       atomic_long_add(1 + msg->retries,
+       atomic_long_add_unchecked(1 + msg->retries,
                        &port->counter_group[CM_XMIT].counter[attr_index]);
        if (msg->retries)
-               atomic_long_add(msg->retries,
+               atomic_long_add_unchecked(msg->retries,
                                &port->counter_group[CM_XMIT_RETRIES].
                                counter[attr_index]);
 
@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
        }
 
        attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
-       atomic_long_inc(&port->counter_group[CM_RECV].
+       atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
                        counter[attr_id - CM_ATTR_ID_OFFSET]);
 
        work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
        cm_attr = container_of(attr, struct cm_counter_attribute, attr);
 
        return sprintf(buf, "%ld\n",
-                      atomic_long_read(&group->counter[cm_attr->index]));
+                      atomic_long_read_unchecked(&group->counter[cm_attr->index]));
 }
 
 static const struct sysfs_ops cm_counter_ops = {
index 9f5ad7cc33c89985fb1c490c27f07766e38e29fa..588cd84175438d5852e6ce4e21c1f6baecc504be 100644 (file)
@@ -98,8 +98,8 @@ struct ib_fmr_pool {
 
        struct task_struct       *thread;
 
-       atomic_t                  req_ser;
-       atomic_t                  flush_ser;
+       atomic_unchecked_t        req_ser;
+       atomic_unchecked_t        flush_ser;
 
        wait_queue_head_t         force_wait;
 };
@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
        struct ib_fmr_pool *pool = pool_ptr;
 
        do {
-               if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
+               if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
                        ib_fmr_batch_release(pool);
 
-                       atomic_inc(&pool->flush_ser);
+                       atomic_inc_unchecked(&pool->flush_ser);
                        wake_up_interruptible(&pool->force_wait);
 
                        if (pool->flush_function)
@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
                }
 
                set_current_state(TASK_INTERRUPTIBLE);
-               if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
+               if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
                    !kthread_should_stop())
                        schedule();
                __set_current_state(TASK_RUNNING);
@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
        pool->dirty_watermark = params->dirty_watermark;
        pool->dirty_len       = 0;
        spin_lock_init(&pool->pool_lock);
-       atomic_set(&pool->req_ser,   0);
-       atomic_set(&pool->flush_ser, 0);
+       atomic_set_unchecked(&pool->req_ser,   0);
+       atomic_set_unchecked(&pool->flush_ser, 0);
        init_waitqueue_head(&pool->force_wait);
 
        pool->thread = kthread_run(ib_fmr_cleanup_thread,
@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
        }
        spin_unlock_irq(&pool->pool_lock);
 
-       serial = atomic_inc_return(&pool->req_ser);
+       serial = atomic_inc_return_unchecked(&pool->req_ser);
        wake_up_process(pool->thread);
 
        if (wait_event_interruptible(pool->force_wait,
-                                    atomic_read(&pool->flush_ser) - serial >= 0))
+                                    atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
                return -EINTR;
 
        return 0;
@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
                } else {
                        list_add_tail(&fmr->list, &pool->dirty_list);
                        if (++pool->dirty_len >= pool->dirty_watermark) {
-                               atomic_inc(&pool->req_ser);
+                               atomic_inc_unchecked(&pool->req_ser);
                                wake_up_process(pool->thread);
                        }
                }
index aec7a6aa2951db47bc6b5be969a29d1867688b23..8c014b5dab4c82ff805a744c89655555a56094fc 100644 (file)
@@ -99,6 +99,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        if (dmasync)
                dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 
+       /*
+        * If the combination of the addr and size requested for this memory
+        * region causes an integer overflow, return error.
+        */
+       if ((PAGE_ALIGN(addr + size) <= size) ||
+           (PAGE_ALIGN(addr + size) <= addr))
+               return ERR_PTR(-EINVAL);
+
        if (!can_do_mlock())
                return ERR_PTR(-EPERM);
 
index cb43c2299ac00b94ec4252a6074013ecf2730955..2e12dd7661c8e6e2807cb97386ab8bc5fb90b6b6 100644 (file)
@@ -256,7 +256,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
        int err;
        struct fw_ri_tpte tpt;
        u32 stag_idx;
-       static atomic_t key;
+       static atomic_unchecked_t key;
 
        if (c4iw_fatal_error(rdev))
                return -EIO;
@@ -277,7 +277,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
                if (rdev->stats.stag.cur > rdev->stats.stag.max)
                        rdev->stats.stag.max = rdev->stats.stag.cur;
                mutex_unlock(&rdev->stats.lock);
-               *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
+               *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
        }
        PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
             __func__, stag_state, type, pdid, stag_idx);
index 79b3dbc9717923bfdbc215e78e10fcf9feb43a06..96e5fccc5fd3b6e97ae4af4f7f318a669aa9f1d8 100644 (file)
@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                struct ib_atomic_eth *ateth;
                struct ipath_ack_entry *e;
                u64 vaddr;
-               atomic64_t *maddr;
+               atomic64_unchecked_t *maddr;
                u64 sdata;
                u32 rkey;
                u8 next;
@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                                            IB_ACCESS_REMOTE_ATOMIC)))
                        goto nack_acc_unlck;
                /* Perform atomic OP and save result. */
-               maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
+               maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
                sdata = be64_to_cpu(ateth->swap_data);
                e = &qp->s_ack_queue[qp->r_head_ack_queue];
                e->atomic_data = (opcode == OP(FETCH_ADD)) ?
-                       (u64) atomic64_add_return(sdata, maddr) - sdata :
+                       (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
                        (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
                                      be64_to_cpu(ateth->compare_data),
                                      sdata);
index 1f95bbaf7602289c79854a2919f966cce35e618a..9530f87aedccbaf4fae30590b042578f50859b61 100644 (file)
@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
        unsigned long flags;
        struct ib_wc wc;
        u64 sdata;
-       atomic64_t *maddr;
+       atomic64_unchecked_t *maddr;
        enum ib_wc_status send_status;
 
        /*
@@ -382,11 +382,11 @@ again:
                                            IB_ACCESS_REMOTE_ATOMIC)))
                        goto acc_err;
                /* Perform atomic OP and save result. */
-               maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
+               maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
                sdata = wqe->wr.wr.atomic.compare_add;
                *(u64 *) sqp->s_sge.sge.vaddr =
                        (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
-                       (u64) atomic64_add_return(sdata, maddr) - sdata :
+                       (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
                        (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
                                      sdata, wqe->wr.wr.atomic.swap);
                goto send_comp;
index 82a7dd87089b66efa0e925116b4d72053c341a3f..8fb6ba6cd05896547ef0aa7b3fa1760bf19e8a2d 100644 (file)
@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
 
 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
 {
-       return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
+       return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
                cpu_to_be64(0xff00000000000000LL);
 }
 
index ed327e6c8fdca54baf19c3ded92d60776cc1adbf..ca1739e07a7578806a2909c9a75ba8b60272577f 100644 (file)
@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
 {
        char name[20];
 
-       atomic_set(&ctx->tid, 0);
+       atomic_set_unchecked(&ctx->tid, 0);
        sprintf(name, "mlx4_ib_mcg%d", ctx->port);
        ctx->mcg_wq = create_singlethread_workqueue(name);
        if (!ctx->mcg_wq)
index 6eb743f65f6f5633eb126ae5fe7ac768299fe77d..a7b0f6d3ffedfcf681f8eb402c03fab5242b3635 100644 (file)
@@ -426,7 +426,7 @@ struct mlx4_ib_demux_ctx {
        struct list_head        mcg_mgid0_list;
        struct workqueue_struct *mcg_wq;
        struct mlx4_ib_demux_pv_ctx **tun;
-       atomic_t tid;
+       atomic_unchecked_t tid;
        int    flushing; /* flushing the work queue */
 };
 
index 9d3e5c1ac60e44ca5ba1014fa43fa1e6483694ec..6f166dfc4bd8824134b7c58ce013b2656ffa498a 100644 (file)
@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
        mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
 }
 
-int mthca_QUERY_FW(struct mthca_dev *dev)
+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
 {
        struct mthca_mailbox *mailbox;
        u32 *outbox;
@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
                             CMD_TIME_CLASS_B);
 }
 
-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
                    int num_mtt)
 {
        return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
                         0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
 }
 
-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
                   int eq_num)
 {
        return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
                         CMD_TIME_CLASS_B);
 }
 
-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
                  int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
                  void *in_mad, void *response_mad)
 {
index ded76c101dde3d74a36c85fb9a71f90050969b55..0cf0a08c4c8936be169e8ef47bb7855f8e256a98 100644 (file)
@@ -692,7 +692,7 @@ err_close:
        return err;
 }
 
-static int mthca_setup_hca(struct mthca_dev *dev)
+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
 {
        int err;
 
index ed9a989e501b4f7fa4f42431b696c89ccd972776..6aa5dc2c679097a917aa3992e095ae52300e77f7 100644 (file)
@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
  * through the bitmaps)
  */
 
-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
 {
        int o;
        int m;
@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
                return key;
 }
 
-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
                   u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
 {
        struct mthca_mailbox *mailbox;
@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
        return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
 }
 
-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
                        u64 *buffer_list, int buffer_size_shift,
                        int list_len, u64 iova, u64 total_size,
                        u32 access, struct mthca_mr *mr)
index 415f8e1a54dbc82cf4ab81bf5ad98bd9034d0733..e34214e9a623f3e79dd298d0427fcaa416941133 100644 (file)
@@ -764,7 +764,7 @@ unlock:
        return 0;
 }
 
-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 {
        struct mthca_dev *dev = to_mdev(ibcq->device);
        struct mthca_cq *cq = to_mcq(ibcq);
index 3b2a6dc8ea99d734645a24cef66a78867f66a2cd..bce26ff89c0c9ad4795e3d486f7999aef7dc353c 100644 (file)
@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
 LIST_HEAD(nes_adapter_list);
 static LIST_HEAD(nes_dev_list);
 
-atomic_t qps_destroyed;
+atomic_unchecked_t qps_destroyed;
 
 static unsigned int ee_flsh_adapter;
 static unsigned int sysfs_nonidx_addr;
@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
        struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
        struct nes_adapter *nesadapter = nesdev->nesadapter;
 
-       atomic_inc(&qps_destroyed);
+       atomic_inc_unchecked(&qps_destroyed);
 
        /* Free the control structures */
 
index bd9d132f11c7c48c6134ac13ccf7662c93cdd53c..70d84f412aa3082fd24dca185363b66fe0970eb9 100644 (file)
@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
 extern unsigned int wqm_quanta;
 extern struct list_head nes_adapter_list;
 
-extern atomic_t cm_connects;
-extern atomic_t cm_accepts;
-extern atomic_t cm_disconnects;
-extern atomic_t cm_closes;
-extern atomic_t cm_connecteds;
-extern atomic_t cm_connect_reqs;
-extern atomic_t cm_rejects;
-extern atomic_t mod_qp_timouts;
-extern atomic_t qps_created;
-extern atomic_t qps_destroyed;
-extern atomic_t sw_qps_destroyed;
+extern atomic_unchecked_t cm_connects;
+extern atomic_unchecked_t cm_accepts;
+extern atomic_unchecked_t cm_disconnects;
+extern atomic_unchecked_t cm_closes;
+extern atomic_unchecked_t cm_connecteds;
+extern atomic_unchecked_t cm_connect_reqs;
+extern atomic_unchecked_t cm_rejects;
+extern atomic_unchecked_t mod_qp_timouts;
+extern atomic_unchecked_t qps_created;
+extern atomic_unchecked_t qps_destroyed;
+extern atomic_unchecked_t sw_qps_destroyed;
 extern u32 mh_detected;
 extern u32 mh_pauses_sent;
 extern u32 cm_packets_sent;
@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
 extern u32 cm_packets_received;
 extern u32 cm_packets_dropped;
 extern u32 cm_packets_retrans;
-extern atomic_t cm_listens_created;
-extern atomic_t cm_listens_destroyed;
+extern atomic_unchecked_t cm_listens_created;
+extern atomic_unchecked_t cm_listens_destroyed;
 extern u32 cm_backlog_drops;
-extern atomic_t cm_loopbacks;
-extern atomic_t cm_nodes_created;
-extern atomic_t cm_nodes_destroyed;
-extern atomic_t cm_accel_dropped_pkts;
-extern atomic_t cm_resets_recvd;
-extern atomic_t pau_qps_created;
-extern atomic_t pau_qps_destroyed;
+extern atomic_unchecked_t cm_loopbacks;
+extern atomic_unchecked_t cm_nodes_created;
+extern atomic_unchecked_t cm_nodes_destroyed;
+extern atomic_unchecked_t cm_accel_dropped_pkts;
+extern atomic_unchecked_t cm_resets_recvd;
+extern atomic_unchecked_t pau_qps_created;
+extern atomic_unchecked_t pau_qps_destroyed;
 
 extern u32 int_mod_timer_init;
 extern u32 int_mod_cq_depth_256;
index 6f09a72e78d7d8ec9690413924e079af764aeaf3..cf4399daf5a4f99a64bab078e33ba73f5146adb3 100644 (file)
@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
 u32 cm_packets_retrans;
 u32 cm_packets_created;
 u32 cm_packets_received;
-atomic_t cm_listens_created;
-atomic_t cm_listens_destroyed;
+atomic_unchecked_t cm_listens_created;
+atomic_unchecked_t cm_listens_destroyed;
 u32 cm_backlog_drops;
-atomic_t cm_loopbacks;
-atomic_t cm_nodes_created;
-atomic_t cm_nodes_destroyed;
-atomic_t cm_accel_dropped_pkts;
-atomic_t cm_resets_recvd;
+atomic_unchecked_t cm_loopbacks;
+atomic_unchecked_t cm_nodes_created;
+atomic_unchecked_t cm_nodes_destroyed;
+atomic_unchecked_t cm_accel_dropped_pkts;
+atomic_unchecked_t cm_resets_recvd;
 
 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
 /* instance of function pointers for client API */
 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
 static struct nes_cm_ops nes_cm_api = {
-       mini_cm_accelerated,
-       mini_cm_listen,
-       mini_cm_del_listen,
-       mini_cm_connect,
-       mini_cm_close,
-       mini_cm_accept,
-       mini_cm_reject,
-       mini_cm_recv_pkt,
-       mini_cm_dealloc_core,
-       mini_cm_get,
-       mini_cm_set
+       .accelerated = mini_cm_accelerated,
+       .listen = mini_cm_listen,
+       .stop_listener = mini_cm_del_listen,
+       .connect = mini_cm_connect,
+       .close = mini_cm_close,
+       .accept = mini_cm_accept,
+       .reject = mini_cm_reject,
+       .recv_pkt = mini_cm_recv_pkt,
+       .destroy_cm_core = mini_cm_dealloc_core,
+       .get = mini_cm_get,
+       .set = mini_cm_set
 };
 
 static struct nes_cm_core *g_cm_core;
 
-atomic_t cm_connects;
-atomic_t cm_accepts;
-atomic_t cm_disconnects;
-atomic_t cm_closes;
-atomic_t cm_connecteds;
-atomic_t cm_connect_reqs;
-atomic_t cm_rejects;
+atomic_unchecked_t cm_connects;
+atomic_unchecked_t cm_accepts;
+atomic_unchecked_t cm_disconnects;
+atomic_unchecked_t cm_closes;
+atomic_unchecked_t cm_connecteds;
+atomic_unchecked_t cm_connect_reqs;
+atomic_unchecked_t cm_rejects;
 
 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
 {
@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
                kfree(listener);
                listener = NULL;
                ret = 0;
-               atomic_inc(&cm_listens_destroyed);
+               atomic_inc_unchecked(&cm_listens_destroyed);
        } else {
                spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
        }
@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
                  cm_node->rem_mac);
 
        add_hte_node(cm_core, cm_node);
-       atomic_inc(&cm_nodes_created);
+       atomic_inc_unchecked(&cm_nodes_created);
 
        return cm_node;
 }
@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
        }
 
        atomic_dec(&cm_core->node_cnt);
-       atomic_inc(&cm_nodes_destroyed);
+       atomic_inc_unchecked(&cm_nodes_destroyed);
        nesqp = cm_node->nesqp;
        if (nesqp) {
                nesqp->cm_node = NULL;
@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
 
 static void drop_packet(struct sk_buff *skb)
 {
-       atomic_inc(&cm_accel_dropped_pkts);
+       atomic_inc_unchecked(&cm_accel_dropped_pkts);
        dev_kfree_skb_any(skb);
 }
 
@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
 {
 
        int     reset = 0;      /* whether to send reset in case of err.. */
-       atomic_inc(&cm_resets_recvd);
+       atomic_inc_unchecked(&cm_resets_recvd);
        nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
                        " refcnt=%d\n", cm_node, cm_node->state,
                        atomic_read(&cm_node->ref_count));
@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
                                rem_ref_cm_node(cm_node->cm_core, cm_node);
                                return NULL;
                        }
-                       atomic_inc(&cm_loopbacks);
+                       atomic_inc_unchecked(&cm_loopbacks);
                        loopbackremotenode->loopbackpartner = cm_node;
                        loopbackremotenode->tcp_cntxt.rcv_wscale =
                                NES_CM_DEFAULT_RCV_WND_SCALE;
@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
                                nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
                        else {
                                rem_ref_cm_node(cm_core, cm_node);
-                               atomic_inc(&cm_accel_dropped_pkts);
+                               atomic_inc_unchecked(&cm_accel_dropped_pkts);
                                dev_kfree_skb_any(skb);
                        }
                        break;
@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
 
        if ((cm_id) && (cm_id->event_handler)) {
                if (issue_disconn) {
-                       atomic_inc(&cm_disconnects);
+                       atomic_inc_unchecked(&cm_disconnects);
                        cm_event.event = IW_CM_EVENT_DISCONNECT;
                        cm_event.status = disconn_status;
                        cm_event.local_addr = cm_id->local_addr;
@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
                }
 
                if (issue_close) {
-                       atomic_inc(&cm_closes);
+                       atomic_inc_unchecked(&cm_closes);
                        nes_disconnect(nesqp, 1);
 
                        cm_id->provider_data = nesqp;
@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
        nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
                nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
-       atomic_inc(&cm_accepts);
+       atomic_inc_unchecked(&cm_accepts);
 
        nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
                        netdev_refcnt_read(nesvnic->netdev));
@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
        struct nes_cm_core *cm_core;
        u8 *start_buff;
 
-       atomic_inc(&cm_rejects);
+       atomic_inc_unchecked(&cm_rejects);
        cm_node = (struct nes_cm_node *)cm_id->provider_data;
        loopback = cm_node->loopbackpartner;
        cm_core = cm_node->cm_core;
@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                  ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
                  ntohs(laddr->sin_port));
 
-       atomic_inc(&cm_connects);
+       atomic_inc_unchecked(&cm_connects);
        nesqp->active_conn = 1;
 
        /* cache the cm_id in the qp */
@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
                        g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
                        return err;
                }
-               atomic_inc(&cm_listens_created);
+               atomic_inc_unchecked(&cm_listens_created);
        }
 
        cm_id->add_ref(cm_id);
@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
 
        if (nesqp->destroyed)
                return;
-       atomic_inc(&cm_connecteds);
+       atomic_inc_unchecked(&cm_connecteds);
        nes_debug(NES_DBG_CM, "QP%u attempting to connect to  0x%08X:0x%04X on"
                  " local port 0x%04X. jiffies = %lu.\n",
                  nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
 
        cm_id->add_ref(cm_id);
        ret = cm_id->event_handler(cm_id, &cm_event);
-       atomic_inc(&cm_closes);
+       atomic_inc_unchecked(&cm_closes);
        cm_event.event = IW_CM_EVENT_CLOSE;
        cm_event.status = 0;
        cm_event.provider_data = cm_id->provider_data;
@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
                return;
        cm_id = cm_node->cm_id;
 
-       atomic_inc(&cm_connect_reqs);
+       atomic_inc_unchecked(&cm_connect_reqs);
        nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
                  cm_node, cm_id, jiffies);
 
@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
                return;
        cm_id = cm_node->cm_id;
 
-       atomic_inc(&cm_connect_reqs);
+       atomic_inc_unchecked(&cm_connect_reqs);
        nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
                  cm_node, cm_id, jiffies);
 
index 416645259b0f84b6a444596e7c8acc85189d6c84..fc952c3519c7ce59fa3be5ed4da0b475777901a8 100644 (file)
@@ -40,8 +40,8 @@
 #include "nes.h"
 #include "nes_mgt.h"
 
-atomic_t pau_qps_created;
-atomic_t pau_qps_destroyed;
+atomic_unchecked_t pau_qps_created;
+atomic_unchecked_t pau_qps_destroyed;
 
 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
 {
@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
 {
        struct sk_buff *skb;
        unsigned long flags;
-       atomic_inc(&pau_qps_destroyed);
+       atomic_inc_unchecked(&pau_qps_destroyed);
 
        /* Free packets that have not yet been forwarded */
        /* Lock is acquired by skb_dequeue when removing the skb */
@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
                                        cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
                                skb_queue_head_init(&nesqp->pau_list);
                                spin_lock_init(&nesqp->pau_lock);
-                               atomic_inc(&pau_qps_created);
+                               atomic_inc_unchecked(&pau_qps_created);
                                nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
                        }
 
index 49eb5111d2cd0b96c2f8ce8e4260e1fbfd5497cd..a77436611999ee2a76c7d5b4265f2989e5ea23b9 100644 (file)
@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
        target_stat_values[++index] = mh_detected;
        target_stat_values[++index] = mh_pauses_sent;
        target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
-       target_stat_values[++index] = atomic_read(&cm_connects);
-       target_stat_values[++index] = atomic_read(&cm_accepts);
-       target_stat_values[++index] = atomic_read(&cm_disconnects);
-       target_stat_values[++index] = atomic_read(&cm_connecteds);
-       target_stat_values[++index] = atomic_read(&cm_connect_reqs);
-       target_stat_values[++index] = atomic_read(&cm_rejects);
-       target_stat_values[++index] = atomic_read(&mod_qp_timouts);
-       target_stat_values[++index] = atomic_read(&qps_created);
-       target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
-       target_stat_values[++index] = atomic_read(&qps_destroyed);
-       target_stat_values[++index] = atomic_read(&cm_closes);
+       target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
+       target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
+       target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
+       target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
+       target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
+       target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
+       target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
+       target_stat_values[++index] = atomic_read_unchecked(&qps_created);
+       target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
+       target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
+       target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
        target_stat_values[++index] = cm_packets_sent;
        target_stat_values[++index] = cm_packets_bounced;
        target_stat_values[++index] = cm_packets_created;
        target_stat_values[++index] = cm_packets_received;
        target_stat_values[++index] = cm_packets_dropped;
        target_stat_values[++index] = cm_packets_retrans;
-       target_stat_values[++index] = atomic_read(&cm_listens_created);
-       target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
+       target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
+       target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
        target_stat_values[++index] = cm_backlog_drops;
-       target_stat_values[++index] = atomic_read(&cm_loopbacks);
-       target_stat_values[++index] = atomic_read(&cm_nodes_created);
-       target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
-       target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
-       target_stat_values[++index] = atomic_read(&cm_resets_recvd);
+       target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
+       target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
+       target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
+       target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
+       target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
        target_stat_values[++index] = nesadapter->free_4kpbl;
        target_stat_values[++index] = nesadapter->free_256pbl;
        target_stat_values[++index] = int_mod_timer_init;
        target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
        target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
        target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
-       target_stat_values[++index] = atomic_read(&pau_qps_created);
-       target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
+       target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
+       target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
 }
 
 /**
index c0d0296e7a003089dec6d8d180de2bb6dd05964d..3185f573891acbc6e272153a295d1a2cbab9cb31 100644 (file)
@@ -46,9 +46,9 @@
 
 #include <rdma/ib_umem.h>
 
-atomic_t mod_qp_timouts;
-atomic_t qps_created;
-atomic_t sw_qps_destroyed;
+atomic_unchecked_t mod_qp_timouts;
+atomic_unchecked_t qps_created;
+atomic_unchecked_t sw_qps_destroyed;
 
 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
 
@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
        if (init_attr->create_flags)
                return ERR_PTR(-EINVAL);
 
-       atomic_inc(&qps_created);
+       atomic_inc_unchecked(&qps_created);
        switch (init_attr->qp_type) {
                case IB_QPT_RC:
                        if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
        struct iw_cm_event cm_event;
        int ret = 0;
 
-       atomic_inc(&sw_qps_destroyed);
+       atomic_inc_unchecked(&sw_qps_destroyed);
        nesqp->destroyed = 1;
 
        /* Blow away the connection if it exists. */
index b218254ee41badaade5c7abfd2f94201478c5226..1d1aa3cf4a7cd3fbb5d6badf4deb0c5895c775a7 100644 (file)
@@ -52,6 +52,7 @@
 #include <linux/kref.h>
 #include <linux/sched.h>
 #include <linux/kthread.h>
+#include <linux/slab.h>
 
 #include "qib_common.h"
 #include "qib_verbs.h"
index cdc7df4fdb8aedea8c9026de82d774e5f5fe502b..a2fdfdbc1b536a594458cf6d10002d1d50936a33 100644 (file)
@@ -156,7 +156,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
                nla_total_size(2);      /* IFLA_IPOIB_UMCAST */
 }
 
-static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
+static struct rtnl_link_ops ipoib_link_ops = {
        .kind           = "ipoib",
        .maxtype        = IFLA_IPOIB_MAX,
        .policy         = ipoib_policy,
index e853a2134680d690c42e07230097a709c9fe1669..56fc5a8f35b261a544052fb11c0ee6e0a831ed41 100644 (file)
@@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
  */
 static void gameport_init_port(struct gameport *gameport)
 {
-       static atomic_t gameport_no = ATOMIC_INIT(-1);
+       static atomic_unchecked_t gameport_no = ATOMIC_INIT(-1);
 
        __module_get(THIS_MODULE);
 
        mutex_init(&gameport->drv_mutex);
        device_initialize(&gameport->dev);
        dev_set_name(&gameport->dev, "gameport%lu",
-                       (unsigned long)atomic_inc_return(&gameport_no));
+                       (unsigned long)atomic_inc_return_unchecked(&gameport_no));
        gameport->dev.bus = &gameport_bus;
        gameport->dev.release = gameport_release_port;
        if (gameport->parent)
index 213e3a1903ee1ddecf84a797ba19da6fa8d7b50c..4fea8376a571fbc4c2d5ff65bcaaff61d2eaaf6f 100644 (file)
@@ -1775,7 +1775,7 @@ EXPORT_SYMBOL_GPL(input_class);
  */
 struct input_dev *input_allocate_device(void)
 {
-       static atomic_t input_no = ATOMIC_INIT(-1);
+       static atomic_unchecked_t input_no = ATOMIC_INIT(-1);
        struct input_dev *dev;
 
        dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
@@ -1790,7 +1790,7 @@ struct input_dev *input_allocate_device(void)
                INIT_LIST_HEAD(&dev->node);
 
                dev_set_name(&dev->dev, "input%lu",
-                            (unsigned long)atomic_inc_return(&input_no));
+                            (unsigned long)atomic_inc_return_unchecked(&input_no));
 
                __module_get(THIS_MODULE);
        }
index 4a95b224169f33f32fbc106a97add8a5b62e141b..874c182af587ab9e896de70313f6e302efcec21d 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 #include <linux/input.h>
 #include <linux/gameport.h>
 #include <linux/jiffies.h>
index 3aa2f3f3da5bcb2b3e8ad781b0fd3d07fd2c66e3..53c00ea740d2814a9f162d4f80b852268e4a6e2e 100644 (file)
@@ -886,7 +886,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
 
 static int xpad_led_probe(struct usb_xpad *xpad)
 {
-       static atomic_t led_seq = ATOMIC_INIT(-1);
+       static atomic_unchecked_t led_seq       = ATOMIC_INIT(-1);
        unsigned long led_no;
        struct xpad_led *led;
        struct led_classdev *led_cdev;
@@ -899,7 +899,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
        if (!led)
                return -ENOMEM;
 
-       led_no = atomic_inc_return(&led_seq);
+       led_no = atomic_inc_return_unchecked(&led_seq);
 
        snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
        led->xpad = xpad;
index ac1fa5f4458052a97d5e8793123ce6b54623a9fa..5f7502c774312aa879a083fcf65adfc00e33547e 100644 (file)
@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
 
 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
 {
-       static atomic_t device_no = ATOMIC_INIT(-1);
+       static atomic_unchecked_t device_no = ATOMIC_INIT(-1);
 
        const struct ims_pcu_device_info *info;
        int error;
@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
        }
 
        /* Device appears to be operable, complete initialization */
-       pcu->device_no = atomic_inc_return(&device_no);
+       pcu->device_no = atomic_inc_return_unchecked(&device_no);
 
        /*
         * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
index f4cf664c7db3107f0f2503d21463257241dd0158..3204fdae9c98fa5f59b86b71aea0e830f527b7bc 100644 (file)
@@ -117,7 +117,7 @@ struct psmouse_attribute {
        ssize_t (*set)(struct psmouse *psmouse, void *data,
                        const char *buf, size_t count);
        bool protect;
-};
+} __do_const;
 #define to_psmouse_attr(a)     container_of((a), struct psmouse_attribute, dattr)
 
 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
index b604564dec5c9a5d8d154ac18a61c02341f77e5d..3f14ae4f52ff52be9e6a056dd0eee7e0eb9f0a5b 100644 (file)
@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
 
        spin_unlock_irq(&client->packet_lock);
 
-       if (copy_to_user(buffer, data, count))
+       if (count > sizeof(data) || copy_to_user(buffer, data, count))
                return -EFAULT;
 
        return count;
index a05a5179da32592b25b57525e5767cb42cdd4712..323a2fddfba0544fa4c07e73fd058d8a9ad999b1 100644 (file)
@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
  */
 static void serio_init_port(struct serio *serio)
 {
-       static atomic_t serio_no = ATOMIC_INIT(-1);
+       static atomic_unchecked_t serio_no = ATOMIC_INIT(-1);
 
        __module_get(THIS_MODULE);
 
@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
        mutex_init(&serio->drv_mutex);
        device_initialize(&serio->dev);
        dev_set_name(&serio->dev, "serio%lu",
-                    (unsigned long)atomic_inc_return(&serio_no));
+                    (unsigned long)atomic_inc_return_unchecked(&serio_no));
        serio->dev.bus = &serio_bus;
        serio->dev.release = serio_release_port;
        serio->dev.groups = serio_device_attr_groups;
index 71ef5d65a0c63a493e8252f2ba2639c33c711b13..93380a96a5a06df6f181b2f7457f6d1cc9936f1a 100644 (file)
@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
 
 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
 {
-       static atomic_t serio_raw_no = ATOMIC_INIT(-1);
+       static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(-1);
        struct serio_raw *serio_raw;
        int err;
 
@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
        }
 
        snprintf(serio_raw->name, sizeof(serio_raw->name),
-                "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no));
+                "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no));
        kref_init(&serio_raw->kref);
        INIT_LIST_HEAD(&serio_raw->client_list);
        init_waitqueue_head(&serio_raw->wait);
index 98024856df07fc89e744cb1d7b2356a72146de51..2e9941d833a2d3c382c730e32982388d6a3a9da1 100644 (file)
@@ -823,11 +823,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
 
 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
 {
+       phys_addr_t physaddr;
        WARN_ON(address & 0x7ULL);
 
        memset(cmd, 0, sizeof(*cmd));
-       cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
-       cmd->data[1] = upper_32_bits(__pa(address));
+
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
+       if (object_starts_on_stack((void *)address)) {
+               void *adjbuf = (void *)address - current->stack + current->lowmem_stack;
+               physaddr = __pa((u64)adjbuf);
+       } else
+#endif
+       physaddr = __pa(address);
+
+       cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK;
+       cmd->data[1] = upper_32_bits(physaddr);
        cmd->data[2] = 1;
        CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
 }
index 6cd47b75286f98d8cf8220378cca4aa32610f6dd..264d14af6f67ea56901fecaaa1c465b6343f83ba 100644 (file)
@@ -968,7 +968,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                cfg->irptndx = cfg->cbndx;
        }
 
-       ACCESS_ONCE(smmu_domain->smmu) = smmu;
+       ACCESS_ONCE_RW(smmu_domain->smmu) = smmu;
        arm_smmu_init_context_bank(smmu_domain);
        spin_unlock_irqrestore(&smmu_domain->lock, flags);
 
index f7718d73e98470bdbe139805860ddeb9e63722bf..3ef740b1b0f6f1d600d7639f4b6a7dac62f4e672 100644 (file)
@@ -802,7 +802,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
 {
        int err;
-       struct notifier_block *nb;
+       notifier_block_no_const *nb;
        struct iommu_callback_data cb = {
                .ops = ops,
        };
index 89c4846683be521a1b3fd968afcf9a031e69043a..1de796f614a5b84628c76323c5fc82c987197c3a 100644 (file)
@@ -353,7 +353,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
 void panic_if_irq_remap(const char *msg)
 {
        if (irq_remapping_enabled)
-               panic(msg);
+               panic("%s", msg);
 }
 
 static void ir_ack_apic_edge(struct irq_data *data)
@@ -374,10 +374,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
 
 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
 {
-       chip->irq_print_chip = ir_print_prefix;
-       chip->irq_ack = ir_ack_apic_edge;
-       chip->irq_eoi = ir_ack_apic_level;
-       chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
+       pax_open_kernel();
+       *(void **)&chip->irq_print_chip = ir_print_prefix;
+       *(void **)&chip->irq_ack = ir_ack_apic_edge;
+       *(void **)&chip->irq_eoi = ir_ack_apic_level;
+       *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
+       pax_close_kernel();
 }
 
 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
index d617ee5a3d8a839ddc1160ed00edda8e72f74dc4..df8be8b9876f74a04922ae8dae44f5a75b091608 100644 (file)
@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
  * Supported arch specific GIC irq extension.
  * Default make them NULL.
  */
-struct irq_chip gic_arch_extn = {
+irq_chip_no_const gic_arch_extn = {
        .irq_eoi        = NULL,
        .irq_mask       = NULL,
        .irq_unmask     = NULL,
@@ -311,7 +311,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
        chained_irq_exit(chip, desc);
 }
 
-static struct irq_chip gic_chip = {
+static irq_chip_no_const gic_chip __read_only = {
        .name                   = "GIC",
        .irq_mask               = gic_mask_irq,
        .irq_unmask             = gic_unmask_irq,
index 078cac5e2d0854e1f803db8d543312ddc6d8cf10..fb0f846b7886ed4e9d54bae8a39e64b4b4bec843 100644 (file)
@@ -353,7 +353,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
        struct intc_irqpin_iomem *i;
        struct resource *io[INTC_IRQPIN_REG_NR];
        struct resource *irq;
-       struct irq_chip *irq_chip;
+       irq_chip_no_const *irq_chip;
        void (*enable_fn)(struct irq_data *d);
        void (*disable_fn)(struct irq_data *d);
        const char *name = dev_name(dev);
index 384e6ed61d7c5e94a3081380b0bde6edc2d08129..7a771b2456675e9db15994560d8f076656f71bc0 100644 (file)
@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
        struct irqc_priv *p;
        struct resource *io;
        struct resource *irq;
-       struct irq_chip *irq_chip;
+       irq_chip_no_const *irq_chip;
        const char *name = dev_name(&pdev->dev);
        int ret;
        int k;
index 6a2df3297e77c92e4ca47d460ad41c9363425842..dc962f1db79c5a33462c1fedeec336d54abc405f 100644 (file)
@@ -81,8 +81,8 @@ struct capiminor {
 
        struct capi20_appl      *ap;
        u32                     ncci;
-       atomic_t                datahandle;
-       atomic_t                msgid;
+       atomic_unchecked_t      datahandle;
+       atomic_unchecked_t      msgid;
 
        struct tty_port port;
        int                ttyinstop;
@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
                capimsg_setu16(s, 2, mp->ap->applid);
                capimsg_setu8 (s, 4, CAPI_DATA_B3);
                capimsg_setu8 (s, 5, CAPI_RESP);
-               capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
+               capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
                capimsg_setu32(s, 8, mp->ncci);
                capimsg_setu16(s, 12, datahandle);
        }
@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
                mp->outbytes -= len;
                spin_unlock_bh(&mp->outlock);
 
-               datahandle = atomic_inc_return(&mp->datahandle);
+               datahandle = atomic_inc_return_unchecked(&mp->datahandle);
                skb_push(skb, CAPI_DATA_B3_REQ_LEN);
                memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
                capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
                capimsg_setu16(skb->data, 2, mp->ap->applid);
                capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
                capimsg_setu8 (skb->data, 5, CAPI_REQ);
-               capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
+               capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
                capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
                capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
                capimsg_setu16(skb->data, 16, len);     /* Data length */
index aecec6d3246370004a0e9434a1f58303e96b7c46..11e13c56126fba31fca9c59d66252ffeced55c8a 100644 (file)
@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
 
 
 static const struct gigaset_ops gigops = {
-       gigaset_write_cmd,
-       gigaset_write_room,
-       gigaset_chars_in_buffer,
-       gigaset_brkchars,
-       gigaset_init_bchannel,
-       gigaset_close_bchannel,
-       gigaset_initbcshw,
-       gigaset_freebcshw,
-       gigaset_reinitbcshw,
-       gigaset_initcshw,
-       gigaset_freecshw,
-       gigaset_set_modem_ctrl,
-       gigaset_baud_rate,
-       gigaset_set_line_ctrl,
-       gigaset_isoc_send_skb,
-       gigaset_isoc_input,
+       .write_cmd = gigaset_write_cmd,
+       .write_room = gigaset_write_room,
+       .chars_in_buffer = gigaset_chars_in_buffer,
+       .brkchars = gigaset_brkchars,
+       .init_bchannel = gigaset_init_bchannel,
+       .close_bchannel = gigaset_close_bchannel,
+       .initbcshw = gigaset_initbcshw,
+       .freebcshw = gigaset_freebcshw,
+       .reinitbcshw = gigaset_reinitbcshw,
+       .initcshw = gigaset_initcshw,
+       .freecshw = gigaset_freecshw,
+       .set_modem_ctrl = gigaset_set_modem_ctrl,
+       .baud_rate = gigaset_baud_rate,
+       .set_line_ctrl = gigaset_set_line_ctrl,
+       .send_skb = gigaset_isoc_send_skb,
+       .handle_input = gigaset_isoc_input,
 };
 
 /* bas_gigaset_init
index 600c79b030cdb68ddfb2fefd4cd53b6d10433511..3752bab3a55b84623804d02779e82d36d6163c13 100644 (file)
@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
        }
        tty->driver_data = cs;
 
-       ++cs->port.count;
+       atomic_inc(&cs->port.count);
 
-       if (cs->port.count == 1) {
+       if (atomic_read(&cs->port.count) == 1) {
                tty_port_tty_set(&cs->port, tty);
                cs->port.low_latency = 1;
        }
@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
 
        if (!cs->connected)
                gig_dbg(DEBUG_IF, "not connected");     /* nothing to do */
-       else if (!cs->port.count)
+       else if (!atomic_read(&cs->port.count))
                dev_warn(cs->dev, "%s: device not opened\n", __func__);
-       else if (!--cs->port.count)
+       else if (!atomic_dec_return(&cs->port.count))
                tty_port_tty_set(&cs->port, NULL);
 
        mutex_unlock(&cs->mutex);
index 8c91fd5eb6fdd4696f8b6d298eef8684525fc94e..14f13ce5feb38f838c8f6788f1a538cca751865a 100644 (file)
@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
 }
 
 static const struct gigaset_ops ops = {
-       gigaset_write_cmd,
-       gigaset_write_room,
-       gigaset_chars_in_buffer,
-       gigaset_brkchars,
-       gigaset_init_bchannel,
-       gigaset_close_bchannel,
-       gigaset_initbcshw,
-       gigaset_freebcshw,
-       gigaset_reinitbcshw,
-       gigaset_initcshw,
-       gigaset_freecshw,
-       gigaset_set_modem_ctrl,
-       gigaset_baud_rate,
-       gigaset_set_line_ctrl,
-       gigaset_m10x_send_skb,  /* asyncdata.c */
-       gigaset_m10x_input,     /* asyncdata.c */
+       .write_cmd = gigaset_write_cmd,
+       .write_room = gigaset_write_room,
+       .chars_in_buffer = gigaset_chars_in_buffer,
+       .brkchars = gigaset_brkchars,
+       .init_bchannel = gigaset_init_bchannel,
+       .close_bchannel = gigaset_close_bchannel,
+       .initbcshw = gigaset_initbcshw,
+       .freebcshw = gigaset_freebcshw,
+       .reinitbcshw = gigaset_reinitbcshw,
+       .initcshw = gigaset_initcshw,
+       .freecshw = gigaset_freecshw,
+       .set_modem_ctrl = gigaset_set_modem_ctrl,
+       .baud_rate = gigaset_baud_rate,
+       .set_line_ctrl = gigaset_set_line_ctrl,
+       .send_skb = gigaset_m10x_send_skb,      /* asyncdata.c */
+       .handle_input = gigaset_m10x_input,     /* asyncdata.c */
 };
 
 
index 5f306e2eece5e2a93c1dd86ac8eb29ab59a33e06..5342f88500791dcde1159f6b8c34997251357b74 100644 (file)
@@ -543,7 +543,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
        gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
        memcpy(cs->hw.usb->bchars, buf, 6);
        return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
-                              0, 0, &buf, 6, 2000);
+                              0, 0, buf, 6, 2000);
 }
 
 static void gigaset_freebcshw(struct bc_state *bcs)
@@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
 }
 
 static const struct gigaset_ops ops = {
-       gigaset_write_cmd,
-       gigaset_write_room,
-       gigaset_chars_in_buffer,
-       gigaset_brkchars,
-       gigaset_init_bchannel,
-       gigaset_close_bchannel,
-       gigaset_initbcshw,
-       gigaset_freebcshw,
-       gigaset_reinitbcshw,
-       gigaset_initcshw,
-       gigaset_freecshw,
-       gigaset_set_modem_ctrl,
-       gigaset_baud_rate,
-       gigaset_set_line_ctrl,
-       gigaset_m10x_send_skb,
-       gigaset_m10x_input,
+       .write_cmd = gigaset_write_cmd,
+       .write_room = gigaset_write_room,
+       .chars_in_buffer = gigaset_chars_in_buffer,
+       .brkchars = gigaset_brkchars,
+       .init_bchannel = gigaset_init_bchannel,
+       .close_bchannel = gigaset_close_bchannel,
+       .initbcshw = gigaset_initbcshw,
+       .freebcshw = gigaset_freebcshw,
+       .reinitbcshw = gigaset_reinitbcshw,
+       .initcshw = gigaset_initcshw,
+       .freecshw = gigaset_freecshw,
+       .set_modem_ctrl = gigaset_set_modem_ctrl,
+       .baud_rate = gigaset_baud_rate,
+       .set_line_ctrl = gigaset_set_line_ctrl,
+       .send_skb = gigaset_m10x_send_skb,
+       .handle_input = gigaset_m10x_input,
 };
 
 /*
index 4d9b195547c5cc253b827c7c6d79d036612e43f6..455075c603bf870f5b7f8702aed443d4154cbaa8 100644 (file)
@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
        }
        if (left) {
                if (t4file->user) {
-                       if (copy_from_user(buf, dp, left))
+                       if (left > sizeof buf || copy_from_user(buf, dp, left))
                                return -EFAULT;
                } else {
                        memcpy(buf, dp, left);
@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
        }
        if (left) {
                if (config->user) {
-                       if (copy_from_user(buf, dp, left))
+                       if (left > sizeof buf || copy_from_user(buf, dp, left))
                                return -EFAULT;
                } else {
                        memcpy(buf, dp, left);
index 9b856e1890d1ebc4f7c82ebb6b045357abe5324b..fa03c92e80fda638aca44efa42607f09487c2ad4 100644 (file)
@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
                        } else
                                return -EINVAL;
                case IIOCDBGVAR:
+                       if (!capable(CAP_SYS_RAWIO))
+                               return -EPERM;
                        if (arg) {
                                if (copy_to_user(argp, &dev, sizeof(ulong)))
                                        return -EFAULT;
index 91d57304d4d312cdebc317fbe6320b21e232d892..336523ec077c9cbdbe8941f2e2e42596f7a74b73 100644 (file)
@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
 }
 
 struct concap_device_ops isdn_concap_reliable_dl_dops = {
-       &isdn_concap_dl_data_req,
-       &isdn_concap_dl_connect_req,
-       &isdn_concap_dl_disconn_req
+       .data_req = &isdn_concap_dl_data_req,
+       .connect_req = &isdn_concap_dl_connect_req,
+       .disconn_req = &isdn_concap_dl_disconn_req
 };
 
 /* The following should better go into a dedicated source file such that
index bc912611fe0974d7cb3c6759158210aafbf9ec02..2ef7e36fadcf7fd75b110c425059ffce789b10df 100644 (file)
@@ -1503,9 +1503,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
 
 #ifdef ISDN_DEBUG_MODEM_OPEN
        printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
-              port->count);
+              atomic_read(&port->count));
 #endif
-       port->count++;
+       atomic_inc(&port->count);
        port->tty = tty;
        /*
         * Start up serial port
@@ -1549,7 +1549,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
 #endif
                return;
        }
-       if ((tty->count == 1) && (port->count != 1)) {
+       if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
                /*
                 * Uh, oh.  tty->count is 1, which means that the tty
                 * structure will be freed.  Info->count should always
@@ -1558,15 +1558,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
                 * serial port won't be shutdown.
                 */
                printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
-                      "info->count is %d\n", port->count);
-               port->count = 1;
+                      "info->count is %d\n", atomic_read(&port->count));
+               atomic_set(&port->count, 1);
        }
-       if (--port->count < 0) {
+       if (atomic_dec_return(&port->count) < 0) {
                printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
-                      info->line, port->count);
-               port->count = 0;
+                      info->line, atomic_read(&port->count));
+               atomic_set(&port->count, 0);
        }
-       if (port->count) {
+       if (atomic_read(&port->count)) {
 #ifdef ISDN_DEBUG_MODEM_OPEN
                printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
 #endif
@@ -1620,7 +1620,7 @@ isdn_tty_hangup(struct tty_struct *tty)
        if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
                return;
        isdn_tty_shutdown(info);
-       port->count = 0;
+       atomic_set(&port->count, 0);
        port->flags &= ~ASYNC_NORMAL_ACTIVE;
        port->tty = NULL;
        wake_up_interruptible(&port->open_wait);
@@ -1965,7 +1965,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
        for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
                modem_info *info = &dev->mdm.info[i];
 
-               if (info->port.count == 0)
+               if (atomic_read(&info->port.count) == 0)
                        continue;
                if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) &&  /* SI1 is matching */
                    (info->emu.mdmreg[REG_SI2] == si2)) {         /* SI2 is matching */
index e2d4e58230f581c2f1ac7a86f89e44e8fe529f05..40cd045e8f78a737c075bb69cf8076cd37399790 100644 (file)
@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
 
 
 static struct concap_proto_ops ix25_pops = {
-       &isdn_x25iface_proto_new,
-       &isdn_x25iface_proto_del,
-       &isdn_x25iface_proto_restart,
-       &isdn_x25iface_proto_close,
-       &isdn_x25iface_xmit,
-       &isdn_x25iface_receive,
-       &isdn_x25iface_connect_ind,
-       &isdn_x25iface_disconn_ind
+       .proto_new = &isdn_x25iface_proto_new,
+       .proto_del = &isdn_x25iface_proto_del,
+       .restart = &isdn_x25iface_proto_restart,
+       .close = &isdn_x25iface_proto_close,
+       .encap_and_xmit = &isdn_x25iface_xmit,
+       .data_ind = &isdn_x25iface_receive,
+       .connect_ind = &isdn_x25iface_connect_ind,
+       .disconn_ind = &isdn_x25iface_disconn_ind
 };
 
 /* error message helper function */
index 6a7447c304acc39e6edc422bf8f6533acfde93b5..b4987ea42e63dee7fff096f59116011812123a65 100644 (file)
@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
                if (count > len)
                        count = len;
                if (user) {
-                       if (copy_from_user(msg, buf, count))
+                       if (count > sizeof msg || copy_from_user(msg, buf, count))
                                return -EFAULT;
                } else
                        memcpy(msg, buf, count);
@@ -1609,7 +1609,7 @@ icn_setup(char *line)
        if (ints[0] > 1)
                membase = (unsigned long)ints[2];
        if (str && *str) {
-               strcpy(sid, str);
+               strlcpy(sid, str, sizeof(sid));
                icn_id = sid;
                if ((p = strchr(sid, ','))) {
                        *p++ = 0;
index 87f7dff20ff66b269537b166642690722fd4accc..7300125f7abe91bdf48e5bfcd73b9dbbad465af1 100644 (file)
@@ -1625,7 +1625,7 @@ unsigned long     dsp_spl_jiffies; /* calculate the next time to fire */
 static u16     dsp_count; /* last sample count */
 static int     dsp_count_valid; /* if we have last sample count */
 
-void
+void __intentional_overflow(-1)
 dsp_cmx_send(void *arg)
 {
        struct dsp_conf *conf;
index 0f9ed1ea0e891d90b3c44141b9d38ae20603c30c..2715d6f6fa582f4c255ac2b7a0091ae386e8966a 100644 (file)
@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
  * detected as working, but in reality it is not) as low as
  * possible.
  */
-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
        {
                .callback = clevo_mail_led_dmi_callback,
                .ident = "Clevo D410J",
index 046cb70087452ef8cea540b073ae0fa71ceeef94..6b20d391f9118f031cd668d017a8e5497d99ee88 100644 (file)
@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
  * detected as working, but in reality it is not) as low as
  * possible.
  */
-static struct dmi_system_id nas_led_whitelist[] __initdata = {
+static struct dmi_system_id nas_led_whitelist[] __initconst = {
        {
                .callback = ss4200_led_dmi_callback,
                .ident = "Intel SS4200-E",
index 6590558d1d31c600b23c8d50f48b4e3b1c461326..a74c5dd06cdce35251031ad42cd223b5b427815a 100644 (file)
@@ -96,9 +96,17 @@ static __init int map_switcher(void)
         * The end address needs +1 because __get_vm_area allocates an
         * extra guard page, so we need space for that.
         */
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+       switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
+                                    VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
+                                    + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
+#else
        switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
                                     VM_ALLOC, switcher_addr, switcher_addr
                                     + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
+#endif
+
        if (!switcher_vma) {
                err = -ENOMEM;
                printk("lguest: could not map switcher pages high\n");
@@ -121,7 +129,7 @@ static __init int map_switcher(void)
         * Now the Switcher is mapped at the right address, we can't fail!
         * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
         */
-       memcpy(switcher_vma->addr, start_switcher_text,
+       memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
               end_switcher_text - start_switcher_text);
 
        printk(KERN_INFO "lguest: mapped switcher at %p\n",
index e8b55c3a617042e91c2936c5576b45b6387f9fc4..3514c3753179178c8f18d944287a24c66d862d4e 100644 (file)
@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
 /*:*/
 
 #ifdef CONFIG_X86_PAE
-static void release_pmd(pmd_t *spmd)
+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
 {
        /* If the entry's not present, there's nothing to release. */
        if (pmd_flags(*spmd) & _PAGE_PRESENT) {
index 922a1acbf652b9376e4b2dd39ba6be2859307594..9dd0c2acb97b6ff9f7c8911426bc93e52031e38c 100644 (file)
@@ -59,7 +59,7 @@ static struct {
 /* Offset from where switcher.S was compiled to where we've copied it */
 static unsigned long switcher_offset(void)
 {
-       return switcher_addr - (unsigned long)start_switcher_text;
+       return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
 }
 
 /* This cpu's struct lguest_pages (after the Switcher text page) */
@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
         * These copies are pretty cheap, so we do them unconditionally: */
        /* Save the current Host top-level page directory.
         */
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+       pages->state.host_cr3 = read_cr3();
+#else
        pages->state.host_cr3 = __pa(current->mm->pgd);
+#endif
+
        /*
         * Set up the Guest's page tables to see this CPU's pages (and no
         * other CPU's pages).
@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
         * compiled-in switcher code and the high-mapped copy we just made.
         */
        for (i = 0; i < IDT_ENTRIES; i++)
-               default_idt_entries[i] += switcher_offset();
+               default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
 
        /*
         * Set up the Switcher's per-cpu areas.
@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
         * it will be undisturbed when we switch.  To change %cs and jump we
         * need this structure to feed to Intel's "lcall" instruction.
         */
-       lguest_entry.offset = (long)switch_to_guest + switcher_offset();
+       lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
        lguest_entry.segment = LGUEST_CS;
 
        /*
index 40634b0db9f754fe822e63856d4a895cdca4100d..4f5855e7c0776fd5ace0d702b03724a205e607b6 100644 (file)
@@ -87,6 +87,7 @@
 #include <asm/page.h>
 #include <asm/segment.h>
 #include <asm/lguest.h>
+#include <asm/processor-flags.h>
 
 // We mark the start of the code to copy
 // It's placed in .text tho it's never run here
@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
        // Changes type when we load it: damn Intel!
        // For after we switch over our page tables
        // That entry will be read-only: we'd crash.
+
+#ifdef CONFIG_PAX_KERNEXEC
+       mov     %cr0, %edx
+       xor     $X86_CR0_WP, %edx
+       mov     %edx, %cr0
+#endif
+
        movl    $(GDT_ENTRY_TSS*8), %edx
        ltr     %dx
 
@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
        // Let's clear it again for our return.
        // The GDT descriptor of the Host
        // Points to the table after two "size" bytes
-       movl    (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
+       movl    (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
        // Clear "used" from type field (byte 5, bit 2)
-       andb    $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
+       andb    $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
+
+#ifdef CONFIG_PAX_KERNEXEC
+       mov     %cr0, %eax
+       xor     $X86_CR0_WP, %eax
+       mov     %eax, %cr0
+#endif
 
        // Once our page table's switched, the Guest is live!
        // The Host fades as we run this final step.
@@ -295,13 +309,12 @@ deliver_to_host:
        // I consulted gcc, and it gave
        // These instructions, which I gladly credit:
        leal    (%edx,%ebx,8), %eax
-       movzwl  (%eax),%edx
-       movl    4(%eax), %eax
-       xorw    %ax, %ax
-       orl     %eax, %edx
+       movl    4(%eax), %edx
+       movw    (%eax), %dx
        // Now the address of the handler's in %edx
        // We call it now: its "iret" drops us home.
-       jmp     *%edx
+       ljmp    $__KERNEL_CS, $1f
+1:     jmp     *%edx
 
 // Every interrupt can come to us here
 // But we must truly tell each apart.
index a08e3eeac3c5fbf389ede37b839f40f705cd4d47..df8ade22327a4433d81711813a9e5e86a3db1a36 100644 (file)
@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
                                  struct workqueue_struct *wq)
 {
-       BUG_ON(object_is_on_stack(cl));
+       BUG_ON(object_starts_on_stack(cl));
        closure_set_ip(cl);
        cl->fn = fn;
        cl->wq = wq;
index 1695ee5f3ffc30b883c83c1926745ba22e48a7e1..89f18abda5aa3534aa983a76c8c1bbd36bc620f6 100644 (file)
@@ -1784,7 +1784,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
                   chunk_kb ? "KB" : "B");
        if (bitmap->storage.file) {
                seq_printf(seq, ", file: ");
-               seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
+               seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
        }
 
        seq_printf(seq, "\n");
index 73f791bb9ea4f06bd6c45da8356ba476a3ef76c3..8c5d3acc83bbd4bf9fed681e83b61bce6f6c7497 100644 (file)
@@ -1772,7 +1772,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
            cmd == DM_LIST_VERSIONS_CMD)
                return 0;
 
-       if ((cmd == DM_DEV_CREATE_CMD)) {
+       if (cmd == DM_DEV_CREATE_CMD) {
                if (!*param->name) {
                        DMWARN("name not supplied when creating device");
                        return -EINVAL;
index 089d62751f7ff2a3aedf7e441cb88bec0d06b8a7..ef7352e69ca02c117cc94f548b83861aab5da683 100644 (file)
@@ -40,7 +40,7 @@ enum dm_raid1_error {
 
 struct mirror {
        struct mirror_set *ms;
-       atomic_t error_count;
+       atomic_unchecked_t error_count;
        unsigned long error_type;
        struct dm_dev *dev;
        sector_t offset;
@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
        struct mirror *m;
 
        for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
-               if (!atomic_read(&m->error_count))
+               if (!atomic_read_unchecked(&m->error_count))
                        return m;
 
        return NULL;
@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
         * simple way to tell if a device has encountered
         * errors.
         */
-       atomic_inc(&m->error_count);
+       atomic_inc_unchecked(&m->error_count);
 
        if (test_and_set_bit(error_type, &m->error_type))
                return;
@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
        struct mirror *m = get_default_mirror(ms);
 
        do {
-               if (likely(!atomic_read(&m->error_count)))
+               if (likely(!atomic_read_unchecked(&m->error_count)))
                        return m;
 
                if (m-- == ms->mirror)
@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
 {
        struct mirror *default_mirror = get_default_mirror(m->ms);
 
-       return !atomic_read(&default_mirror->error_count);
+       return !atomic_read_unchecked(&default_mirror->error_count);
 }
 
 static int mirror_available(struct mirror_set *ms, struct bio *bio)
@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
                 */
                if (likely(region_in_sync(ms, region, 1)))
                        m = choose_mirror(ms, bio->bi_iter.bi_sector);
-               else if (m && atomic_read(&m->error_count))
+               else if (m && atomic_read_unchecked(&m->error_count))
                        m = NULL;
 
                if (likely(m))
@@ -936,7 +936,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
        }
 
        ms->mirror[mirror].ms = ms;
-       atomic_set(&(ms->mirror[mirror].error_count), 0);
+       atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
        ms->mirror[mirror].error_type = 0;
        ms->mirror[mirror].offset = offset;
 
@@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
  */
 static char device_status_char(struct mirror *m)
 {
-       if (!atomic_read(&(m->error_count)))
+       if (!atomic_read_unchecked(&(m->error_count)))
                return 'A';
 
        return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
index f478a4c96d2f55cb2cf75abe50bfc2ce365c2a8e..4b8e5ef66e63758c6066022cec5c6308d499bdb5 100644 (file)
@@ -382,7 +382,7 @@ do_sync_free:
                synchronize_rcu_expedited();
                dm_stat_free(&s->rcu_head);
        } else {
-               ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
+               ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
                call_rcu(&s->rcu_head, dm_stat_free);
        }
        return 0;
@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
                                       ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
                                        (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
                                       ));
-               ACCESS_ONCE(last->last_sector) = end_sector;
-               ACCESS_ONCE(last->last_rw) = bi_rw;
+               ACCESS_ONCE_RW(last->last_sector) = end_sector;
+               ACCESS_ONCE_RW(last->last_rw) = bi_rw;
        }
 
        rcu_read_lock();
index f8b37d4c05d8c301658a42c116110d1b63fc9323..5c5cafd5b03b1fb2c34ae36721aa33deafcfa254 100644 (file)
@@ -21,7 +21,7 @@ struct stripe {
        struct dm_dev *dev;
        sector_t physical_start;
 
-       atomic_t error_count;
+       atomic_unchecked_t error_count;
 };
 
 struct stripe_c {
@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                        kfree(sc);
                        return r;
                }
-               atomic_set(&(sc->stripe[i].error_count), 0);
+               atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
        }
 
        ti->private = sc;
@@ -332,7 +332,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
                DMEMIT("%d ", sc->stripes);
                for (i = 0; i < sc->stripes; i++)  {
                        DMEMIT("%s ", sc->stripe[i].dev->name);
-                       buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
+                       buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
                                'D' : 'A';
                }
                buffer[i] = '\0';
@@ -377,8 +377,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
         */
        for (i = 0; i < sc->stripes; i++)
                if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
-                       atomic_inc(&(sc->stripe[i].error_count));
-                       if (atomic_read(&(sc->stripe[i].error_count)) <
+                       atomic_inc_unchecked(&(sc->stripe[i].error_count));
+                       if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
                            DM_IO_ERROR_THRESHOLD)
                                schedule_work(&sc->trigger_event);
                }
index 3afae9e062f842687855fb11ec9b0cca3b1c8580..4e1c9548b2b2abfb58680243ca1b2062f3de9a5a 100644 (file)
@@ -303,7 +303,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
        if (!dev_size)
                return 0;
 
-       if ((start >= dev_size) || (start + len > dev_size)) {
+       if ((start >= dev_size) || (len > dev_size - start)) {
                DMWARN("%s: %s too small for target: "
                       "start=%llu, len=%llu, dev_size=%llu",
                       dm_device_name(ti->table->md), bdevname(bdev, b),
index 43adbb863f5a9e5426dc0641f8c12ba696b09e10..7b34305140895b92844ffad8f57755dd46993cff 100644 (file)
@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
 {
        pmd->info.tm = pmd->tm;
        pmd->info.levels = 2;
-       pmd->info.value_type.context = pmd->data_sm;
+       pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
        pmd->info.value_type.size = sizeof(__le64);
        pmd->info.value_type.inc = data_block_inc;
        pmd->info.value_type.dec = data_block_dec;
@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
 
        pmd->bl_info.tm = pmd->tm;
        pmd->bl_info.levels = 1;
-       pmd->bl_info.value_type.context = pmd->data_sm;
+       pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
        pmd->bl_info.value_type.size = sizeof(__le64);
        pmd->bl_info.value_type.inc = data_block_inc;
        pmd->bl_info.value_type.dec = data_block_dec;
index 64b10e006f9c6334729a49cab028b616cfc89504..07db8f47b144a61f9ccac96250cf8e2d56bfb163 100644 (file)
@@ -185,9 +185,9 @@ struct mapped_device {
        /*
         * Event handling.
         */
-       atomic_t event_nr;
+       atomic_unchecked_t event_nr;
        wait_queue_head_t eventq;
-       atomic_t uevent_seq;
+       atomic_unchecked_t uevent_seq;
        struct list_head uevent_list;
        spinlock_t uevent_lock; /* Protect access to uevent_list */
 
@@ -2070,8 +2070,8 @@ static struct mapped_device *alloc_dev(int minor)
        spin_lock_init(&md->deferred_lock);
        atomic_set(&md->holders, 1);
        atomic_set(&md->open_count, 0);
-       atomic_set(&md->event_nr, 0);
-       atomic_set(&md->uevent_seq, 0);
+       atomic_set_unchecked(&md->event_nr, 0);
+       atomic_set_unchecked(&md->uevent_seq, 0);
        INIT_LIST_HEAD(&md->uevent_list);
        INIT_LIST_HEAD(&md->table_devices);
        spin_lock_init(&md->uevent_lock);
@@ -2227,7 +2227,7 @@ static void event_callback(void *context)
 
        dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
 
-       atomic_inc(&md->event_nr);
+       atomic_inc_unchecked(&md->event_nr);
        wake_up(&md->eventq);
 }
 
@@ -3034,18 +3034,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
 
 uint32_t dm_next_uevent_seq(struct mapped_device *md)
 {
-       return atomic_add_return(1, &md->uevent_seq);
+       return atomic_add_return_unchecked(1, &md->uevent_seq);
 }
 
 uint32_t dm_get_event_nr(struct mapped_device *md)
 {
-       return atomic_read(&md->event_nr);
+       return atomic_read_unchecked(&md->event_nr);
 }
 
 int dm_wait_event(struct mapped_device *md, int event_nr)
 {
        return wait_event_interruptible(md->eventq,
-                       (event_nr != atomic_read(&md->event_nr)));
+                       (event_nr != atomic_read_unchecked(&md->event_nr)));
 }
 
 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
index 709755fb6d7b62823d2291c99397ca0111adcbaf..5bc3fa480f24e59adb33b3a48147a0ac3821c0af 100644 (file)
@@ -190,10 +190,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
  *  start build, activate spare
  */
 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
-static atomic_t md_event_count;
+static atomic_unchecked_t md_event_count;
 void md_new_event(struct mddev *mddev)
 {
-       atomic_inc(&md_event_count);
+       atomic_inc_unchecked(&md_event_count);
        wake_up(&md_event_waiters);
 }
 EXPORT_SYMBOL_GPL(md_new_event);
@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
  */
 static void md_new_event_inintr(struct mddev *mddev)
 {
-       atomic_inc(&md_event_count);
+       atomic_inc_unchecked(&md_event_count);
        wake_up(&md_event_waiters);
 }
 
@@ -1422,7 +1422,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
        if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
            (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
                rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
-       atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
+       atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
 
        rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
        bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
@@ -1673,7 +1673,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
        else
                sb->resync_offset = cpu_to_le64(0);
 
-       sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
+       sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
 
        sb->raid_disks = cpu_to_le32(mddev->raid_disks);
        sb->size = cpu_to_le64(mddev->dev_sectors);
@@ -2543,7 +2543,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
 static ssize_t
 errors_show(struct md_rdev *rdev, char *page)
 {
-       return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
+       return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
 }
 
 static ssize_t
@@ -2552,7 +2552,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
        char *e;
        unsigned long n = simple_strtoul(buf, &e, 10);
        if (*buf && (*e == 0 || *e == '\n')) {
-               atomic_set(&rdev->corrected_errors, n);
+               atomic_set_unchecked(&rdev->corrected_errors, n);
                return len;
        }
        return -EINVAL;
@@ -2997,8 +2997,8 @@ int md_rdev_init(struct md_rdev *rdev)
        rdev->sb_loaded = 0;
        rdev->bb_page = NULL;
        atomic_set(&rdev->nr_pending, 0);
-       atomic_set(&rdev->read_errors, 0);
-       atomic_set(&rdev->corrected_errors, 0);
+       atomic_set_unchecked(&rdev->read_errors, 0);
+       atomic_set_unchecked(&rdev->corrected_errors, 0);
 
        INIT_LIST_HEAD(&rdev->same_set);
        init_waitqueue_head(&rdev->blocked_wait);
@@ -6865,7 +6865,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
 
                spin_unlock(&pers_lock);
                seq_printf(seq, "\n");
-               seq->poll_event = atomic_read(&md_event_count);
+               seq->poll_event = atomic_read_unchecked(&md_event_count);
                return 0;
        }
        if (v == (void*)2) {
@@ -6968,7 +6968,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
                return error;
 
        seq = file->private_data;
-       seq->poll_event = atomic_read(&md_event_count);
+       seq->poll_event = atomic_read_unchecked(&md_event_count);
        return error;
 }
 
@@ -6985,7 +6985,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
        /* always allow read */
        mask = POLLIN | POLLRDNORM;
 
-       if (seq->poll_event != atomic_read(&md_event_count))
+       if (seq->poll_event != atomic_read_unchecked(&md_event_count))
                mask |= POLLERR | POLLPRI;
        return mask;
 }
@@ -7032,7 +7032,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
                struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
                curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
                              (int)part_stat_read(&disk->part0, sectors[1]) -
-                             atomic_read(&disk->sync_io);
+                             atomic_read_unchecked(&disk->sync_io);
                /* sync IO will cause sync_io to increase before the disk_stats
                 * as sync_io is counted when a request starts, and
                 * disk_stats is counted when it completes.
index 03cec5bdcaae751378f8ac2ad5a470c60219d2c7..0a658c1e570fbb8573be37a8b2644c583b059317 100644 (file)
@@ -94,13 +94,13 @@ struct md_rdev {
                                         * only maintained for arrays that
                                         * support hot removal
                                         */
-       atomic_t        read_errors;    /* number of consecutive read errors that
+       atomic_unchecked_t      read_errors;    /* number of consecutive read errors that
                                         * we have tried to ignore.
                                         */
        struct timespec last_read_error;        /* monotonic time since our
                                                 * last read error
                                                 */
-       atomic_t        corrected_errors; /* number of corrected read errors,
+       atomic_unchecked_t      corrected_errors; /* number of corrected read errors,
                                           * for reporting to userspace and storing
                                           * in superblock.
                                           */
@@ -448,7 +448,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
 
 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
 {
-       atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
+       atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
 }
 
 struct md_personality
index e8a9042988871c2fcfb12b8a85c20b8c8a4629c3..35bd1452fea8ad262bf43fa0ab07dbf61c8756fc 100644 (file)
@@ -683,7 +683,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
         * Flick into a mode where all blocks get allocated in the new area.
         */
        smm->begin = old_len;
-       memcpy(sm, &bootstrap_ops, sizeof(*sm));
+       memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
 
        /*
         * Extend.
@@ -714,7 +714,7 @@ out:
        /*
         * Switch back to normal behaviour.
         */
-       memcpy(sm, &ops, sizeof(*sm));
+       memcpy((void *)sm, &ops, sizeof(*sm));
        return r;
 }
 
index 3e6d1153b7c4b898b5a1355ad8bcc43cac2de4f4..ffecdeb19ee756ac3c73a9f990fb488bf4916f00 100644 (file)
@@ -71,6 +71,7 @@ struct dm_space_map {
                                           dm_sm_threshold_fn fn,
                                           void *context);
 };
+typedef struct dm_space_map __no_const dm_space_map_no_const;
 
 /*----------------------------------------------------------------*/
 
index 2f2f38f4d83c82b3089e49a867a75609dff3779c..f6a8ebead5892c2edaab02d6593b739a3b6b36f0 100644 (file)
@@ -1932,7 +1932,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
                        if (r1_sync_page_io(rdev, sect, s,
                                            bio->bi_io_vec[idx].bv_page,
                                            READ) != 0)
-                               atomic_add(s, &rdev->corrected_errors);
+                               atomic_add_unchecked(s, &rdev->corrected_errors);
                }
                sectors -= s;
                sect += s;
@@ -2165,7 +2165,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
                            !test_bit(Faulty, &rdev->flags)) {
                                if (r1_sync_page_io(rdev, sect, s,
                                                    conf->tmppage, READ)) {
-                                       atomic_add(s, &rdev->corrected_errors);
+                                       atomic_add_unchecked(s, &rdev->corrected_errors);
                                        printk(KERN_INFO
                                               "md/raid1:%s: read error corrected "
                                               "(%d sectors at %llu on %s)\n",
index 32e282f4c83c3aa2bfe7e911327b4172cf8dc22d..5cec80342845990cc8137d8816418ba415f832fb 100644 (file)
@@ -1944,7 +1944,7 @@ static void end_sync_read(struct bio *bio, int error)
                /* The write handler will notice the lack of
                 * R10BIO_Uptodate and record any errors etc
                 */
-               atomic_add(r10_bio->sectors,
+               atomic_add_unchecked(r10_bio->sectors,
                           &conf->mirrors[d].rdev->corrected_errors);
 
        /* for reconstruct, we always reschedule after a read.
@@ -2301,7 +2301,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
 {
        struct timespec cur_time_mon;
        unsigned long hours_since_last;
-       unsigned int read_errors = atomic_read(&rdev->read_errors);
+       unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
 
        ktime_get_ts(&cur_time_mon);
 
@@ -2323,9 +2323,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
         * overflowing the shift of read_errors by hours_since_last.
         */
        if (hours_since_last >= 8 * sizeof(read_errors))
-               atomic_set(&rdev->read_errors, 0);
+               atomic_set_unchecked(&rdev->read_errors, 0);
        else
-               atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
+               atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
 }
 
 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
@@ -2379,8 +2379,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                return;
 
        check_decay_read_errors(mddev, rdev);
-       atomic_inc(&rdev->read_errors);
-       if (atomic_read(&rdev->read_errors) > max_read_errors) {
+       atomic_inc_unchecked(&rdev->read_errors);
+       if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
                char b[BDEVNAME_SIZE];
                bdevname(rdev->bdev, b);
 
@@ -2388,7 +2388,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                       "md/raid10:%s: %s: Raid device exceeded "
                       "read_error threshold [cur %d:max %d]\n",
                       mdname(mddev), b,
-                      atomic_read(&rdev->read_errors), max_read_errors);
+                      atomic_read_unchecked(&rdev->read_errors), max_read_errors);
                printk(KERN_NOTICE
                       "md/raid10:%s: %s: Failing raid device\n",
                       mdname(mddev), b);
@@ -2543,7 +2543,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                                               sect +
                                               choose_data_offset(r10_bio, rdev)),
                                       bdevname(rdev->bdev, b));
-                               atomic_add(s, &rdev->corrected_errors);
+                               atomic_add_unchecked(s, &rdev->corrected_errors);
                        }
 
                        rdev_dec_pending(rdev, mddev);
index 8577cc7db47ef60ab4a550ca4a723c280e7573a1..e80e05d6d7c6fdac567574f2f3250836f31ef4fa 100644 (file)
@@ -1730,6 +1730,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
        return 1;
 }
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
+#endif
+
 static int grow_stripes(struct r5conf *conf, int num)
 {
        struct kmem_cache *sc;
@@ -1741,7 +1745,11 @@ static int grow_stripes(struct r5conf *conf, int num)
                        "raid%d-%s", conf->level, mdname(conf->mddev));
        else
                sprintf(conf->cache_name[0],
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+                       "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
+#else
                        "raid%d-%p", conf->level, conf->mddev);
+#endif
        sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
 
        conf->active_name = 0;
@@ -2017,21 +2025,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
                                mdname(conf->mddev), STRIPE_SECTORS,
                                (unsigned long long)s,
                                bdevname(rdev->bdev, b));
-                       atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
+                       atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
                        clear_bit(R5_ReadError, &sh->dev[i].flags);
                        clear_bit(R5_ReWrite, &sh->dev[i].flags);
                } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
                        clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
 
-               if (atomic_read(&rdev->read_errors))
-                       atomic_set(&rdev->read_errors, 0);
+               if (atomic_read_unchecked(&rdev->read_errors))
+                       atomic_set_unchecked(&rdev->read_errors, 0);
        } else {
                const char *bdn = bdevname(rdev->bdev, b);
                int retry = 0;
                int set_bad = 0;
 
                clear_bit(R5_UPTODATE, &sh->dev[i].flags);
-               atomic_inc(&rdev->read_errors);
+               atomic_inc_unchecked(&rdev->read_errors);
                if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
                        printk_ratelimited(
                                KERN_WARNING
@@ -2059,7 +2067,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
                                mdname(conf->mddev),
                                (unsigned long long)s,
                                bdn);
-               } else if (atomic_read(&rdev->read_errors)
+               } else if (atomic_read_unchecked(&rdev->read_errors)
                         > conf->max_nr_stripes)
                        printk(KERN_WARNING
                               "md/raid:%s: Too many read errors, failing device %s.\n",
index 983db75de3503952545f88700858f7a24131a9f9..ef9248c18d850d12d99b73e4698cda943d84cce9 100644 (file)
@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
                        const struct dvb_device *template, void *priv, int type)
 {
        struct dvb_device *dvbdev;
-       struct file_operations *dvbdevfops;
+       file_operations_no_const *dvbdevfops;
        struct device *clsdev;
        int minor;
        int id;
index 6ad22b69a63605e6b14c5516614fc40640e9085e..6e90e2af98647d1be760e8abfaf8a837049d2549 100644 (file)
@@ -96,6 +96,6 @@ struct af9033_ops {
        int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
        int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
                          int onoff);
-};
+} __no_const;
 
 #endif /* AF9033_H */
index 9b6c3bbc983af2c6ce26cb9970db9ec6df6d8002..baeb5c74a7e352a37685d8076590412d725e3a2f 100644 (file)
@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
        int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
        int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
        int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
-};
+} __no_const;
 
 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
index 1fea0e97265446fbd38b811b95d1601c840f9d82..321ce8f63ed84578303683f4bc813742e8d613ee 100644 (file)
@@ -64,7 +64,7 @@ struct dib7000p_ops {
        int (*get_adc_power)(struct dvb_frontend *fe);
        int (*slave_reset)(struct dvb_frontend *fe);
        struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
-};
+} __no_const;
 
 #if IS_ENABLED(CONFIG_DVB_DIB7000P)
 void *dib7000p_attach(struct dib7000p_ops *ops);
index 84cc10383dcd226efd764a834fc8ac083f64c853..5780c546f154bf0e4e738bb8e1df13c3cd83a341 100644 (file)
@@ -61,7 +61,7 @@ struct dib8000_ops {
        int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
        int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
        struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
-};
+} __no_const;
 
 #if IS_ENABLED(CONFIG_DVB_DIB8000)
 void *dib8000_attach(struct dib8000_ops *ops);
index 860c98fc72c7f95f12763fc2c0edfaacb8a76890..497fa2506689a3dd942f1b0c2d108ac5b3e4c751 100644 (file)
@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
 
 /* ------------------------------------------------------------------ */
 
-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
-static unsigned int vbi_nr[]   = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
+static int vbi_nr[]   = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
 
 module_param_array(video_nr, int, NULL, 0444);
 module_param_array(vbi_nr,   int, NULL, 0444);
index 802642d266438ffaaf6f136a30e141ffb452810e..55349009a3f28981c6e66ecdf81fa3a383814f06 100644 (file)
@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
 
 /* ivtv instance counter */
-static atomic_t ivtv_instance = ATOMIC_INIT(0);
+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
 
 /* Parameter declarations */
 static int cardtype[IVTV_MAX_CARDS];
index 8cbe6b49f4c238de365b0a231eef8f4dff2425ff..ea3601cce98c4f534c160485abef111e5f233ce9 100644 (file)
@@ -424,7 +424,7 @@ static void solo_device_release(struct device *dev)
 
 static int solo_sysfs_init(struct solo_dev *solo_dev)
 {
-       struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
+       bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
        struct device *dev = &solo_dev->dev;
        const char *driver;
        int i;
index c7141f2e63bdb228b5243e8e926cfb0b181700e0..5301fecf68005f2969945314d57c1c8500a9d49c 100644 (file)
@@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
 
 int solo_g723_init(struct solo_dev *solo_dev)
 {
-       static struct snd_device_ops ops = { NULL };
+       static struct snd_device_ops ops = { };
        struct snd_card *card;
        struct snd_kcontrol_new kctl;
        char name[32];
index 8c8484674d2f6cc7d1ff60539ec960df7e133f3c..27b4f83d50ff5057d2c8b04f316ba7e026ed1173 100644 (file)
@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
 
        /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
        if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
-               p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
+               p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
                if (p2m_id < 0)
                        p2m_id = -p2m_id;
        }
index bd8edfa319b80e5b878dc2c2611075b858b0da28..e82ed85cd3ce0cdefa5d023ba7b2a471effd8c01 100644 (file)
@@ -220,7 +220,7 @@ struct solo_dev {
 
        /* P2M DMA Engine */
        struct solo_p2m_dev     p2m_dev[SOLO_NR_P2M];
-       atomic_t                p2m_count;
+       atomic_unchecked_t      p2m_count;
        int                     p2m_jiffies;
        unsigned int            p2m_timeouts;
 
index c135165a8b26d3df257195c84eb7938db812c673..dc69499653769963b10a6a5928818381da25c356 100644 (file)
@@ -60,7 +60,7 @@ static unsigned int card[] = {[0 ... (TW68_MAXBOARDS - 1)] = UNSET };
 module_param_array(card, int, NULL, 0444);
 MODULE_PARM_DESC(card, "card type");
 
-static atomic_t tw68_instance = ATOMIC_INIT(0);
+static atomic_unchecked_t tw68_instance = ATOMIC_INIT(0);
 
 /* ------------------------------------------------------------------ */
 
index ba2d8f973d580aca85e4195dd9d9eb68cf86568e..1566684c7d404a3bedd656e66caf881d9cdb786d 100644 (file)
@@ -63,7 +63,6 @@ enum omap_vout_channels {
        OMAP_VIDEO2,
 };
 
-static struct videobuf_queue_ops video_vbq_ops;
 /* Variables configurable through module params*/
 static u32 video1_numbuffers = 3;
 static u32 video2_numbuffers = 3;
@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
 {
        struct videobuf_queue *q;
        struct omap_vout_device *vout = NULL;
+       static struct videobuf_queue_ops video_vbq_ops = {
+               .buf_setup = omap_vout_buffer_setup,
+               .buf_prepare = omap_vout_buffer_prepare,
+               .buf_release = omap_vout_buffer_release,
+               .buf_queue = omap_vout_buffer_queue,
+       };
 
        vout = video_drvdata(file);
        v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
        vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 
        q = &vout->vbq;
-       video_vbq_ops.buf_setup = omap_vout_buffer_setup;
-       video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
-       video_vbq_ops.buf_release = omap_vout_buffer_release;
-       video_vbq_ops.buf_queue = omap_vout_buffer_queue;
        spin_lock_init(&vout->vbq_lock);
 
        videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
index fb2acc53112a47201be7ec785057cdb491eabffc..a2fcbdc4122570a09c5fd35a8f44b96aeef70729 100644 (file)
@@ -156,7 +156,7 @@ struct mxr_layer {
        /** layer index (unique identifier) */
        int idx;
        /** callbacks for layer methods */
-       struct mxr_layer_ops ops;
+       struct mxr_layer_ops *ops;
        /** format array */
        const struct mxr_format **fmt_array;
        /** size of format array */
index 74344c764daad5ebe4fa19011a83d70f5f8751e1..a39e70e2baa8f0c312bcfd8a1a9e233c9f651f6c 100644 (file)
@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
 {
        struct mxr_layer *layer;
        int ret;
-       struct mxr_layer_ops ops = {
+       static struct mxr_layer_ops ops = {
                .release = mxr_graph_layer_release,
                .buffer_set = mxr_graph_buffer_set,
                .stream_set = mxr_graph_stream_set,
index b713403024ef996365bd420d7af9fc7470001d8a..53cb5adcc8be87f74ba567d76b693c9fadb7fddb 100644 (file)
@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
                layer->update_buf = next;
        }
 
-       layer->ops.buffer_set(layer, layer->update_buf);
+       layer->ops->buffer_set(layer, layer->update_buf);
 
        if (done && done != layer->shadow_buf)
                vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
index b4d2696501e409cfd765091b68ce0a017cbfdcca..91df48e910c6b669672467f6e6ef2312d56a8110 100644 (file)
@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
        layer->geo.src.height = layer->geo.src.full_height;
 
        mxr_geometry_dump(mdev, &layer->geo);
-       layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
+       layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
        mxr_geometry_dump(mdev, &layer->geo);
 }
 
@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
        layer->geo.dst.full_width = mbus_fmt.width;
        layer->geo.dst.full_height = mbus_fmt.height;
        layer->geo.dst.field = mbus_fmt.field;
-       layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
+       layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
 
        mxr_geometry_dump(mdev, &layer->geo);
 }
@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
        /* set source size to highest accepted value */
        geo->src.full_width = max(geo->dst.full_width, pix->width);
        geo->src.full_height = max(geo->dst.full_height, pix->height);
-       layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
+       layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
        mxr_geometry_dump(mdev, &layer->geo);
        /* set cropping to total visible screen */
        geo->src.width = pix->width;
@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
        geo->src.x_offset = 0;
        geo->src.y_offset = 0;
        /* assure consistency of geometry */
-       layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
+       layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
        mxr_geometry_dump(mdev, &layer->geo);
        /* set full size to lowest possible value */
        geo->src.full_width = 0;
        geo->src.full_height = 0;
-       layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
+       layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
        mxr_geometry_dump(mdev, &layer->geo);
 
        /* returning results */
@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
                target->width = s->r.width;
                target->height = s->r.height;
 
-               layer->ops.fix_geometry(layer, stage, s->flags);
+               layer->ops->fix_geometry(layer, stage, s->flags);
 
                /* retrieve update selection rectangle */
                res.left = target->x_offset;
@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
        mxr_output_get(mdev);
 
        mxr_layer_update_output(layer);
-       layer->ops.format_set(layer);
+       layer->ops->format_set(layer);
        /* enabling layer in hardware */
        spin_lock_irqsave(&layer->enq_slock, flags);
        layer->state = MXR_LAYER_STREAMING;
        spin_unlock_irqrestore(&layer->enq_slock, flags);
 
-       layer->ops.stream_set(layer, MXR_ENABLE);
+       layer->ops->stream_set(layer, MXR_ENABLE);
        mxr_streamer_get(mdev);
 
        return 0;
@@ -1030,7 +1030,7 @@ static void stop_streaming(struct vb2_queue *vq)
        spin_unlock_irqrestore(&layer->enq_slock, flags);
 
        /* disabling layer in hardware */
-       layer->ops.stream_set(layer, MXR_DISABLE);
+       layer->ops->stream_set(layer, MXR_DISABLE);
        /* remove one streamer */
        mxr_streamer_put(mdev);
        /* allow changes in output configuration */
@@ -1068,8 +1068,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
 
 void mxr_layer_release(struct mxr_layer *layer)
 {
-       if (layer->ops.release)
-               layer->ops.release(layer);
+       if (layer->ops->release)
+               layer->ops->release(layer);
 }
 
 void mxr_base_layer_release(struct mxr_layer *layer)
@@ -1095,7 +1095,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
 
        layer->mdev = mdev;
        layer->idx = idx;
-       layer->ops = *ops;
+       layer->ops = ops;
 
        spin_lock_init(&layer->enq_slock);
        INIT_LIST_HEAD(&layer->enq_list);
index c9388c45ad757b48d42c7b45f7b603d03fdd0b12..ce71ece866230daa5dbdbf10432edef646fbcc5f 100644 (file)
@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
 {
        struct mxr_layer *layer;
        int ret;
-       struct mxr_layer_ops ops = {
+       static struct mxr_layer_ops ops = {
                .release = mxr_vp_layer_release,
                .buffer_set = mxr_vp_buffer_set,
                .stream_set = mxr_vp_stream_set,
index 82affaedf06778ad73d0d40fa1a87b0e621630cd..42833ec0020f4c7b9f36dce7daa3b085fe50011f 100644 (file)
@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
        unsigned char readbuf[RDS_BUFFER];
        int i = 0;
 
+       if (count > RDS_BUFFER)
+               return -EFAULT;
        mutex_lock(&dev->lock);
        if (dev->rdsstat == 0)
                cadet_start_rds(dev);
@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
                readbuf[i++] = dev->rdsbuf[dev->rdsout++];
        mutex_unlock(&dev->lock);
 
-       if (i && copy_to_user(data, readbuf, i))
-               return -EFAULT;
+       if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
+               i = -EFAULT;
+
        return i;
 }
 
index 5236035f0f2a464423f7379347a19ed81acc9975..c622c74bcb9e6f6c69e7240a9f09a1cf0378e9b6 100644 (file)
@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
 /* TEA5757 pin mappings */
 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
 
-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
 
 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
index 050b3bb96fecc13a15f05d3f294147200954e8dd..79f62b98108a8f0ca03626233820b5d4ebe6e83c 100644 (file)
@@ -79,7 +79,7 @@ struct shark_device {
        u32 last_val;
 };
 
-static atomic_t shark_instance = ATOMIC_INIT(0);
+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
 
 static void shark_write_val(struct snd_tea575x *tea, u32 val)
 {
index 8654e0dc5c95376aa7140498e10af342b955d15a..0608a6473fb9fcc9c06aa5aa741d1ea63af2f753 100644 (file)
@@ -74,7 +74,7 @@ struct shark_device {
        u8 *transfer_buffer;
 };
 
-static atomic_t shark_instance = ATOMIC_INIT(0);
+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
 
 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
 {
index dccf58691650d5d3c5432f79794fe09fdac30d77..d5db4115af40085f39008e24ba799923bb0601a5 100644 (file)
@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
        struct si476x_radio *radio;
        struct v4l2_ctrl *ctrl;
 
-       static atomic_t instance = ATOMIC_INIT(0);
+       static atomic_unchecked_t instance = ATOMIC_INIT(0);
 
        radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
        if (!radio)
index 704397f3c106b6419ed7a3c88b8dcb2f7ae3a902..4d059779ca609802c5656b0f3a18eb0623e85a91 100644 (file)
@@ -71,7 +71,7 @@ module_param(default_rds_buf, uint, 0444);
 MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
 
 /* Radio Nr */
-static u32 radio_nr = -1;
+static int radio_nr = -1;
 module_param(radio_nr, int, 0444);
 MODULE_PARM_DESC(radio_nr, "Radio Nr");
 
index 9fd1527494ebd65ab200c38f13a20e28330034a1..8927230aded893980ad2f561c10d46f2e2b63d29 100644 (file)
@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
 
 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
 {
-       char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
-       char result[64];
-       return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
-                               sizeof(result), 0);
+       char *buf;
+       char *result;
+       int retval;
+
+       buf = kmalloc(2, GFP_KERNEL);
+       if (buf == NULL)
+               return -ENOMEM;
+       result = kmalloc(64, GFP_KERNEL);
+       if (result == NULL) {
+               kfree(buf);
+               return -ENOMEM;
+       }
+
+       buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
+       buf[1] = enable ? 1 : 0;
+
+       retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
+
+       kfree(buf);
+       kfree(result);
+       return retval;
 }
 
 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
 {
-       char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
-       char state[3];
-       return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
+       char *buf;
+       char *state;
+       int retval;
+
+       buf = kmalloc(2, GFP_KERNEL);
+       if (buf == NULL)
+               return -ENOMEM;
+       state = kmalloc(3, GFP_KERNEL);
+       if (state == NULL) {
+               kfree(buf);
+               return -ENOMEM;
+       }
+
+       buf[0] = CINERGYT2_EP1_SLEEP_MODE;
+       buf[1] = enable ? 1 : 0;
+
+       retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
+
+       kfree(buf);
+       kfree(state);
+       return retval;
 }
 
 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
 {
-       char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
-       char state[3];
+       char *query;
+       char *state;
        int ret;
+       query = kmalloc(1, GFP_KERNEL);
+       if (query == NULL)
+               return -ENOMEM;
+       state = kmalloc(3, GFP_KERNEL);
+       if (state == NULL) {
+               kfree(query);
+               return -ENOMEM;
+       }
+
+       query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
 
        adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
 
-       ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
-                               sizeof(state), 0);
+       ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
        if (ret < 0) {
                deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
                        "state info\n");
@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
 
        /* Copy this pointer as we are gonna need it in the release phase */
        cinergyt2_usb_device = adap->dev;
-
+       kfree(query);
+       kfree(state);
        return 0;
 }
 
@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
 {
        struct cinergyt2_state *st = d->priv;
-       u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
+       u8 *key, *cmd;
        int i;
 
+       cmd = kmalloc(1, GFP_KERNEL);
+       if (cmd == NULL)
+               return -EINVAL;
+       key = kzalloc(5, GFP_KERNEL);
+       if (key == NULL) {
+               kfree(cmd);
+               return -EINVAL;
+       }
+
+       cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
+
        *state = REMOTE_NO_KEY_PRESSED;
 
-       dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
+       dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
        if (key[4] == 0xff) {
                /* key repeat */
                st->rc_counter++;
@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
                                        *event = d->last_event;
                                        deb_rc("repeat key, event %x\n",
                                                   *event);
-                                       return 0;
+                                       goto out;
                                }
                        }
                        deb_rc("repeated key (non repeatable)\n");
                }
-               return 0;
+               goto out;
        }
 
        /* hack to pass checksum on the custom field */
@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
 
                deb_rc("key: %*ph\n", 5, key);
        }
+out:
+       kfree(cmd);
+       kfree(key);
        return 0;
 }
 
index c890fe46acd3966b8a1b6c0626e1d459c77d6bdd..f9b2ae68abd00066a13efbeef2b04e70d07a6610 100644 (file)
@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
                                        fe_status_t *status)
 {
        struct cinergyt2_fe_state *state = fe->demodulator_priv;
-       struct dvbt_get_status_msg result;
-       u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
+       struct dvbt_get_status_msg *result;
+       u8 *cmd;
        int ret;
 
-       ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
-                       sizeof(result), 0);
+       cmd = kmalloc(1, GFP_KERNEL);
+       if (cmd == NULL)
+               return -ENOMEM;
+       result = kmalloc(sizeof(*result), GFP_KERNEL);
+       if (result == NULL) {
+               kfree(cmd);
+               return -ENOMEM;
+       }
+
+       cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
+
+       ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
+                       sizeof(*result), 0);
        if (ret < 0)
-               return ret;
+               goto out;
 
        *status = 0;
 
-       if (0xffff - le16_to_cpu(result.gain) > 30)
+       if (0xffff - le16_to_cpu(result->gain) > 30)
                *status |= FE_HAS_SIGNAL;
-       if (result.lock_bits & (1 << 6))
+       if (result->lock_bits & (1 << 6))
                *status |= FE_HAS_LOCK;
-       if (result.lock_bits & (1 << 5))
+       if (result->lock_bits & (1 << 5))
                *status |= FE_HAS_SYNC;
-       if (result.lock_bits & (1 << 4))
+       if (result->lock_bits & (1 << 4))
                *status |= FE_HAS_CARRIER;
-       if (result.lock_bits & (1 << 1))
+       if (result->lock_bits & (1 << 1))
                *status |= FE_HAS_VITERBI;
 
        if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
                        (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
                *status &= ~FE_HAS_LOCK;
 
-       return 0;
+out:
+       kfree(cmd);
+       kfree(result);
+       return ret;
 }
 
 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
 {
        struct cinergyt2_fe_state *state = fe->demodulator_priv;
-       struct dvbt_get_status_msg status;
-       char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
+       struct dvbt_get_status_msg *status;
+       char *cmd;
        int ret;
 
-       ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
-                               sizeof(status), 0);
+       cmd = kmalloc(1, GFP_KERNEL);
+       if (cmd == NULL)
+               return -ENOMEM;
+       status = kmalloc(sizeof(*status), GFP_KERNEL);
+       if (status == NULL) {
+               kfree(cmd);
+               return -ENOMEM;
+       }
+
+       cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
+
+       ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
+                               sizeof(*status), 0);
        if (ret < 0)
-               return ret;
+               goto out;
 
-       *ber = le32_to_cpu(status.viterbi_error_rate);
+       *ber = le32_to_cpu(status->viterbi_error_rate);
+out:
+       kfree(cmd);
+       kfree(status);
        return 0;
 }
 
 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
 {
        struct cinergyt2_fe_state *state = fe->demodulator_priv;
-       struct dvbt_get_status_msg status;
-       u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
+       struct dvbt_get_status_msg *status;
+       u8 *cmd;
        int ret;
 
-       ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
-                               sizeof(status), 0);
+       cmd = kmalloc(1, GFP_KERNEL);
+       if (cmd == NULL)
+               return -ENOMEM;
+       status = kmalloc(sizeof(*status), GFP_KERNEL);
+       if (status == NULL) {
+               kfree(cmd);
+               return -ENOMEM;
+       }
+
+       cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
+
+       ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
+                               sizeof(*status), 0);
        if (ret < 0) {
                err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
                        ret);
-               return ret;
+               goto out;
        }
-       *unc = le32_to_cpu(status.uncorrected_block_count);
-       return 0;
+       *unc = le32_to_cpu(status->uncorrected_block_count);
+
+out:
+       kfree(cmd);
+       kfree(status);
+       return ret;
 }
 
 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
                                                u16 *strength)
 {
        struct cinergyt2_fe_state *state = fe->demodulator_priv;
-       struct dvbt_get_status_msg status;
-       char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
+       struct dvbt_get_status_msg *status;
+       char *cmd;
        int ret;
 
-       ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
-                               sizeof(status), 0);
+       cmd = kmalloc(1, GFP_KERNEL);
+       if (cmd == NULL)
+               return -ENOMEM;
+       status = kmalloc(sizeof(*status), GFP_KERNEL);
+       if (status == NULL) {
+               kfree(cmd);
+               return -ENOMEM;
+       }
+
+       cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
+
+       ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
+                               sizeof(*status), 0);
        if (ret < 0) {
                err("cinergyt2_fe_read_signal_strength() Failed!"
                        " (Error=%d)\n", ret);
-               return ret;
+               goto out;
        }
-       *strength = (0xffff - le16_to_cpu(status.gain));
+       *strength = (0xffff - le16_to_cpu(status->gain));
+
+out:
+       kfree(cmd);
+       kfree(status);
        return 0;
 }
 
 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
 {
        struct cinergyt2_fe_state *state = fe->demodulator_priv;
-       struct dvbt_get_status_msg status;
-       char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
+       struct dvbt_get_status_msg *status;
+       char *cmd;
        int ret;
 
-       ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
-                               sizeof(status), 0);
+       cmd = kmalloc(1, GFP_KERNEL);
+       if (cmd == NULL)
+               return -ENOMEM;
+       status = kmalloc(sizeof(*status), GFP_KERNEL);
+       if (status == NULL) {
+               kfree(cmd);
+               return -ENOMEM;
+       }
+
+       cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
+
+       ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
+                               sizeof(*status), 0);
        if (ret < 0) {
                err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
-               return ret;
+               goto out;
        }
-       *snr = (status.snr << 8) | status.snr;
-       return 0;
+       *snr = (status->snr << 8) | status->snr;
+
+out:
+       kfree(cmd);
+       kfree(status);
+       return ret;
 }
 
 static int cinergyt2_fe_init(struct dvb_frontend *fe)
@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
 {
        struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
        struct cinergyt2_fe_state *state = fe->demodulator_priv;
-       struct dvbt_set_parameters_msg param;
-       char result[2];
+       struct dvbt_set_parameters_msg *param;
+       char *result;
        int err;
 
-       param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
-       param.tps = cpu_to_le16(compute_tps(fep));
-       param.freq = cpu_to_le32(fep->frequency / 1000);
-       param.flags = 0;
+       result = kmalloc(2, GFP_KERNEL);
+       if (result == NULL)
+               return -ENOMEM;
+       param = kmalloc(sizeof(*param), GFP_KERNEL);
+       if (param == NULL) {
+               kfree(result);
+               return -ENOMEM;
+       }
+
+       param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
+       param->tps = cpu_to_le16(compute_tps(fep));
+       param->freq = cpu_to_le32(fep->frequency / 1000);
+       param->flags = 0;
 
        switch (fep->bandwidth_hz) {
        default:
        case 8000000:
-               param.bandwidth = 8;
+               param->bandwidth = 8;
                break;
        case 7000000:
-               param.bandwidth = 7;
+               param->bandwidth = 7;
                break;
        case 6000000:
-               param.bandwidth = 6;
+               param->bandwidth = 6;
                break;
        }
 
        err = dvb_usb_generic_rw(state->d,
-                       (char *)&param, sizeof(param),
-                       result, sizeof(result), 0);
+                       (char *)param, sizeof(*param),
+                       result, 2, 0);
        if (err < 0)
                err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
 
-       return (err < 0) ? err : 0;
+       kfree(result);
+       kfree(param);
+       return err;
 }
 
 static void cinergyt2_fe_release(struct dvb_frontend *fe)
index 733a7ff7b207819bcd05f2a816388fa21e9f69b5..f8b52e3237b207ede319c78a8cf80da12ea0590d 100644 (file)
@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
 
 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
 {
-       struct hexline hx;
-       u8 reset;
+       struct hexline *hx;
+       u8 *reset;
        int ret,pos=0;
 
+       reset = kmalloc(1, GFP_KERNEL);
+       if (reset == NULL)
+               return -ENOMEM;
+
+       hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
+       if (hx == NULL) {
+               kfree(reset);
+               return -ENOMEM;
+       }
+
        /* stop the CPU */
-       reset = 1;
-       if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
+       reset[0] = 1;
+       if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
                err("could not stop the USB controller CPU.");
 
-       while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
-               deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
-               ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
+       while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
+               deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
+               ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
 
-               if (ret != hx.len) {
+               if (ret != hx->len) {
                        err("error while transferring firmware "
                                "(transferred size: %d, block size: %d)",
-                               ret,hx.len);
+                               ret,hx->len);
                        ret = -EINVAL;
                        break;
                }
        }
        if (ret < 0) {
                err("firmware download failed at %d with %d",pos,ret);
+               kfree(reset);
+               kfree(hx);
                return ret;
        }
 
        if (ret == 0) {
                /* restart the CPU */
-               reset = 0;
-               if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
+               reset[0] = 0;
+               if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
                        err("could not restart the USB controller CPU.");
                        ret = -EINVAL;
                }
        } else
                ret = -EIO;
 
+       kfree(reset);
+       kfree(hx);
+
        return ret;
 }
 EXPORT_SYMBOL(usb_cypress_load_firmware);
index 1a3df10d6bad5e1c1354874c8d715a7f6e5629c0..57997a5cd5e8cc2ca5c306fa427bf068cca8980d 100644 (file)
@@ -118,7 +118,7 @@ struct su3000_state {
 
 struct s6x0_state {
        int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
-};
+} __no_const;
 
 /* debug */
 static int dvb_usb_dw2102_debug;
index 5801ae7f672abdb02f2657a9f17b4192cb171adb..83f71faa28161a194422d8944325ff33d0ee5207 100644 (file)
@@ -87,8 +87,11 @@ struct technisat_usb2_state {
 static int technisat_usb2_i2c_access(struct usb_device *udev,
                u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
 {
-       u8 b[64];
-       int ret, actual_length;
+       u8 *b = kmalloc(64, GFP_KERNEL);
+       int ret, actual_length, error = 0;
+
+       if (b == NULL)
+               return -ENOMEM;
 
        deb_i2c("i2c-access: %02x, tx: ", device_addr);
        debug_dump(tx, txlen, deb_i2c);
@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
 
        if (ret < 0) {
                err("i2c-error: out failed %02x = %d", device_addr, ret);
-               return -ENODEV;
+               error = -ENODEV;
+               goto out;
        }
 
        ret = usb_bulk_msg(udev,
@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
                        b, 64, &actual_length, 1000);
        if (ret < 0) {
                err("i2c-error: in failed %02x = %d", device_addr, ret);
-               return -ENODEV;
+               error = -ENODEV;
+               goto out;
        }
 
        if (b[0] != I2C_STATUS_OK) {
@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
                /* handle tuner-i2c-nak */
                if (!(b[0] == I2C_STATUS_NAK &&
                                device_addr == 0x60
-                               /* && device_is_technisat_usb2 */))
-                       return -ENODEV;
+                               /* && device_is_technisat_usb2 */)) {
+                       error = -ENODEV;
+                       goto out;
+               }
        }
 
        deb_i2c("status: %d, ", b[0]);
@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
 
        deb_i2c("\n");
 
-       return 0;
+out:
+       kfree(b);
+       return error;
 }
 
 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
 {
        int ret;
 
-       u8 led[8] = {
-               red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
-               0
-       };
+       u8 *led = kzalloc(8, GFP_KERNEL);
+
+       if (led == NULL)
+               return -ENOMEM;
 
        if (disable_led_control && state != TECH_LED_OFF)
                return 0;
 
+       led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
+
        switch (state) {
        case TECH_LED_ON:
                led[1] = 0x82;
@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
                red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
                USB_TYPE_VENDOR | USB_DIR_OUT,
                0, 0,
-               led, sizeof(led), 500);
+               led, 8, 500);
 
        mutex_unlock(&d->i2c_mutex);
+
+       kfree(led);
+
        return ret;
 }
 
 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
 {
        int ret;
-       u8 b = 0;
+       u8 *b = kzalloc(1, GFP_KERNEL);
+
+       if (b == NULL)
+               return -ENOMEM;
 
        if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
                return -EAGAIN;
@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
                SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
                USB_TYPE_VENDOR | USB_DIR_OUT,
                (red << 8) | green, 0,
-               &b, 1, 500);
+               b, 1, 500);
 
        mutex_unlock(&d->i2c_mutex);
 
+       kfree(b);
+
        return ret;
 }
 
@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
                struct dvb_usb_device_description **desc, int *cold)
 {
        int ret;
-       u8 version[3];
+       u8 *version = kmalloc(3, GFP_KERNEL);
 
        /* first select the interface */
        if (usb_set_interface(udev, 0, 1) != 0)
@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
 
        *cold = 0; /* by default do not download a firmware - just in case something is wrong */
 
+       if (version == NULL)
+               return 0;
+
        ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                GET_VERSION_INFO_VENDOR_REQUEST,
                USB_TYPE_VENDOR | USB_DIR_IN,
                0, 0,
-               version, sizeof(version), 500);
+               version, 3, 500);
 
        if (ret < 0)
                *cold = 1;
@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
                *cold = 0;
        }
 
+       kfree(version);
+
        return 0;
 }
 
@@ -594,10 +618,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
 
 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
 {
-       u8 buf[62], *b;
+       u8 *buf, *b;
        int ret;
        struct ir_raw_event ev;
 
+       buf = kmalloc(62, GFP_KERNEL);
+
+       if (buf == NULL)
+               return -ENOMEM;
+
        buf[0] = GET_IR_DATA_VENDOR_REQUEST;
        buf[1] = 0x08;
        buf[2] = 0x8f;
@@ -620,16 +649,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
                        GET_IR_DATA_VENDOR_REQUEST,
                        USB_TYPE_VENDOR | USB_DIR_IN,
                        0x8080, 0,
-                       buf, sizeof(buf), 500);
+                       buf, 62, 500);
 
 unlock:
        mutex_unlock(&d->i2c_mutex);
 
-       if (ret < 0)
+       if (ret < 0) {
+               kfree(buf);
                return ret;
+       }
 
-       if (ret == 1)
+       if (ret == 1) {
+               kfree(buf);
                return 0; /* no key pressed */
+       }
 
        /* decoding */
        b = buf+1;
@@ -656,6 +689,8 @@ unlock:
 
        ir_raw_event_handle(d->rc_dev);
 
+       kfree(buf);
+
        return 1;
 }
 
index af635430524e86a4cf5129d9be0c88a180b303e2..0436f200215786a98a60417dbaf394e6cf3153c3 100644 (file)
@@ -429,7 +429,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
                 * by passing a very big num_planes value */
                uplane = compat_alloc_user_space(num_planes *
                                                sizeof(struct v4l2_plane));
-               kp->m.planes = (__force struct v4l2_plane *)uplane;
+               kp->m.planes = (__force_kernel struct v4l2_plane *)uplane;
 
                while (--num_planes >= 0) {
                        ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
@@ -500,7 +500,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
                if (num_planes == 0)
                        return 0;
 
-               uplane = (__force struct v4l2_plane __user *)kp->m.planes;
+               uplane = (struct v4l2_plane __force_user *)kp->m.planes;
                if (get_user(p, &up->m.planes))
                        return -EFAULT;
                uplane32 = compat_ptr(p);
@@ -564,7 +564,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
                get_user(kp->flags, &up->flags) ||
                copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
                        return -EFAULT;
-       kp->base = (__force void *)compat_ptr(tmp);
+       kp->base = (__force_kernel void *)compat_ptr(tmp);
        return 0;
 }
 
@@ -669,7 +669,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
                        n * sizeof(struct v4l2_ext_control32)))
                return -EFAULT;
        kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
-       kp->controls = (__force struct v4l2_ext_control *)kcontrols;
+       kp->controls = (__force_kernel struct v4l2_ext_control *)kcontrols;
        while (--n >= 0) {
                u32 id;
 
@@ -696,7 +696,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
 {
        struct v4l2_ext_control32 __user *ucontrols;
        struct v4l2_ext_control __user *kcontrols =
-               (__force struct v4l2_ext_control __user *)kp->controls;
+               (struct v4l2_ext_control __force_user *)kp->controls;
        int n = kp->count;
        compat_caddr_t p;
 
@@ -780,7 +780,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
                get_user(tmp, &up->edid) ||
                copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
                        return -EFAULT;
-       kp->edid = (__force u8 *)compat_ptr(tmp);
+       kp->edid = (__force_kernel u8 *)compat_ptr(tmp);
        return 0;
 }
 
index 015f92aab44a85b602c0ae58b3632fe7661015cf..59e311edbcb0969e41185cf410e1d50d23767150 100644 (file)
@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
 EXPORT_SYMBOL_GPL(v4l2_device_put);
 
 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
-                                               atomic_t *instance)
+                                               atomic_unchecked_t *instance)
 {
-       int num = atomic_inc_return(instance) - 1;
+       int num = atomic_inc_return_unchecked(instance) - 1;
        int len = strlen(basename);
 
        if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
index faac2f4e0f3ad358699a55123ebe18b38e8f65f5..e39dcd9f4d580ee21855ae916ff48a14f736f62f 100644 (file)
@@ -2151,7 +2151,8 @@ struct v4l2_ioctl_info {
                                struct file *file, void *fh, void *p);
        } u;
        void (*debug)(const void *arg, bool write_only);
-};
+} __do_const;
+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
 
 /* This control needs a priority check */
 #define INFO_FL_PRIO   (1 << 0)
@@ -2335,7 +2336,7 @@ static long __video_do_ioctl(struct file *file,
        struct video_device *vfd = video_devdata(file);
        const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
        bool write_only = false;
-       struct v4l2_ioctl_info default_info;
+       v4l2_ioctl_info_no_const default_info;
        const struct v4l2_ioctl_info *info;
        void *fh = file->private_data;
        struct v4l2_fh *vfh = NULL;
@@ -2422,7 +2423,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
                                ret = -EINVAL;
                                break;
                        }
-                       *user_ptr = (void __user *)buf->m.planes;
+                       *user_ptr = (void __force_user *)buf->m.planes;
                        *kernel_ptr = (void **)&buf->m.planes;
                        *array_size = sizeof(struct v4l2_plane) * buf->length;
                        ret = 1;
@@ -2439,7 +2440,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
                                ret = -EINVAL;
                                break;
                        }
-                       *user_ptr = (void __user *)edid->edid;
+                       *user_ptr = (void __force_user *)edid->edid;
                        *kernel_ptr = (void **)&edid->edid;
                        *array_size = edid->blocks * 128;
                        ret = 1;
@@ -2457,7 +2458,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
                                ret = -EINVAL;
                                break;
                        }
-                       *user_ptr = (void __user *)ctrls->controls;
+                       *user_ptr = (void __force_user *)ctrls->controls;
                        *kernel_ptr = (void **)&ctrls->controls;
                        *array_size = sizeof(struct v4l2_ext_control)
                                    * ctrls->count;
@@ -2558,7 +2559,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
        }
 
        if (has_array_args) {
-               *kernel_ptr = (void __force *)user_ptr;
+               *kernel_ptr = (void __force_kernel *)user_ptr;
                if (copy_to_user(user_ptr, mbuf, array_size))
                        err = -EFAULT;
                goto out_array_args;
index 24696f59215b6b819b44b92719c86afa381fec2e..3637780bd06f282dad3604da098082c5b55be4b4 100644 (file)
@@ -211,7 +211,6 @@ struct omap3_gpmc_regs {
 };
 
 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
-static struct irq_chip gpmc_irq_chip;
 static int gpmc_irq_start;
 
 static struct resource gpmc_mem_root;
@@ -939,6 +938,17 @@ static void gpmc_irq_noop(struct irq_data *data) { }
 
 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
 
+static struct irq_chip gpmc_irq_chip = {
+       .name = "gpmc",
+       .irq_startup = gpmc_irq_noop_ret,
+       .irq_enable = gpmc_irq_enable,
+       .irq_disable = gpmc_irq_disable,
+       .irq_shutdown = gpmc_irq_noop,
+       .irq_ack = gpmc_irq_noop,
+       .irq_mask = gpmc_irq_noop,
+       .irq_unmask = gpmc_irq_noop,
+};
+
 static int gpmc_setup_irq(void)
 {
        int i;
@@ -953,15 +963,6 @@ static int gpmc_setup_irq(void)
                return gpmc_irq_start;
        }
 
-       gpmc_irq_chip.name = "gpmc";
-       gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
-       gpmc_irq_chip.irq_enable = gpmc_irq_enable;
-       gpmc_irq_chip.irq_disable = gpmc_irq_disable;
-       gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
-       gpmc_irq_chip.irq_ack = gpmc_irq_noop;
-       gpmc_irq_chip.irq_mask = gpmc_irq_noop;
-       gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
-
        gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
        gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
 
index 187f83629f7ef8df1fcf4f5d7c6c7d83cff49fb5..679544bcdc73fee70b1fb759fb8ad21722208888 100644 (file)
@@ -6746,8 +6746,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
        seq_printf(m, "  MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
        seq_printf(m, "  MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+       seq_printf(m, "  RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
+#else
        seq_printf(m, "  RequestFrames @ 0x%p (Dma @ 0x%p)\n",
                                        (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
+#endif
+
        /*
         *  Rounding UP to nearest 4-kB boundary here...
         */
@@ -6760,7 +6765,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
                                        ioc->facts.GlobalCredits);
 
        seq_printf(m, "  Frames   @ 0x%p (Dma @ 0x%p)\n",
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+                                       NULL, NULL);
+#else
                                        (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
+#endif
        sz = (ioc->reply_sz * ioc->reply_depth) + 128;
        seq_printf(m, "    {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
                                        ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
index 5bdaae15a74257d9c252abed3b15ae5ce5cde69a..eced16f6b93e1079ee73c148c600345be7768c8c 100644 (file)
@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
                return 0;
 }
 
+static inline void
+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
+{
+       if (phy_info->port_details) {
+               phy_info->port_details->rphy = rphy;
+               dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
+                   ioc->name, rphy));
+       }
+
+       if (rphy) {
+               dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
+                   &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
+               dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
+                   ioc->name, rphy, rphy->dev.release));
+       }
+}
+
 /* no mutex */
 static void
 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
                return NULL;
 }
 
-static inline void
-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
-{
-       if (phy_info->port_details) {
-               phy_info->port_details->rphy = rphy;
-               dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
-                   ioc->name, rphy));
-       }
-
-       if (rphy) {
-               dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
-                   &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
-               dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
-                   ioc->name, rphy, rphy->dev.release));
-       }
-}
-
 static inline struct sas_port *
 mptsas_get_port(struct mptsas_phyinfo *phy_info)
 {
index b7d87cd227a902b4de1c405dcf726419217b1adc..3fb36da308007ced0d9f252ecf5232ab734b098b 100644 (file)
@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
        "Array Controller Device"
 };
 
-static char *chtostr(char *tmp, u8 *chars, int n)
-{
-       tmp[0] = 0;
-       return strncat(tmp, (char *)chars, n);
-}
-
 static int i2o_report_query_status(struct seq_file *seq, int block_status,
                                   char *group)
 {
@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
 {
        struct i2o_controller *c = (struct i2o_controller *)seq->private;
-       static u32 work32[5];
-       static u8 *work8 = (u8 *) work32;
-       static u16 *work16 = (u16 *) work32;
+       u32 work32[5];
+       u8 *work8 = (u8 *) work32;
+       u16 *work16 = (u16 *) work32;
        int token;
        u32 hwcap;
 
@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
        } *result;
 
        i2o_exec_execute_ddm_table ddm_table;
-       char tmp[28 + 1];
 
        result = kmalloc(sizeof(*result), GFP_KERNEL);
        if (!result)
@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
 
                seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
                seq_printf(seq, "%-#8x", ddm_table.module_id);
-               seq_printf(seq, "%-29s",
-                          chtostr(tmp, ddm_table.module_name_version, 28));
+               seq_printf(seq, "%-.28s", ddm_table.module_name_version);
                seq_printf(seq, "%9d  ", ddm_table.data_size);
                seq_printf(seq, "%8d", ddm_table.code_size);
 
@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
 
        i2o_driver_result_table *result;
        i2o_driver_store_table *dst;
-       char tmp[28 + 1];
 
        result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
        if (result == NULL)
@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
 
                seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
                seq_printf(seq, "%-#8x", dst->module_id);
-               seq_printf(seq, "%-29s",
-                          chtostr(tmp, dst->module_name_version, 28));
-               seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
+               seq_printf(seq, "%-.28s", dst->module_name_version);
+               seq_printf(seq, "%-.8s", dst->date);
                seq_printf(seq, "%8d ", dst->module_size);
                seq_printf(seq, "%8d ", dst->mpb_size);
                seq_printf(seq, "0x%04x", dst->module_flags);
@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
 {
        struct i2o_device *d = (struct i2o_device *)seq->private;
-       static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
+       u32 work32[128];        // allow for "stuff" + up to 256 byte (max) serial number
        // == (allow) 512d bytes (max)
-       static u16 *work16 = (u16 *) work32;
+       u16 *work16 = (u16 *) work32;
        int token;
-       char tmp[16 + 1];
 
        token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
 
@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
        seq_printf(seq, "Device Class  : %s\n", i2o_get_class_name(work16[0]));
        seq_printf(seq, "Owner TID     : %0#5x\n", work16[2]);
        seq_printf(seq, "Parent TID    : %0#5x\n", work16[3]);
-       seq_printf(seq, "Vendor info   : %s\n",
-                  chtostr(tmp, (u8 *) (work32 + 2), 16));
-       seq_printf(seq, "Product info  : %s\n",
-                  chtostr(tmp, (u8 *) (work32 + 6), 16));
-       seq_printf(seq, "Description   : %s\n",
-                  chtostr(tmp, (u8 *) (work32 + 10), 16));
-       seq_printf(seq, "Product rev.  : %s\n",
-                  chtostr(tmp, (u8 *) (work32 + 14), 8));
+       seq_printf(seq, "Vendor info   : %.16s\n", (u8 *) (work32 + 2));
+       seq_printf(seq, "Product info  : %.16s\n", (u8 *) (work32 + 6));
+       seq_printf(seq, "Description   : %.16s\n", (u8 *) (work32 + 10));
+       seq_printf(seq, "Product rev.  : %.8s\n", (u8 *) (work32 + 14));
 
        seq_printf(seq, "Serial number : ");
        print_serial_number(seq, (u8 *) (work32 + 16),
@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
                u8 pad[256];    // allow up to 256 byte (max) serial number
        } result;
 
-       char tmp[24 + 1];
-
        token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
 
        if (token < 0) {
@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
        }
 
        seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
-       seq_printf(seq, "Module name         : %s\n",
-                  chtostr(tmp, result.module_name, 24));
-       seq_printf(seq, "Module revision     : %s\n",
-                  chtostr(tmp, result.module_rev, 8));
+       seq_printf(seq, "Module name         : %.24s\n", result.module_name);
+       seq_printf(seq, "Module revision     : %.8s\n", result.module_rev);
 
        seq_printf(seq, "Serial number       : ");
        print_serial_number(seq, result.serial_number, sizeof(result) - 36);
@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
                u8 instance_number[4];
        } result;
 
-       char tmp[64 + 1];
-
        token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
 
        if (token < 0) {
@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
                return 0;
        }
 
-       seq_printf(seq, "Device name     : %s\n",
-                  chtostr(tmp, result.device_name, 64));
-       seq_printf(seq, "Service name    : %s\n",
-                  chtostr(tmp, result.service_name, 64));
-       seq_printf(seq, "Physical name   : %s\n",
-                  chtostr(tmp, result.physical_location, 64));
-       seq_printf(seq, "Instance number : %s\n",
-                  chtostr(tmp, result.instance_number, 4));
+       seq_printf(seq, "Device name     : %.64s\n", result.device_name);
+       seq_printf(seq, "Service name    : %.64s\n", result.service_name);
+       seq_printf(seq, "Physical name   : %.64s\n", result.physical_location);
+       seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
 
        return 0;
 }
@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
 {
        struct i2o_device *d = (struct i2o_device *)seq->private;
-       static u32 work32[12];
-       static u16 *work16 = (u16 *) work32;
-       static u8 *work8 = (u8 *) work32;
+       u32 work32[12];
+       u16 *work16 = (u16 *) work32;
+       u8 *work8 = (u8 *) work32;
        int token;
 
        token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
index 92752fb5b2d3d8b41364261a99eb44938e33be32..a7494f6d3e1c5e8744dc2e1bb5b919133d76c781 100644 (file)
@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
 
        spin_lock_irqsave(&c->context_list_lock, flags);
 
-       if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
-               atomic_inc(&c->context_list_counter);
+       if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
+               atomic_inc_unchecked(&c->context_list_counter);
 
-       entry->context = atomic_read(&c->context_list_counter);
+       entry->context = atomic_read_unchecked(&c->context_list_counter);
 
        list_add(&entry->list, &c->context_list);
 
@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
 
 #if BITS_PER_LONG == 64
        spin_lock_init(&c->context_list_lock);
-       atomic_set(&c->context_list_counter, 0);
+       atomic_set_unchecked(&c->context_list_counter, 0);
        INIT_LIST_HEAD(&c->context_list);
 #endif
 
index 9a8e185f11dfaf5fe8ba87432a24e8f704484b40..27ff17d11cb2d4f08aeb0753c2f2bfcce08d1dcd 100644 (file)
@@ -100,7 +100,7 @@ static int irq_last;
 static u32 *irq_count;
 static int num_irqs;
 
-static struct device_attribute **dev_attr;
+static device_attribute_no_const **dev_attr;
 static char **event_name;
 
 static u8 avg_sample = SAMPLE_16;
index c880c895c5a698182d10241a1ccd2fe231c6b237..45a7c68cedb5122375f4518438a2bd1b92393007 100644 (file)
@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
                                   const struct i2c_device_id *id)
 {
        struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
-       static struct max8925_chip *chip;
+       struct max8925_chip *chip;
        struct device_node *node = client->dev.of_node;
 
        if (node && !pdata) {
index 7612d89850ddf55566ac5bacbb3dae7111ea965a..70549c2148b835b2e732dd12b14712b397042d98 100644 (file)
@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
                    struct tps65910_platform_data *pdata)
 {
        int ret = 0;
-       static struct regmap_irq_chip *tps6591x_irqs_chip;
+       struct regmap_irq_chip *tps6591x_irqs_chip;
 
        if (!irq) {
                dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
index 1b772ef761cbf63cf1af01a32f9d720258214669..01e77d335685e0c8a71d79fd3c8cb977b3c69f55 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/of.h>
 #include <linux/irqdomain.h>
 #include <linux/i2c/twl.h>
+#include <asm/pgtable.h>
 
 #include "twl-core.h"
 
@@ -729,10 +730,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
         * Install an irq handler for each of the SIH modules;
         * clone dummy irq_chip since PIH can't *do* anything
         */
-       twl4030_irq_chip = dummy_irq_chip;
-       twl4030_irq_chip.name = "twl4030";
+       pax_open_kernel();
+       memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
+       *(const char **)&twl4030_irq_chip.name = "twl4030";
 
-       twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
+       *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
+       pax_close_kernel();
 
        for (i = irq_base; i < irq_end; i++) {
                irq_set_chip_and_handler(i, &twl4030_irq_chip,
index 464419b364408c084578cb3632c618bf52f62c7f..64bae8d6a15e5c0f9e4cb09d692dda9198a5e50b 100644 (file)
@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
                goto error_idr_alloc;
        c2dev->id = ret;
 
-       bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
+       pax_open_kernel();
+       *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
+       pax_close_kernel();
 
        c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
                                   "c2port%d", c2dev->id);
index 8385177ff32b076b4a16c4689969d49c785bc7c5..2f54635c367d317fe197f2f68c9272ead153b738 100644 (file)
@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, sid_data);
 
-       sid_bin_attr.size = sid_data->keysize;
+       pax_open_kernel();
+       *(size_t *)&sid_bin_attr.size = sid_data->keysize;
+       pax_close_kernel();
        if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
                return -ENODEV;
 
index 36f5d52775a98a7b72602fd4d4f79ba731b48df2..32311c3e83020822734920ccdd6ef7e9d777722c 100644 (file)
@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
        char before[BREAK_INSTR_SIZE];
        char after[BREAK_INSTR_SIZE];
 
-       probe_kernel_read(before, (char *)kgdbts_break_test,
+       probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
          BREAK_INSTR_SIZE);
        init_simple_test();
        ts.tst = plant_and_detach_test;
@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
        /* Activate test with initial breakpoint */
        if (!is_early)
                kgdb_breakpoint();
-       probe_kernel_read(after, (char *)kgdbts_break_test,
+       probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
          BREAK_INSTR_SIZE);
        if (memcmp(before, after, BREAK_INSTR_SIZE)) {
                printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
index 3ef4627f9cb1552a7585787f6da81cb880c4b784..8d00486a2eb220ddecba704ee5e770291d2c8c2e 100644 (file)
@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
         * the lid is closed. This leads to interrupts as soon as a little move
         * is done.
         */
-       atomic_inc(&lis3->count);
+       atomic_inc_unchecked(&lis3->count);
 
        wake_up_interruptible(&lis3->misc_wait);
        kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
        if (lis3->pm_dev)
                pm_runtime_get_sync(lis3->pm_dev);
 
-       atomic_set(&lis3->count, 0);
+       atomic_set_unchecked(&lis3->count, 0);
        return 0;
 }
 
@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
        add_wait_queue(&lis3->misc_wait, &wait);
        while (true) {
                set_current_state(TASK_INTERRUPTIBLE);
-               data = atomic_xchg(&lis3->count, 0);
+               data = atomic_xchg_unchecked(&lis3->count, 0);
                if (data)
                        break;
 
@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
                                              struct lis3lv02d, miscdev);
 
        poll_wait(file, &lis3->misc_wait, wait);
-       if (atomic_read(&lis3->count))
+       if (atomic_read_unchecked(&lis3->count))
                return POLLIN | POLLRDNORM;
        return 0;
 }
index c439c827eea803b887b678bb00b8a42790413af8..1f20f57138838666ac9c5eedeb3592fbaf047111 100644 (file)
@@ -297,7 +297,7 @@ struct lis3lv02d {
        struct input_polled_dev *idev;     /* input device */
        struct platform_device  *pdev;     /* platform device */
        struct regulator_bulk_data regulators[2];
-       atomic_t                count;     /* interrupt count after last read */
+       atomic_unchecked_t      count;     /* interrupt count after last read */
        union axis_conversion   ac;        /* hw -> logical axis */
        int                     mapped_btns[3];
 
index 2f30badc6ffd9251cd2e59835e3061b2816cd09f..c4c13d0ce0270ae4720d385d6c4fc089509ab9f6 100644 (file)
@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
        unsigned long nsec;
 
        nsec = CLKS2NSEC(clks);
-       atomic_long_inc(&mcs_op_statistics[op].count);
-       atomic_long_add(nsec, &mcs_op_statistics[op].total);
+       atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
+       atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
        if (mcs_op_statistics[op].max < nsec)
                mcs_op_statistics[op].max = nsec;
 }
index 4f76359223949f21e54c5a0bd5a57fae83561ff2..cdfcb2e355989224200a9b6b2caac8ea2c2f0f6a 100644 (file)
@@ -32,9 +32,9 @@
 
 #define printstat(s, f)                printstat_val(s, &gru_stats.f, #f)
 
-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
 {
-       unsigned long val = atomic_long_read(v);
+       unsigned long val = atomic_long_read_unchecked(v);
 
        seq_printf(s, "%16lu %s\n", val, id);
 }
@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
 
        seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
        for (op = 0; op < mcsop_last; op++) {
-               count = atomic_long_read(&mcs_op_statistics[op].count);
-               total = atomic_long_read(&mcs_op_statistics[op].total);
+               count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
+               total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
                max = mcs_op_statistics[op].max;
                seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
                           count ? total / count : 0, max);
index 5c3ce24596753afad29180aa88e6dd7a78d209c8..4915ccbbe6dbfa979c3dcb21571d93eb4f336a21 100644 (file)
@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
  * GRU statistics.
  */
 struct gru_stats_s {
-       atomic_long_t vdata_alloc;
-       atomic_long_t vdata_free;
-       atomic_long_t gts_alloc;
-       atomic_long_t gts_free;
-       atomic_long_t gms_alloc;
-       atomic_long_t gms_free;
-       atomic_long_t gts_double_allocate;
-       atomic_long_t assign_context;
-       atomic_long_t assign_context_failed;
-       atomic_long_t free_context;
-       atomic_long_t load_user_context;
-       atomic_long_t load_kernel_context;
-       atomic_long_t lock_kernel_context;
-       atomic_long_t unlock_kernel_context;
-       atomic_long_t steal_user_context;
-       atomic_long_t steal_kernel_context;
-       atomic_long_t steal_context_failed;
-       atomic_long_t nopfn;
-       atomic_long_t asid_new;
-       atomic_long_t asid_next;
-       atomic_long_t asid_wrap;
-       atomic_long_t asid_reuse;
-       atomic_long_t intr;
-       atomic_long_t intr_cbr;
-       atomic_long_t intr_tfh;
-       atomic_long_t intr_spurious;
-       atomic_long_t intr_mm_lock_failed;
-       atomic_long_t call_os;
-       atomic_long_t call_os_wait_queue;
-       atomic_long_t user_flush_tlb;
-       atomic_long_t user_unload_context;
-       atomic_long_t user_exception;
-       atomic_long_t set_context_option;
-       atomic_long_t check_context_retarget_intr;
-       atomic_long_t check_context_unload;
-       atomic_long_t tlb_dropin;
-       atomic_long_t tlb_preload_page;
-       atomic_long_t tlb_dropin_fail_no_asid;
-       atomic_long_t tlb_dropin_fail_upm;
-       atomic_long_t tlb_dropin_fail_invalid;
-       atomic_long_t tlb_dropin_fail_range_active;
-       atomic_long_t tlb_dropin_fail_idle;
-       atomic_long_t tlb_dropin_fail_fmm;
-       atomic_long_t tlb_dropin_fail_no_exception;
-       atomic_long_t tfh_stale_on_fault;
-       atomic_long_t mmu_invalidate_range;
-       atomic_long_t mmu_invalidate_page;
-       atomic_long_t flush_tlb;
-       atomic_long_t flush_tlb_gru;
-       atomic_long_t flush_tlb_gru_tgh;
-       atomic_long_t flush_tlb_gru_zero_asid;
-
-       atomic_long_t copy_gpa;
-       atomic_long_t read_gpa;
-
-       atomic_long_t mesq_receive;
-       atomic_long_t mesq_receive_none;
-       atomic_long_t mesq_send;
-       atomic_long_t mesq_send_failed;
-       atomic_long_t mesq_noop;
-       atomic_long_t mesq_send_unexpected_error;
-       atomic_long_t mesq_send_lb_overflow;
-       atomic_long_t mesq_send_qlimit_reached;
-       atomic_long_t mesq_send_amo_nacked;
-       atomic_long_t mesq_send_put_nacked;
-       atomic_long_t mesq_page_overflow;
-       atomic_long_t mesq_qf_locked;
-       atomic_long_t mesq_qf_noop_not_full;
-       atomic_long_t mesq_qf_switch_head_failed;
-       atomic_long_t mesq_qf_unexpected_error;
-       atomic_long_t mesq_noop_unexpected_error;
-       atomic_long_t mesq_noop_lb_overflow;
-       atomic_long_t mesq_noop_qlimit_reached;
-       atomic_long_t mesq_noop_amo_nacked;
-       atomic_long_t mesq_noop_put_nacked;
-       atomic_long_t mesq_noop_page_overflow;
+       atomic_long_unchecked_t vdata_alloc;
+       atomic_long_unchecked_t vdata_free;
+       atomic_long_unchecked_t gts_alloc;
+       atomic_long_unchecked_t gts_free;
+       atomic_long_unchecked_t gms_alloc;
+       atomic_long_unchecked_t gms_free;
+       atomic_long_unchecked_t gts_double_allocate;
+       atomic_long_unchecked_t assign_context;
+       atomic_long_unchecked_t assign_context_failed;
+       atomic_long_unchecked_t free_context;
+       atomic_long_unchecked_t load_user_context;
+       atomic_long_unchecked_t load_kernel_context;
+       atomic_long_unchecked_t lock_kernel_context;
+       atomic_long_unchecked_t unlock_kernel_context;
+       atomic_long_unchecked_t steal_user_context;
+       atomic_long_unchecked_t steal_kernel_context;
+       atomic_long_unchecked_t steal_context_failed;
+       atomic_long_unchecked_t nopfn;
+       atomic_long_unchecked_t asid_new;
+       atomic_long_unchecked_t asid_next;
+       atomic_long_unchecked_t asid_wrap;
+       atomic_long_unchecked_t asid_reuse;
+       atomic_long_unchecked_t intr;
+       atomic_long_unchecked_t intr_cbr;
+       atomic_long_unchecked_t intr_tfh;
+       atomic_long_unchecked_t intr_spurious;
+       atomic_long_unchecked_t intr_mm_lock_failed;
+       atomic_long_unchecked_t call_os;
+       atomic_long_unchecked_t call_os_wait_queue;
+       atomic_long_unchecked_t user_flush_tlb;
+       atomic_long_unchecked_t user_unload_context;
+       atomic_long_unchecked_t user_exception;
+       atomic_long_unchecked_t set_context_option;
+       atomic_long_unchecked_t check_context_retarget_intr;
+       atomic_long_unchecked_t check_context_unload;
+       atomic_long_unchecked_t tlb_dropin;
+       atomic_long_unchecked_t tlb_preload_page;
+       atomic_long_unchecked_t tlb_dropin_fail_no_asid;
+       atomic_long_unchecked_t tlb_dropin_fail_upm;
+       atomic_long_unchecked_t tlb_dropin_fail_invalid;
+       atomic_long_unchecked_t tlb_dropin_fail_range_active;
+       atomic_long_unchecked_t tlb_dropin_fail_idle;
+       atomic_long_unchecked_t tlb_dropin_fail_fmm;
+       atomic_long_unchecked_t tlb_dropin_fail_no_exception;
+       atomic_long_unchecked_t tfh_stale_on_fault;
+       atomic_long_unchecked_t mmu_invalidate_range;
+       atomic_long_unchecked_t mmu_invalidate_page;
+       atomic_long_unchecked_t flush_tlb;
+       atomic_long_unchecked_t flush_tlb_gru;
+       atomic_long_unchecked_t flush_tlb_gru_tgh;
+       atomic_long_unchecked_t flush_tlb_gru_zero_asid;
+
+       atomic_long_unchecked_t copy_gpa;
+       atomic_long_unchecked_t read_gpa;
+
+       atomic_long_unchecked_t mesq_receive;
+       atomic_long_unchecked_t mesq_receive_none;
+       atomic_long_unchecked_t mesq_send;
+       atomic_long_unchecked_t mesq_send_failed;
+       atomic_long_unchecked_t mesq_noop;
+       atomic_long_unchecked_t mesq_send_unexpected_error;
+       atomic_long_unchecked_t mesq_send_lb_overflow;
+       atomic_long_unchecked_t mesq_send_qlimit_reached;
+       atomic_long_unchecked_t mesq_send_amo_nacked;
+       atomic_long_unchecked_t mesq_send_put_nacked;
+       atomic_long_unchecked_t mesq_page_overflow;
+       atomic_long_unchecked_t mesq_qf_locked;
+       atomic_long_unchecked_t mesq_qf_noop_not_full;
+       atomic_long_unchecked_t mesq_qf_switch_head_failed;
+       atomic_long_unchecked_t mesq_qf_unexpected_error;
+       atomic_long_unchecked_t mesq_noop_unexpected_error;
+       atomic_long_unchecked_t mesq_noop_lb_overflow;
+       atomic_long_unchecked_t mesq_noop_qlimit_reached;
+       atomic_long_unchecked_t mesq_noop_amo_nacked;
+       atomic_long_unchecked_t mesq_noop_put_nacked;
+       atomic_long_unchecked_t mesq_noop_page_overflow;
 
 };
 
@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
        tghop_invalidate, mcsop_last};
 
 struct mcs_op_statistic {
-       atomic_long_t   count;
-       atomic_long_t   total;
+       atomic_long_unchecked_t count;
+       atomic_long_unchecked_t total;
        unsigned long   max;
 };
 
@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
 
 #define STAT(id)       do {                                            \
                                if (gru_options & OPT_STATS)            \
-                                       atomic_long_inc(&gru_stats.id); \
+                                       atomic_long_inc_unchecked(&gru_stats.id);       \
                        } while (0)
 
 #ifdef CONFIG_SGI_GRU_DEBUG
index c862cd4583cc93694747e191f5e3537e5767bfa5..0d176fed867dda777df205c0478a7ebb3bd5c46f 100644 (file)
@@ -288,7 +288,7 @@ struct xpc_interface {
                                        xpc_notify_func, void *);
        void (*received) (short, int, void *);
        enum xp_retval (*partid_to_nasids) (short, void *);
-};
+} __no_const;
 
 extern struct xpc_interface xpc_interface;
 
index 01be66d02ca8ce52c84b809fa55a7aeb6b219bc2..e3a0c7e22bc7be44c71a4ed3b20265964f2b4597 100644 (file)
@@ -78,13 +78,13 @@ xpc_notloaded(void)
 }
 
 struct xpc_interface xpc_interface = {
-       (void (*)(int))xpc_notloaded,
-       (void (*)(int))xpc_notloaded,
-       (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
-       (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
+       .connect = (void (*)(int))xpc_notloaded,
+       .disconnect = (void (*)(int))xpc_notloaded,
+       .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
+       .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
                           void *))xpc_notloaded,
-       (void (*)(short, int, void *))xpc_notloaded,
-       (enum xp_retval(*)(short, void *))xpc_notloaded
+       .received = (void (*)(short, int, void *))xpc_notloaded,
+       .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
 };
 EXPORT_SYMBOL_GPL(xpc_interface);
 
index b94d5f767703bcd083e9ccd43659b5db0777a797..7f494c516af09f23f1167cfadde092ea30abe726 100644 (file)
@@ -835,6 +835,7 @@ struct xpc_arch_operations {
        void (*received_payload) (struct xpc_channel *, void *);
        void (*notify_senders_of_disconnect) (struct xpc_channel *);
 };
+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
 
 /* struct xpc_partition act_state values (for XPC HB) */
 
@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
 /* found in xpc_main.c */
 extern struct device *xpc_part;
 extern struct device *xpc_chan;
-extern struct xpc_arch_operations xpc_arch_ops;
+extern xpc_arch_operations_no_const xpc_arch_ops;
 extern int xpc_disengage_timelimit;
 extern int xpc_disengage_timedout;
 extern int xpc_activate_IRQ_rcvd;
index 82dc5748f873b72ea355b517c606d59dc6235c31..8539ab27e9450289f394bdc6677ef93bd8e5aa67 100644 (file)
@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
        .notifier_call = xpc_system_die,
 };
 
-struct xpc_arch_operations xpc_arch_ops;
+xpc_arch_operations_no_const xpc_arch_ops;
 
 /*
  * Timer function to enforce the timelimit on the partition disengage.
@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
 
                if (((die_args->trapnr == X86_TRAP_MF) ||
                     (die_args->trapnr == X86_TRAP_XF)) &&
-                   !user_mode_vm(die_args->regs))
+                   !user_mode(die_args->regs))
                        xpc_die_deactivate();
 
                break;
index 4409d79ed650ee2a2de33475f0ecc686984ed79a..d7766d01cbecbcff442b6a85f6b9c1a583a9c756 100644 (file)
@@ -577,7 +577,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
        if (idata->ic.postsleep_min_us)
                usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
 
-       if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
+       if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
                err = -EFAULT;
                goto cmd_rel_host;
        }
index 0d0f7a271d635e711b64a5f1721b61828faad7df..45b8d600240e5a22bf9447fc27c158cd1c936da5 100644 (file)
@@ -276,5 +276,5 @@ struct dw_mci_drv_data {
        int             (*parse_dt)(struct dw_mci *host);
        int             (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
                                        struct dw_mci_tuning_data *tuning_data);
-};
+} __do_const;
 #endif /* _DW_MMC_H_ */
index 8232e9a02d407ca96495704fd266f02b5fd6fc8e..777600609fd74be85a2ec8a95af6d0f19e778250 100644 (file)
@@ -1635,7 +1635,9 @@ static int mmci_probe(struct amba_device *dev,
        mmc->caps |= MMC_CAP_CMD23;
 
        if (variant->busy_detect) {
-               mmci_ops.card_busy = mmci_card_busy;
+               pax_open_kernel();
+               *(void **)&mmci_ops.card_busy = mmci_card_busy;
+               pax_close_kernel();
                mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
                mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
                mmc->max_busy_timeout = 0;
index 7c71dcdcba8bdc4d5e59ff124e9f793f1dbf296d..74cb746bcdd0d57b9d2693f608cbd2948a48df48 100644 (file)
@@ -2120,7 +2120,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
 
        if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
                dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
-               omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
+               pax_open_kernel();
+               *(void **)&omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
+               pax_close_kernel();
        }
 
        pm_runtime_enable(host->dev);
index af1f7c0f95450e94a923146f4d53240b7ba3e821..00d368ad59bfb6aa599ccff52e66174d7288678c 100644 (file)
@@ -989,9 +989,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
                host->mmc->caps |= MMC_CAP_1_8V_DDR;
        }
 
-       if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
-               sdhci_esdhc_ops.platform_execute_tuning =
+       if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
+               pax_open_kernel();
+               *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
                                        esdhc_executing_tuning;
+               pax_close_kernel();
+       }
 
        if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
                writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
index c45b8932d8438086f36661b59aac2b74a2dc56f1..fba0144ef7f3aa130c8f9d7e82bab39d584b3a32 100644 (file)
@@ -590,9 +590,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
         * we can use overriding functions instead of default.
         */
        if (sc->no_divider) {
-               sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
-               sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
-               sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
+               pax_open_kernel();
+               *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
+               *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
+               *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
+               pax_close_kernel();
        }
 
        /* It supports additional host capabilities if needed */
index 423666b51efb990fab17827c0a850784cc887779..81ff5ebadda6bab722dfb047b410bc81baf91326 100644 (file)
@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
        size_t   totlen = 0, thislen;
        int      ret = 0;
        size_t   buflen = 0;
-       static char *buffer;
+       char *buffer;
 
        if (!ECCBUF_SIZE) {
                /* We should fall back to a general writev implementation.
index b3b7ca1bafb807f8828b3af853aa7886b87d7e90..5dd4634d82b84422dafedf01f9a183fcd5ad9470 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/mtd/mtd.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 
 #include "denali.h"
 
index 4f3851a24bb2df62cc749da60510ecd8aca0bfc7..f477a23c4dd41b7b1d5f1312e18a0997a71ff556 100644 (file)
@@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
 
        /* first try to map the upper buffer directly */
        if (virt_addr_valid(this->upper_buf) &&
-               !object_is_on_stack(this->upper_buf)) {
+               !object_starts_on_stack(this->upper_buf)) {
                sg_init_one(sgl, this->upper_buf, this->upper_len);
                ret = dma_map_sg(this->dev, sgl, 1, dr);
                if (ret == 0)
index 51b9d6af307f616193ebc006b492a5fecedd535b..52af9a7c40b3517d4a8ea325b7a2adf2af759848 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/errno.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/nftl.h>
index c23184a47fc4ea6c1a948ccb4d05f19d84059c4c..4115c419be751a5a081aaac241c2589deed05698 100644 (file)
@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
 #define SM_CIS_VENDOR_OFFSET 0x59
 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
 {
-       struct attribute_group *attr_group;
+       attribute_group_no_const *attr_group;
        struct attribute **attributes;
        struct sm_sysfs_attribute *vendor_attribute;
        char *vendor;
index 7b11243660113484f59d97e2b3ffc9d5b1cb5d98..b3278a301587bce7c709141fdb90f178d99d9d12 100644 (file)
@@ -585,7 +585,7 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
-struct rtnl_link_ops bond_link_ops __read_mostly = {
+struct rtnl_link_ops bond_link_ops = {
        .kind                   = "bond",
        .priv_size              = sizeof(struct bonding),
        .setup                  = bond_setup,
index b3b922adc0e4f68ed15ff34537c21e1bd7e5e81f..80bba38a438c6db526b37610f434427f9c337f57 100644 (file)
@@ -1444,7 +1444,7 @@ err:
        return -ENODEV;
 }
 
-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
+static struct rtnl_link_ops caif_hsi_link_ops = {
        .kind           = "cfhsi",
        .priv_size      = sizeof(struct cfhsi),
        .setup          = cfhsi_setup,
index 98d73aab52fe962888a675d1293ae69f9731f7d0..63ef9da206adf52a7b6fc6ffb648626b0cfceea6 100644 (file)
@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
 
 config CAN_FLEXCAN
        tristate "Support for Freescale FLEXCAN based chips"
-       depends on ARM || PPC
+       depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
        ---help---
          Say Y here if you want to support for Freescale FlexCAN.
 
index 847c1f813261d92a9188983141a79e2fddfcb3e7..3bed6071f929e6a828b62d82ae7d9ebf49619191 100644 (file)
@@ -578,6 +578,10 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
        skb->pkt_type = PACKET_BROADCAST;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 
+       skb_reset_mac_header(skb);
+       skb_reset_network_header(skb);
+       skb_reset_transport_header(skb);
+
        can_skb_reserve(skb);
        can_skb_prv(skb)->ifindex = dev->ifindex;
 
@@ -602,6 +606,10 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
        skb->pkt_type = PACKET_BROADCAST;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 
+       skb_reset_mac_header(skb);
+       skb_reset_network_header(skb);
+       skb_reset_transport_header(skb);
+
        can_skb_reserve(skb);
        can_skb_prv(skb)->ifindex = dev->ifindex;
 
@@ -950,7 +958,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
        return -EOPNOTSUPP;
 }
 
-static struct rtnl_link_ops can_link_ops __read_mostly = {
+static struct rtnl_link_ops can_link_ops = {
        .kind           = "can",
        .maxtype        = IFLA_CAN_MAX,
        .policy         = can_policy,
index 674f367087c54ac168d2c0283b1361a1c98d3696..ec3a31f4366b2cbd502d5538c116ac5b14d787d2 100644 (file)
@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
        dev->destructor         = free_netdev;
 }
 
-static struct rtnl_link_ops vcan_link_ops __read_mostly = {
+static struct rtnl_link_ops vcan_link_ops = {
        .kind   = "vcan",
        .setup  = vcan_setup,
 };
index 49adbf1b7574211dcd97db2c8cc970e427934c87..fff7ff81d8713c16c686883cb0ff87abdb704275 100644 (file)
@@ -164,7 +164,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
        return 0;
 }
 
-static struct rtnl_link_ops dummy_link_ops __read_mostly = {
+static struct rtnl_link_ops dummy_link_ops = {
        .kind           = DRV_NAME,
        .setup          = dummy_setup,
        .validate       = dummy_validate,
index 0443654f03398d1f50fb45e2f0e5524fd7be1330..4f0aa188a0ed3899f3fe096c7ff2ac41271fe9bc 100644 (file)
@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
        if (ax->plat->reg_offsets)
                ei_local->reg_offset = ax->plat->reg_offsets;
        else {
+               resource_size_t _mem_size = mem_size;
+               do_div(_mem_size, 0x18);
                ei_local->reg_offset = ax->reg_offsets;
                for (ret = 0; ret < 0x18; ret++)
-                       ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
+                       ax->reg_offsets[ret] = _mem_size * ret;
        }
 
        if (!request_mem_region(mem->start, mem_size, pdev->name)) {
index 760c72c6e2acd50ba8472e4b4dd77170c2c381d6..a99728cf3c1f0e5c1e6e275e6c88c6bfd5347c17 100644 (file)
@@ -1217,7 +1217,7 @@ static int tse_shutdown(struct net_device *dev)
        return 0;
 }
 
-static struct net_device_ops altera_tse_netdev_ops = {
+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
        .ndo_open               = tse_open,
        .ndo_stop               = tse_shutdown,
        .ndo_start_xmit         = tse_start_xmit,
@@ -1454,11 +1454,13 @@ static int altera_tse_probe(struct platform_device *pdev)
        ndev->netdev_ops = &altera_tse_netdev_ops;
        altera_tse_set_ethtool_ops(ndev);
 
+       pax_open_kernel();
        altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
 
        if (priv->hash_filter)
                altera_tse_netdev_ops.ndo_set_rx_mode =
                        tse_set_rx_mode_hashfilter;
+       pax_close_kernel();
 
        /* Scatter/gather IO is not supported,
         * so it is turned off
index 29a09271b64a39b71a46ac1d5beb5a6472160509..5a348e24904817f41dd500bb5bb476d32b5f7f49 100644 (file)
@@ -1122,14 +1122,14 @@ do {                                                                    \
  * operations, everything works on mask values.
  */
 #define XMDIO_READ(_pdata, _mmd, _reg)                                 \
-       ((_pdata)->hw_if.read_mmd_regs((_pdata), 0,                     \
+       ((_pdata)->hw_if->read_mmd_regs((_pdata), 0,                    \
                MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
 
 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask)                     \
        (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
 
 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val)                          \
-       ((_pdata)->hw_if.write_mmd_regs((_pdata), 0,                    \
+       ((_pdata)->hw_if->write_mmd_regs((_pdata), 0,                   \
                MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
 
 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val)              \
index 8a50b01c2686292b06e97dae11c21c57095f6844..39c1ad0f449dfc7138758842cf7fa9334b95ab8d 100644 (file)
@@ -187,7 +187,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
 
        memcpy(pdata->ets, ets, sizeof(*pdata->ets));
 
-       pdata->hw_if.config_dcb_tc(pdata);
+       pdata->hw_if->config_dcb_tc(pdata);
 
        return 0;
 }
@@ -226,7 +226,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
 
        memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
 
-       pdata->hw_if.config_dcb_pfc(pdata);
+       pdata->hw_if->config_dcb_pfc(pdata);
 
        return 0;
 }
index a50891f521978ff67d55959fbfab1b63da762f4c..b26fe24076ece36c92f576b1842e6287cf48b0ec 100644 (file)
@@ -347,7 +347,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
 
 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
 {
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        struct xgbe_ring_data *rdata;
@@ -388,7 +388,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
 
 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
 {
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        struct xgbe_ring_desc *rdesc;
@@ -624,7 +624,7 @@ err_out:
 static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
 {
        struct xgbe_prv_data *pdata = channel->pdata;
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;
        int i;
@@ -650,17 +650,12 @@ static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
        DBGPR("<--xgbe_realloc_rx_buffer\n");
 }
 
-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
-{
-       DBGPR("-->xgbe_init_function_ptrs_desc\n");
-
-       desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
-       desc_if->free_ring_resources = xgbe_free_ring_resources;
-       desc_if->map_tx_skb = xgbe_map_tx_skb;
-       desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
-       desc_if->unmap_rdata = xgbe_unmap_rdata;
-       desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
-       desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
-
-       DBGPR("<--xgbe_init_function_ptrs_desc\n");
-}
+const struct xgbe_desc_if default_xgbe_desc_if = {
+       .alloc_ring_resources = xgbe_alloc_ring_resources,
+       .free_ring_resources = xgbe_free_ring_resources,
+       .map_tx_skb = xgbe_map_tx_skb,
+       .realloc_rx_buffer = xgbe_realloc_rx_buffer,
+       .unmap_rdata = xgbe_unmap_rdata,
+       .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
+       .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
+};
index 4c66cd1d1e604f19a36e65fc88b6392ecfa66d3b..1a20aab0867118bfa46e6b05d9859f9d8f0592e4 100644 (file)
@@ -2703,7 +2703,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
 
 static int xgbe_init(struct xgbe_prv_data *pdata)
 {
-       struct xgbe_desc_if *desc_if = &pdata->desc_if;
+       struct xgbe_desc_if *desc_if = pdata->desc_if;
        int ret;
 
        DBGPR("-->xgbe_init\n");
@@ -2767,108 +2767,103 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
        return 0;
 }
 
-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
-{
-       DBGPR("-->xgbe_init_function_ptrs\n");
-
-       hw_if->tx_complete = xgbe_tx_complete;
+const struct xgbe_hw_if default_xgbe_hw_if = {
+       .tx_complete = xgbe_tx_complete,
 
-       hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
-       hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
-       hw_if->add_mac_addresses = xgbe_add_mac_addresses;
-       hw_if->set_mac_address = xgbe_set_mac_address;
+       .set_promiscuous_mode = xgbe_set_promiscuous_mode,
+       .set_all_multicast_mode = xgbe_set_all_multicast_mode,
+       .add_mac_addresses = xgbe_add_mac_addresses,
+       .set_mac_address = xgbe_set_mac_address,
 
-       hw_if->enable_rx_csum = xgbe_enable_rx_csum;
-       hw_if->disable_rx_csum = xgbe_disable_rx_csum;
+       .enable_rx_csum = xgbe_enable_rx_csum,
+       .disable_rx_csum = xgbe_disable_rx_csum,
 
-       hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
-       hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
-       hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
-       hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
-       hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
+       .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
+       .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
+       .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
+       .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
+       .update_vlan_hash_table = xgbe_update_vlan_hash_table,
 
-       hw_if->read_mmd_regs = xgbe_read_mmd_regs;
-       hw_if->write_mmd_regs = xgbe_write_mmd_regs;
+       .read_mmd_regs = xgbe_read_mmd_regs,
+       .write_mmd_regs = xgbe_write_mmd_regs,
 
-       hw_if->set_gmii_speed = xgbe_set_gmii_speed;
-       hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
-       hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
+       .set_gmii_speed = xgbe_set_gmii_speed,
+       .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
+       .set_xgmii_speed = xgbe_set_xgmii_speed,
 
-       hw_if->enable_tx = xgbe_enable_tx;
-       hw_if->disable_tx = xgbe_disable_tx;
-       hw_if->enable_rx = xgbe_enable_rx;
-       hw_if->disable_rx = xgbe_disable_rx;
+       .enable_tx = xgbe_enable_tx,
+       .disable_tx = xgbe_disable_tx,
+       .enable_rx = xgbe_enable_rx,
+       .disable_rx = xgbe_disable_rx,
 
-       hw_if->powerup_tx = xgbe_powerup_tx;
-       hw_if->powerdown_tx = xgbe_powerdown_tx;
-       hw_if->powerup_rx = xgbe_powerup_rx;
-       hw_if->powerdown_rx = xgbe_powerdown_rx;
+       .powerup_tx = xgbe_powerup_tx,
+       .powerdown_tx = xgbe_powerdown_tx,
+       .powerup_rx = xgbe_powerup_rx,
+       .powerdown_rx = xgbe_powerdown_rx,
 
-       hw_if->dev_xmit = xgbe_dev_xmit;
-       hw_if->dev_read = xgbe_dev_read;
-       hw_if->enable_int = xgbe_enable_int;
-       hw_if->disable_int = xgbe_disable_int;
-       hw_if->init = xgbe_init;
-       hw_if->exit = xgbe_exit;
+       .dev_xmit = xgbe_dev_xmit,
+       .dev_read = xgbe_dev_read,
+       .enable_int = xgbe_enable_int,
+       .disable_int = xgbe_disable_int,
+       .init = xgbe_init,
+       .exit = xgbe_exit,
 
        /* Descriptor related Sequences have to be initialized here */
-       hw_if->tx_desc_init = xgbe_tx_desc_init;
-       hw_if->rx_desc_init = xgbe_rx_desc_init;
-       hw_if->tx_desc_reset = xgbe_tx_desc_reset;
-       hw_if->rx_desc_reset = xgbe_rx_desc_reset;
-       hw_if->is_last_desc = xgbe_is_last_desc;
-       hw_if->is_context_desc = xgbe_is_context_desc;
-       hw_if->tx_start_xmit = xgbe_tx_start_xmit;
+       .tx_desc_init = xgbe_tx_desc_init,
+       .rx_desc_init = xgbe_rx_desc_init,
+       .tx_desc_reset = xgbe_tx_desc_reset,
+       .rx_desc_reset = xgbe_rx_desc_reset,
+       .is_last_desc = xgbe_is_last_desc,
+       .is_context_desc = xgbe_is_context_desc,
+       .tx_start_xmit = xgbe_tx_start_xmit,
 
        /* For FLOW ctrl */
-       hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
-       hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
+       .config_tx_flow_control = xgbe_config_tx_flow_control,
+       .config_rx_flow_control = xgbe_config_rx_flow_control,
 
        /* For RX coalescing */
-       hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
-       hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
-       hw_if->usec_to_riwt = xgbe_usec_to_riwt;
-       hw_if->riwt_to_usec = xgbe_riwt_to_usec;
+       .config_rx_coalesce = xgbe_config_rx_coalesce,
+       .config_tx_coalesce = xgbe_config_tx_coalesce,
+       .usec_to_riwt = xgbe_usec_to_riwt,
+       .riwt_to_usec = xgbe_riwt_to_usec,
 
        /* For RX and TX threshold config */
-       hw_if->config_rx_threshold = xgbe_config_rx_threshold;
-       hw_if->config_tx_threshold = xgbe_config_tx_threshold;
+       .config_rx_threshold = xgbe_config_rx_threshold,
+       .config_tx_threshold = xgbe_config_tx_threshold,
 
        /* For RX and TX Store and Forward Mode config */
-       hw_if->config_rsf_mode = xgbe_config_rsf_mode;
-       hw_if->config_tsf_mode = xgbe_config_tsf_mode;
+       .config_rsf_mode = xgbe_config_rsf_mode,
+       .config_tsf_mode = xgbe_config_tsf_mode,
 
        /* For TX DMA Operating on Second Frame config */
-       hw_if->config_osp_mode = xgbe_config_osp_mode;
+       .config_osp_mode = xgbe_config_osp_mode,
 
        /* For RX and TX PBL config */
-       hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
-       hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
-       hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
-       hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
-       hw_if->config_pblx8 = xgbe_config_pblx8;
+       .config_rx_pbl_val = xgbe_config_rx_pbl_val,
+       .get_rx_pbl_val = xgbe_get_rx_pbl_val,
+       .config_tx_pbl_val = xgbe_config_tx_pbl_val,
+       .get_tx_pbl_val = xgbe_get_tx_pbl_val,
+       .config_pblx8 = xgbe_config_pblx8,
 
        /* For MMC statistics support */
-       hw_if->tx_mmc_int = xgbe_tx_mmc_int;
-       hw_if->rx_mmc_int = xgbe_rx_mmc_int;
-       hw_if->read_mmc_stats = xgbe_read_mmc_stats;
+       .tx_mmc_int = xgbe_tx_mmc_int,
+       .rx_mmc_int = xgbe_rx_mmc_int,
+       .read_mmc_stats = xgbe_read_mmc_stats,
 
        /* For PTP config */
-       hw_if->config_tstamp = xgbe_config_tstamp;
-       hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
-       hw_if->set_tstamp_time = xgbe_set_tstamp_time;
-       hw_if->get_tstamp_time = xgbe_get_tstamp_time;
-       hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
+       .config_tstamp = xgbe_config_tstamp,
+       .update_tstamp_addend = xgbe_update_tstamp_addend,
+       .set_tstamp_time = xgbe_set_tstamp_time,
+       .get_tstamp_time = xgbe_get_tstamp_time,
+       .get_tx_tstamp = xgbe_get_tx_tstamp,
 
        /* For Data Center Bridging config */
-       hw_if->config_dcb_tc = xgbe_config_dcb_tc;
-       hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
+       .config_dcb_tc = xgbe_config_dcb_tc,
+       .config_dcb_pfc = xgbe_config_dcb_pfc,
 
        /* For Receive Side Scaling */
-       hw_if->enable_rss = xgbe_enable_rss;
-       hw_if->disable_rss = xgbe_disable_rss;
-       hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
-       hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
-
-       DBGPR("<--xgbe_init_function_ptrs\n");
-}
+       .enable_rss = xgbe_enable_rss,
+       .disable_rss = xgbe_disable_rss,
+       .set_rss_hash_key = xgbe_set_rss_hash_key,
+       .set_rss_lookup_table = xgbe_set_rss_lookup_table,
+};
index e5ffb2ccb67d1d053d47f0f6cc219e86fb6b086b..e56d30b027c91deb07d1a0a949100987faeac097 100644 (file)
@@ -239,7 +239,7 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
                 * support, tell it now
                 */
                if (ring->tx.xmit_more)
-                       pdata->hw_if.tx_start_xmit(channel, ring);
+                       pdata->hw_if->tx_start_xmit(channel, ring);
 
                return NETDEV_TX_BUSY;
        }
@@ -267,7 +267,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
 
 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
 {
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
        struct xgbe_channel *channel;
        enum xgbe_int int_id;
        unsigned int i;
@@ -289,7 +289,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
 
 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
 {
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
        struct xgbe_channel *channel;
        enum xgbe_int int_id;
        unsigned int i;
@@ -312,7 +312,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
 static irqreturn_t xgbe_isr(int irq, void *data)
 {
        struct xgbe_prv_data *pdata = data;
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
        struct xgbe_channel *channel;
        unsigned int dma_isr, dma_ch_isr;
        unsigned int mac_isr, mac_tssr;
@@ -611,7 +611,7 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
 
 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
 {
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
 
        DBGPR("-->xgbe_init_tx_coalesce\n");
 
@@ -625,7 +625,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
 
 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
 {
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
 
        DBGPR("-->xgbe_init_rx_coalesce\n");
 
@@ -639,7 +639,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
 
 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
 {
-       struct xgbe_desc_if *desc_if = &pdata->desc_if;
+       struct xgbe_desc_if *desc_if = pdata->desc_if;
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        struct xgbe_ring_data *rdata;
@@ -664,7 +664,7 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
 
 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
 {
-       struct xgbe_desc_if *desc_if = &pdata->desc_if;
+       struct xgbe_desc_if *desc_if = pdata->desc_if;
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        struct xgbe_ring_data *rdata;
@@ -690,7 +690,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
 static void xgbe_adjust_link(struct net_device *netdev)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
        struct phy_device *phydev = pdata->phydev;
        int new_state = 0;
 
@@ -798,7 +798,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
        unsigned long flags;
 
        DBGPR("-->xgbe_powerdown\n");
@@ -836,7 +836,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
        unsigned long flags;
 
        DBGPR("-->xgbe_powerup\n");
@@ -873,7 +873,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
 
 static int xgbe_start(struct xgbe_prv_data *pdata)
 {
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
        struct net_device *netdev = pdata->netdev;
 
        DBGPR("-->xgbe_start\n");
@@ -899,7 +899,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 
 static void xgbe_stop(struct xgbe_prv_data *pdata)
 {
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
        struct xgbe_channel *channel;
        struct net_device *netdev = pdata->netdev;
        struct netdev_queue *txq;
@@ -932,7 +932,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
 {
        struct xgbe_channel *channel;
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
        unsigned int i;
 
        DBGPR("-->xgbe_restart_dev\n");
@@ -1135,7 +1135,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
                return -ERANGE;
        }
 
-       pdata->hw_if.config_tstamp(pdata, mac_tscr);
+       pdata->hw_if->config_tstamp(pdata, mac_tscr);
 
        memcpy(&pdata->tstamp_config, &config, sizeof(config));
 
@@ -1284,8 +1284,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
 static int xgbe_open(struct net_device *netdev)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       struct xgbe_desc_if *desc_if = &pdata->desc_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
+       struct xgbe_desc_if *desc_if = pdata->desc_if;
        struct xgbe_channel *channel = NULL;
        unsigned int i = 0;
        int ret;
@@ -1400,8 +1400,8 @@ err_phy_init:
 static int xgbe_close(struct net_device *netdev)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       struct xgbe_desc_if *desc_if = &pdata->desc_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
+       struct xgbe_desc_if *desc_if = pdata->desc_if;
        struct xgbe_channel *channel;
        unsigned int i;
 
@@ -1442,8 +1442,8 @@ static int xgbe_close(struct net_device *netdev)
 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       struct xgbe_desc_if *desc_if = &pdata->desc_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
+       struct xgbe_desc_if *desc_if = pdata->desc_if;
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        struct xgbe_packet_data *packet;
@@ -1518,7 +1518,7 @@ tx_netdev_return:
 static void xgbe_set_rx_mode(struct net_device *netdev)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
        unsigned int pr_mode, am_mode;
 
        DBGPR("-->xgbe_set_rx_mode\n");
@@ -1537,7 +1537,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
        struct sockaddr *saddr = addr;
 
        DBGPR("-->xgbe_set_mac_address\n");
@@ -1604,7 +1604,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
 
        DBGPR("-->%s\n", __func__);
 
-       pdata->hw_if.read_mmc_stats(pdata);
+       pdata->hw_if->read_mmc_stats(pdata);
 
        s->rx_packets = pstats->rxframecount_gb;
        s->rx_bytes = pstats->rxoctetcount_gb;
@@ -1631,7 +1631,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
                                u16 vid)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
 
        DBGPR("-->%s\n", __func__);
 
@@ -1647,7 +1647,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
                                 u16 vid)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
 
        DBGPR("-->%s\n", __func__);
 
@@ -1713,7 +1713,7 @@ static int xgbe_set_features(struct net_device *netdev,
                             netdev_features_t features)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
        netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
        int ret = 0;
 
@@ -1778,7 +1778,7 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
 static void xgbe_rx_refresh(struct xgbe_channel *channel)
 {
        struct xgbe_prv_data *pdata = channel->pdata;
-       struct xgbe_desc_if *desc_if = &pdata->desc_if;
+       struct xgbe_desc_if *desc_if = pdata->desc_if;
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;
 
@@ -1819,8 +1819,8 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
 static int xgbe_tx_poll(struct xgbe_channel *channel)
 {
        struct xgbe_prv_data *pdata = channel->pdata;
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       struct xgbe_desc_if *desc_if = &pdata->desc_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
+       struct xgbe_desc_if *desc_if = pdata->desc_if;
        struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
@@ -1891,7 +1891,7 @@ unlock:
 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 {
        struct xgbe_prv_data *pdata = channel->pdata;
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_packet_data *packet;
index ebf489351555b19411d055a12c9812c08f73dd1c..28108c7d1349c906b14092f915ad7cba08253566 100644 (file)
@@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
 
        DBGPR("-->%s\n", __func__);
 
-       pdata->hw_if.read_mmc_stats(pdata);
+       pdata->hw_if->read_mmc_stats(pdata);
        for (i = 0; i < XGBE_STATS_COUNT; i++) {
                stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
                *data++ = *(u64 *)stat;
@@ -378,7 +378,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
                             struct ethtool_coalesce *ec)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
        unsigned int riwt;
 
        DBGPR("-->xgbe_get_coalesce\n");
@@ -401,7 +401,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
                             struct ethtool_coalesce *ec)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
        unsigned int rx_frames, rx_riwt, rx_usecs;
        unsigned int tx_frames, tx_usecs;
 
index dbd3850b8b0a8f61053026a887e5a90f7a3aef53..4e31b38882e490665a81043359a6bd26331638a1 100644 (file)
@@ -155,12 +155,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
        DBGPR("<--xgbe_default_config\n");
 }
 
-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
-{
-       xgbe_init_function_ptrs_dev(&pdata->hw_if);
-       xgbe_init_function_ptrs_desc(&pdata->desc_if);
-}
-
 static int xgbe_probe(struct platform_device *pdev)
 {
        struct xgbe_prv_data *pdata;
@@ -281,9 +275,8 @@ static int xgbe_probe(struct platform_device *pdev)
        netdev->base_addr = (unsigned long)pdata->xgmac_regs;
 
        /* Set all the function pointers */
-       xgbe_init_all_fptrs(pdata);
-       hw_if = &pdata->hw_if;
-       desc_if = &pdata->desc_if;
+       hw_if = pdata->hw_if = &default_xgbe_hw_if;
+       desc_if = pdata->desc_if = &default_xgbe_desc_if;
 
        /* Issue software reset to device */
        hw_if->exit(pdata);
index 363b210560f332e08837e9bfb9426dcea62707a4..b2413896e6d05c03b03a8f984603f5cd9e0a547b 100644 (file)
 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
 {
        struct xgbe_prv_data *pdata = mii->priv;
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
        int mmd_data;
 
        DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
@@ -143,7 +143,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
                           u16 mmd_val)
 {
        struct xgbe_prv_data *pdata = mii->priv;
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_hw_if *hw_if = pdata->hw_if;
        int mmd_data = mmd_val;
 
        DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
index a1bf9d1cdae1e9d5dc666cf7b37f6320d1b2adff..84adcab3eaf3f036929bc5efa3abdde457aa8732 100644 (file)
@@ -129,7 +129,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
                                                   tstamp_cc);
        u64 nsec;
 
-       nsec = pdata->hw_if.get_tstamp_time(pdata);
+       nsec = pdata->hw_if->get_tstamp_time(pdata);
 
        return nsec;
 }
@@ -158,7 +158,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
 
        spin_lock_irqsave(&pdata->tstamp_lock, flags);
 
-       pdata->hw_if.update_tstamp_addend(pdata, addend);
+       pdata->hw_if->update_tstamp_addend(pdata, addend);
 
        spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
 
index f9ec762ac3f037e780c3f44f70fec2e5bf06577b..988c969dfe8ebd14551647b5e83c7a22f1eb4dc7 100644 (file)
@@ -668,8 +668,8 @@ struct xgbe_prv_data {
        int dev_irq;
        unsigned int per_channel_irq;
 
-       struct xgbe_hw_if hw_if;
-       struct xgbe_desc_if desc_if;
+       const struct xgbe_hw_if *hw_if;
+       const struct xgbe_desc_if *desc_if;
 
        /* AXI DMA settings */
        unsigned int axdomain;
@@ -787,6 +787,9 @@ struct xgbe_prv_data {
 #endif
 };
 
+extern const struct xgbe_hw_if default_xgbe_hw_if;
+extern const struct xgbe_desc_if default_xgbe_desc_if;
+
 /* Function prototypes*/
 
 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
index adcacda7af7b10e70b053821acc1e2dfe722732a..fa6e0ae28f8d410eda0be5eea379880fa5d3f431 100644 (file)
@@ -1065,7 +1065,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
 {
        /* RX_MODE controlling object */
-       bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
+       bnx2x_init_rx_mode_obj(bp);
 
        /* multicast configuration controlling object */
        bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
index 07cdf9bbffef2ee85ff3d589f33c1a10aea405d5..b08ecc7c01ef09803d149efa45bb1f109254063b 100644 (file)
@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
        return rc;
 }
 
-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
-                           struct bnx2x_rx_mode_obj *o)
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
 {
        if (CHIP_IS_E1x(bp)) {
-               o->wait_comp      = bnx2x_empty_rx_mode_wait;
-               o->config_rx_mode = bnx2x_set_rx_mode_e1x;
+               bp->rx_mode_obj.wait_comp      = bnx2x_empty_rx_mode_wait;
+               bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
        } else {
-               o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
-               o->config_rx_mode = bnx2x_set_rx_mode_e2;
+               bp->rx_mode_obj.wait_comp      = bnx2x_wait_rx_mode_comp_e2;
+               bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
        }
 }
 
index 86baecb7c60c41a3cf096ad884efe674fb9221b3..ff3bb46bfe310ae9520b9802c796f2c9ca241c1a 100644 (file)
@@ -1411,8 +1411,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
 
 /********************* RX MODE ****************/
 
-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
-                           struct bnx2x_rx_mode_obj *o);
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
 
 /**
  * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
index 31c9f829595384cb843e299c6b51054101d8f6c6..e65e986c4e6454653060d84c5c4311d1a98ebcc8 100644 (file)
 #define  CHIPREV_ID_5750_A0             0x4000
 #define  CHIPREV_ID_5750_A1             0x4001
 #define  CHIPREV_ID_5750_A3             0x4003
+#define  CHIPREV_ID_5750_C1             0x4201
 #define  CHIPREV_ID_5750_C2             0x4202
 #define  CHIPREV_ID_5752_A0_HW          0x5000
 #define  CHIPREV_ID_5752_A0             0x6000
index 903466ef41c06ed4f9812b754f245f1c32644b2f..b285864c8a0a30e41e1e72ed489faaba4c4c8729 100644 (file)
@@ -1693,10 +1693,10 @@ bna_cb_ioceth_reset(void *arg)
 }
 
 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
-       bna_cb_ioceth_enable,
-       bna_cb_ioceth_disable,
-       bna_cb_ioceth_hbfail,
-       bna_cb_ioceth_reset
+       .enable_cbfn = bna_cb_ioceth_enable,
+       .disable_cbfn = bna_cb_ioceth_disable,
+       .hbfail_cbfn = bna_cb_ioceth_hbfail,
+       .reset_cbfn = bna_cb_ioceth_reset
 };
 
 static void bna_attr_init(struct bna_ioceth *ioceth)
index 8cffcdfd5678205e9bf9183e655f462a45233007..aadf0431ecebe1d91fb1c6b566821078276dc033 100644 (file)
@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
  */
 struct l2t_skb_cb {
        arp_failure_handler_func arp_failure_handler;
-};
+} __no_const;
 
 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
 
index ccf3436024bc8ce8ffdc814553469a523db73c92..b720d773117f40e1b8291fc2b23de7ba06be3b74 100644 (file)
@@ -2277,7 +2277,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
 
        int i;
        struct adapter *ap = netdev2adap(dev);
-       static const unsigned int *reg_ranges;
+       const unsigned int *reg_ranges;
        int arr_size = 0, buf_size = 0;
 
        if (is_t4(ap->params.chip)) {
index badff181e719692a9a94b2a5ba1c792fc2bf18fc..e15c4ece4d0128a5e0b9716c93dc01f55b828854 100644 (file)
@@ -5373,7 +5373,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        for (i=0; i<ETH_ALEN; i++) {
            tmp.addr[i] = dev->dev_addr[i];
        }
-       if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
+       if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
        break;
 
     case DE4X5_SET_HWADDR:           /* Set the hardware address */
@@ -5413,7 +5413,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        spin_lock_irqsave(&lp->lock, flags);
        memcpy(&statbuf, &lp->pktStats, ioc->len);
        spin_unlock_irqrestore(&lp->lock, flags);
-       if (copy_to_user(ioc->data, &statbuf, ioc->len))
+       if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
                return -EFAULT;
        break;
     }
index d48806b5cd8889457f28062336d7c4bf308af6a1..41cd80f1a1a7350c642fa7408ffee81654bf71b7 100644 (file)
@@ -537,7 +537,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
 
        if (wrapped)
                newacc += 65536;
-       ACCESS_ONCE(*acc) = newacc;
+       ACCESS_ONCE_RW(*acc) = newacc;
 }
 
 static void populate_erx_stats(struct be_adapter *adapter,
index 6d0c5d5eea6dd21dccd26445ee87addbfb606aa1..55be363df2d6f40a6204914e90b092ab5fa76bbc 100644 (file)
@@ -30,6 +30,8 @@
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
 #include <net/ip.h>
 
 #include "ftgmac100.h"
index dce5f7b7f7729225d9bb9456a8c5d7e69be3bb2a..243346603295f18fa30a8695470f5efac932ef5c 100644 (file)
@@ -31,6 +31,8 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
 
 #include "ftmac100.h"
 
index 6d1ec926aa3713a6ebc40fa52aa2ee873f8292f4..4d5d97df5a90cf1c9d3fb7ef5ac4584d39c53bec 100644 (file)
@@ -407,7 +407,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
        wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
 
        /* Update the base adjustement value. */
-       ACCESS_ONCE(pf->ptp_base_adj) = incval;
+       ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
        smp_mb(); /* Force the above update. */
 }
 
index 5fd4b5271f9a19b5df9b06ebb8a1b81fbdbe8eb1..87aa34bcc86c8ad5efef0969d5d7aa7a4064073e 100644 (file)
@@ -794,7 +794,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
        }
 
        /* update the base incval used to calculate frequency adjustment */
-       ACCESS_ONCE(adapter->base_incval) = incval;
+       ACCESS_ONCE_RW(adapter->base_incval) = incval;
        smp_mb();
 
        /* need lock to prevent incorrect read while modifying cyclecounter */
index e3357bf523df866222bdabdd0ec4c31cb24bb2a7..d4d534899111e07fc9d9b5c368dc80e728ef4f7e 100644 (file)
@@ -466,8 +466,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
        wmb();
 
        /* we want to dirty this cache line once */
-       ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
-       ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
+       ACCESS_ONCE_RW(ring->last_nr_txbb) = last_nr_txbb;
+       ACCESS_ONCE_RW(ring->cons) = ring_cons + txbbs_skipped;
 
        netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
 
index 2bbd01fcb9b019eaeabc1519a98799e65ea1b51a..e8baa6423be267bf60a45da0fdc12f2f3e48f176 100644 (file)
@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
        struct __vxge_hw_fifo *fifo;
        struct vxge_hw_fifo_config *config;
        u32 txdl_size, txdl_per_memblock;
-       struct vxge_hw_mempool_cbs fifo_mp_callback;
+       static struct vxge_hw_mempool_cbs fifo_mp_callback = {
+               .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
+       };
+
        struct __vxge_hw_virtualpath *vpath;
 
        if ((vp == NULL) || (attr == NULL)) {
@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
                goto exit;
        }
 
-       fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
-
        fifo->mempool =
                __vxge_hw_mempool_create(vpath->hldev,
                        fifo->config->memblock_size,
index 2bb48d57e7a51856225b7cbb7fa7bb0ebca7e510..d1a865d7e5ead7dfc8c36246a565601d2a695294 100644 (file)
@@ -2324,7 +2324,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
                max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
        } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
                ahw->nic_mode = QLCNIC_DEFAULT_MODE;
-               adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
+               pax_open_kernel();
+               *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
+               pax_close_kernel();
                ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
                max_sds_rings = QLCNIC_MAX_SDS_RINGS;
                max_tx_rings = QLCNIC_MAX_TX_RINGS;
index be7d7a62cc0d8c885c1f669f6c01f0f3a2259ea0..a8983f86d26bc9475f0ec317994f66ceed8ccfe4 100644 (file)
@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
        case QLCNIC_NON_PRIV_FUNC:
                ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
                ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
-               nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
+               pax_open_kernel();
+               *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
+               pax_close_kernel();
                break;
        case QLCNIC_PRIV_FUNC:
                ahw->op_mode = QLCNIC_PRIV_FUNC;
                ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
-               nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
+               pax_open_kernel();
+               *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
+               pax_close_kernel();
                break;
        case QLCNIC_MGMT_FUNC:
                ahw->op_mode = QLCNIC_MGMT_FUNC;
                ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
-               nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
+               pax_open_kernel();
+               *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
+               pax_close_kernel();
                break;
        default:
                dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
index c9f57fb84b9eb47215f0cc21a680dce46d253e56..208bdc16eb0fd961e5a470e9b7eddb352cfba21b 100644 (file)
@@ -1285,7 +1285,7 @@ flash_temp:
 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
 {
        struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
-       static const struct qlcnic_dump_operations *fw_dump_ops;
+       const struct qlcnic_dump_operations *fw_dump_ops;
        struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
        u32 entry_offset, dump, no_entries, buf_offset = 0;
        int i, k, ops_cnt, ops_index, dump_size = 0;
index 2e2cf80e7135831a0f53d1f662f404cded3b45f5..ebc796d7493b673b108f5833fd4b9ab59244100e 100644 (file)
@@ -788,22 +788,22 @@ struct rtl8169_private {
        struct mdio_ops {
                void (*write)(struct rtl8169_private *, int, int);
                int (*read)(struct rtl8169_private *, int);
-       } mdio_ops;
+       } __no_const mdio_ops;
 
        struct pll_power_ops {
                void (*down)(struct rtl8169_private *);
                void (*up)(struct rtl8169_private *);
-       } pll_power_ops;
+       } __no_const pll_power_ops;
 
        struct jumbo_ops {
                void (*enable)(struct rtl8169_private *);
                void (*disable)(struct rtl8169_private *);
-       } jumbo_ops;
+       } __no_const jumbo_ops;
 
        struct csi_ops {
                void (*write)(struct rtl8169_private *, int, int);
                u32 (*read)(struct rtl8169_private *, int);
-       } csi_ops;
+       } __no_const csi_ops;
 
        int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
        int (*get_settings)(struct net_device *, struct ethtool_cmd *);
index 6b861e3de4b0d0655879e5bb5740855d6b9c251d..204ac866a304e4e11d376b465d2a838cd47aae68 100644 (file)
@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
                       ptp->start.dma_addr);
 
        /* Clear flag that signals MC ready */
-       ACCESS_ONCE(*start) = 0;
+       ACCESS_ONCE_RW(*start) = 0;
        rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
                                MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
        EFX_BUG_ON_PARANOID(rc);
index 08c483bd2ec7bd94d5434f9567c75f609ce27d35..2c4a553f7a486b004e7d20f01f32fbae4ec61b47 100644 (file)
@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
 
        writel(value, ioaddr + MMC_CNTRL);
 
-       pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
-                MMC_CNTRL, value);
+//     pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
+//              MMC_CNTRL, value);
 }
 
 /* To mask all all interrupts.*/
index 384ca4f4de4a0e6ee6b053440937d96a272c4850..dd7d4f951666467e1f08aeceee8e28ac0ed6f1d2 100644 (file)
@@ -171,7 +171,7 @@ struct rndis_device {
        enum rndis_device_state state;
        bool link_state;
        bool link_change;
-       atomic_t new_req_id;
+       atomic_unchecked_t new_req_id;
 
        spinlock_t request_lock;
        struct list_head req_list;
index ec0c40a8f653cb80e7c8a591db8975b79e4d6066..c9e42eb5ead930702b6201a0fbf0a439c717a3fb 100644 (file)
@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
         * template
         */
        set = &rndis_msg->msg.set_req;
-       set->req_id = atomic_inc_return(&dev->new_req_id);
+       set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
 
        /* Add to the request list */
        spin_lock_irqsave(&dev->request_lock, flags);
@@ -912,7 +912,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
 
        /* Setup the rndis set */
        halt = &request->request_msg.msg.halt_req;
-       halt->req_id = atomic_inc_return(&dev->new_req_id);
+       halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
 
        /* Ignore return since this msg is optional. */
        rndis_filter_send_request(dev, request);
index 34f846b4bd0574a8168d669c408181e1086d3386..4a0d5b17523e4f4060f13a288d8e4c31165130d6 100644 (file)
@@ -253,7 +253,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
        return 0;
 }
 
-static struct rtnl_link_ops ifb_link_ops __read_mostly = {
+static struct rtnl_link_ops ifb_link_ops = {
        .kind           = "ifb",
        .priv_size      = sizeof(struct ifb_private),
        .setup          = ifb_setup,
index 612e0731142d29aef8cca9e882ef66e237fb4960..a9f5eda3ce218ca94feb08df71e61bb566814118 100644 (file)
@@ -335,7 +335,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
 free_nskb:
        kfree_skb(nskb);
 err:
-       atomic_long_inc(&skb->dev->rx_dropped);
+       atomic_long_inc_unchecked(&skb->dev->rx_dropped);
 }
 
 static void macvlan_flush_sources(struct macvlan_port *port,
@@ -1459,13 +1459,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
 int macvlan_link_register(struct rtnl_link_ops *ops)
 {
        /* common fields */
-       ops->priv_size          = sizeof(struct macvlan_dev);
-       ops->validate           = macvlan_validate;
-       ops->maxtype            = IFLA_MACVLAN_MAX;
-       ops->policy             = macvlan_policy;
-       ops->changelink         = macvlan_changelink;
-       ops->get_size           = macvlan_get_size;
-       ops->fill_info          = macvlan_fill_info;
+       pax_open_kernel();
+       *(size_t *)&ops->priv_size      = sizeof(struct macvlan_dev);
+       *(void **)&ops->validate        = macvlan_validate;
+       *(int *)&ops->maxtype           = IFLA_MACVLAN_MAX;
+       *(const void **)&ops->policy    = macvlan_policy;
+       *(void **)&ops->changelink      = macvlan_changelink;
+       *(void **)&ops->get_size        = macvlan_get_size;
+       *(void **)&ops->fill_info       = macvlan_fill_info;
+       pax_close_kernel();
 
        return rtnl_link_register(ops);
 };
@@ -1545,7 +1547,7 @@ static int macvlan_device_event(struct notifier_block *unused,
        return NOTIFY_DONE;
 }
 
-static struct notifier_block macvlan_notifier_block __read_mostly = {
+static struct notifier_block macvlan_notifier_block = {
        .notifier_call  = macvlan_device_event,
 };
 
index 4d050ee0f8723c8479dab1c606a8010b49850041..012f6ddf34a823fae2e02e221eae7bf3b80c5c26 100644 (file)
@@ -436,7 +436,7 @@ static void macvtap_setup(struct net_device *dev)
        dev->tx_queue_len = TUN_READQ_SIZE;
 }
 
-static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
+static struct rtnl_link_ops macvtap_link_ops = {
        .kind           = "macvtap",
        .setup          = macvtap_setup,
        .newlink        = macvtap_newlink,
@@ -1033,7 +1033,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
 
                ret = 0;
                u = q->flags;
-               if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
+               if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
                    put_user(u, &ifr->ifr_flags))
                        ret = -EFAULT;
                macvtap_put_vlan(vlan);
@@ -1217,7 +1217,7 @@ static int macvtap_device_event(struct notifier_block *unused,
        return NOTIFY_DONE;
 }
 
-static struct notifier_block macvtap_notifier_block __read_mostly = {
+static struct notifier_block macvtap_notifier_block = {
        .notifier_call  = macvtap_device_event,
 };
 
index 34924dfadd0097608dac20dcf0aced6bb1c9805e..a7473601e89fa354be3c61763f286b7d562b38e8 100644 (file)
@@ -154,7 +154,7 @@ static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
        return 0;
 }
 
-static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
+static struct rtnl_link_ops nlmon_link_ops = {
        .kind                   = "nlmon",
        .priv_size              = sizeof(struct nlmon),
        .setup                  = nlmon_setup,
index 3fc91e89f5a564bb36ab251c81ec083fc7cab68d..6c363375fb63d453344894440d944f0ebe8f795f 100644 (file)
@@ -218,7 +218,7 @@ EXPORT_SYMBOL(phy_device_create);
  *   zero on success.
  *
  */
-static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
+static int get_phy_c45_ids(struct mii_bus *bus, int addr, int *phy_id,
                           struct phy_c45_device_ids *c45_ids) {
        int phy_reg;
        int i, reg_addr;
@@ -288,7 +288,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
  *   its return value is in turn returned.
  *
  */
-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
+static int get_phy_id(struct mii_bus *bus, int addr, int *phy_id,
                      bool is_c45, struct phy_c45_device_ids *c45_ids)
 {
        int phy_reg;
@@ -326,7 +326,7 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
 {
        struct phy_c45_device_ids c45_ids = {0};
-       u32 phy_id = 0;
+       int phy_id = 0;
        int r;
 
        r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
index af034dba9bd62a693ba81dd04479cd11ffc7f10e..1611c0b2d83fa7635b65173df1b1ecf8f3463990 100644 (file)
@@ -1022,7 +1022,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
        struct ppp_stats stats;
        struct ppp_comp_stats cstats;
-       char *vers;
 
        switch (cmd) {
        case SIOCGPPPSTATS:
@@ -1044,8 +1043,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                break;
 
        case SIOCGPPPVER:
-               vers = PPP_VERSION;
-               if (copy_to_user(addr, vers, strlen(vers) + 1))
+               if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
                        break;
                err = 0;
                break;
index 079f7adfcde5ef0fe7bf4637712ef3ac738c2f1e..b2a2bfa76544717d9872ee391445aaaaa75c6b25 100644 (file)
@@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
        register struct tcphdr *thp;
        register struct iphdr *ip;
        register struct cstate *cs;
-       int len, hdrlen;
+       long len, hdrlen;
        unsigned char *cp = icp;
 
        /* We've got a compressed packet; read the change byte */
index 2c087efed4737c91bbc66975c6841ce257c893ac..4859007cd13c34f6f36f8140daf3fb0cf222dd8f 100644 (file)
@@ -2103,7 +2103,7 @@ static unsigned int team_get_num_rx_queues(void)
        return TEAM_DEFAULT_NUM_RX_QUEUES;
 }
 
-static struct rtnl_link_ops team_link_ops __read_mostly = {
+static struct rtnl_link_ops team_link_ops = {
        .kind                   = DRV_NAME,
        .priv_size              = sizeof(struct team),
        .setup                  = team_setup,
@@ -2893,7 +2893,7 @@ static int team_device_event(struct notifier_block *unused,
        return NOTIFY_DONE;
 }
 
-static struct notifier_block team_notifier_block __read_mostly = {
+static struct notifier_block team_notifier_block = {
        .notifier_call = team_device_event,
 };
 
index 10f9e4021b5ab9799c2e445fa1f6f9c4799293b3..3515e7e5160e1ef64b62b250e2641b0e186ecd64 100644 (file)
@@ -1425,7 +1425,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
        return -EINVAL;
 }
 
-static struct rtnl_link_ops tun_link_ops __read_mostly = {
+static struct rtnl_link_ops tun_link_ops = {
        .kind           = DRV_NAME,
        .priv_size      = sizeof(struct tun_struct),
        .setup          = tun_setup,
@@ -1827,7 +1827,7 @@ unlock:
 }
 
 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
-                           unsigned long arg, int ifreq_len)
+                           unsigned long arg, size_t ifreq_len)
 {
        struct tun_file *tfile = file->private_data;
        struct tun_struct *tun;
@@ -1841,6 +1841,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
        int le;
        int ret;
 
+       if (ifreq_len > sizeof ifr)
+               return -EFAULT;
+
        if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
                if (copy_from_user(&ifr, argp, ifreq_len))
                        return -EFAULT;
index 9c5aa922a9f4bdb404588b74962769510be13887..8cd0405e7c39be0b65d52d109f87929530d4708b 100644 (file)
@@ -71,7 +71,7 @@
 #include <asm/byteorder.h>
 #include <linux/serial_core.h>
 #include <linux/serial.h>
-
+#include <asm/local.h>
 
 #define MOD_AUTHOR                     "Option Wireless"
 #define MOD_DESCRIPTION                        "USB High Speed Option driver"
@@ -1178,7 +1178,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
        struct urb *urb;
 
        urb = serial->rx_urb[0];
-       if (serial->port.count > 0) {
+       if (atomic_read(&serial->port.count) > 0) {
                count = put_rxbuf_data(urb, serial);
                if (count == -1)
                        return;
@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
        DUMP1(urb->transfer_buffer, urb->actual_length);
 
        /* Anyone listening? */
-       if (serial->port.count == 0)
+       if (atomic_read(&serial->port.count) == 0)
                return;
 
        if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
@@ -1278,8 +1278,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
        tty_port_tty_set(&serial->port, tty);
 
        /* check for port already opened, if not set the termios */
-       serial->port.count++;
-       if (serial->port.count == 1) {
+       if (atomic_inc_return(&serial->port.count) == 1) {
                serial->rx_state = RX_IDLE;
                /* Force default termio settings */
                _hso_serial_set_termios(tty, NULL);
@@ -1289,7 +1288,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
                result = hso_start_serial_device(serial->parent, GFP_KERNEL);
                if (result) {
                        hso_stop_serial_device(serial->parent);
-                       serial->port.count--;
+                       atomic_dec(&serial->port.count);
                        kref_put(&serial->parent->ref, hso_serial_ref_free);
                }
        } else {
@@ -1326,10 +1325,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
 
        /* reset the rts and dtr */
        /* do the actual close */
-       serial->port.count--;
+       atomic_dec(&serial->port.count);
 
-       if (serial->port.count <= 0) {
-               serial->port.count = 0;
+       if (atomic_read(&serial->port.count) <= 0) {
+               atomic_set(&serial->port.count, 0);
                tty_port_tty_set(&serial->port, NULL);
                if (!usb_gone)
                        hso_stop_serial_device(serial->parent);
@@ -1404,7 +1403,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
 
        /* the actual setup */
        spin_lock_irqsave(&serial->serial_lock, flags);
-       if (serial->port.count)
+       if (atomic_read(&serial->port.count))
                _hso_serial_set_termios(tty, old);
        else
                tty->termios = *old;
@@ -1873,7 +1872,7 @@ static void intr_callback(struct urb *urb)
                                D1("Pending read interrupt on port %d\n", i);
                                spin_lock(&serial->serial_lock);
                                if (serial->rx_state == RX_IDLE &&
-                                       serial->port.count > 0) {
+                                       atomic_read(&serial->port.count) > 0) {
                                        /* Setup and send a ctrl req read on
                                         * port i */
                                        if (!serial->rx_urb_filled[0]) {
@@ -3046,7 +3045,7 @@ static int hso_resume(struct usb_interface *iface)
        /* Start all serial ports */
        for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
                if (serial_table[i] && (serial_table[i]->interface == iface)) {
-                       if (dev2ser(serial_table[i])->port.count) {
+                       if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
                                result =
                                    hso_start_serial_device(serial_table[i], GFP_NOIO);
                                hso_kick_transmit(dev2ser(serial_table[i]));
index bf405f134d3aa69c60006536da0419f4a2b1f06b..fd847ee823bcae895e66447256d3f19f4238b261 100644 (file)
@@ -571,7 +571,7 @@ struct r8152 {
                void (*unload)(struct r8152 *);
                int (*eee_get)(struct r8152 *, struct ethtool_eee *);
                int (*eee_set)(struct r8152 *, struct ethtool_eee *);
-       } rtl_ops;
+       } __no_const rtl_ops;
 
        int intr_interval;
        u32 saved_wolopts;
index a251588762ec6aa4053ec8af337e714411ec877c..6d1323305eed07dcc2a9b17c3831a0411ba16cf2 100644 (file)
@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
 /* atomic counter partially included in MAC address to make sure 2 devices
  * do not end up with the same MAC - concept breaks in case of > 255 ifaces
  */
-static atomic_t iface_counter = ATOMIC_INIT(0);
+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
 
 /*
  * SYNC Timer Delay definition used to set the expiry time
@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->net->netdev_ops = &sierra_net_device_ops;
 
        /* change MAC addr to include, ifacenum, and to be unique */
-       dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
+       dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
        dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
 
        /* we will have to manufacture ethernet headers, prepare template */
index 059fdf1bf5eed9ff91c728c2a471f03791b5877c..7543217c317e5257770add564d5307d73d43ca58 100644 (file)
@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
 #define RECEIVE_AVG_WEIGHT 64
 
 /* Minimum alignment for mergeable packet buffers. */
-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
 
 #define VIRTNET_DRIVER_VERSION "1.0.0"
 
index a8c755dcab1417a27f8939096a23ded65a6af65d..a988b71881b29e558e5809b49fd899dda77e8bc7 100644 (file)
@@ -2702,7 +2702,7 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
+static struct rtnl_link_ops vxlan_link_ops = {
        .kind           = "vxlan",
        .maxtype        = IFLA_VXLAN_MAX,
        .policy         = vxlan_policy,
@@ -2749,7 +2749,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
        return NOTIFY_DONE;
 }
 
-static struct notifier_block vxlan_notifier_block __read_mostly = {
+static struct notifier_block vxlan_notifier_block = {
        .notifier_call = vxlan_lowerdev_event,
 };
 
index 5920c996fcdf24679944920fac6e577c0c4c472b..ff2e4a5654c7cb29a907d39603b68760123c33f8 100644 (file)
@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
 
 lmc_media_t lmc_ds3_media = {
-  lmc_ds3_init,                        /* special media init stuff */
-  lmc_ds3_default,             /* reset to default state */
-  lmc_ds3_set_status,          /* reset status to state provided */
-  lmc_dummy_set_1,             /* set clock source */
-  lmc_dummy_set2_1,            /* set line speed */
-  lmc_ds3_set_100ft,           /* set cable length */
-  lmc_ds3_set_scram,           /* set scrambler */
-  lmc_ds3_get_link_status,     /* get link status */
-  lmc_dummy_set_1,             /* set link status */
-  lmc_ds3_set_crc_length,      /* set CRC length */
-  lmc_dummy_set_1,             /* set T1 or E1 circuit type */
-  lmc_ds3_watchdog
+  .init = lmc_ds3_init,                                /* special media init stuff */
+  .defaults = lmc_ds3_default,                 /* reset to default state */
+  .set_status = lmc_ds3_set_status,            /* reset status to state provided */
+  .set_clock_source = lmc_dummy_set_1,         /* set clock source */
+  .set_speed = lmc_dummy_set2_1,               /* set line speed */
+  .set_cable_length = lmc_ds3_set_100ft,       /* set cable length */
+  .set_scrambler = lmc_ds3_set_scram,          /* set scrambler */
+  .get_link_status = lmc_ds3_get_link_status,  /* get link status */
+  .set_link_status = lmc_dummy_set_1,          /* set link status */
+  .set_crc_length = lmc_ds3_set_crc_length,    /* set CRC length */
+  .set_circuit_type = lmc_dummy_set_1,         /* set T1 or E1 circuit type */
+  .watchdog = lmc_ds3_watchdog
 };
 
 lmc_media_t lmc_hssi_media = {
-  lmc_hssi_init,               /* special media init stuff */
-  lmc_hssi_default,            /* reset to default state */
-  lmc_hssi_set_status,         /* reset status to state provided */
-  lmc_hssi_set_clock,          /* set clock source */
-  lmc_dummy_set2_1,            /* set line speed */
-  lmc_dummy_set_1,             /* set cable length */
-  lmc_dummy_set_1,             /* set scrambler */
-  lmc_hssi_get_link_status,    /* get link status */
-  lmc_hssi_set_link_status,    /* set link status */
-  lmc_hssi_set_crc_length,     /* set CRC length */
-  lmc_dummy_set_1,             /* set T1 or E1 circuit type */
-  lmc_hssi_watchdog
+  .init = lmc_hssi_init,                       /* special media init stuff */
+  .defaults = lmc_hssi_default,                        /* reset to default state */
+  .set_status = lmc_hssi_set_status,           /* reset status to state provided */
+  .set_clock_source = lmc_hssi_set_clock,      /* set clock source */
+  .set_speed = lmc_dummy_set2_1,               /* set line speed */
+  .set_cable_length = lmc_dummy_set_1,         /* set cable length */
+  .set_scrambler = lmc_dummy_set_1,            /* set scrambler */
+  .get_link_status = lmc_hssi_get_link_status, /* get link status */
+  .set_link_status = lmc_hssi_set_link_status, /* set link status */
+  .set_crc_length = lmc_hssi_set_crc_length,   /* set CRC length */
+  .set_circuit_type = lmc_dummy_set_1,         /* set T1 or E1 circuit type */
+  .watchdog = lmc_hssi_watchdog
 };
 
-lmc_media_t lmc_ssi_media = { lmc_ssi_init,    /* special media init stuff */
-  lmc_ssi_default,             /* reset to default state */
-  lmc_ssi_set_status,          /* reset status to state provided */
-  lmc_ssi_set_clock,           /* set clock source */
-  lmc_ssi_set_speed,           /* set line speed */
-  lmc_dummy_set_1,             /* set cable length */
-  lmc_dummy_set_1,             /* set scrambler */
-  lmc_ssi_get_link_status,     /* get link status */
-  lmc_ssi_set_link_status,     /* set link status */
-  lmc_ssi_set_crc_length,      /* set CRC length */
-  lmc_dummy_set_1,             /* set T1 or E1 circuit type */
-  lmc_ssi_watchdog
+lmc_media_t lmc_ssi_media = {
+  .init = lmc_ssi_init,                                /* special media init stuff */
+  .defaults = lmc_ssi_default,                 /* reset to default state */
+  .set_status = lmc_ssi_set_status,            /* reset status to state provided */
+  .set_clock_source = lmc_ssi_set_clock,       /* set clock source */
+  .set_speed = lmc_ssi_set_speed,              /* set line speed */
+  .set_cable_length = lmc_dummy_set_1,         /* set cable length */
+  .set_scrambler = lmc_dummy_set_1,            /* set scrambler */
+  .get_link_status = lmc_ssi_get_link_status,  /* get link status */
+  .set_link_status = lmc_ssi_set_link_status,  /* set link status */
+  .set_crc_length = lmc_ssi_set_crc_length,    /* set CRC length */
+  .set_circuit_type = lmc_dummy_set_1,         /* set T1 or E1 circuit type */
+  .watchdog = lmc_ssi_watchdog
 };
 
 lmc_media_t lmc_t1_media = {
-  lmc_t1_init,                 /* special media init stuff */
-  lmc_t1_default,              /* reset to default state */
-  lmc_t1_set_status,           /* reset status to state provided */
-  lmc_t1_set_clock,            /* set clock source */
-  lmc_dummy_set2_1,            /* set line speed */
-  lmc_dummy_set_1,             /* set cable length */
-  lmc_dummy_set_1,             /* set scrambler */
-  lmc_t1_get_link_status,      /* get link status */
-  lmc_dummy_set_1,             /* set link status */
-  lmc_t1_set_crc_length,       /* set CRC length */
-  lmc_t1_set_circuit_type,     /* set T1 or E1 circuit type */
-  lmc_t1_watchdog
+  .init = lmc_t1_init,                         /* special media init stuff */
+  .defaults = lmc_t1_default,                  /* reset to default state */
+  .set_status = lmc_t1_set_status,             /* reset status to state provided */
+  .set_clock_source = lmc_t1_set_clock,                /* set clock source */
+  .set_speed = lmc_dummy_set2_1,               /* set line speed */
+  .set_cable_length = lmc_dummy_set_1,         /* set cable length */
+  .set_scrambler = lmc_dummy_set_1,            /* set scrambler */
+  .get_link_status = lmc_t1_get_link_status,   /* get link status */
+  .set_link_status = lmc_dummy_set_1,          /* set link status */
+  .set_crc_length = lmc_t1_set_crc_length,     /* set CRC length */
+  .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
+  .watchdog = lmc_t1_watchdog
 };
 
 static void
index feacc3b994b7e621570ab93a6c142eb7aa5e9547..5bac0dedb3676c3c10d6088a73e3f53b447f1d99 100644 (file)
@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
 
 struct z8530_irqhandler z8530_sync =
 {
-       z8530_rx,
-       z8530_tx,
-       z8530_status
+       .rx = z8530_rx,
+       .tx = z8530_tx,
+       .status = z8530_status
 };
 
 EXPORT_SYMBOL(z8530_sync);
@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
 }
 
 static struct z8530_irqhandler z8530_dma_sync = {
-       z8530_dma_rx,
-       z8530_dma_tx,
-       z8530_dma_status
+       .rx = z8530_dma_rx,
+       .tx = z8530_dma_tx,
+       .status = z8530_dma_status
 };
 
 static struct z8530_irqhandler z8530_txdma_sync = {
-       z8530_rx,
-       z8530_dma_tx,
-       z8530_dma_status
+       .rx = z8530_rx,
+       .tx = z8530_dma_tx,
+       .status = z8530_dma_status
 };
 
 /**
@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
 
 struct z8530_irqhandler z8530_nop=
 {
-       z8530_rx_clear,
-       z8530_tx_clear,
-       z8530_status_clear
+       .rx = z8530_rx_clear,
+       .tx = z8530_tx_clear,
+       .status = z8530_status_clear
 };
 
 
index 0b602951ff6bafb65a7b6094e1a0640d91fd8660..b8bfa5b2b8541b36b42fbbda398bbfce8c8ded05 100644 (file)
@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
                if (i2400m->rx_roq == NULL)
                        goto error_roq_alloc;
 
-               rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
+               rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
                             GFP_KERNEL);
                if (rd == NULL) {
                        result = -ENOMEM;
index e71a2ce7a4487a5386331e26023e5538512f14bd..2268d6102932e497b0e6a97e88df2ca8726a08e1 100644 (file)
@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
        struct airo_info *ai = dev->ml_priv;
        int  ridcode;
         int  enabled;
-       static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
+       int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
        unsigned char *iobuf;
 
        /* Only super-user can write RIDs */
index da92bfa76b7cf1d37e9ea819edf00c59d84e350b..5a9001a8a8ef3f3cb81611485be724b9e67a06d6 100644 (file)
@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
 }
 
 /* Convert timeout from the DFU status to jiffies */
-static inline unsigned long at76_get_timeout(struct dfu_status *s)
+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
 {
        return msecs_to_jiffies((s->poll_timeout[2] << 16)
                                | (s->poll_timeout[1] << 8)
index f1946a6be442cb1d031b91b0556df1b3ae275b32..cd367fb5c152d6a2bf19d78fd210290b47b58396 100644 (file)
@@ -851,7 +851,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
 /* registered target arrival callback from the HIF layer */
 int ath10k_htc_init(struct ath10k *ar)
 {
-       struct ath10k_hif_cb htc_callbacks;
+       static struct ath10k_hif_cb htc_callbacks = {
+               .rx_completion = ath10k_htc_rx_completion_handler,
+               .tx_completion = ath10k_htc_tx_completion_handler,
+       };
        struct ath10k_htc_ep *ep = NULL;
        struct ath10k_htc *htc = &ar->htc;
 
@@ -860,8 +863,6 @@ int ath10k_htc_init(struct ath10k *ar)
        ath10k_htc_reset_endpoint_states(htc);
 
        /* setup HIF layer callbacks */
-       htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
-       htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
        htc->ar = ar;
 
        /* Get HIF default pipe for HTC message exchange */
index 527179c0edced2f008e6cfa37daf79d3cf94ec0d..a89015030df2acaeb781e1a9dce4850b1189ae36 100644 (file)
@@ -270,13 +270,13 @@ enum ath10k_htc_ep_id {
 
 struct ath10k_htc_ops {
        void (*target_send_suspend_complete)(struct ath10k *ar);
-};
+} __no_const;
 
 struct ath10k_htc_ep_ops {
        void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
        void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
        void (*ep_tx_credits)(struct ath10k *);
-};
+} __no_const;
 
 /* service connection information */
 struct ath10k_htc_svc_conn_req {
index f816909d9474e204c36abe20e73c16e0c259956b..e56cd8b0268113fc6280f84630690a314441f6fc 100644 (file)
@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
        ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
        ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
 
-       ACCESS_ONCE(ads->ds_link) = i->link;
-       ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
+       ACCESS_ONCE_RW(ads->ds_link) = i->link;
+       ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
 
        ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
        ctl6 = SM(i->keytype, AR_EncrType);
@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
 
        if ((i->is_first || i->is_last) &&
            i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
-               ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
+               ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
                        | set11nTries(i->rates, 1)
                        | set11nTries(i->rates, 2)
                        | set11nTries(i->rates, 3)
                        | (i->dur_update ? AR_DurUpdateEna : 0)
                        | SM(0, AR_BurstDur);
 
-               ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
+               ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
                        | set11nRate(i->rates, 1)
                        | set11nRate(i->rates, 2)
                        | set11nRate(i->rates, 3);
        } else {
-               ACCESS_ONCE(ads->ds_ctl2) = 0;
-               ACCESS_ONCE(ads->ds_ctl3) = 0;
+               ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
+               ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
        }
 
        if (!i->is_first) {
-               ACCESS_ONCE(ads->ds_ctl0) = 0;
-               ACCESS_ONCE(ads->ds_ctl1) = ctl1;
-               ACCESS_ONCE(ads->ds_ctl6) = ctl6;
+               ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
+               ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
+               ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
                return;
        }
 
@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
                break;
        }
 
-       ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
+       ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
                | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
                | SM(i->txpower[0], AR_XmitPower0)
                | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
                | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
                   (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
 
-       ACCESS_ONCE(ads->ds_ctl1) = ctl1;
-       ACCESS_ONCE(ads->ds_ctl6) = ctl6;
+       ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
+       ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
 
        if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
                return;
 
-       ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
+       ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
                | set11nPktDurRTSCTS(i->rates, 1);
 
-       ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
+       ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
                | set11nPktDurRTSCTS(i->rates, 3);
 
-       ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
+       ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
                | set11nRateFlags(i->rates, 1)
                | set11nRateFlags(i->rates, 2)
                | set11nRateFlags(i->rates, 3)
                | SM(i->rtscts_rate, AR_RTSCTSRate);
 
-       ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
-       ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
-       ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
+       ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
+       ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
+       ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
 }
 
 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
index da84b705cbcdc476734ac5f2c41dc82443e744d6..83e49782158e6ca62470e33889b6233e61cdf181 100644 (file)
@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
              (i->qcu << AR_TxQcuNum_S) | desc_len;
 
        checksum += val;
-       ACCESS_ONCE(ads->info) = val;
+       ACCESS_ONCE_RW(ads->info) = val;
 
        checksum += i->link;
-       ACCESS_ONCE(ads->link) = i->link;
+       ACCESS_ONCE_RW(ads->link) = i->link;
 
        checksum += i->buf_addr[0];
-       ACCESS_ONCE(ads->data0) = i->buf_addr[0];
+       ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
        checksum += i->buf_addr[1];
-       ACCESS_ONCE(ads->data1) = i->buf_addr[1];
+       ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
        checksum += i->buf_addr[2];
-       ACCESS_ONCE(ads->data2) = i->buf_addr[2];
+       ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
        checksum += i->buf_addr[3];
-       ACCESS_ONCE(ads->data3) = i->buf_addr[3];
+       ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
 
        checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
-       ACCESS_ONCE(ads->ctl3) = val;
+       ACCESS_ONCE_RW(ads->ctl3) = val;
        checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
-       ACCESS_ONCE(ads->ctl5) = val;
+       ACCESS_ONCE_RW(ads->ctl5) = val;
        checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
-       ACCESS_ONCE(ads->ctl7) = val;
+       ACCESS_ONCE_RW(ads->ctl7) = val;
        checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
-       ACCESS_ONCE(ads->ctl9) = val;
+       ACCESS_ONCE_RW(ads->ctl9) = val;
 
        checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
-       ACCESS_ONCE(ads->ctl10) = checksum;
+       ACCESS_ONCE_RW(ads->ctl10) = checksum;
 
        if (i->is_first || i->is_last) {
-               ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
+               ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
                        | set11nTries(i->rates, 1)
                        | set11nTries(i->rates, 2)
                        | set11nTries(i->rates, 3)
                        | (i->dur_update ? AR_DurUpdateEna : 0)
                        | SM(0, AR_BurstDur);
 
-               ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
+               ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
                        | set11nRate(i->rates, 1)
                        | set11nRate(i->rates, 2)
                        | set11nRate(i->rates, 3);
        } else {
-               ACCESS_ONCE(ads->ctl13) = 0;
-               ACCESS_ONCE(ads->ctl14) = 0;
+               ACCESS_ONCE_RW(ads->ctl13) = 0;
+               ACCESS_ONCE_RW(ads->ctl14) = 0;
        }
 
        ads->ctl20 = 0;
@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
 
        ctl17 = SM(i->keytype, AR_EncrType);
        if (!i->is_first) {
-               ACCESS_ONCE(ads->ctl11) = 0;
-               ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
-               ACCESS_ONCE(ads->ctl15) = 0;
-               ACCESS_ONCE(ads->ctl16) = 0;
-               ACCESS_ONCE(ads->ctl17) = ctl17;
-               ACCESS_ONCE(ads->ctl18) = 0;
-               ACCESS_ONCE(ads->ctl19) = 0;
+               ACCESS_ONCE_RW(ads->ctl11) = 0;
+               ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
+               ACCESS_ONCE_RW(ads->ctl15) = 0;
+               ACCESS_ONCE_RW(ads->ctl16) = 0;
+               ACCESS_ONCE_RW(ads->ctl17) = ctl17;
+               ACCESS_ONCE_RW(ads->ctl18) = 0;
+               ACCESS_ONCE_RW(ads->ctl19) = 0;
                return;
        }
 
-       ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
+       ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
                | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
                | SM(i->txpower[0], AR_XmitPower0)
                | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
        val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
        ctl12 |= SM(val, AR_PAPRDChainMask);
 
-       ACCESS_ONCE(ads->ctl12) = ctl12;
-       ACCESS_ONCE(ads->ctl17) = ctl17;
+       ACCESS_ONCE_RW(ads->ctl12) = ctl12;
+       ACCESS_ONCE_RW(ads->ctl17) = ctl17;
 
-       ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
+       ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
                | set11nPktDurRTSCTS(i->rates, 1);
 
-       ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
+       ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
                | set11nPktDurRTSCTS(i->rates, 3);
 
-       ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
+       ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
                | set11nRateFlags(i->rates, 1)
                | set11nRateFlags(i->rates, 2)
                | set11nRateFlags(i->rates, 3)
                | SM(i->rtscts_rate, AR_RTSCTSRate);
 
-       ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
+       ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
 
-       ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
-       ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
-       ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
+       ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
+       ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
+       ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
 }
 
 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
index 1cbd335515134e0d4c5ccbfd7fa8756c01a9801b..27dfb40d5d32b04cd21543a103a3b9bf9d4644ce 100644 (file)
@@ -640,7 +640,7 @@ struct ath_hw_private_ops {
 
        /* ANI */
        void (*ani_cache_ini_regs)(struct ath_hw *ah);
-};
+} __no_const;
 
 /**
  * struct ath_spec_scan - parameters for Atheros spectral scan
@@ -716,7 +716,7 @@ struct ath_hw_ops {
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
        void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
 #endif
-};
+} __no_const;
 
 struct ath_nf_limits {
        s16 max;
index 62b0bf4fdf6b0144f4696703113f344b2993546c..4ae094c1b35c15cbecafc2e0edeaf7336f92dfc0 100644 (file)
@@ -2546,16 +2546,18 @@ void ath9k_fill_chanctx_ops(void)
        if (!ath9k_is_chanctx_enabled())
                return;
 
-       ath9k_ops.hw_scan                  = ath9k_hw_scan;
-       ath9k_ops.cancel_hw_scan           = ath9k_cancel_hw_scan;
-       ath9k_ops.remain_on_channel        = ath9k_remain_on_channel;
-       ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
-       ath9k_ops.add_chanctx              = ath9k_add_chanctx;
-       ath9k_ops.remove_chanctx           = ath9k_remove_chanctx;
-       ath9k_ops.change_chanctx           = ath9k_change_chanctx;
-       ath9k_ops.assign_vif_chanctx       = ath9k_assign_vif_chanctx;
-       ath9k_ops.unassign_vif_chanctx     = ath9k_unassign_vif_chanctx;
-       ath9k_ops.mgd_prepare_tx           = ath9k_mgd_prepare_tx;
+       pax_open_kernel();
+       *(void **)&ath9k_ops.hw_scan                  = ath9k_hw_scan;
+       *(void **)&ath9k_ops.cancel_hw_scan           = ath9k_cancel_hw_scan;
+       *(void **)&ath9k_ops.remain_on_channel        = ath9k_remain_on_channel;
+       *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
+       *(void **)&ath9k_ops.add_chanctx              = ath9k_add_chanctx;
+       *(void **)&ath9k_ops.remove_chanctx           = ath9k_remove_chanctx;
+       *(void **)&ath9k_ops.change_chanctx           = ath9k_change_chanctx;
+       *(void **)&ath9k_ops.assign_vif_chanctx       = ath9k_assign_vif_chanctx;
+       *(void **)&ath9k_ops.unassign_vif_chanctx     = ath9k_unassign_vif_chanctx;
+       *(void **)&ath9k_ops.mgd_prepare_tx           = ath9k_mgd_prepare_tx;
+       pax_close_kernel();
 }
 
 #endif
index 058a9f2320503ef623748ed595ccbfdb8dbd9414..d5cb1bab2f0fb23912fe94fb7acccd9bbfafca8e 100644 (file)
@@ -2502,7 +2502,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
 {
        struct ssb_bus *bus = dev->dev->sdev->bus;
 
-       static const struct b206x_channel *chandata = NULL;
+       const struct b206x_channel *chandata = NULL;
        u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
        u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
        u16 old_comm15, scale;
index dc1d20cf64ee9b04b83b7d28495b696a44694317..f7a4f06b1f709ee482f9693de70532a8c9516c0a 100644 (file)
@@ -3633,7 +3633,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         */
        if (il3945_mod_params.disable_hw_scan) {
                D_INFO("Disabling hw_scan\n");
-               il3945_mac_ops.hw_scan = NULL;
+               pax_open_kernel();
+               *(void **)&il3945_mac_ops.hw_scan = NULL;
+               pax_close_kernel();
        }
 
        D_INFO("*** LOAD DRIVER ***\n");
index 0ffb6ff1a255f8ac609ba3799c02489ab853eea1..c0b7f0ec37633c87b6a921f40084cce20d7746c3 100644 (file)
@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
 {
        struct iwl_priv *priv = file->private_data;
        char buf[64];
-       int buf_size;
+       size_t buf_size;
        u32 offset, len;
 
        memset(buf, 0, sizeof(buf));
@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
        struct iwl_priv *priv = file->private_data;
 
        char buf[8];
-       int buf_size;
+       size_t buf_size;
        u32 reset_flag;
 
        memset(buf, 0, sizeof(buf));
@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
 {
        struct iwl_priv *priv = file->private_data;
        char buf[8];
-       int buf_size;
+       size_t buf_size;
        int ht40;
 
        memset(buf, 0, sizeof(buf));
@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
 {
        struct iwl_priv *priv = file->private_data;
        char buf[8];
-       int buf_size;
+       size_t buf_size;
        int value;
 
        memset(buf, 0, sizeof(buf));
@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
 DEBUGFS_READ_FILE_OPS(current_sleep_command);
 
-static const char *fmt_value = "  %-30s %10u\n";
-static const char *fmt_hex   = "  %-30s       0x%02X\n";
-static const char *fmt_table = "  %-30s %10u  %10u  %10u  %10u\n";
-static const char *fmt_header =
+static const char fmt_value[] = "  %-30s %10u\n";
+static const char fmt_hex[]   = "  %-30s       0x%02X\n";
+static const char fmt_table[] = "  %-30s %10u  %10u  %10u  %10u\n";
+static const char fmt_header[] =
        "%-32s    current  cumulative       delta         max\n";
 
 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
 {
        struct iwl_priv *priv = file->private_data;
        char buf[8];
-       int buf_size;
+       size_t buf_size;
        int clear;
 
        memset(buf, 0, sizeof(buf));
@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
 {
        struct iwl_priv *priv = file->private_data;
        char buf[8];
-       int buf_size;
+       size_t buf_size;
        int trace;
 
        memset(buf, 0, sizeof(buf));
@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
 {
        struct iwl_priv *priv = file->private_data;
        char buf[8];
-       int buf_size;
+       size_t buf_size;
        int missed;
 
        memset(buf, 0, sizeof(buf));
@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
 
        struct iwl_priv *priv = file->private_data;
        char buf[8];
-       int buf_size;
+       size_t buf_size;
        int plcp;
 
        memset(buf, 0, sizeof(buf));
@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
 
        struct iwl_priv *priv = file->private_data;
        char buf[8];
-       int buf_size;
+       size_t buf_size;
        int flush;
 
        memset(buf, 0, sizeof(buf));
@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
 
        struct iwl_priv *priv = file->private_data;
        char buf[8];
-       int buf_size;
+       size_t buf_size;
        int rts;
 
        if (!priv->cfg->ht_params)
@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
 {
        struct iwl_priv *priv = file->private_data;
        char buf[8];
-       int buf_size;
+       size_t buf_size;
 
        memset(buf, 0, sizeof(buf));
        buf_size = min(count, sizeof(buf) -  1);
@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
        struct iwl_priv *priv = file->private_data;
        u32 event_log_flag;
        char buf[8];
-       int buf_size;
+       size_t buf_size;
 
        /* check that the interface is up */
        if (!iwl_is_ready(priv))
@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
        struct iwl_priv *priv = file->private_data;
        char buf[8];
        u32 calib_disabled;
-       int buf_size;
+       size_t buf_size;
 
        memset(buf, 0, sizeof(buf));
        buf_size = min(count, sizeof(buf) - 1);
index 523fe0c88dcb2d464ec915085371d376aec22e28..0d9473b16a56e4dbc91f50cbfc82116bac374783 100644 (file)
@@ -1781,7 +1781,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
 
        char buf[8];
-       int buf_size;
+       size_t buf_size;
        u32 reset_flag;
 
        memset(buf, 0, sizeof(buf));
@@ -1802,7 +1802,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
 {
        struct iwl_trans *trans = file->private_data;
        char buf[8];
-       int buf_size;
+       size_t buf_size;
        int csr;
 
        memset(buf, 0, sizeof(buf));
index ef58a8862d9178cecc5e0dc0f656fa42b6abae34..fafa731556bf73acdd5fadb5bbe5747281a82d96 100644 (file)
@@ -3066,20 +3066,20 @@ static int __init init_mac80211_hwsim(void)
        if (channels < 1)
                return -EINVAL;
 
-       mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
-       mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
-       mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
-       mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
-       mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
-       mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
-       mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
-       mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
-       mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
-       mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
-       mac80211_hwsim_mchan_ops.assign_vif_chanctx =
-               mac80211_hwsim_assign_vif_chanctx;
-       mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
-               mac80211_hwsim_unassign_vif_chanctx;
+       pax_open_kernel();
+       memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
+       *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
+       *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
+       *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
+       *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
+       *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
+       *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
+       *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
+       *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
+       *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
+       *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
+       *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
+       pax_close_kernel();
 
        spin_lock_init(&hwsim_radio_lock);
        INIT_LIST_HEAD(&hwsim_radios);
index 1a4facd1fbf335625ff092064b3dfa58f4e869d1..a2ecbbdba17d9594b3f0aa1c86f8a8c66ed89514 100644 (file)
@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
 
        netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
 
-       if (rts_threshold < 0 || rts_threshold > 2347)
+       if (rts_threshold > 2347)
                rts_threshold = 2347;
 
        tmp = cpu_to_le32(rts_threshold);
index 9bb398bed9bb68ba133d702c2e5be8b0b089a8ae..b0cc047c0d67e8f7f8a9278f5367f32939ec98e7 100644 (file)
@@ -375,7 +375,7 @@ struct rt2x00_intf {
         * for hardware which doesn't support hardware
         * sequence counting.
         */
-       atomic_t seqno;
+       atomic_unchecked_t seqno;
 };
 
 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
index 66ff36447b9473799e27899bf44c3c3c8eb3994b..3ce34f7fafbcb471cb5c1fd10985394ac5d3459b 100644 (file)
@@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
         * sequence counter given by mac80211.
         */
        if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
-               seqno = atomic_add_return(0x10, &intf->seqno);
+               seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
        else
-               seqno = atomic_read(&intf->seqno);
+               seqno = atomic_read_unchecked(&intf->seqno);
 
        hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
        hdr->seq_ctrl |= cpu_to_le16(seqno);
index b661f896e9fe148ff322879c541c67c755a9a1a4..ddf7d2ba77e1ab254bc2ed885034955615473fdd 100644 (file)
@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
 
                irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
 
-               wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
-               wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
+               pax_open_kernel();
+               *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
+               *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
+               pax_close_kernel();
 
                wl1251_info("using dedicated interrupt line");
        } else {
-               wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
-               wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
+               pax_open_kernel();
+               *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
+               *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
+               pax_close_kernel();
 
                wl1251_info("using SDIO interrupt");
        }
index d6d0d6d9c7a89b4e9377a5e48cf694f1d1161f33..60c23a01684ce10b4493563519933d0f6b23d3c2 100644 (file)
@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
                       sizeof(wl->conf.mem));
 
                /* read data preparation is only needed by wl127x */
-               wl->ops->prepare_read = wl127x_prepare_read;
+               pax_open_kernel();
+               *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
+               pax_close_kernel();
 
                wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
                              WL127X_IFTYPE_SR_VER,  WL127X_MAJOR_SR_VER,
@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
                       sizeof(wl->conf.mem));
 
                /* read data preparation is only needed by wl127x */
-               wl->ops->prepare_read = wl127x_prepare_read;
+               pax_open_kernel();
+               *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
+               pax_close_kernel();
 
                wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
                              WL127X_IFTYPE_SR_VER,  WL127X_MAJOR_SR_VER,
index 8e562610bf161a7ea841a98e9c592153b69006ff..9140678e007a595f186d013bfdcecbb841b3b73f 100644 (file)
@@ -1916,8 +1916,10 @@ static int wl18xx_setup(struct wl1271 *wl)
        }
 
        if (!checksum_param) {
-               wl18xx_ops.set_rx_csum = NULL;
-               wl18xx_ops.init_vif = NULL;
+               pax_open_kernel();
+               *(void **)&wl18xx_ops.set_rx_csum = NULL;
+               *(void **)&wl18xx_ops.init_vif = NULL;
+               pax_close_kernel();
        }
 
        /* Enable 11a Band only if we have 5G antennas */
index a912dc051111973367d7b4056f5c793004885f6d..a8225ba4a269df5b7c33e7cacec8c994a1860dae 100644 (file)
@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
 {
        struct zd_usb *usb = urb->context;
        struct zd_usb_interrupt *intr = &usb->intr;
-       int len;
+       unsigned int len;
        u16 int_num;
 
        ZD_ASSERT(in_interrupt());
index ce2e2cf54fbc8b86a5dd674e819dd2b85908d978..f81e500e765061fd7d3cf22196260d3647155607 100644 (file)
@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
 
 static int nfcwilink_probe(struct platform_device *pdev)
 {
-       static struct nfcwilink *drv;
+       struct nfcwilink *drv;
        int rc;
        __u32 protocols;
 
index f2596c8d68b0b36b12501914e7a1ea49ce16c058..50d53afd0746202d7605c1a45ad9f2013d098964 100644 (file)
@@ -559,7 +559,7 @@ static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
                goto exit;
        }
 
-       gate = uid_skb->data;
+       memcpy(gate, uid_skb->data, uid_skb->len);
        *len = uid_skb->len;
 exit:
        kfree_skb(uid_skb);
index 510074226d571de661fb7245c0b4091749555ada..6ad4e6d9ed59b9251ad59e31e3131f89ccc0af8c 100644 (file)
@@ -1118,7 +1118,9 @@ static int __init of_fdt_raw_init(void)
                pr_warn("fdt: not creating '/sys/firmware/fdt': CRC check failed\n");
                return 0;
        }
-       of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
+       pax_open_kernel();
+       *(size_t *)&of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
+       pax_close_kernel();
        return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
 }
 late_initcall(of_fdt_raw_init);
index d93b2b6b1f7a2b3598bb8fcc3bb9ba4d2231e626..ae504011afb5158d56e626fa0b6b60859b9b334c 100644 (file)
@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
                if (cookie == NO_COOKIE)
                        offset = pc;
                if (cookie == INVALID_COOKIE) {
-                       atomic_inc(&oprofile_stats.sample_lost_no_mapping);
+                       atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
                        offset = pc;
                }
                if (cookie != last_cookie) {
@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
        /* add userspace sample */
 
        if (!mm) {
-               atomic_inc(&oprofile_stats.sample_lost_no_mm);
+               atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
                return 0;
        }
 
        cookie = lookup_dcookie(mm, s->eip, &offset);
 
        if (cookie == INVALID_COOKIE) {
-               atomic_inc(&oprofile_stats.sample_lost_no_mapping);
+               atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
                return 0;
        }
 
@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
                /* ignore backtraces if failed to add a sample */
                if (state == sb_bt_start) {
                        state = sb_bt_ignore;
-                       atomic_inc(&oprofile_stats.bt_lost_no_mapping);
+                       atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
                }
        }
        release_mm(mm);
index c0cc4e7ff023b685dd5a2159fe16d7cd0deb6ad3..44d4e5485e95ac723332b02e93413bc8b255b4b7 100644 (file)
@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
        }
 
        if (buffer_pos == buffer_size) {
-               atomic_inc(&oprofile_stats.event_lost_overflow);
+               atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
                return;
        }
 
index ed2c3ec07024d0bcb04686a64510f1a607bfded7..deda85a09de21dc37d73ae720bbaf52cc3ad0810 100644 (file)
@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
        if (oprofile_ops.switch_events())
                return;
 
-       atomic_inc(&oprofile_stats.multiplex_counter);
+       atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
        start_switch_worker();
 }
 
index ee2cfce358b9810b5d1f06e28d18e86e1dec0a98..7f8f6993ede1c667638978f31e4908c8d6bbf19a 100644 (file)
@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
 
 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
 
-static ssize_t timeout_read(struct file *file, char __user *buf,
+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
                size_t count, loff_t *offset)
 {
        return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
index 59659cea45823a505a50a041e35e282ae8eea29d..6c860a0098785502ea3cba0cbf4c147b8ac31473 100644 (file)
@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
                cpu_buf->sample_invalid_eip = 0;
        }
 
-       atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
-       atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
-       atomic_set(&oprofile_stats.event_lost_overflow, 0);
-       atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
-       atomic_set(&oprofile_stats.multiplex_counter, 0);
+       atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
+       atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
+       atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
+       atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
+       atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
 }
 
 
index 1fc622bd18346d8652dfbbd66bdd0a9c2897a557..8c48fc31cca29b32c997cfe09dfb6e90c41f4e56 100644 (file)
 #include <linux/atomic.h>
 
 struct oprofile_stat_struct {
-       atomic_t sample_lost_no_mm;
-       atomic_t sample_lost_no_mapping;
-       atomic_t bt_lost_no_mapping;
-       atomic_t event_lost_overflow;
-       atomic_t multiplex_counter;
+       atomic_unchecked_t sample_lost_no_mm;
+       atomic_unchecked_t sample_lost_no_mapping;
+       atomic_unchecked_t bt_lost_no_mapping;
+       atomic_unchecked_t event_lost_overflow;
+       atomic_unchecked_t multiplex_counter;
 };
 
 extern struct oprofile_stat_struct oprofile_stats;
index 3f493459378faa15d4cfe872af18b0970b19d98c..c750d0becd6c549bfbf546614192df08712844e0 100644 (file)
@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
 
 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
-       atomic_t *val = file->private_data;
-       return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
+       atomic_unchecked_t *val = file->private_data;
+       return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
 }
 
 
@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
 
 
 int oprofilefs_create_ro_atomic(struct dentry *root,
-       char const *name, atomic_t *val)
+       char const *name, atomic_unchecked_t *val)
 {
        return __oprofilefs_create_file(root, name,
                                        &atomic_ro_fops, 0444, val);
index bdef916e5dda3c02eb9f29504b0e69faa6a7bd8b..88c7deea4f43bfa230c06564183f5fb100f8ef60 100644 (file)
@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
        return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata oprofile_cpu_notifier = {
+static struct notifier_block oprofile_cpu_notifier = {
        .notifier_call = oprofile_cpu_notify,
 };
 
index 3b470801a04f209fea0cd325de189e4e96cfc39a..6cd05dd8a2f90dad4aea858d4f59896a61a98286 100644 (file)
@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
 
        *ppos += len;
 
-       return copy_to_user(result, buffer, len) ? -EFAULT : 0;
+       return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
 }
 
 #ifdef CONFIG_PARPORT_1284
@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
 
        *ppos += len;
 
-       return copy_to_user (result, buffer, len) ? -EFAULT : 0;
+       return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
 }
 #endif /* IEEE1284.3 support. */
 
index 6ca23998ee8f36b1d486dcaa4c50c77ae74c8979..68d866b958cfcb8b33ffd8e1835c83bb9d1f06c7 100644 (file)
@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
                goto init_cleanup;
        }
 
-       ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
+       pax_open_kernel();
+       *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
+       pax_close_kernel();
        retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
 
        return retval;
index 66b7bbebe493af569d3eae456cd9ec59e4d37fac..26bee785e4977c9a84d15fec1bf8578f541a7d21 100644 (file)
@@ -73,7 +73,6 @@ static u16 port;
 static unsigned int enum_bit;
 static u8 enum_mask;
 
-static struct cpci_hp_controller_ops generic_hpc_ops;
 static struct cpci_hp_controller generic_hpc;
 
 static int __init validate_parameters(void)
@@ -139,6 +138,10 @@ static int query_enum(void)
        return ((value & enum_mask) == enum_mask);
 }
 
+static struct cpci_hp_controller_ops generic_hpc_ops = {
+       .query_enum = query_enum,
+};
+
 static int __init cpcihp_generic_init(void)
 {
        int status;
@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
        pci_dev_put(dev);
 
        memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
-       generic_hpc_ops.query_enum = query_enum;
        generic_hpc.ops = &generic_hpc_ops;
 
        status = cpci_hp_register_controller(&generic_hpc);
index 7ecf34e76a61eb73ebe6418eb4b1f904a997a90f..effed62d053a05bac08ab6f730e9647575ee730d 100644 (file)
@@ -59,7 +59,6 @@
 /* local variables */
 static bool debug;
 static bool poll;
-static struct cpci_hp_controller_ops zt5550_hpc_ops;
 static struct cpci_hp_controller zt5550_hpc;
 
 /* Primary cPCI bus bridge device */
@@ -204,6 +203,10 @@ static int zt5550_hc_disable_irq(void)
        return 0;
 }
 
+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
+       .query_enum = zt5550_hc_query_enum,
+};
+
 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        int status;
@@ -215,16 +218,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
        dbg("returned from zt5550_hc_config");
 
        memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
-       zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
        zt5550_hpc.ops = &zt5550_hpc_ops;
        if (!poll) {
                zt5550_hpc.irq = hc_dev->irq;
                zt5550_hpc.irq_flags = IRQF_SHARED;
                zt5550_hpc.dev_id = hc_dev;
 
-               zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
-               zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
-               zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
+               pax_open_kernel();
+               *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
+               *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
+               *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
+               pax_open_kernel();
        } else {
                info("using ENUM# polling mode");
        }
index 1e08ff8c229c06680dc88b34bc3eb2ca547d3884..3cd145fb51aac438fc4b7b74b302d7510c7e526c 100644 (file)
@@ -425,8 +425,10 @@ static u32 store_HRT (void __iomem *rom_start)
 
 void compaq_nvram_init (void __iomem *rom_start)
 {
+#ifndef CONFIG_PAX_KERNEXEC
        if (rom_start)
                compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
+#endif
 
        dbg("int15 entry  = %p\n", compaq_int15_entry_point);
 
index 56d8486dc16704d1f93726d03131cbedfe0b4bed..f26113f8f6dbfc0d46eb5aada7a729bfe307b423 100644 (file)
@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
                return -EINVAL;
        }
 
-       slot->ops->owner = owner;
-       slot->ops->mod_name = mod_name;
+       pax_open_kernel();
+       *(struct module **)&slot->ops->owner = owner;
+       *(const char **)&slot->ops->mod_name = mod_name;
+       pax_close_kernel();
 
        mutex_lock(&pci_hp_mutex);
        /*
index 07aa722bb12cd61a6a3a8767b2efe1dd826e6952..84514b48807b2baeac2baa479c1f7ab7fbf940fe 100644 (file)
@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
        struct slot *slot = ctrl->slot;
        struct hotplug_slot *hotplug = NULL;
        struct hotplug_slot_info *info = NULL;
-       struct hotplug_slot_ops *ops = NULL;
+       hotplug_slot_ops_no_const *ops = NULL;
        char name[SLOT_NAME_SIZE];
        int retval = -ENOMEM;
 
index fd60806d3fd001c580094a5dc0d2bf0e49dac815..ab6c56588d0d5b08335cf75c87ac0619b9e75954 100644 (file)
@@ -513,8 +513,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
 {
        struct attribute **msi_attrs;
        struct attribute *msi_attr;
-       struct device_attribute *msi_dev_attr;
-       struct attribute_group *msi_irq_group;
+       device_attribute_no_const *msi_dev_attr;
+       attribute_group_no_const *msi_irq_group;
        const struct attribute_group **msi_irq_groups;
        struct msi_desc *entry;
        int ret = -ENOMEM;
@@ -573,7 +573,7 @@ error_attrs:
        count = 0;
        msi_attr = msi_attrs[count];
        while (msi_attr) {
-               msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
+               msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
                kfree(msi_attr->name);
                kfree(msi_dev_attr);
                ++count;
index aa012fb3834b48dbc0001565d453b2dc267009c1..63fac5d888b3d51d62bce2947abcfb4327e0a686 100644 (file)
@@ -1139,7 +1139,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
 {
        /* allocate attribute structure, piggyback attribute name */
        int name_len = write_combine ? 13 : 10;
-       struct bin_attribute *res_attr;
+       bin_attribute_no_const *res_attr;
        int retval;
 
        res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
@@ -1316,7 +1316,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
 {
        int retval;
-       struct bin_attribute *attr;
+       bin_attribute_no_const *attr;
 
        /* If the device has VPD, try to expose it in sysfs. */
        if (dev->vpd) {
@@ -1363,7 +1363,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
 {
        int retval;
        int rom_size = 0;
-       struct bin_attribute *attr;
+       bin_attribute_no_const *attr;
 
        if (!sysfs_initialized)
                return -EACCES;
index d54632a1db43cbed30d89fb91b84fef489cbc4a4..198c84df5fb99b26f96877d35530d21ca3d5998c 100644 (file)
@@ -93,7 +93,7 @@ struct pci_vpd_ops {
 struct pci_vpd {
        unsigned int len;
        const struct pci_vpd_ops *ops;
-       struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
+       bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
 };
 
 int pci_vpd_pci22_init(struct pci_dev *dev);
index e1e7026b838dd23b2043813d22fc0d718a7e6829..d28dd338f10e06528037e14baaa873708f387f54 100644 (file)
@@ -27,9 +27,9 @@
 #define MODULE_PARAM_PREFIX "pcie_aspm."
 
 /* Note: those are not register definitions */
-#define ASPM_STATE_L0S_UP      (1    /* Upstream direction L0s state */
-#define ASPM_STATE_L0S_DW      (2    /* Downstream direction L0s state */
-#define ASPM_STATE_L1          (4    /* L1 state */
+#define ASPM_STATE_L0S_UP      (1U)    /* Upstream direction L0s state */
+#define ASPM_STATE_L0S_DW      (2U)    /* Downstream direction L0s state */
+#define ASPM_STATE_L1          (4U)    /* L1 state */
 #define ASPM_STATE_L0S         (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
 #define ASPM_STATE_ALL         (ASPM_STATE_L0S | ASPM_STATE_L1)
 
index 23212f8ae09b5e1d853d6ad11410731cc1a17505..65e945b591bb57c07cbe741a9c4f63822fe25e31 100644 (file)
@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
        u16 orig_cmd;
        struct pci_bus_region region, inverted_region;
 
-       mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
+       mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
 
        /* No printks while decoding is disabled! */
        if (!dev->mmio_always_on) {
index 3f155e78513fd4171dc4b57c5b7b3b41e7afea20..0f4b1f0f7c275f159adecbbe2858e1c09151b97e 100644 (file)
@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
 static int __init pci_proc_init(void)
 {
        struct pci_dev *dev = NULL;
+
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+       proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+       proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
+#endif
+#else
        proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
+#endif
        proc_create("devices", 0, proc_bus_pci_dir,
                    &proc_bus_pci_dev_operations);
        proc_initialized = 1;
index b84fdd6b629bc678281076df59586e460d0fc850..b89d8292f80bc8aa8c4d00e8a27edd1173069b19 100644 (file)
@@ -479,7 +479,7 @@ static struct chromeos_laptop cr48 = {
        .callback = chromeos_laptop_dmi_matched, \
        .driver_data = (void *)&board_
 
-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
        {
                .ident = "Samsung Series 5 550",
                .matches = {
index 1e1e594238892a3bf18d99ee0e4e35d58ac96d1f..8fe59c55c3f27b0f781c725eb774bdd6822a1462 100644 (file)
@@ -150,7 +150,7 @@ struct wmax_led_args {
 } __packed;
 
 static struct platform_device *platform_device;
-static struct device_attribute *zone_dev_attrs;
+static device_attribute_no_const *zone_dev_attrs;
 static struct attribute **zone_attrs;
 static struct platform_zone *zone_data;
 
@@ -160,7 +160,7 @@ static struct platform_driver platform_driver = {
                   }
 };
 
-static struct attribute_group zone_attribute_group = {
+static attribute_group_no_const zone_attribute_group = {
        .name = "rgb_zones",
 };
 
index 7543a56e0f4593c74fa74caee310a0ddbce4062c..367ca8ed839200bb9c138f28d85f181d5d920fd3 100644 (file)
@@ -1589,6 +1589,10 @@ static int show_dsts(struct seq_file *m, void *data)
        int err;
        u32 retval = -1;
 
+#ifdef CONFIG_GRKERNSEC_KMEM
+       return -EPERM;
+#endif
+
        err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
 
        if (err < 0)
@@ -1605,6 +1609,10 @@ static int show_devs(struct seq_file *m, void *data)
        int err;
        u32 retval = -1;
 
+#ifdef CONFIG_GRKERNSEC_KMEM
+       return -EPERM;
+#endif
+
        err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
                                    &retval);
 
@@ -1629,6 +1637,10 @@ static int show_call(struct seq_file *m, void *data)
        union acpi_object *obj;
        acpi_status status;
 
+#ifdef CONFIG_GRKERNSEC_KMEM
+       return -EPERM;
+#endif
+
        status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
                                     1, asus->debug.method_id,
                                     &input, &output);
index 085987730aabbe6ac6e15f588ad11b04ce311e39..1cf7d08588f948f5914e70e6f9ac085824ed9bc1 100644 (file)
@@ -999,12 +999,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
 
        if (!quirks->ec_read_only) {
                /* allow userland write sysfs file  */
-               dev_attr_bluetooth.store = store_bluetooth;
-               dev_attr_wlan.store = store_wlan;
-               dev_attr_threeg.store = store_threeg;
-               dev_attr_bluetooth.attr.mode |= S_IWUSR;
-               dev_attr_wlan.attr.mode |= S_IWUSR;
-               dev_attr_threeg.attr.mode |= S_IWUSR;
+               pax_open_kernel();
+               *(void **)&dev_attr_bluetooth.store = store_bluetooth;
+               *(void **)&dev_attr_wlan.store = store_wlan;
+               *(void **)&dev_attr_threeg.store = store_threeg;
+               *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
+               *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
+               *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
+               pax_close_kernel();
        }
 
        /* disable hardware control by fn key */
index 6d2bac0c463cd41f17eef2e4cfe41e16cc4f2658..ec2b0299a57c66169c9bbd691c1563050334acd7 100644 (file)
@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
 static void msi_wmi_notify(u32 value, void *context)
 {
        struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
-       static struct key_entry *key;
+       struct key_entry *key;
        union acpi_object *obj;
        acpi_status status;
 
index 6dd1c0e7dcd9af81a43db55f0c4ba87956e37dde..5d602c77761583997b3ce0cfb3476021efe0dad5 100644 (file)
@@ -2526,7 +2526,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
 }
 
 /* High speed charging function */
-static struct device_attribute *hsc_handle;
+static device_attribute_no_const *hsc_handle;
 
 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
                struct device_attribute *attr,
@@ -2600,7 +2600,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
 }
 
 /* low battery function */
-static struct device_attribute *lowbatt_handle;
+static device_attribute_no_const *lowbatt_handle;
 
 static ssize_t sony_nc_lowbatt_store(struct device *dev,
                struct device_attribute *attr,
@@ -2666,7 +2666,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
 }
 
 /* fan speed function */
-static struct device_attribute *fan_handle, *hsf_handle;
+static device_attribute_no_const *fan_handle, *hsf_handle;
 
 static ssize_t sony_nc_hsfan_store(struct device *dev,
                struct device_attribute *attr,
@@ -2773,7 +2773,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
 }
 
 /* USB charge function */
-static struct device_attribute *uc_handle;
+static device_attribute_no_const *uc_handle;
 
 static ssize_t sony_nc_usb_charge_store(struct device *dev,
                struct device_attribute *attr,
@@ -2847,7 +2847,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
 }
 
 /* Panel ID function */
-static struct device_attribute *panel_handle;
+static device_attribute_no_const *panel_handle;
 
 static ssize_t sony_nc_panelid_show(struct device *dev,
                struct device_attribute *attr, char *buffer)
@@ -2894,7 +2894,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
 }
 
 /* smart connect function */
-static struct device_attribute *sc_handle;
+static device_attribute_no_const *sc_handle;
 
 static ssize_t sony_nc_smart_conn_store(struct device *dev,
                struct device_attribute *attr,
index c3d11fabc46f21c98c6122497958837ae291b171..f83cded1acff1b1abf16a13c7e6edb767ea30ae8 100644 (file)
@@ -2092,7 +2092,7 @@ static int hotkey_mask_get(void)
        return 0;
 }
 
-void static hotkey_mask_warn_incomplete_mask(void)
+static void hotkey_mask_warn_incomplete_mask(void)
 {
        /* log only what the user can fix... */
        const u32 wantedmask = hotkey_driver_mask &
@@ -2436,10 +2436,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
                                && !tp_features.bright_unkfw)
                        TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
        }
+}
 
 #undef TPACPI_COMPARE_KEY
 #undef TPACPI_MAY_SEND_KEY
-}
 
 /*
  * Polling driver
index 438d4c72c7b36c27982ed8e60b9a42cbff58dc8f..ca8a2fba110d41cb6f92e939eed3a64104877736 100644 (file)
@@ -59,7 +59,7 @@ do { \
        set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
 } while(0)
 
-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
                        (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
 
 /*
@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
 
        cpu = get_cpu();
        save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
+
+       pax_open_kernel();
        get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
+       pax_close_kernel();
 
        /* On some boxes IRQ's during PnP BIOS calls are deadly.  */
        spin_lock_irqsave(&pnp_bios_lock, flags);
@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
                             :"memory");
        spin_unlock_irqrestore(&pnp_bios_lock, flags);
 
+       pax_open_kernel();
        get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
+       pax_close_kernel();
+
        put_cpu();
 
        /* If we get here and this is set then the PnP BIOS faulted on us. */
@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
        return status;
 }
 
-void pnpbios_calls_init(union pnp_bios_install_struct *header)
+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
 {
        int i;
 
@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
        pnp_bios_callpoint.offset = header->fields.pm16offset;
        pnp_bios_callpoint.segment = PNP_CS16;
 
+       pax_open_kernel();
+
        for_each_possible_cpu(i) {
                struct desc_struct *gdt = get_cpu_gdt_table(i);
                if (!gdt)
@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
                set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
                         (unsigned long)__va(header->fields.pm16dseg));
        }
+
+       pax_close_kernel();
 }
index 0c52e2a0d90cdba166a5cdfda72f70d86f5a2572..3421ab71145a0eb4d405028cc2552c76e8165d36 100644 (file)
@@ -37,7 +37,11 @@ static int polling;
 
 #if IS_ENABLED(CONFIG_USB_PHY)
 static struct usb_phy *transceiver;
-static struct notifier_block otg_nb;
+static int otg_handle_notification(struct notifier_block *nb,
+               unsigned long event, void *unused);
+static struct notifier_block otg_nb = {
+       .notifier_call = otg_handle_notification
+};
 #endif
 
 static struct regulator *ac_draw;
@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
 
 #if IS_ENABLED(CONFIG_USB_PHY)
        if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
-               otg_nb.notifier_call = otg_handle_notification;
                ret = usb_register_notifier(transceiver, &otg_nb);
                if (ret) {
                        dev_err(dev, "failure to register otg notifier\n");
index cc439fd89d8dda469c52795bcc59074245b1cec8..8fa30df6beffd3a00b1d4c271dc1383765129331 100644 (file)
@@ -16,12 +16,12 @@ struct power_supply;
 
 #ifdef CONFIG_SYSFS
 
-extern void power_supply_init_attrs(struct device_type *dev_type);
+extern void power_supply_init_attrs(void);
 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
 
 #else
 
-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
+static inline void power_supply_init_attrs(void) {}
 #define power_supply_uevent NULL
 
 #endif /* CONFIG_SYSFS */
index 694e8cddd5c13e1760e7c9cd5fc0170876a9df04..9f0348361088f4a5e467d2d07431b78ca5a82d6e 100644 (file)
@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
 EXPORT_SYMBOL_GPL(power_supply_notifier);
 
-static struct device_type power_supply_dev_type;
+extern const struct attribute_group *power_supply_attr_groups[];
+static struct device_type power_supply_dev_type = {
+       .groups = power_supply_attr_groups,
+};
 
 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
                                         struct power_supply *supply)
@@ -637,7 +640,7 @@ static int __init power_supply_class_init(void)
                return PTR_ERR(power_supply_class);
 
        power_supply_class->dev_uevent = power_supply_uevent;
-       power_supply_init_attrs(&power_supply_dev_type);
+       power_supply_init_attrs();
 
        return 0;
 }
index 62653f50a524537c7e2f45dca9f22657417146ed..d0bb48531ff7903c50f5168f7b526f8c0cae1a1a 100644 (file)
@@ -238,17 +238,15 @@ static struct attribute_group power_supply_attr_group = {
        .is_visible = power_supply_attr_is_visible,
 };
 
-static const struct attribute_group *power_supply_attr_groups[] = {
+const struct attribute_group *power_supply_attr_groups[] = {
        &power_supply_attr_group,
        NULL,
 };
 
-void power_supply_init_attrs(struct device_type *dev_type)
+void power_supply_init_attrs(void)
 {
        int i;
 
-       dev_type->groups = power_supply_attr_groups;
-
        for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
                __power_supply_attrs[i] = &power_supply_attrs[i].attr;
 }
index 84419af16f777095687eddb59d74c7ca6762e3bb..268ede8c778cde33a26fbe255689e5b6285538d1 100644 (file)
@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
        struct device_attribute name_attr;
 };
 
+static ssize_t show_constraint_name(struct device *dev,
+                               struct device_attribute *dev_attr,
+                               char *buf);
+
 static struct powercap_constraint_attr
-                               constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
+                               constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
+       [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
+               .power_limit_attr = {
+                       .attr = {
+                               .name   = NULL,
+                               .mode   = S_IWUSR | S_IRUGO
+                       },
+                       .show   = show_constraint_power_limit_uw,
+                       .store  = store_constraint_power_limit_uw
+               },
+
+               .time_window_attr = {
+                       .attr = {
+                               .name   = NULL,
+                               .mode   = S_IWUSR | S_IRUGO
+                       },
+                       .show   = show_constraint_time_window_us,
+                       .store  = store_constraint_time_window_us
+               },
+
+               .max_power_attr = {
+                       .attr = {
+                               .name   = NULL,
+                               .mode   = S_IRUGO
+                       },
+                       .show   = show_constraint_max_power_uw,
+                       .store  = NULL
+               },
+
+               .min_power_attr = {
+                       .attr = {
+                               .name   = NULL,
+                               .mode   = S_IRUGO
+                       },
+                       .show   = show_constraint_min_power_uw,
+                       .store  = NULL
+               },
+
+               .max_time_window_attr = {
+                       .attr = {
+                               .name   = NULL,
+                               .mode   = S_IRUGO
+                       },
+                       .show   = show_constraint_max_time_window_us,
+                       .store  = NULL
+               },
+
+               .min_time_window_attr = {
+                       .attr = {
+                               .name   = NULL,
+                               .mode   = S_IRUGO
+                       },
+                       .show   = show_constraint_min_time_window_us,
+                       .store  = NULL
+               },
+
+               .name_attr = {
+                       .attr = {
+                               .name   = NULL,
+                               .mode   = S_IRUGO
+                       },
+                       .show   = show_constraint_name,
+                       .store  = NULL
+               }
+       }
+};
 
 /* A list of powercap control_types */
 static LIST_HEAD(powercap_cntrl_list);
@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
 }
 
 static int create_constraint_attribute(int id, const char *name,
-                               int mode,
-                               struct device_attribute *dev_attr,
-                               ssize_t (*show)(struct device *,
-                                       struct device_attribute *, char *),
-                               ssize_t (*store)(struct device *,
-                                       struct device_attribute *,
-                               const char *, size_t)
-                               )
+                               struct device_attribute *dev_attr)
 {
+       name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
 
-       dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
-                                                               id, name);
-       if (!dev_attr->attr.name)
+       if (!name)
                return -ENOMEM;
-       dev_attr->attr.mode = mode;
-       dev_attr->show = show;
-       dev_attr->store = store;
+
+       pax_open_kernel();
+       *(const char **)&dev_attr->attr.name = name;
+       pax_close_kernel();
 
        return 0;
 }
@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
 
        for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
                ret = create_constraint_attribute(i, "power_limit_uw",
-                                       S_IWUSR | S_IRUGO,
-                                       &constraint_attrs[i].power_limit_attr,
-                                       show_constraint_power_limit_uw,
-                                       store_constraint_power_limit_uw);
+                                       &constraint_attrs[i].power_limit_attr);
                if (ret)
                        goto err_alloc;
                ret = create_constraint_attribute(i, "time_window_us",
-                                       S_IWUSR | S_IRUGO,
-                                       &constraint_attrs[i].time_window_attr,
-                                       show_constraint_time_window_us,
-                                       store_constraint_time_window_us);
+                                       &constraint_attrs[i].time_window_attr);
                if (ret)
                        goto err_alloc;
-               ret = create_constraint_attribute(i, "name", S_IRUGO,
-                               &constraint_attrs[i].name_attr,
-                               show_constraint_name,
-                               NULL);
+               ret = create_constraint_attribute(i, "name",
+                               &constraint_attrs[i].name_attr);
                if (ret)
                        goto err_alloc;
-               ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
-                               &constraint_attrs[i].max_power_attr,
-                               show_constraint_max_power_uw,
-                               NULL);
+               ret = create_constraint_attribute(i, "max_power_uw",
+                               &constraint_attrs[i].max_power_attr);
                if (ret)
                        goto err_alloc;
-               ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
-                               &constraint_attrs[i].min_power_attr,
-                               show_constraint_min_power_uw,
-                               NULL);
+               ret = create_constraint_attribute(i, "min_power_uw",
+                               &constraint_attrs[i].min_power_attr);
                if (ret)
                        goto err_alloc;
                ret = create_constraint_attribute(i, "max_time_window_us",
-                               S_IRUGO,
-                               &constraint_attrs[i].max_time_window_attr,
-                               show_constraint_max_time_window_us,
-                               NULL);
+                               &constraint_attrs[i].max_time_window_attr);
                if (ret)
                        goto err_alloc;
                ret = create_constraint_attribute(i, "min_time_window_us",
-                               S_IRUGO,
-                               &constraint_attrs[i].min_time_window_attr,
-                               show_constraint_min_time_window_us,
-                               NULL);
+                               &constraint_attrs[i].min_time_window_attr);
                if (ret)
                        goto err_alloc;
 
@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
                power_zone->zone_dev_attrs[count++] =
                                        &dev_attr_max_energy_range_uj.attr;
        if (power_zone->ops->get_energy_uj) {
+               pax_open_kernel();
                if (power_zone->ops->reset_energy_uj)
-                       dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
+                       *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
                else
-                       dev_attr_energy_uj.attr.mode = S_IRUGO;
+                       *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
+               pax_close_kernel();
                power_zone->zone_dev_attrs[count++] =
                                        &dev_attr_energy_uj.attr;
        }
index 9c5d41421b6510f55298b70020f33578e82e97e7..c7900ce54900c321d81f1c99c105887d526b46c0 100644 (file)
@@ -51,7 +51,7 @@ struct ptp_clock {
        struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
        wait_queue_head_t tsev_wq;
        int defunct; /* tells readers to go away when clock is being removed */
-       struct device_attribute *pin_dev_attr;
+       device_attribute_no_const *pin_dev_attr;
        struct attribute **pin_attr;
        struct attribute_group pin_attr_group;
 };
index 302e626fe6b01777523c371e2760ee8f48acc68b..12579afe50ac882e83b463dd8a2a63e157c0c2dd 100644 (file)
@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
                goto no_pin_attr;
 
        for (i = 0; i < n_pins; i++) {
-               struct device_attribute *da = &ptp->pin_dev_attr[i];
+               device_attribute_no_const *da = &ptp->pin_dev_attr[i];
                sysfs_attr_init(&da->attr);
                da->attr.name = info->pin_config[i].name;
                da->attr.mode = 0644;
index 9c48fb32f6601bf4065db65cd2a099bc57f3812d..5b494fac8376a6b56e7ff27595f611af4c5f306f 100644 (file)
@@ -3587,7 +3587,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
 {
        const struct regulation_constraints *constraints = NULL;
        const struct regulator_init_data *init_data;
-       static atomic_t regulator_no = ATOMIC_INIT(0);
+       static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
        struct regulator_dev *rdev;
        struct device *dev;
        int ret, i;
@@ -3661,7 +3661,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
        rdev->dev.class = &regulator_class;
        rdev->dev.parent = dev;
        dev_set_name(&rdev->dev, "regulator.%d",
-                    atomic_inc_return(&regulator_no) - 1);
+                    atomic_inc_return_unchecked(&regulator_no) - 1);
        ret = device_register(&rdev->dev);
        if (ret != 0) {
                put_device(&rdev->dev);
index 7eee2ca1854183b05e4d4b08180020afc7337033..402451302bf4614b7208c33516675049edbe4ead 100644 (file)
@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
                max8660->shadow_regs[MAX8660_OVER1] = 5;
        } else {
                /* Otherwise devices can be toggled via software */
-               max8660_dcdc_ops.enable = max8660_dcdc_enable;
-               max8660_dcdc_ops.disable = max8660_dcdc_disable;
+               pax_open_kernel();
+               *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
+               *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
+               pax_close_kernel();
        }
 
        /*
index c3d55c2db593b8f37a253c4a07bd21130db99353..0dddfe6bc8a19fe46653dbbabb4a90bcb11c4a57 100644 (file)
@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
        if (!pdata || !pdata->enable_ext_control) {
                max->desc.enable_reg = MAX8973_VOUT;
                max->desc.enable_mask = MAX8973_VOUT_ENABLE;
-               max->ops.enable = regulator_enable_regmap;
-               max->ops.disable = regulator_disable_regmap;
-               max->ops.is_enabled = regulator_is_enabled_regmap;
+               pax_open_kernel();
+               *(void **)&max->ops.enable = regulator_enable_regmap;
+               *(void **)&max->ops.disable = regulator_disable_regmap;
+               *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
+               pax_close_kernel();
        }
 
        if (pdata) {
index 0d17c92068162cc1c0810150716985e9812615c8..a29f627502f03ae3685a65cb91dd9edbb88411d8 100644 (file)
@@ -584,10 +584,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
        mc13xxx_unlock(mc13892);
 
        /* update mc13892_vcam ops */
-       memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
+       pax_open_kernel();
+       memcpy((void *)&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
                                                sizeof(struct regulator_ops));
-       mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
-       mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
+       *(void **)&mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
+       *(void **)&mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
+       pax_close_kernel();
        mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops;
 
        mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
index 5b2e76159b419854ca8f69b8c3f99919fbab319f..c8c8a4a5226c08dd16e10afd92bc4fba0d2ad6cf 100644 (file)
@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
        hpet_rtc_timer_init();
 
        /* export at least the first block of NVRAM */
-       nvram.size = address_space - NVRAM_OFFSET;
+       pax_open_kernel();
+       *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
+       pax_close_kernel();
        retval = sysfs_create_bin_file(&dev->kobj, &nvram);
        if (retval < 0) {
                dev_dbg(dev, "can't create nvram file? %d\n", retval);
index d049393692517bfea7229f6f0c673de3d5f57847..bb20be0f2e22317ad1f22c4d70c34dc1ba6dcde8 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/module.h>
 #include <linux/rtc.h>
 #include <linux/sched.h>
+#include <linux/grsecurity.h>
 #include "rtc-core.h"
 
 static dev_t rtc_devt;
@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
                if (copy_from_user(&tm, uarg, sizeof(tm)))
                        return -EFAULT;
 
+               gr_log_timechange();
+
                return rtc_set_time(rtc, &tm);
 
        case RTC_PIE_ON:
index 4ffabb322a9a55069a6af31fc82fe5d9da83b9bd..1f87fca06cd2f7fdd034f5d456b87de80e37f130 100644 (file)
@@ -107,7 +107,7 @@ struct ds1307 {
        u8                      offset; /* register's offset */
        u8                      regs[11];
        u16                     nvram_offset;
-       struct bin_attribute    *nvram;
+       bin_attribute_no_const  *nvram;
        enum ds_type            type;
        unsigned long           flags;
 #define HAS_NVRAM      0               /* bit 0 == sysfs file active */
index 90abb5bd589c8e2998594c7c3312e3f4c314cd47..e0bf6ddeab377f33ee21ebb791a78f96b8655687 100644 (file)
@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
        if (IS_ERR(m48t59->rtc))
                return PTR_ERR(m48t59->rtc);
 
-       m48t59_nvram_attr.size = pdata->offset;
+       pax_open_kernel();
+       *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
+       pax_close_kernel();
 
        ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
        if (ret)
index e693af6e59300aef03127dcd6c2d62e000e1c1c9..2e525b6ed9ddd6da3cb33946ed5a05d2527b1e9a 100644 (file)
@@ -36,7 +36,7 @@ struct bfa_iotag_s {
 
 struct bfa_itn_s {
        bfa_isr_func_t isr;
-};
+} __no_const;
 
 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
                void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
index 0f19455951ec37a4d165e9b0bcc03b5c77f9db50..ef7adb555cb5eeca514f74caacb0c83a5f63dd82 100644 (file)
@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
 
 static struct bfa_fcs_mod_s fcs_modules[] = {
-       { bfa_fcs_port_attach, NULL, NULL },
-       { bfa_fcs_uf_attach, NULL, NULL },
-       { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
-         bfa_fcs_fabric_modexit },
+       {
+               .attach = bfa_fcs_port_attach,
+               .modinit = NULL,
+               .modexit = NULL
+       },
+       {
+               .attach = bfa_fcs_uf_attach,
+               .modinit = NULL,
+               .modexit = NULL
+       },
+       {
+               .attach = bfa_fcs_fabric_attach,
+               .modinit = bfa_fcs_fabric_modinit,
+               .modexit = bfa_fcs_fabric_modexit
+       },
 };
 
 /*
index ff75ef8917551b35a61ff1e2eea41239b76b8332..2dfe00acbbb27bcaecd269128c7b90d33b59a98e 100644 (file)
@@ -89,15 +89,26 @@ static struct {
        void            (*offline) (struct bfa_fcs_lport_s *port);
 } __port_action[] = {
        {
-       bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
-                       bfa_fcs_lport_unknown_offline}, {
-       bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
-                       bfa_fcs_lport_fab_offline}, {
-       bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
-                       bfa_fcs_lport_n2n_offline}, {
-       bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
-                       bfa_fcs_lport_loop_offline},
-       };
+               .init = bfa_fcs_lport_unknown_init,
+               .online = bfa_fcs_lport_unknown_online,
+               .offline = bfa_fcs_lport_unknown_offline
+       },
+       {
+               .init = bfa_fcs_lport_fab_init,
+               .online = bfa_fcs_lport_fab_online,
+               .offline = bfa_fcs_lport_fab_offline
+       },
+       {
+               .init = bfa_fcs_lport_n2n_init,
+               .online = bfa_fcs_lport_n2n_online,
+               .offline = bfa_fcs_lport_n2n_offline
+       },
+       {
+               .init = bfa_fcs_lport_loop_init,
+               .online = bfa_fcs_lport_loop_online,
+               .offline = bfa_fcs_lport_loop_offline
+       },
+};
 
 /*
  *  fcs_port_sm FCS logical port state machine
index a38aafa030b35256ede14da888f421d27dc4166d..fe8f03b796b47d44bbbe8fbb8a40aafdf2b590f9 100644 (file)
@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
        bfa_ioc_disable_cbfn_t  disable_cbfn;
        bfa_ioc_hbfail_cbfn_t   hbfail_cbfn;
        bfa_ioc_reset_cbfn_t    reset_cbfn;
-};
+} __no_const;
 
 /*
  * IOC event notification mechanism.
@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
        void            (*ioc_set_alt_fwstate)  (struct bfa_ioc_s *ioc,
                                        enum bfi_ioc_state fwstate);
        enum bfi_ioc_state      (*ioc_get_alt_fwstate)  (struct bfa_ioc_s *ioc);
-};
+} __no_const;
 
 /*
  * Queue element to wait for room in request queue. FIFO order is
index a14c784ff3fc2d06338c5aa011f1e035afcd7f05..6de679071a406d454dbfb0f539904df03b97f885 100644 (file)
@@ -78,12 +78,12 @@ enum {
                                                                        \
        extern struct bfa_module_s hal_mod_ ## __mod;                   \
        struct bfa_module_s hal_mod_ ## __mod = {                       \
-               bfa_ ## __mod ## _meminfo,                              \
-               bfa_ ## __mod ## _attach,                               \
-               bfa_ ## __mod ## _detach,                               \
-               bfa_ ## __mod ## _start,                                \
-               bfa_ ## __mod ## _stop,                                 \
-               bfa_ ## __mod ## _iocdisable,                           \
+               .meminfo = bfa_ ## __mod ## _meminfo,                   \
+               .attach = bfa_ ## __mod ## _attach,                     \
+               .detach = bfa_ ## __mod ## _detach,                     \
+               .start = bfa_ ## __mod ## _start,                       \
+               .stop = bfa_ ## __mod ## _stop,                         \
+               .iocdisable = bfa_ ## __mod ## _iocdisable,             \
        }
 
 #define BFA_CACHELINE_SZ       (256)
index 045c4e11ee5468471dbd26ddd6e83f4f3637e9f5..13de803a98c053f8527f678d0f66ad641b6db9a5 100644 (file)
@@ -33,8 +33,8 @@
  */
 #include "libfcoe.h"
 
-static atomic_t ctlr_num;
-static atomic_t fcf_num;
+static atomic_unchecked_t ctlr_num;
+static atomic_unchecked_t fcf_num;
 
 /*
  * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
        if (!ctlr)
                goto out;
 
-       ctlr->id = atomic_inc_return(&ctlr_num) - 1;
+       ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
        ctlr->f = f;
        ctlr->mode = FIP_CONN_TYPE_FABRIC;
        INIT_LIST_HEAD(&ctlr->fcfs);
@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
        fcf->dev.parent = &ctlr->dev;
        fcf->dev.bus = &fcoe_bus_type;
        fcf->dev.type = &fcoe_fcf_device_type;
-       fcf->id = atomic_inc_return(&fcf_num) - 1;
+       fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
        fcf->state = FCOE_FCF_STATE_UNKNOWN;
 
        fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
 {
        int error;
 
-       atomic_set(&ctlr_num, 0);
-       atomic_set(&fcf_num, 0);
+       atomic_set_unchecked(&ctlr_num, 0);
+       atomic_set_unchecked(&fcf_num, 0);
 
        error = bus_register(&fcoe_bus_type);
        if (error)
index 8bb173e01084efb40b80121cda098651375ed3ce..20236b4036e45ba639f25f2b7ca9b1f17ed605af 100644 (file)
@@ -42,7 +42,7 @@
 #include "scsi_logging.h"
 
 
-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0);    /* host_no for next new host */
+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0);  /* host_no for next new host */
 
 
 static void scsi_host_cls_release(struct device *dev)
@@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
         * subtract one because we increment first then return, but we need to
         * know what the next host number was before increment
         */
-       shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
+       shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
        shost->dma_channel = 0xff;
 
        /* These three are default values which can be overridden */
index 6bb4611b238a8421813ab761ac1ecaa53a20cd6d..0203251481c273fe430da8db3b9a4b34e61aa3b4 100644 (file)
@@ -701,10 +701,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
        struct reply_queue_buffer *rq = &h->reply_queue[q];
 
        if (h->transMethod & CFGTBL_Trans_io_accel1)
-               return h->access.command_completed(h, q);
+               return h->access->command_completed(h, q);
 
        if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
-               return h->access.command_completed(h, q);
+               return h->access->command_completed(h, q);
 
        if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
                a = rq->head[rq->current_entry];
@@ -5360,7 +5360,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
        while (!list_empty(&h->reqQ)) {
                c = list_entry(h->reqQ.next, struct CommandList, list);
                /* can't do anything if fifo is full */
-               if ((h->access.fifo_full(h))) {
+               if ((h->access->fifo_full(h))) {
                        h->fifo_recently_full = 1;
                        dev_warn(&h->pdev->dev, "fifo full\n");
                        break;
@@ -5376,7 +5376,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
                atomic_inc(&h->commands_outstanding);
                spin_unlock_irqrestore(&h->lock, *flags);
                /* Tell the controller execute command */
-               h->access.submit_command(h, c);
+               h->access->submit_command(h, c);
                spin_lock_irqsave(&h->lock, *flags);
        }
 }
@@ -5392,17 +5392,17 @@ static void lock_and_start_io(struct ctlr_info *h)
 
 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
 {
-       return h->access.command_completed(h, q);
+       return h->access->command_completed(h, q);
 }
 
 static inline bool interrupt_pending(struct ctlr_info *h)
 {
-       return h->access.intr_pending(h);
+       return h->access->intr_pending(h);
 }
 
 static inline long interrupt_not_for_us(struct ctlr_info *h)
 {
-       return (h->access.intr_pending(h) == 0) ||
+       return (h->access->intr_pending(h) == 0) ||
                (h->interrupts_enabled == 0);
 }
 
@@ -6343,7 +6343,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
        if (prod_index < 0)
                return -ENODEV;
        h->product_name = products[prod_index].product_name;
-       h->access = *(products[prod_index].access);
+       h->access = products[prod_index].access;
 
        pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
                               PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
@@ -6690,7 +6690,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
        unsigned long flags;
        u32 lockup_detected;
 
-       h->access.set_intr_mask(h, HPSA_INTR_OFF);
+       h->access->set_intr_mask(h, HPSA_INTR_OFF);
        spin_lock_irqsave(&h->lock, flags);
        lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
        if (!lockup_detected) {
@@ -6937,7 +6937,7 @@ reinit_after_soft_reset:
        }
 
        /* make sure the board interrupts are off */
-       h->access.set_intr_mask(h, HPSA_INTR_OFF);
+       h->access->set_intr_mask(h, HPSA_INTR_OFF);
 
        if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
                goto clean2;
@@ -6972,7 +6972,7 @@ reinit_after_soft_reset:
                 * fake ones to scoop up any residual completions.
                 */
                spin_lock_irqsave(&h->lock, flags);
-               h->access.set_intr_mask(h, HPSA_INTR_OFF);
+               h->access->set_intr_mask(h, HPSA_INTR_OFF);
                spin_unlock_irqrestore(&h->lock, flags);
                free_irqs(h);
                rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
@@ -6991,9 +6991,9 @@ reinit_after_soft_reset:
                dev_info(&h->pdev->dev, "Board READY.\n");
                dev_info(&h->pdev->dev,
                        "Waiting for stale completions to drain.\n");
-               h->access.set_intr_mask(h, HPSA_INTR_ON);
+               h->access->set_intr_mask(h, HPSA_INTR_ON);
                msleep(10000);
-               h->access.set_intr_mask(h, HPSA_INTR_OFF);
+               h->access->set_intr_mask(h, HPSA_INTR_OFF);
 
                rc = controller_reset_failed(h->cfgtable);
                if (rc)
@@ -7019,7 +7019,7 @@ reinit_after_soft_reset:
        h->drv_req_rescan = 0;
 
        /* Turn the interrupts on so we can service requests */
-       h->access.set_intr_mask(h, HPSA_INTR_ON);
+       h->access->set_intr_mask(h, HPSA_INTR_ON);
 
        hpsa_hba_inquiry(h);
        hpsa_register_scsi(h);  /* hook ourselves into SCSI subsystem */
@@ -7084,7 +7084,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
         * To write all data in the battery backed cache to disks
         */
        hpsa_flush_cache(h);
-       h->access.set_intr_mask(h, HPSA_INTR_OFF);
+       h->access->set_intr_mask(h, HPSA_INTR_OFF);
        hpsa_free_irqs_and_disable_msix(h);
 }
 
@@ -7202,7 +7202,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
                                CFGTBL_Trans_enable_directed_msix |
                        (trans_support & (CFGTBL_Trans_io_accel1 |
                                CFGTBL_Trans_io_accel2));
-       struct access_method access = SA5_performant_access;
+       struct access_method *access = &SA5_performant_access;
 
        /* This is a bit complicated.  There are 8 registers on
         * the controller which we write to to tell it 8 different
@@ -7244,7 +7244,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
         * perform the superfluous readl() after each command submission.
         */
        if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
-               access = SA5_performant_access_no_read;
+               access = &SA5_performant_access_no_read;
 
        /* Controller spec: zero out this buffer. */
        for (i = 0; i < h->nreply_queues; i++)
@@ -7274,12 +7274,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
         * enable outbound interrupt coalescing in accelerator mode;
         */
        if (trans_support & CFGTBL_Trans_io_accel1) {
-               access = SA5_ioaccel_mode1_access;
+               access = &SA5_ioaccel_mode1_access;
                writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
                writel(4, &h->cfgtable->HostWrite.CoalIntCount);
        } else {
                if (trans_support & CFGTBL_Trans_io_accel2) {
-                       access = SA5_ioaccel_mode2_access;
+                       access = &SA5_ioaccel_mode2_access;
                        writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
                        writel(4, &h->cfgtable->HostWrite.CoalIntCount);
                }
index 8e06d9e280ec7dc033f3bf56e8c268393b6c3275..396e0a193bbef210d6f0ad1d5c7c259f8ccb9524 100644 (file)
@@ -127,7 +127,7 @@ struct ctlr_info {
        unsigned int msix_vector;
        unsigned int msi_vector;
        int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
-       struct access_method access;
+       struct access_method *access;
        char hba_mode_enabled;
 
        /* queue and queue Info */
@@ -523,43 +523,43 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
 }
 
 static struct access_method SA5_access = {
-       SA5_submit_command,
-       SA5_intr_mask,
-       SA5_fifo_full,
-       SA5_intr_pending,
-       SA5_completed,
+       .submit_command = SA5_submit_command,
+       .set_intr_mask = SA5_intr_mask,
+       .fifo_full = SA5_fifo_full,
+       .intr_pending = SA5_intr_pending,
+       .command_completed = SA5_completed,
 };
 
 static struct access_method SA5_ioaccel_mode1_access = {
-       SA5_submit_command,
-       SA5_performant_intr_mask,
-       SA5_fifo_full,
-       SA5_ioaccel_mode1_intr_pending,
-       SA5_ioaccel_mode1_completed,
+       .submit_command = SA5_submit_command,
+       .set_intr_mask = SA5_performant_intr_mask,
+       .fifo_full = SA5_fifo_full,
+       .intr_pending = SA5_ioaccel_mode1_intr_pending,
+       .command_completed = SA5_ioaccel_mode1_completed,
 };
 
 static struct access_method SA5_ioaccel_mode2_access = {
-       SA5_submit_command_ioaccel2,
-       SA5_performant_intr_mask,
-       SA5_fifo_full,
-       SA5_performant_intr_pending,
-       SA5_performant_completed,
+       .submit_command = SA5_submit_command_ioaccel2,
+       .set_intr_mask = SA5_performant_intr_mask,
+       .fifo_full = SA5_fifo_full,
+       .intr_pending = SA5_performant_intr_pending,
+       .command_completed = SA5_performant_completed,
 };
 
 static struct access_method SA5_performant_access = {
-       SA5_submit_command,
-       SA5_performant_intr_mask,
-       SA5_fifo_full,
-       SA5_performant_intr_pending,
-       SA5_performant_completed,
+       .submit_command = SA5_submit_command,
+       .set_intr_mask = SA5_performant_intr_mask,
+       .fifo_full = SA5_fifo_full,
+       .intr_pending = SA5_performant_intr_pending,
+       .command_completed = SA5_performant_completed,
 };
 
 static struct access_method SA5_performant_access_no_read = {
-       SA5_submit_command_no_read,
-       SA5_performant_intr_mask,
-       SA5_fifo_full,
-       SA5_performant_intr_pending,
-       SA5_performant_completed,
+       .submit_command = SA5_submit_command_no_read,
+       .set_intr_mask = SA5_performant_intr_mask,
+       .fifo_full = SA5_fifo_full,
+       .intr_pending = SA5_performant_intr_pending,
+       .command_completed = SA5_performant_completed,
 };
 
 struct board_type {
index 1b3a094734522803c7f3fecd39da5c297a1feb43..068e683913aa00e44c20c963e447382184e04765 100644 (file)
@@ -101,12 +101,12 @@ struct fc_exch_mgr {
        u16             pool_max_index;
 
        struct {
-               atomic_t no_free_exch;
-               atomic_t no_free_exch_xid;
-               atomic_t xid_not_found;
-               atomic_t xid_busy;
-               atomic_t seq_not_found;
-               atomic_t non_bls_resp;
+               atomic_unchecked_t no_free_exch;
+               atomic_unchecked_t no_free_exch_xid;
+               atomic_unchecked_t xid_not_found;
+               atomic_unchecked_t xid_busy;
+               atomic_unchecked_t seq_not_found;
+               atomic_unchecked_t non_bls_resp;
        } stats;
 };
 
@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
        /* allocate memory for exchange */
        ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
        if (!ep) {
-               atomic_inc(&mp->stats.no_free_exch);
+               atomic_inc_unchecked(&mp->stats.no_free_exch);
                goto out;
        }
        memset(ep, 0, sizeof(*ep));
@@ -874,7 +874,7 @@ out:
        return ep;
 err:
        spin_unlock_bh(&pool->lock);
-       atomic_inc(&mp->stats.no_free_exch_xid);
+       atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
        mempool_free(ep, mp->ep_pool);
        return NULL;
 }
@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
                xid = ntohs(fh->fh_ox_id);      /* we originated exch */
                ep = fc_exch_find(mp, xid);
                if (!ep) {
-                       atomic_inc(&mp->stats.xid_not_found);
+                       atomic_inc_unchecked(&mp->stats.xid_not_found);
                        reject = FC_RJT_OX_ID;
                        goto out;
                }
@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
                ep = fc_exch_find(mp, xid);
                if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
                        if (ep) {
-                               atomic_inc(&mp->stats.xid_busy);
+                               atomic_inc_unchecked(&mp->stats.xid_busy);
                                reject = FC_RJT_RX_ID;
                                goto rel;
                        }
@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
                        }
                        xid = ep->xid;  /* get our XID */
                } else if (!ep) {
-                       atomic_inc(&mp->stats.xid_not_found);
+                       atomic_inc_unchecked(&mp->stats.xid_not_found);
                        reject = FC_RJT_RX_ID;  /* XID not found */
                        goto out;
                }
@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
        } else {
                sp = &ep->seq;
                if (sp->id != fh->fh_seq_id) {
-                       atomic_inc(&mp->stats.seq_not_found);
+                       atomic_inc_unchecked(&mp->stats.seq_not_found);
                        if (f_ctl & FC_FC_END_SEQ) {
                                /*
                                 * Update sequence_id based on incoming last
@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 
        ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
        if (!ep) {
-               atomic_inc(&mp->stats.xid_not_found);
+               atomic_inc_unchecked(&mp->stats.xid_not_found);
                goto out;
        }
        if (ep->esb_stat & ESB_ST_COMPLETE) {
-               atomic_inc(&mp->stats.xid_not_found);
+               atomic_inc_unchecked(&mp->stats.xid_not_found);
                goto rel;
        }
        if (ep->rxid == FC_XID_UNKNOWN)
                ep->rxid = ntohs(fh->fh_rx_id);
        if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
-               atomic_inc(&mp->stats.xid_not_found);
+               atomic_inc_unchecked(&mp->stats.xid_not_found);
                goto rel;
        }
        if (ep->did != ntoh24(fh->fh_s_id) &&
            ep->did != FC_FID_FLOGI) {
-               atomic_inc(&mp->stats.xid_not_found);
+               atomic_inc_unchecked(&mp->stats.xid_not_found);
                goto rel;
        }
        sof = fr_sof(fp);
@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
                sp->ssb_stat |= SSB_ST_RESP;
                sp->id = fh->fh_seq_id;
        } else if (sp->id != fh->fh_seq_id) {
-               atomic_inc(&mp->stats.seq_not_found);
+               atomic_inc_unchecked(&mp->stats.seq_not_found);
                goto rel;
        }
 
@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
        sp = fc_seq_lookup_orig(mp, fp);        /* doesn't hold sequence */
 
        if (!sp)
-               atomic_inc(&mp->stats.xid_not_found);
+               atomic_inc_unchecked(&mp->stats.xid_not_found);
        else
-               atomic_inc(&mp->stats.non_bls_resp);
+               atomic_inc_unchecked(&mp->stats.non_bls_resp);
 
        fc_frame_free(fp);
 }
@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
 
        list_for_each_entry(ema, &lport->ema_list, ema_list) {
                mp = ema->mp;
-               st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
+               st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
                st->fc_no_free_exch_xid +=
-                               atomic_read(&mp->stats.no_free_exch_xid);
-               st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
-               st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
-               st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
-               st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
+                               atomic_read_unchecked(&mp->stats.no_free_exch_xid);
+               st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
+               st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
+               st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
+               st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
        }
 }
 EXPORT_SYMBOL(fc_exch_update_stats);
index 932d9cc98d2fc807c4ef0e7715fe922803bbc29c..50c7ee99468ae6fcd40aa3bc6136417e5ce77691 100644 (file)
@@ -535,7 +535,7 @@ static struct ata_port_operations sas_sata_ops = {
        .postreset              = ata_std_postreset,
        .error_handler          = ata_std_error_handler,
        .post_internal_cmd      = sas_ata_post_internal,
-       .qc_defer               = ata_std_qc_defer,
+       .qc_defer               = ata_std_qc_defer,
        .qc_prep                = ata_noop_qc_prep,
        .qc_issue               = sas_ata_qc_issue,
        .qc_fill_rtf            = sas_ata_qc_fill_rtf,
index 434e9037908ef5eb0203d0b127d0fe953ec825b3..5a4a79bc063e5e3ac1a2efbf8d5d6504542b5922 100644 (file)
@@ -430,7 +430,7 @@ struct lpfc_vport {
        struct dentry *debug_nodelist;
        struct dentry *vport_debugfs_root;
        struct lpfc_debugfs_trc *disc_trc;
-       atomic_t disc_trc_cnt;
+       atomic_unchecked_t disc_trc_cnt;
 #endif
        uint8_t stat_data_enabled;
        uint8_t stat_data_blocked;
@@ -880,8 +880,8 @@ struct lpfc_hba {
        struct timer_list fabric_block_timer;
        unsigned long bit_flags;
 #define        FABRIC_COMANDS_BLOCKED  0
-       atomic_t num_rsrc_err;
-       atomic_t num_cmd_success;
+       atomic_unchecked_t num_rsrc_err;
+       atomic_unchecked_t num_cmd_success;
        unsigned long last_rsrc_error_time;
        unsigned long last_ramp_down_time;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -916,7 +916,7 @@ struct lpfc_hba {
 
        struct dentry *debug_slow_ring_trc;
        struct lpfc_debugfs_trc *slow_ring_trc;
-       atomic_t slow_ring_trc_cnt;
+       atomic_unchecked_t slow_ring_trc_cnt;
        /* iDiag debugfs sub-directory */
        struct dentry *idiag_root;
        struct dentry *idiag_pci_cfg;
index 5633e7dadc084cc93cc11347aa831ae358910e5f..827211420dbd44fdc350911f855dd52e03056d94 100644 (file)
@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
 
 #include <linux/debugfs.h>
 
-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
 static unsigned long lpfc_debugfs_start_time = 0L;
 
 /* iDiag */
@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
        lpfc_debugfs_enable = 0;
 
        len = 0;
-       index = (atomic_read(&vport->disc_trc_cnt) + 1) &
+       index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
                (lpfc_debugfs_max_disc_trc - 1);
        for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
                dtp = vport->disc_trc + i;
@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
        lpfc_debugfs_enable = 0;
 
        len = 0;
-       index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
+       index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
                (lpfc_debugfs_max_slow_ring_trc - 1);
        for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
                dtp = phba->slow_ring_trc + i;
@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
                !vport || !vport->disc_trc)
                return;
 
-       index = atomic_inc_return(&vport->disc_trc_cnt) &
+       index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
                (lpfc_debugfs_max_disc_trc - 1);
        dtp = vport->disc_trc + index;
        dtp->fmt = fmt;
        dtp->data1 = data1;
        dtp->data2 = data2;
        dtp->data3 = data3;
-       dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
+       dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
        dtp->jif = jiffies;
 #endif
        return;
@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
                !phba || !phba->slow_ring_trc)
                return;
 
-       index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
+       index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
                (lpfc_debugfs_max_slow_ring_trc - 1);
        dtp = phba->slow_ring_trc + index;
        dtp->fmt = fmt;
        dtp->data1 = data1;
        dtp->data2 = data2;
        dtp->data3 = data3;
-       dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
+       dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
        dtp->jif = jiffies;
 #endif
        return;
@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
                                                 "slow_ring buffer\n");
                                goto debug_failed;
                        }
-                       atomic_set(&phba->slow_ring_trc_cnt, 0);
+                       atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
                        memset(phba->slow_ring_trc, 0,
                                (sizeof(struct lpfc_debugfs_trc) *
                                lpfc_debugfs_max_slow_ring_trc));
@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
                                 "buffer\n");
                goto debug_failed;
        }
-       atomic_set(&vport->disc_trc_cnt, 0);
+       atomic_set_unchecked(&vport->disc_trc_cnt, 0);
 
        snprintf(name, sizeof(name), "discovery_trace");
        vport->debug_disc_trc =
index 0b2c53af85c7cedc592c4ccd6eccfea6b5140ed6..aec2b45a0ec3b821f335b051b15b2f20c8787ede 100644 (file)
@@ -11290,8 +11290,10 @@ lpfc_init(void)
                        "misc_register returned with status %d", error);
 
        if (lpfc_enable_npiv) {
-               lpfc_transport_functions.vport_create = lpfc_vport_create;
-               lpfc_transport_functions.vport_delete = lpfc_vport_delete;
+               pax_open_kernel();
+               *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
+               *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
+               pax_close_kernel();
        }
        lpfc_transport_template =
                                fc_attach_transport(&lpfc_transport_functions);
index 4f9222eb22669b6a84f0e5cad891958e6e8c6e22..f1850e36da9d57515e9c59e64281771f03843f64 100644 (file)
@@ -261,7 +261,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
        unsigned long expires;
 
        spin_lock_irqsave(&phba->hbalock, flags);
-       atomic_inc(&phba->num_rsrc_err);
+       atomic_inc_unchecked(&phba->num_rsrc_err);
        phba->last_rsrc_error_time = jiffies;
 
        expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
@@ -303,8 +303,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
        unsigned long num_rsrc_err, num_cmd_success;
        int i;
 
-       num_rsrc_err = atomic_read(&phba->num_rsrc_err);
-       num_cmd_success = atomic_read(&phba->num_cmd_success);
+       num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
+       num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
 
        /*
         * The error and success command counters are global per
@@ -331,8 +331,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
                        }
                }
        lpfc_destroy_vport_work_array(phba, vports);
-       atomic_set(&phba->num_rsrc_err, 0);
-       atomic_set(&phba->num_cmd_success, 0);
+       atomic_set_unchecked(&phba->num_rsrc_err, 0);
+       atomic_set_unchecked(&phba->num_cmd_success, 0);
 }
 
 /**
index 6a1c036a6f3f085cabd6ad69838b64d59290507d..38e0e8dba075cb3d893d63dfb0dfc3a875bfb7ba 100644 (file)
@@ -1508,7 +1508,7 @@ _scsih_get_resync(struct device *dev)
 {
        struct scsi_device *sdev = to_scsi_device(dev);
        struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
-       static struct _raid_device *raid_device;
+       struct _raid_device *raid_device;
        unsigned long flags;
        Mpi2RaidVolPage0_t vol_pg0;
        Mpi2ConfigReply_t mpi_reply;
@@ -1560,7 +1560,7 @@ _scsih_get_state(struct device *dev)
 {
        struct scsi_device *sdev = to_scsi_device(dev);
        struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
-       static struct _raid_device *raid_device;
+       struct _raid_device *raid_device;
        unsigned long flags;
        Mpi2RaidVolPage0_t vol_pg0;
        Mpi2ConfigReply_t mpi_reply;
@@ -6602,7 +6602,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
        Mpi2EventDataIrOperationStatus_t *event_data =
                (Mpi2EventDataIrOperationStatus_t *)
                fw_event->event_data;
-       static struct _raid_device *raid_device;
+       struct _raid_device *raid_device;
        unsigned long flags;
        u16 handle;
 
@@ -7073,7 +7073,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
        u64 sas_address;
        struct _sas_device *sas_device;
        struct _sas_node *expander_device;
-       static struct _raid_device *raid_device;
+       struct _raid_device *raid_device;
        u8 retry_count;
        unsigned long flags;
 
index 8c27b6a77ec4b1ae9c914cfe15eb05d7dd0b371a..607f56e77ad075ebf47cc966f603f3932928d352 100644 (file)
@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
                res->scsi_dev = scsi_dev;
                scsi_dev->hostdata = res;
                res->change_detected = 0;
-               atomic_set(&res->read_failures, 0);
-               atomic_set(&res->write_failures, 0);
+               atomic_set_unchecked(&res->read_failures, 0);
+               atomic_set_unchecked(&res->write_failures, 0);
                rc = 0;
        }
        spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
@@ -2646,9 +2646,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
 
        /* If this was a SCSI read/write command keep count of errors */
        if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
-               atomic_inc(&res->read_failures);
+               atomic_inc_unchecked(&res->read_failures);
        else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
-               atomic_inc(&res->write_failures);
+               atomic_inc_unchecked(&res->write_failures);
 
        if (!RES_IS_GSCSI(res->cfg_entry) &&
                masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
@@ -3474,7 +3474,7 @@ static int pmcraid_queuecommand_lck(
         * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
         * hrrq_id assigned here in queuecommand
         */
-       ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
+       ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
                          pinstance->num_hrrq;
        cmd->cmd_done = pmcraid_io_done;
 
@@ -3788,7 +3788,7 @@ static long pmcraid_ioctl_passthrough(
         * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
         * hrrq_id assigned here in queuecommand
         */
-       ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
+       ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
                          pinstance->num_hrrq;
 
        if (request_size) {
@@ -4426,7 +4426,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
 
        pinstance = container_of(workp, struct pmcraid_instance, worker_q);
        /* add resources only after host is added into system */
-       if (!atomic_read(&pinstance->expose_resources))
+       if (!atomic_read_unchecked(&pinstance->expose_resources))
                return;
 
        fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
@@ -5243,8 +5243,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
        init_waitqueue_head(&pinstance->reset_wait_q);
 
        atomic_set(&pinstance->outstanding_cmds, 0);
-       atomic_set(&pinstance->last_message_id, 0);
-       atomic_set(&pinstance->expose_resources, 0);
+       atomic_set_unchecked(&pinstance->last_message_id, 0);
+       atomic_set_unchecked(&pinstance->expose_resources, 0);
 
        INIT_LIST_HEAD(&pinstance->free_res_q);
        INIT_LIST_HEAD(&pinstance->used_res_q);
@@ -5957,7 +5957,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
        /* Schedule worker thread to handle CCN and take care of adding and
         * removing devices to OS
         */
-       atomic_set(&pinstance->expose_resources, 1);
+       atomic_set_unchecked(&pinstance->expose_resources, 1);
        schedule_work(&pinstance->worker_q);
        return rc;
 
index e1d150f3fd4d4912c2cdc9f972550874f0fd63f7..6c6df444a63aceabc4094f8fa7473c558b345fb5 100644 (file)
@@ -748,7 +748,7 @@ struct pmcraid_instance {
        struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
 
        /* Message id as filled in last fired IOARCB, used to identify HRRQ */
-       atomic_t last_message_id;
+       atomic_unchecked_t last_message_id;
 
        /* configuration table */
        struct pmcraid_config_table *cfg_table;
@@ -777,7 +777,7 @@ struct pmcraid_instance {
        atomic_t outstanding_cmds;
 
        /* should add/delete resources to mid-layer now ?*/
-       atomic_t expose_resources;
+       atomic_unchecked_t expose_resources;
 
 
 
@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
                struct pmcraid_config_table_entry_ext cfg_entry_ext;
        };
        struct scsi_device *scsi_dev;   /* Link scsi_device structure */
-       atomic_t read_failures;         /* count of failed READ commands */
-       atomic_t write_failures;        /* count of failed WRITE commands */
+       atomic_unchecked_t read_failures;       /* count of failed READ commands */
+       atomic_unchecked_t write_failures;      /* count of failed WRITE commands */
 
        /* To indicate add/delete/modify during CCN */
        u8 change_detected;
index 82b92c414a9cfe3152933e9cfc191c79c480568c..31781718033071871a1499c2c10f16d23111624e 100644 (file)
@@ -2192,7 +2192,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
        return 0;
 }
 
-struct fc_function_template qla2xxx_transport_functions = {
+fc_function_template_no_const qla2xxx_transport_functions = {
 
        .show_host_node_name = 1,
        .show_host_port_name = 1,
@@ -2240,7 +2240,7 @@ struct fc_function_template qla2xxx_transport_functions = {
        .bsg_timeout = qla24xx_bsg_timeout,
 };
 
-struct fc_function_template qla2xxx_transport_vport_functions = {
+fc_function_template_no_const qla2xxx_transport_vport_functions = {
 
        .show_host_node_name = 1,
        .show_host_port_name = 1,
index 7686bfe9a4a9e7ac42022c99321d20e0ff4c55f5..47108931d9ccd35d5ce5cc8ef87eec7a37fcc0f4 100644 (file)
@@ -571,8 +571,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
 struct device_attribute;
 extern struct device_attribute *qla2x00_host_attrs[];
 struct fc_function_template;
-extern struct fc_function_template qla2xxx_transport_functions;
-extern struct fc_function_template qla2xxx_transport_vport_functions;
+extern fc_function_template_no_const qla2xxx_transport_functions;
+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
index cce1cbc1a9276f4492d6efa16996313c00daabd6..5b9f0fe732251fc075ed650f9905ff9e673d651b 100644 (file)
@@ -1435,8 +1435,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
                    !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
                        /* Ok, a 64bit DMA mask is applicable. */
                        ha->flags.enable_64bit_addressing = 1;
-                       ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
-                       ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
+                       pax_open_kernel();
+                       *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
+                       *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
+                       pax_close_kernel();
                        return;
                }
        }
index 8f6d0fb2cd807255a66e962c3cb7c4c8633d4d77..1b2109726b2a5f04c73a0d2ed7aa2c5b6c18330d 100644 (file)
@@ -305,7 +305,7 @@ struct ddb_entry {
                                           * (4000 only) */
        atomic_t relogin_timer;           /* Max Time to wait for
                                           * relogin to complete */
-       atomic_t relogin_retry_count;     /* Num of times relogin has been
+       atomic_unchecked_t relogin_retry_count;   /* Num of times relogin has been
                                           * retried */
        uint32_t default_time2wait;       /* Default Min time between
                                           * relogins (+aens) */
index 6d25879d87c8550893cdd8671db68d542cde64cb..3031a9f7ecc0d30b4f7e0c0a493c030703a67c2c 100644 (file)
@@ -4491,12 +4491,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
                 */
                if (!iscsi_is_session_online(cls_sess)) {
                        /* Reset retry relogin timer */
-                       atomic_inc(&ddb_entry->relogin_retry_count);
+                       atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
                        DEBUG2(ql4_printk(KERN_INFO, ha,
                                "%s: index[%d] relogin timed out-retrying"
                                " relogin (%d), retry (%d)\n", __func__,
                                ddb_entry->fw_ddb_index,
-                               atomic_read(&ddb_entry->relogin_retry_count),
+                               atomic_read_unchecked(&ddb_entry->relogin_retry_count),
                                ddb_entry->default_time2wait + 4));
                        set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
                        atomic_set(&ddb_entry->retry_relogin_timer,
@@ -6604,7 +6604,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
 
        atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
        atomic_set(&ddb_entry->relogin_timer, 0);
-       atomic_set(&ddb_entry->relogin_retry_count, 0);
+       atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
        def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
        ddb_entry->default_relogin_timeout =
                (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
index 17bb541f7cc259a8a3f52c65ec1cae5636e87f65..85f45082da9760808d2906ff274697ec8f2713a7 100644 (file)
@@ -1595,7 +1595,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
        shost = sdev->host;
        scsi_init_cmd_errh(cmd);
        cmd->result = DID_NO_CONNECT << 16;
-       atomic_inc(&cmd->device->iorequest_cnt);
+       atomic_inc_unchecked(&cmd->device->iorequest_cnt);
 
        /*
         * SCSI request completion path will do scsi_device_unbusy(),
@@ -1618,9 +1618,9 @@ static void scsi_softirq_done(struct request *rq)
 
        INIT_LIST_HEAD(&cmd->eh_entry);
 
-       atomic_inc(&cmd->device->iodone_cnt);
+       atomic_inc_unchecked(&cmd->device->iodone_cnt);
        if (cmd->result)
-               atomic_inc(&cmd->device->ioerr_cnt);
+               atomic_inc_unchecked(&cmd->device->ioerr_cnt);
 
        disposition = scsi_decide_disposition(cmd);
        if (disposition != SUCCESS &&
@@ -1661,7 +1661,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
        struct Scsi_Host *host = cmd->device->host;
        int rtn = 0;
 
-       atomic_inc(&cmd->device->iorequest_cnt);
+       atomic_inc_unchecked(&cmd->device->iorequest_cnt);
 
        /* check if the device is still usable */
        if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
index 1ac38e73df7eec896cf2835fd8588a1db5278a46..6acc65609d1b7ad2b73ca4b5464a8a8e3bf2bd5c 100644 (file)
@@ -788,7 +788,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr,      \
                    char *buf)                                          \
 {                                                                      \
        struct scsi_device *sdev = to_scsi_device(dev);                 \
-       unsigned long long count = atomic_read(&sdev->field);           \
+       unsigned long long count = atomic_read_unchecked(&sdev->field); \
        return snprintf(buf, 20, "0x%llx\n", count);                    \
 }                                                                      \
 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
index 5d6f348eb3d8590361327d21dc9ddb2eddf49d8e..18778a6be50afb82f1fde86b052f31b69a4d2d22 100644 (file)
@@ -501,7 +501,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
  * Netlink Infrastructure
  */
 
-static atomic_t fc_event_seq;
+static atomic_unchecked_t fc_event_seq;
 
 /**
  * fc_get_event_number - Obtain the next sequential FC event number
@@ -514,7 +514,7 @@ static atomic_t fc_event_seq;
 u32
 fc_get_event_number(void)
 {
-       return atomic_add_return(1, &fc_event_seq);
+       return atomic_add_return_unchecked(1, &fc_event_seq);
 }
 EXPORT_SYMBOL(fc_get_event_number);
 
@@ -658,7 +658,7 @@ static __init int fc_transport_init(void)
 {
        int error;
 
-       atomic_set(&fc_event_seq, 0);
+       atomic_set_unchecked(&fc_event_seq, 0);
 
        error = transport_class_register(&fc_host_class);
        if (error)
@@ -848,7 +848,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
        char *cp;
 
        *val = simple_strtoul(buf, &cp, 0);
-       if ((*cp && (*cp != '\n')) || (*val < 0))
+       if (*cp && (*cp != '\n'))
                return -EINVAL;
        /*
         * Check for overflow; dev_loss_tmo is u32
index 67d43e35693df9e9e119bb6a4c2faac56f1bc260..8cee73c08ae46d7f821b254dedcea9c29d662cbc 100644 (file)
@@ -79,7 +79,7 @@ struct iscsi_internal {
        struct transport_container session_cont;
 };
 
-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
 static struct workqueue_struct *iscsi_eh_timer_workq;
 
 static DEFINE_IDA(iscsi_sess_ida);
@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
        int err;
 
        ihost = shost->shost_data;
-       session->sid = atomic_add_return(1, &iscsi_session_nr);
+       session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
 
        if (target_id == ISCSI_MAX_TARGET) {
                id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
@@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
        printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
                ISCSI_TRANSPORT_VERSION);
 
-       atomic_set(&iscsi_session_nr, 0);
+       atomic_set_unchecked(&iscsi_session_nr, 0);
 
        err = class_register(&iscsi_transport_class);
        if (err)
index ae45bd99baed72662b1c528aef97f3b49e9aeb6a..c32a58600e6c4726cb098fafcc0b7f0f86b9b921 100644 (file)
@@ -35,7 +35,7 @@
 #include "scsi_priv.h"
 
 struct srp_host_attrs {
-       atomic_t next_port_id;
+       atomic_unchecked_t next_port_id;
 };
 #define to_srp_host_attrs(host)        ((struct srp_host_attrs *)(host)->shost_data)
 
@@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
        struct Scsi_Host *shost = dev_to_shost(dev);
        struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
 
-       atomic_set(&srp_host->next_port_id, 0);
+       atomic_set_unchecked(&srp_host->next_port_id, 0);
        return 0;
 }
 
@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
                          rport_fast_io_fail_timedout);
        INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
 
-       id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
+       id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
        dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
 
        transport_setup_device(&rport->dev);
index 05ea0d49a3a3ddf2f039670db88c4d9f126662f4..5af80495f53f68a274b71d4bafc89b175662aa40 100644 (file)
@@ -3006,7 +3006,7 @@ static int sd_probe(struct device *dev)
        sdkp->disk = gd;
        sdkp->index = index;
        atomic_set(&sdkp->openers, 0);
-       atomic_set(&sdkp->device->ioerr_cnt, 0);
+       atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
 
        if (!sdp->request_queue->rq_timeout) {
                if (sdp->type != TYPE_MOD)
index dbf8e777d850e0751f509c9d6aef2f15d3976d3e..0d565c7bf6acc21ccfdfa54f11b8709c3bb8a607 100644 (file)
@@ -1098,7 +1098,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
                                       sdp->disk->disk_name,
                                       MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
                                       NULL,
-                                      (char *)arg);
+                                      (char __user *)arg);
        case BLKTRACESTART:
                return blk_trace_startstop(sdp->device->request_queue, 1);
        case BLKTRACESTOP:
index 011a3363c265322631a4021b15c05938ce51c62b..fb2b7a0238a877fce03f124fc854c33add5a313b 100644 (file)
@@ -71,7 +71,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
        return i;
 }
 
-static struct bin_attribute fuse_bin_attr = {
+static bin_attribute_no_const fuse_bin_attr = {
        .attr = { .name = "fuse", .mode = S_IRUGO, },
        .read = fuse_read,
 };
index 66a70e9bc7438d0090b90a815ccea59812603110..f82cea4460b39a52a659f804eedb83bcc12db0bf 100644 (file)
@@ -2238,7 +2238,7 @@ int spi_bus_unlock(struct spi_master *master)
 EXPORT_SYMBOL_GPL(spi_bus_unlock);
 
 /* portable code must never pass more than 32 bytes */
-#define        SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)
+#define        SPI_BUFSIZ      max(32UL, SMP_CACHE_BYTES)
 
 static u8      *buf;
 
index b41429f379fef46555fa4cf7426cbc5190b5f93a..2de53739a88ffa806aebfa63de8cd35c6fe454ab 100644 (file)
@@ -25,7 +25,7 @@
 #include "timed_output.h"
 
 static struct class *timed_output_class;
-static atomic_t device_count;
+static atomic_unchecked_t device_count;
 
 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
@@ -65,7 +65,7 @@ static int create_timed_output_class(void)
                timed_output_class = class_create(THIS_MODULE, "timed_output");
                if (IS_ERR(timed_output_class))
                        return PTR_ERR(timed_output_class);
-               atomic_set(&device_count, 0);
+               atomic_set_unchecked(&device_count, 0);
                timed_output_class->dev_groups = timed_output_groups;
        }
 
@@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
        if (ret < 0)
                return ret;
 
-       tdev->index = atomic_inc_return(&device_count);
+       tdev->index = atomic_inc_return_unchecked(&device_count);
        tdev->dev = device_create(timed_output_class, NULL,
                MKDEV(0, tdev->index), NULL, "%s", tdev->name);
        if (IS_ERR(tdev->dev))
index f143cb64d69e347d531254b9897b5e01a0b1401b..6fb82553dfa385b81bb0e176ac35d2a053ae3531 100644 (file)
@@ -273,8 +273,8 @@ static void comedi_file_reset(struct file *file)
        }
        cfp->last_attached = dev->attached;
        cfp->last_detach_count = dev->detach_count;
-       ACCESS_ONCE(cfp->read_subdev) = read_s;
-       ACCESS_ONCE(cfp->write_subdev) = write_s;
+       ACCESS_ONCE_RW(cfp->read_subdev) = read_s;
+       ACCESS_ONCE_RW(cfp->write_subdev) = write_s;
 }
 
 static void comedi_file_check(struct file *file)
@@ -1885,7 +1885,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
            !(s_old->async->cmd.flags & CMDF_WRITE))
                return -EBUSY;
 
-       ACCESS_ONCE(cfp->read_subdev) = s_new;
+       ACCESS_ONCE_RW(cfp->read_subdev) = s_new;
        return 0;
 }
 
@@ -1927,7 +1927,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
            (s_old->async->cmd.flags & CMDF_WRITE))
                return -EBUSY;
 
-       ACCESS_ONCE(cfp->write_subdev) = s_new;
+       ACCESS_ONCE_RW(cfp->write_subdev) = s_new;
        return 0;
 }
 
index 001348ccacf95cacd3cca7829681089314a36e1c..cfaac8a711480c88ec291c5d5e3c92da14bff0ef 100644 (file)
@@ -44,7 +44,7 @@
 #define gdm_tty_send_control(n, r, v, d, l) (\
        n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
 
-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
 
 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
index 503b2d763595fb02c55b5a5d4bd234463e1164cf..c904931b2ce0cb3796d8722f6042e20939057618 100644 (file)
@@ -463,7 +463,7 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
 {
        struct usb_device *usbdev = line6->usbdev;
        int ret;
-       unsigned char len;
+       unsigned char *plen;
 
        /* query the serial number: */
        ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
@@ -476,27 +476,34 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
                return ret;
        }
 
+       plen = kmalloc(1, GFP_KERNEL);
+       if (plen == NULL)
+               return -ENOMEM;
+
        /* Wait for data length. We'll get 0xff until length arrives. */
        do {
                ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
                                      USB_TYPE_VENDOR | USB_RECIP_DEVICE |
                                      USB_DIR_IN,
-                                     0x0012, 0x0000, &len, 1,
+                                     0x0012, 0x0000, plen, 1,
                                      LINE6_TIMEOUT * HZ);
                if (ret < 0) {
                        dev_err(line6->ifcdev,
                                "receive length failed (error %d)\n", ret);
+                       kfree(plen);
                        return ret;
                }
-       } while (len == 0xff);
+       } while (*plen == 0xff);
 
-       if (len != datalen) {
+       if (*plen != datalen) {
                /* should be equal or something went wrong */
                dev_err(line6->ifcdev,
                        "length mismatch (expected %d, got %d)\n",
-                       (int)datalen, (int)len);
+                       (int)datalen, (int)*plen);
+               kfree(plen);
                return -EINVAL;
        }
+       kfree(plen);
 
        /* receive the result: */
        ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
@@ -520,7 +527,7 @@ int line6_write_data(struct usb_line6 *line6, int address, void *data,
 {
        struct usb_device *usbdev = line6->usbdev;
        int ret;
-       unsigned char status;
+       unsigned char *status;
 
        ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
                              USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
@@ -533,26 +540,34 @@ int line6_write_data(struct usb_line6 *line6, int address, void *data,
                return ret;
        }
 
+       status = kmalloc(1, GFP_KERNEL);
+       if (status == NULL)
+               return -ENOMEM;
+
        do {
                ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
                                      0x67,
                                      USB_TYPE_VENDOR | USB_RECIP_DEVICE |
                                      USB_DIR_IN,
                                      0x0012, 0x0000,
-                                     &status, 1, LINE6_TIMEOUT * HZ);
+                                     status, 1, LINE6_TIMEOUT * HZ);
 
                if (ret < 0) {
                        dev_err(line6->ifcdev,
                                "receiving status failed (error %d)\n", ret);
+                       kfree(status);
                        return ret;
                }
-       } while (status == 0xff);
+       } while (*status == 0xff);
 
-       if (status != 0) {
+       if (*status != 0) {
                dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
+               kfree(status);
                return -EINVAL;
        }
 
+       kfree(status);
+
        return 0;
 }
 
index 69437158d383d67ece0dd15f864354561fc43bd5..0a936327f2d98b275d556bf89fb5703ae2ac71a3 100644 (file)
@@ -11,6 +11,7 @@
  */
 
 #include <linux/wait.h>
+#include <linux/slab.h>
 #include <sound/control.h>
 
 #include "audio.h"
@@ -307,14 +308,20 @@ static void toneport_destruct(struct usb_interface *interface)
 */
 static void toneport_setup(struct usb_line6_toneport *toneport)
 {
-       int ticks;
+       int *ticks;
        struct usb_line6 *line6 = &toneport->line6;
        struct usb_device *usbdev = line6->usbdev;
        u16 idProduct = le16_to_cpu(usbdev->descriptor.idProduct);
 
+       ticks = kmalloc(sizeof(int), GFP_KERNEL);
+       if (ticks == NULL)
+               return;
+
        /* sync time on device with host: */
-       ticks = (int)get_seconds();
-       line6_write_data(line6, 0x80c6, &ticks, 4);
+       *ticks = (int)get_seconds();
+       line6_write_data(line6, 0x80c6, ticks, sizeof(int));
+
+       kfree(ticks);
 
        /* enable device: */
        toneport_send_cmd(usbdev, 0x0301, 0x0000);
index 463da076fa703ca04c7311d47f06f12b5ff596b7..e791ce99e7037d9cf08dedb24e97614138e5a522 100644 (file)
@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
        return 0;
 }
 
-sfw_test_client_ops_t brw_test_client;
-void brw_init_test_client(void)
-{
-       brw_test_client.tso_init       = brw_client_init;
-       brw_test_client.tso_fini       = brw_client_fini;
-       brw_test_client.tso_prep_rpc   = brw_client_prep_rpc;
-       brw_test_client.tso_done_rpc   = brw_client_done_rpc;
+sfw_test_client_ops_t brw_test_client = {
+       .tso_init       = brw_client_init,
+       .tso_fini       = brw_client_fini,
+       .tso_prep_rpc   = brw_client_prep_rpc,
+       .tso_done_rpc   = brw_client_done_rpc,
 };
 
 srpc_service_t brw_test_service;
index cc9d1826ae66fd62f436aa8c91c4205b4bfa00cf..8fabce3fd5802ea87a84b111194386d6389f401d 100644 (file)
@@ -1628,12 +1628,10 @@ static srpc_service_t sfw_services[] = {
 
 extern sfw_test_client_ops_t ping_test_client;
 extern srpc_service_t  ping_test_service;
-extern void ping_init_test_client(void);
 extern void ping_init_test_service(void);
 
 extern sfw_test_client_ops_t brw_test_client;
 extern srpc_service_t  brw_test_service;
-extern void brw_init_test_client(void);
 extern void brw_init_test_service(void);
 
 
@@ -1675,12 +1673,10 @@ sfw_startup (void)
        INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
        INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
 
-       brw_init_test_client();
        brw_init_test_service();
        rc = sfw_register_test(&brw_test_service, &brw_test_client);
        LASSERT (rc == 0);
 
-       ping_init_test_client();
        ping_init_test_service();
        rc = sfw_register_test(&ping_test_service, &ping_test_client);
        LASSERT (rc == 0);
index d8c0df6e68528e8ad998ebd688370106495aebf1..5041cbbc7bc35ad3d468a277c28a403458f79a15 100644 (file)
@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
        return 0;
 }
 
-sfw_test_client_ops_t ping_test_client;
-void ping_init_test_client(void)
-{
-       ping_test_client.tso_init     = ping_client_init;
-       ping_test_client.tso_fini     = ping_client_fini;
-       ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
-       ping_test_client.tso_done_rpc = ping_client_done_rpc;
-}
+sfw_test_client_ops_t ping_test_client = {
+       .tso_init     = ping_client_init,
+       .tso_fini     = ping_client_fini,
+       .tso_prep_rpc = ping_client_prep_rpc,
+       .tso_done_rpc = ping_client_done_rpc,
+};
 
 srpc_service_t ping_test_service;
 void ping_init_test_service(void)
index 83bc0a9d7d4cd380c7779f9691c31989543d50d5..12ba00ad71633633991c3d4b67bf4b4f9d2ad0fb 100644 (file)
@@ -1139,7 +1139,7 @@ struct ldlm_callback_suite {
        ldlm_completion_callback lcs_completion;
        ldlm_blocking_callback   lcs_blocking;
        ldlm_glimpse_callback    lcs_glimpse;
-};
+} __no_const;
 
 /* ldlm_lockd.c */
 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
index 2a88b806fca561631505cd9eff953a8abd2dd662..62e7e5f4b1173eb07f75673861ad4f916f65bd0b 100644 (file)
@@ -1362,7 +1362,7 @@ struct md_ops {
         * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
         * wrapper function in include/linux/obd_class.h.
         */
-};
+} __no_const;
 
 struct lsm_operations {
        void (*lsm_free)(struct lov_stripe_md *);
index a4c252febfe4ec4d1815eccff5fd877ba0d2bdc8..b21acac0ec297ee7c6108522f3e7d31b604a03c9 100644 (file)
@@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
        int added = (mode == LCK_NL);
        int overlaps = 0;
        int splitted = 0;
-       const struct ldlm_callback_suite null_cbs = { NULL };
+       const struct ldlm_callback_suite null_cbs = { };
 
        CDEBUG(D_DLMTRACE,
               "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
index 83d3f08a37b26462e31535023f7df0526c7007d8..b03adad6109ed211e32b6187ddb0d0a83f9e8c0a 100644 (file)
@@ -236,7 +236,7 @@ int proc_console_max_delay_cs(struct ctl_table *table, int write,
                              void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        int rc, max_delay_cs;
-       struct ctl_table dummy = *table;
+       ctl_table_no_const dummy = *table;
        long d;
 
        dummy.data = &max_delay_cs;
@@ -268,7 +268,7 @@ int proc_console_min_delay_cs(struct ctl_table *table, int write,
                              void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        int rc, min_delay_cs;
-       struct ctl_table dummy = *table;
+       ctl_table_no_const dummy = *table;
        long d;
 
        dummy.data = &min_delay_cs;
@@ -300,7 +300,7 @@ int proc_console_backoff(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        int rc, backoff;
-       struct ctl_table dummy = *table;
+       ctl_table_no_const dummy = *table;
 
        dummy.data = &backoff;
        dummy.proc_handler = &proc_dointvec;
index 2c4fc74505bcede6f53f28bb30e2cac934f532c8..b04ca7976245fefada9eba59d7116cbbddbc7721 100644 (file)
@@ -315,11 +315,11 @@ out:
 
 
 struct cfs_psdev_ops libcfs_psdev_ops = {
-       libcfs_psdev_open,
-       libcfs_psdev_release,
-       NULL,
-       NULL,
-       libcfs_ioctl
+       .p_open = libcfs_psdev_open,
+       .p_close = libcfs_psdev_release,
+       .p_read = NULL,
+       .p_write = NULL,
+       .p_ioctl = libcfs_ioctl
 };
 
 extern int insert_proc(void);
index fcbe836aa99776c1c518fb981ee1fd223b4840c8..8a7ada4283ec856236afa58a4cad31ecf08c7208 100644 (file)
@@ -352,14 +352,14 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
                                /* Increment RX stats for virtual ports */
                                if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
 #ifdef CONFIG_64BIT
-                                       atomic64_add(1,
+                                       atomic64_add_unchecked(1,
                                                     (atomic64_t *)&priv->stats.rx_packets);
-                                       atomic64_add(skb->len,
+                                       atomic64_add_unchecked(skb->len,
                                                     (atomic64_t *)&priv->stats.rx_bytes);
 #else
-                                       atomic_add(1,
+                                       atomic_add_unchecked(1,
                                                   (atomic_t *)&priv->stats.rx_packets);
-                                       atomic_add(skb->len,
+                                       atomic_add_unchecked(skb->len,
                                                   (atomic_t *)&priv->stats.rx_bytes);
 #endif
                                }
@@ -371,10 +371,10 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
                                           dev->name);
                                */
 #ifdef CONFIG_64BIT
-                               atomic64_add(1,
+                               atomic64_add_unchecked(1,
                                             (atomic64_t *)&priv->stats.rx_dropped);
 #else
-                               atomic_add(1,
+                               atomic_add_unchecked(1,
                                           (atomic_t *)&priv->stats.rx_dropped);
 #endif
                                dev_kfree_skb_irq(skb);
index ee321496dcddec1a54e2fb308711561e002fa028..052d18362830ac27c64633789cb68c92cd58175f 100644 (file)
@@ -241,11 +241,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
                 * since the RX tasklet also increments it.
                 */
 #ifdef CONFIG_64BIT
-               atomic64_add(rx_status.dropped_packets,
-                            (atomic64_t *)&priv->stats.rx_dropped);
+               atomic64_add_unchecked(rx_status.dropped_packets,
+                            (atomic64_unchecked_t *)&priv->stats.rx_dropped);
 #else
-               atomic_add(rx_status.dropped_packets,
-                            (atomic_t *)&priv->stats.rx_dropped);
+               atomic_add_unchecked(rx_status.dropped_packets,
+                            (atomic_unchecked_t *)&priv->stats.rx_dropped);
 #endif
        }
 
index 3b476d80f64d68430fb1c24f40a5adb5e3a0ba2f..f522d686c1f2f5c9270e7bf8f0016758e80c2be7 100644 (file)
@@ -225,7 +225,7 @@ struct hal_ops {
 
        void (*hal_notch_filter)(struct adapter *adapter, bool enable);
        void (*hal_reset_security_engine)(struct adapter *adapter);
-};
+} __no_const;
 
 enum rt_eeprom_type {
        EEPROM_93C46,
index 070cc03ce3746b6bdf65c69089619d7082710ea5..6806e373554455129bb87760d0a9b2793f805b00 100644 (file)
@@ -108,7 +108,7 @@ struct      _io_ops {
                          u8 *pmem);
        u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
                           u8 *pmem);
-};
+} __no_const;
 
 struct io_req {
        struct list_head list;
index 46dad63fa2c8729798e04b5f6a17b8a6c6ec749d..fe4acdcce922b6ba3be30499ec213f3e3378ca52 100644 (file)
@@ -226,7 +226,7 @@ struct visorchipset_busdev_notifiers {
        void (*device_resume)(ulong bus_no, ulong dev_no);
        int (*get_channel_info)(uuid_le type_uuid, ulong *min_size,
                                ulong *max_size);
-};
+} __no_const;
 
 /*  These functions live inside visorchipset, and will be called to indicate
  *  responses to specific events (by code outside of visorchipset).
@@ -241,7 +241,7 @@ struct visorchipset_busdev_responders {
        void (*device_destroy)(ulong bus_no, ulong dev_no, int response);
        void (*device_pause)(ulong bus_no, ulong dev_no, int response);
        void (*device_resume)(ulong bus_no, ulong dev_no, int response);
-};
+} __no_const;
 
 /** Register functions (in the bus driver) to get called by visorchipset
  *  whenever a bus or device appears for which this service partition is
index 9512af6a811408f169b7a6c5084dda5ce98d8dfa..045bf5a0134b020440fa33c8e872dfb048de6bd9 100644 (file)
@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
 
 #define SESSION_MAINTENANCE_INTERVAL HZ
 
-static atomic_t login_id = ATOMIC_INIT(0);
+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
 
 static void session_maintenance_work(struct work_struct *);
 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
@@ -444,7 +444,7 @@ static void sbp_management_request_login(
        login->lun = se_lun;
        login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
        login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
-       login->login_id = atomic_inc_return(&login_id);
+       login->login_id = atomic_inc_return_unchecked(&login_id);
 
        login->tgt_agt = sbp_target_agent_register(login);
        if (IS_ERR(login->tgt_agt)) {
index 58f49ff69b1424bf5feb33ed64eba495d8826851..2669604978f08601e82ea5629546ff8183bdcb46 100644 (file)
@@ -1469,7 +1469,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
        spin_lock_init(&dev->se_tmr_lock);
        spin_lock_init(&dev->qf_cmd_lock);
        sema_init(&dev->caw_sem, 1);
-       atomic_set(&dev->dev_ordered_id, 0);
+       atomic_set_unchecked(&dev->dev_ordered_id, 0);
        INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&dev->t10_pr.registration_list);
index 0adc0f6502134eb3292a7a2cab66ac75be4d6406..7757bfeaed239b2b8d784917009c7d757814977e 100644 (file)
@@ -1168,7 +1168,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
         * Used to determine when ORDERED commands should go from
         * Dormant to Active status.
         */
-       cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
+       cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
        pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
                        cmd->se_ordered_id, cmd->sam_task_attr,
                        dev->transport->name);
index 65a98a97df071cdf343776bc1e959dc9808dbbc4..d93d3a8111129acae07a39f1aa69203dd1de3927 100644 (file)
@@ -277,8 +277,10 @@ static int int3400_thermal_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, priv);
 
        if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
-               int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
-               int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
+               pax_open_kernel();
+               *(void **)&int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
+               *(void **)&int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
+               pax_close_kernel();
        }
        priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
                                                priv, &int3400_thermal_ops,
index d717f3dab6f1410fc955daefb0497c2096298b56..cae1cc3e14831545c85c35cb76cae1ffb9cf4a7c 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/export.h>
 #include <linux/string.h>
 #include <linux/thermal.h>
+#include <linux/mm.h>
 
 #include "thermal_core.h"
 
@@ -412,9 +413,11 @@ thermal_zone_of_add_sensor(struct device_node *zone,
        tz->ops = ops;
        tz->sensor_data = data;
 
-       tzd->ops->get_temp = of_thermal_get_temp;
-       tzd->ops->get_trend = of_thermal_get_trend;
-       tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
+       pax_open_kernel();
+       *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
+       *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
+       *(void **)&tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
+       pax_close_kernel();
        mutex_unlock(&tzd->lock);
 
        return tzd;
@@ -541,9 +544,11 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
                return;
 
        mutex_lock(&tzd->lock);
-       tzd->ops->get_temp = NULL;
-       tzd->ops->get_trend = NULL;
-       tzd->ops->set_emul_temp = NULL;
+       pax_open_kernel();
+       *(void **)&tzd->ops->get_temp = NULL;
+       *(void **)&tzd->ops->get_trend = NULL;
+       *(void **)&tzd->ops->set_emul_temp = NULL;
+       pax_close_kernel();
 
        tz->ops = NULL;
        tz->sensor_data = NULL;
index fd66f57390d048a049568d465f818d76847214d0..48e63764d42c2f0be2b514917f100f591e2b9fed 100644 (file)
@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
        printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
                        info->port.count);
 #endif
-       info->port.count++;
+       atomic_inc(&info->port.count);
 #ifdef CY_DEBUG_COUNT
        printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
-               current->pid, info->port.count);
+               current->pid, atomic_read(&info->port.count));
 #endif
 
        /*
@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
                for (j = 0; j < cy_card[i].nports; j++) {
                        info = &cy_card[i].ports[j];
 
-                       if (info->port.count) {
+                       if (atomic_read(&info->port.count)) {
                                /* XXX is the ldisc num worth this? */
                                struct tty_struct *tty;
                                struct tty_ldisc *ld;
index 4fcec1d793a7f6ca0b2271c5b770dbd92554f623..5a036f7cf82478e1dcc2fb938b53d40947c14267 100644 (file)
@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
 
        spin_lock_irqsave(&hp->port.lock, flags);
        /* Check and then increment for fast path open. */
-       if (hp->port.count++ > 0) {
+       if (atomic_inc_return(&hp->port.count) > 1) {
                spin_unlock_irqrestore(&hp->port.lock, flags);
                hvc_kick();
                return 0;
@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
 
        spin_lock_irqsave(&hp->port.lock, flags);
 
-       if (--hp->port.count == 0) {
+       if (atomic_dec_return(&hp->port.count) == 0) {
                spin_unlock_irqrestore(&hp->port.lock, flags);
                /* We are done with the tty pointer now. */
                tty_port_tty_set(&hp->port, NULL);
@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
                 */
                tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
        } else {
-               if (hp->port.count < 0)
+               if (atomic_read(&hp->port.count) < 0)
                        printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
-                               hp->vtermno, hp->port.count);
+                               hp->vtermno, atomic_read(&hp->port.count));
                spin_unlock_irqrestore(&hp->port.lock, flags);
        }
 }
@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
         * open->hangup case this can be called after the final close so prevent
         * that from happening for now.
         */
-       if (hp->port.count <= 0) {
+       if (atomic_read(&hp->port.count) <= 0) {
                spin_unlock_irqrestore(&hp->port.lock, flags);
                return;
        }
 
-       hp->port.count = 0;
+       atomic_set(&hp->port.count, 0);
        spin_unlock_irqrestore(&hp->port.lock, flags);
        tty_port_tty_set(&hp->port, NULL);
 
@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
                return -EPIPE;
 
        /* FIXME what's this (unprotected) check for? */
-       if (hp->port.count <= 0)
+       if (atomic_read(&hp->port.count) <= 0)
                return -EIO;
 
        spin_lock_irqsave(&hp->lock, flags);
index 81ff7e1bfb1a82ac68dd33d760b84f7edff4e9b8..dfb7b717d63fdb9257487270b636ec90e9b45375 100644 (file)
@@ -83,6 +83,7 @@
 #include <asm/hvcserver.h>
 #include <asm/uaccess.h>
 #include <asm/vio.h>
+#include <asm/local.h>
 
 /*
  * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
 
        spin_lock_irqsave(&hvcsd->lock, flags);
 
-       if (hvcsd->port.count > 0) {
+       if (atomic_read(&hvcsd->port.count) > 0) {
                spin_unlock_irqrestore(&hvcsd->lock, flags);
                printk(KERN_INFO "HVCS: vterm state unchanged.  "
                                "The hvcs device node is still in use.\n");
@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
                }
        }
 
-       hvcsd->port.count = 0;
+       atomic_set(&hvcsd->port.count, 0);
        hvcsd->port.tty = tty;
        tty->driver_data = hvcsd;
 
@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
        unsigned long flags;
 
        spin_lock_irqsave(&hvcsd->lock, flags);
-       hvcsd->port.count++;
+       atomic_inc(&hvcsd->port.count);
        hvcsd->todo_mask |= HVCS_SCHED_READ;
        spin_unlock_irqrestore(&hvcsd->lock, flags);
 
@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
        hvcsd = tty->driver_data;
 
        spin_lock_irqsave(&hvcsd->lock, flags);
-       if (--hvcsd->port.count == 0) {
+       if (atomic_dec_and_test(&hvcsd->port.count)) {
 
                vio_disable_interrupts(hvcsd->vdev);
 
@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
 
                free_irq(irq, hvcsd);
                return;
-       } else if (hvcsd->port.count < 0) {
+       } else if (atomic_read(&hvcsd->port.count) < 0) {
                printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
                                " is missmanaged.\n",
-               hvcsd->vdev->unit_address, hvcsd->port.count);
+               hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
        }
 
        spin_unlock_irqrestore(&hvcsd->lock, flags);
@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
 
        spin_lock_irqsave(&hvcsd->lock, flags);
        /* Preserve this so that we know how many kref refs to put */
-       temp_open_count = hvcsd->port.count;
+       temp_open_count = atomic_read(&hvcsd->port.count);
 
        /*
         * Don't kref put inside the spinlock because the destruction
@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
        tty->driver_data = NULL;
        hvcsd->port.tty = NULL;
 
-       hvcsd->port.count = 0;
+       atomic_set(&hvcsd->port.count, 0);
 
        /* This will drop any buffered data on the floor which is OK in a hangup
         * scenario. */
@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
         * the middle of a write operation?  This is a crummy place to do this
         * but we want to keep it all in the spinlock.
         */
-       if (hvcsd->port.count <= 0) {
+       if (atomic_read(&hvcsd->port.count) <= 0) {
                spin_unlock_irqrestore(&hvcsd->lock, flags);
                return -ENODEV;
        }
@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
 {
        struct hvcs_struct *hvcsd = tty->driver_data;
 
-       if (!hvcsd || hvcsd->port.count <= 0)
+       if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
                return 0;
 
        return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
index 41901997c0d62802b6ae0c165e87eca49c7be372..06d5bfa325a867940e84014db785187ea56468e0 100644 (file)
@@ -85,7 +85,7 @@ struct hvsi_struct {
        int n_outbuf;
        uint32_t vtermno;
        uint32_t virq;
-       atomic_t seqno; /* HVSI packet sequence number */
+       atomic_unchecked_t seqno; /* HVSI packet sequence number */
        uint16_t mctrl;
        uint8_t state;  /* HVSI protocol state */
        uint8_t flags;
@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
 
        packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
        packet.hdr.len = sizeof(struct hvsi_query_response);
-       packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+       packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
        packet.verb = VSV_SEND_VERSION_NUMBER;
        packet.u.version = HVSI_VERSION;
        packet.query_seqno = query_seqno+1;
@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
 
        packet.hdr.type = VS_QUERY_PACKET_HEADER;
        packet.hdr.len = sizeof(struct hvsi_query);
-       packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+       packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
        packet.verb = verb;
 
        pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
        int wrote;
 
        packet.hdr.type = VS_CONTROL_PACKET_HEADER,
-       packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+       packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
        packet.hdr.len = sizeof(struct hvsi_control);
        packet.verb = VSV_SET_MODEM_CTL;
        packet.mask = HVSI_TSDTR;
@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
        BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
 
        packet.hdr.type = VS_DATA_PACKET_HEADER;
-       packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+       packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
        packet.hdr.len = count + sizeof(struct hvsi_header);
        memcpy(&packet.data, buf, count);
 
@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
        struct hvsi_control packet __ALIGNED__;
 
        packet.hdr.type = VS_CONTROL_PACKET_HEADER;
-       packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+       packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
        packet.hdr.len = 6;
        packet.verb = VSV_CLOSE_PROTOCOL;
 
@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
 
        tty_port_tty_set(&hp->port, tty);
        spin_lock_irqsave(&hp->lock, flags);
-       hp->port.count++;
+       atomic_inc(&hp->port.count);
-       atomic_set(&hp->seqno, 0);
+       atomic_set_unchecked(&hp->seqno, 0);
        h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
        spin_unlock_irqrestore(&hp->lock, flags);
@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
 
        spin_lock_irqsave(&hp->lock, flags);
 
-       if (--hp->port.count == 0) {
+       if (atomic_dec_return(&hp->port.count) == 0) {
                tty_port_tty_set(&hp->port, NULL);
                hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
 
@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
 
                        spin_lock_irqsave(&hp->lock, flags);
                }
-       } else if (hp->port.count < 0)
+       } else if (atomic_read(&hp->port.count) < 0)
                printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
-                      hp - hvsi_ports, hp->port.count);
+                      hp - hvsi_ports, atomic_read(&hp->port.count));
 
        spin_unlock_irqrestore(&hp->lock, flags);
 }
@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
        tty_port_tty_set(&hp->port, NULL);
 
        spin_lock_irqsave(&hp->lock, flags);
-       hp->port.count = 0;
+       atomic_set(&hp->port.count, 0);
        hp->n_outbuf = 0;
        spin_unlock_irqrestore(&hp->lock, flags);
 }
index a270f04588d710e7dd4f716049abcfc4a3ebaae4..7c77b5d1e018c3a6d24c611e02c2f3d50d9bf337 100644 (file)
@@ -8,7 +8,7 @@
 
 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
 {
-       packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
+       packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
 
        /* Assumes that always succeeds, works in practice */
        return pv->put_chars(pv->termno, (char *)packet, packet->len);
@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
 
        /* Reset state */
        pv->established = 0;
-       atomic_set(&pv->seqno, 0);
+       atomic_set_unchecked(&pv->seqno, 0);
 
        pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
 
index 345cebb07ae7933156784e46956c78983acf2e29..d5a1e9e3b028d6681ca02ef28a6d47e40caced3c 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/tty_driver.h>
 #include <linux/tty_flip.h>
 #include <linux/uaccess.h>
+#include <asm/local.h>
 
 #include "tty.h"
 #include "network.h"
@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
                return -ENODEV;
 
        mutex_lock(&tty->ipw_tty_mutex);
-       if (tty->port.count == 0)
+       if (atomic_read(&tty->port.count) == 0)
                tty->tx_bytes_queued = 0;
 
-       tty->port.count++;
+       atomic_inc(&tty->port.count);
 
        tty->port.tty = linux_tty;
        linux_tty->driver_data = tty;
@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
 
 static void do_ipw_close(struct ipw_tty *tty)
 {
-       tty->port.count--;
-
-       if (tty->port.count == 0) {
+       if (atomic_dec_return(&tty->port.count) == 0) {
                struct tty_struct *linux_tty = tty->port.tty;
 
                if (linux_tty != NULL) {
@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
                return;
 
        mutex_lock(&tty->ipw_tty_mutex);
-       if (tty->port.count == 0) {
+       if (atomic_read(&tty->port.count) == 0) {
                mutex_unlock(&tty->ipw_tty_mutex);
                return;
        }
@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
 
        mutex_lock(&tty->ipw_tty_mutex);
 
-       if (!tty->port.count) {
+       if (!atomic_read(&tty->port.count)) {
                mutex_unlock(&tty->ipw_tty_mutex);
                return;
        }
@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
                return -ENODEV;
 
        mutex_lock(&tty->ipw_tty_mutex);
-       if (!tty->port.count) {
+       if (!atomic_read(&tty->port.count)) {
                mutex_unlock(&tty->ipw_tty_mutex);
                return -EINVAL;
        }
@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
        if (!tty)
                return -ENODEV;
 
-       if (!tty->port.count)
+       if (!atomic_read(&tty->port.count))
                return -EINVAL;
 
        room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
@@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
        if (!tty)
                return 0;
 
-       if (!tty->port.count)
+       if (!atomic_read(&tty->port.count))
                return 0;
 
        return tty->tx_bytes_queued;
@@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
        if (!tty)
                return -ENODEV;
 
-       if (!tty->port.count)
+       if (!atomic_read(&tty->port.count))
                return -EINVAL;
 
        return get_control_lines(tty);
@@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
        if (!tty)
                return -ENODEV;
 
-       if (!tty->port.count)
+       if (!atomic_read(&tty->port.count))
                return -EINVAL;
 
        return set_control_lines(tty, set, clear);
@@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
        if (!tty)
                return -ENODEV;
 
-       if (!tty->port.count)
+       if (!atomic_read(&tty->port.count))
                return -EINVAL;
 
        /* FIXME: Exactly how is the tty object locked here .. */
@@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
                                 * are gone */
                                mutex_lock(&ttyj->ipw_tty_mutex);
                        }
-                       while (ttyj->port.count)
+                       while (atomic_read(&ttyj->port.count))
                                do_ipw_close(ttyj);
                        ipwireless_disassociate_network_ttys(network,
                                                             ttyj->channel_idx);
index 14c54e0410650aa00cf1a3494fd4bb2d151ff089..1efd4f24e25401ab9140b9307fd4b578eb365af5 100644 (file)
@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
        }
 
        ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
-       ch->port.count++;
+       atomic_inc(&ch->port.count);
        tty->driver_data = ch;
        tty_port_tty_set(&ch->port, tty);
        mutex_lock(&ch->port.mutex);
index c4343764cc5b7c527b85278013c3cbbb62f1cbd8..114ce133abd55efb7fe7857f7064e31c79ed6e42 100644 (file)
@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
        spin_lock_init(&dlci->lock);
        mutex_init(&dlci->mutex);
        dlci->fifo = &dlci->_fifo;
-       if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
+       if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
                kfree(dlci);
                return NULL;
        }
@@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
        struct gsm_dlci *dlci = tty->driver_data;
        struct tty_port *port = &dlci->port;
 
-       port->count++;
+       atomic_inc(&port->count);
        tty_port_tty_set(port, tty);
 
        dlci->modem_rx = 0;
index 4ddfa60c922205513d16ed74a770eefd111fda83..1b7e1128116c22bf05797c1167307b0a3bd0e44a 100644 (file)
@@ -115,7 +115,7 @@ struct n_tty_data {
        int minimum_to_wake;
 
        /* consumer-published */
-       size_t read_tail;
+       size_t read_tail __intentional_overflow(-1);
        size_t line_start;
 
        /* protected by output lock */
@@ -2503,6 +2503,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
 {
        *ops = tty_ldisc_N_TTY;
        ops->owner = NULL;
-       ops->refcount = ops->flags = 0;
+       atomic_set(&ops->refcount, 0);
+       ops->flags = 0;
 }
 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
index 6e1f1505f04e4b0ae948dfad4be3319df1debdb7..c3ba598f2c929e55a8436f0aec5a2cb81e8c4da0 100644 (file)
@@ -850,8 +850,10 @@ static void __init unix98_pty_init(void)
                panic("Couldn't register Unix98 pts driver");
 
        /* Now create the /dev/ptmx special device */
+       pax_open_kernel();
        tty_default_fops(&ptmx_fops);
-       ptmx_fops.open = ptmx_open;
+       *(void **)&ptmx_fops.open = ptmx_open;
+       pax_close_kernel();
 
        cdev_init(&ptmx_cdev, &ptmx_fops);
        if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
index 383c4c796637713e12bede50677803a0d639a855..d408e211e5b493360c7247834b14a5e46e832de5 100644 (file)
@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
        tty->driver_data = info;
        tty_port_tty_set(port, tty);
 
-       if (port->count++ == 0) {
+       if (atomic_inc_return(&port->count) == 1) {
                atomic_inc(&rp_num_ports_open);
 
 #ifdef ROCKET_DEBUG_OPEN
@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
 #endif
        }
 #ifdef ROCKET_DEBUG_OPEN
-       printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
+       printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
 #endif
 
        /*
@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
                spin_unlock_irqrestore(&info->port.lock, flags);
                return;
        }
-       if (info->port.count)
+       if (atomic_read(&info->port.count))
                atomic_dec(&rp_num_ports_open);
        clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
        spin_unlock_irqrestore(&info->port.lock, flags);
index aa28209f44c1919c92fb2138dff48ff05a852142..e08fb857da344c877a1de22bcb7583fc7bbc703c 100644 (file)
@@ -437,7 +437,7 @@ struct ioc4_soft {
                } is_intr_info[MAX_IOC4_INTR_ENTS];
 
                /* Number of entries active in the above array */
-               atomic_t is_num_intrs;
+               atomic_unchecked_t is_num_intrs;
        } is_intr_type[IOC4_NUM_INTR_TYPES];
 
        /* is_ir_lock must be held while
@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
        BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
               || (type == IOC4_OTHER_INTR_TYPE)));
 
-       i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
+       i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
        BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
 
        /* Save off the lower level interrupt handler */
@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
 
        soft = arg;
        for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
-               num_intrs = (int)atomic_read(
+               num_intrs = (int)atomic_read_unchecked(
                                &soft->is_intr_type[intr_type].is_num_intrs);
 
                this_mir = this_ir = pending_intrs(soft, intr_type);
index 129dc5be6028178344ba81f8225a8322e7c57d1a..1da5bb8f57f271367586485dde56c709e86433c8 100644 (file)
@@ -53,7 +53,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
         * I/O utilities that messages sent to the console will automatically
         * be displayed on the dbg_io.
         */
-       dbg_io_ops->is_console = true;
+       pax_open_kernel();
+       *(int *)&dbg_io_ops->is_console = true;
+       pax_close_kernel();
 
        return 0;
 }
index a260cde743e272657673474d9612c0bf663d3995..6b2b5ce61e56cf90789edcf9cde8aa60eac4b536 100644 (file)
@@ -24,8 +24,9 @@
 #define MAX_CONFIG_LEN         40
 
 static struct kgdb_io          kgdboc_io_ops;
+static struct kgdb_io          kgdboc_io_ops_console;
 
-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
 static int configured          = -1;
 
 static char config[MAX_CONFIG_LEN];
@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
        kgdboc_unregister_kbd();
        if (configured == 1)
                kgdb_unregister_io_module(&kgdboc_io_ops);
+       else if (configured == 2)
+               kgdb_unregister_io_module(&kgdboc_io_ops_console);
 }
 
 static int configure_kgdboc(void)
@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
        int err;
        char *cptr = config;
        struct console *cons;
+       int is_console = 0;
 
        err = kgdboc_option_setup(config);
        if (err || !strlen(config) || isspace(config[0]))
                goto noconfig;
 
        err = -ENODEV;
-       kgdboc_io_ops.is_console = 0;
        kgdb_tty_driver = NULL;
 
        kgdboc_use_kms = 0;
@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
                int idx;
                if (cons->device && cons->device(cons, &idx) == p &&
                    idx == tty_line) {
-                       kgdboc_io_ops.is_console = 1;
+                       is_console = 1;
                        break;
                }
                cons = cons->next;
@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
        kgdb_tty_line = tty_line;
 
 do_register:
-       err = kgdb_register_io_module(&kgdboc_io_ops);
+       if (is_console) {
+               err = kgdb_register_io_module(&kgdboc_io_ops_console);
+               configured = 2;
+       } else {
+               err = kgdb_register_io_module(&kgdboc_io_ops);
+               configured = 1;
+       }
        if (err)
                goto noconfig;
 
@@ -205,8 +214,6 @@ do_register:
        if (err)
                goto nmi_con_failed;
 
-       configured = 1;
-
        return 0;
 
 nmi_con_failed:
@@ -223,7 +230,7 @@ noconfig:
 static int __init init_kgdboc(void)
 {
        /* Already configured? */
-       if (configured == 1)
+       if (configured >= 1)
                return 0;
 
        return configure_kgdboc();
@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
        if (config[len - 1] == '\n')
                config[len - 1] = '\0';
 
-       if (configured == 1)
+       if (configured >= 1)
                cleanup_kgdboc();
 
        /* Go and configure with the new params. */
@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
        .post_exception         = kgdboc_post_exp_handler,
 };
 
+static struct kgdb_io kgdboc_io_ops_console = {
+       .name                   = "kgdboc",
+       .read_char              = kgdboc_get_char,
+       .write_char             = kgdboc_put_char,
+       .pre_exception          = kgdboc_pre_exp_handler,
+       .post_exception         = kgdboc_post_exp_handler,
+       .is_console             = 1
+};
+
 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
 /* This is only available if kgdboc is a built in for early debugging */
 static int __init kgdboc_early_init(char *opt)
index c88b522ccd73470967a5fbd03480ce7b4e3b6d62..e7630292c87bea61dcb52f6d03fde6f7aaad307a 100644 (file)
@@ -1028,7 +1028,7 @@ static struct uart_driver msm_uart_driver = {
        .cons = MSM_CONSOLE,
 };
 
-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
 
 static const struct of_device_id msm_uartdm_table[] = {
        { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
@@ -1052,7 +1052,7 @@ static int msm_serial_probe(struct platform_device *pdev)
                line = pdev->id;
 
        if (line < 0)
-               line = atomic_inc_return(&msm_uart_next_id) - 1;
+               line = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
 
        if (unlikely(line < 0 || line >= UART_NR))
                return -ENXIO;
index 107e807225752623c7f8cae56b17f00d4d003d37..d4a02fa160fdf307d4b3dcac8b2b5bc90f1cef3d 100644 (file)
@@ -480,11 +480,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
        }
 }
 
+static int s3c64xx_serial_startup(struct uart_port *port);
 static int s3c24xx_serial_startup(struct uart_port *port)
 {
        struct s3c24xx_uart_port *ourport = to_ourport(port);
        int ret;
 
+       /* Startup sequence is different for s3c64xx and higher SoC's */
+       if (s3c24xx_serial_has_interrupt_mask(port))
+               return s3c64xx_serial_startup(port);
+
        dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
            port, (unsigned long long)port->mapbase, port->membase);
 
@@ -1169,10 +1174,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
        /* setup info for port */
        port->dev       = &platdev->dev;
 
-       /* Startup sequence is different for s3c64xx and higher SoC's */
-       if (s3c24xx_serial_has_interrupt_mask(port))
-               s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
-
        port->uartclk = 1;
 
        if (cfg->uart_flags & UPF_CONS_FLOW) {
index 984605bb5bf1d593087bfffe485323538144b2c5..e53833052464d0f95bb9947824bbcf6a8c5a7fc6 100644 (file)
@@ -1396,7 +1396,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
                state = drv->state + tty->index;
                port = &state->port;
                spin_lock_irq(&port->lock);
-               --port->count;
+               atomic_dec(&port->count);
                spin_unlock_irq(&port->lock);
                return;
        }
@@ -1406,7 +1406,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
 
        pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
 
-       if (!port->count || tty_port_close_start(port, tty, filp) == 0)
+       if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
                return;
 
        /*
@@ -1530,7 +1530,7 @@ static void uart_hangup(struct tty_struct *tty)
                uart_flush_buffer(tty);
                uart_shutdown(tty, state);
                spin_lock_irqsave(&port->lock, flags);
-               port->count = 0;
+               atomic_set(&port->count, 0);
                clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
                spin_unlock_irqrestore(&port->lock, flags);
                tty_port_tty_set(port, NULL);
@@ -1617,7 +1617,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
        pr_debug("uart_open(%d) called\n", line);
 
        spin_lock_irq(&port->lock);
-       ++port->count;
+       atomic_inc(&port->count);
        spin_unlock_irq(&port->lock);
 
        /*
index b7991707ffc0a52589a487d080595c3742ef1294..87dafd5e9320a47e55eccc052cbe083e05d2ed11 100644 (file)
@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
        
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
-                        __FILE__,__LINE__, info->device_name, info->port.count);
+                        __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
 
        if (tty_port_close_start(&info->port, tty, filp) == 0)
                goto cleanup;
@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
 cleanup:                       
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
-                       tty->driver->name, info->port.count);
+                       tty->driver->name, atomic_read(&info->port.count));
                        
 }      /* end of mgsl_close() */
 
@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
 
        mgsl_flush_buffer(tty);
        shutdown(info);
-       
-       info->port.count = 0;   
+
+       atomic_set(&info->port.count, 0);
        info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
        info->port.tty = NULL;
 
@@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
        
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):block_til_ready before block on %s count=%d\n",
-                        __FILE__,__LINE__, tty->driver->name, port->count );
+                        __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
 
        spin_lock_irqsave(&info->irq_spinlock, flags);
-       port->count--;
+       atomic_dec(&port->count);
        spin_unlock_irqrestore(&info->irq_spinlock, flags);
        port->blocked_open++;
        
@@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
                
                if (debug_level >= DEBUG_LEVEL_INFO)
                        printk("%s(%d):block_til_ready blocking on %s count=%d\n",
-                                __FILE__,__LINE__, tty->driver->name, port->count );
+                                __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
                                 
                tty_unlock(tty);
                schedule();
@@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
        
        /* FIXME: Racy on hangup during close wait */
        if (!tty_hung_up_p(filp))
-               port->count++;
+               atomic_inc(&port->count);
        port->blocked_open--;
        
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
-                        __FILE__,__LINE__, tty->driver->name, port->count );
+                        __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
                         
        if (!retval)
                port->flags |= ASYNC_NORMAL_ACTIVE;
@@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
                
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
-                        __FILE__,__LINE__,tty->driver->name, info->port.count);
+                        __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
 
        /* If port is closing, signal caller to try again */
        if (info->port.flags & ASYNC_CLOSING){
@@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
                spin_unlock_irqrestore(&info->netlock, flags);
                goto cleanup;
        }
-       info->port.count++;
+       atomic_inc(&info->port.count);
        spin_unlock_irqrestore(&info->netlock, flags);
 
-       if (info->port.count == 1) {
+       if (atomic_read(&info->port.count) == 1) {
                /* 1st open on this device, init hardware */
                retval = startup(info);
                if (retval < 0)
@@ -3442,8 +3442,8 @@ cleanup:
        if (retval) {
                if (tty->count == 1)
                        info->port.tty = NULL; /* tty layer will release tty struct */
-               if(info->port.count)
-                       info->port.count--;
+               if (atomic_read(&info->port.count))
+                       atomic_dec(&info->port.count);
        }
        
        return retval;
@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
        unsigned short new_crctype;
 
        /* return error if TTY interface open */
-       if (info->port.count)
+       if (atomic_read(&info->port.count))
                return -EBUSY;
 
        switch (encoding)
@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
 
        /* arbitrate between network and tty opens */
        spin_lock_irqsave(&info->netlock, flags);
-       if (info->port.count != 0 || info->netcount != 0) {
+       if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
                printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
                spin_unlock_irqrestore(&info->netlock, flags);
                return -EBUSY;
@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
 
        /* return error if TTY interface open */
-       if (info->port.count)
+       if (atomic_read(&info->port.count))
                return -EBUSY;
 
        if (cmd != SIOCWANDEV)
index 0e8c39b6ccd45643051b2ee2876a0eda59595034..e0cb171c89f5835fcd7780695ee2609a5eec21cb 100644 (file)
@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
        tty->driver_data = info;
        info->port.tty = tty;
 
-       DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
+       DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
 
        /* If port is closing, signal caller to try again */
        if (info->port.flags & ASYNC_CLOSING){
@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
                mutex_unlock(&info->port.mutex);
                goto cleanup;
        }
-       info->port.count++;
+       atomic_inc(&info->port.count);
        spin_unlock_irqrestore(&info->netlock, flags);
 
-       if (info->port.count == 1) {
+       if (atomic_read(&info->port.count) == 1) {
                /* 1st open on this device, init hardware */
                retval = startup(info);
                if (retval < 0) {
@@ -715,8 +715,8 @@ cleanup:
        if (retval) {
                if (tty->count == 1)
                        info->port.tty = NULL; /* tty layer will release tty struct */
-               if(info->port.count)
-                       info->port.count--;
+               if(atomic_read(&info->port.count))
+                       atomic_dec(&info->port.count);
        }
 
        DBGINFO(("%s open rc=%d\n", info->device_name, retval));
@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
 
        if (sanity_check(info, tty->name, "close"))
                return;
-       DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
+       DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
 
        if (tty_port_close_start(&info->port, tty, filp) == 0)
                goto cleanup;
@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
        tty_port_close_end(&info->port, tty);
        info->port.tty = NULL;
 cleanup:
-       DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
+       DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
 }
 
 static void hangup(struct tty_struct *tty)
@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
        shutdown(info);
 
        spin_lock_irqsave(&info->port.lock, flags);
-       info->port.count = 0;
+       atomic_set(&info->port.count, 0);
        info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
        info->port.tty = NULL;
        spin_unlock_irqrestore(&info->port.lock, flags);
@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
        unsigned short new_crctype;
 
        /* return error if TTY interface open */
-       if (info->port.count)
+       if (atomic_read(&info->port.count))
                return -EBUSY;
 
        DBGINFO(("%s hdlcdev_attach\n", info->device_name));
@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
 
        /* arbitrate between network and tty opens */
        spin_lock_irqsave(&info->netlock, flags);
-       if (info->port.count != 0 || info->netcount != 0) {
+       if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
                DBGINFO(("%s hdlc_open busy\n", dev->name));
                spin_unlock_irqrestore(&info->netlock, flags);
                return -EBUSY;
@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
 
        /* return error if TTY interface open */
-       if (info->port.count)
+       if (atomic_read(&info->port.count))
                return -EBUSY;
 
        if (cmd != SIOCWANDEV)
@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
                if (port == NULL)
                        continue;
                spin_lock(&port->lock);
-               if ((port->port.count || port->netcount) &&
+               if ((atomic_read(&port->port.count) || port->netcount) &&
                    port->pending_bh && !port->bh_running &&
                    !port->bh_requested) {
                        DBGISR(("%s bh queued\n", port->device_name));
@@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
        add_wait_queue(&port->open_wait, &wait);
 
        spin_lock_irqsave(&info->lock, flags);
-       port->count--;
+       atomic_dec(&port->count);
        spin_unlock_irqrestore(&info->lock, flags);
        port->blocked_open++;
 
@@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
        remove_wait_queue(&port->open_wait, &wait);
 
        if (!tty_hung_up_p(filp))
-               port->count++;
+               atomic_inc(&port->count);
        port->blocked_open--;
 
        if (!retval)
index c3f90910fed93cfa28112eb2796924a64d1d2029..abe46017b0b33c9a13f548e970e481b5323c4a30 100644 (file)
@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):%s open(), old ref count = %d\n",
-                        __FILE__,__LINE__,tty->driver->name, info->port.count);
+                        __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
 
        /* If port is closing, signal caller to try again */
        if (info->port.flags & ASYNC_CLOSING){
@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
                spin_unlock_irqrestore(&info->netlock, flags);
                goto cleanup;
        }
-       info->port.count++;
+       atomic_inc(&info->port.count);
        spin_unlock_irqrestore(&info->netlock, flags);
 
-       if (info->port.count == 1) {
+       if (atomic_read(&info->port.count) == 1) {
                /* 1st open on this device, init hardware */
                retval = startup(info);
                if (retval < 0)
@@ -796,8 +796,8 @@ cleanup:
        if (retval) {
                if (tty->count == 1)
                        info->port.tty = NULL; /* tty layer will release tty struct */
-               if(info->port.count)
-                       info->port.count--;
+               if(atomic_read(&info->port.count))
+                       atomic_dec(&info->port.count);
        }
 
        return retval;
@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):%s close() entry, count=%d\n",
-                        __FILE__,__LINE__, info->device_name, info->port.count);
+                        __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
 
        if (tty_port_close_start(&info->port, tty, filp) == 0)
                goto cleanup;
@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
 cleanup:
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
-                       tty->driver->name, info->port.count);
+                       tty->driver->name, atomic_read(&info->port.count));
 }
 
 /* Called by tty_hangup() when a hangup is signaled.
@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
        shutdown(info);
 
        spin_lock_irqsave(&info->port.lock, flags);
-       info->port.count = 0;
+       atomic_set(&info->port.count, 0);
        info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
        info->port.tty = NULL;
        spin_unlock_irqrestore(&info->port.lock, flags);
@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
        unsigned short new_crctype;
 
        /* return error if TTY interface open */
-       if (info->port.count)
+       if (atomic_read(&info->port.count))
                return -EBUSY;
 
        switch (encoding)
@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
 
        /* arbitrate between network and tty opens */
        spin_lock_irqsave(&info->netlock, flags);
-       if (info->port.count != 0 || info->netcount != 0) {
+       if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
                printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
                spin_unlock_irqrestore(&info->netlock, flags);
                return -EBUSY;
@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
 
        /* return error if TTY interface open */
-       if (info->port.count)
+       if (atomic_read(&info->port.count))
                return -EBUSY;
 
        if (cmd != SIOCWANDEV)
@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
                 * do not request bottom half processing if the
                 * device is not open in a normal mode.
                 */
-               if ( port && (port->port.count || port->netcount) &&
+               if ( port && (atomic_read(&port->port.count) || port->netcount) &&
                     port->pending_bh && !port->bh_running &&
                     !port->bh_requested ) {
                        if ( debug_level >= DEBUG_LEVEL_ISR )
@@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):%s block_til_ready() before block, count=%d\n",
-                        __FILE__,__LINE__, tty->driver->name, port->count );
+                        __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
 
        spin_lock_irqsave(&info->lock, flags);
-       port->count--;
+       atomic_dec(&port->count);
        spin_unlock_irqrestore(&info->lock, flags);
        port->blocked_open++;
 
@@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
 
                if (debug_level >= DEBUG_LEVEL_INFO)
                        printk("%s(%d):%s block_til_ready() count=%d\n",
-                                __FILE__,__LINE__, tty->driver->name, port->count );
+                                __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
 
                tty_unlock(tty);
                schedule();
@@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&port->open_wait, &wait);
        if (!tty_hung_up_p(filp))
-               port->count++;
+               atomic_inc(&port->count);
        port->blocked_open--;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):%s block_til_ready() after, count=%d\n",
-                        __FILE__,__LINE__, tty->driver->name, port->count );
+                        __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
 
        if (!retval)
                port->flags |= ASYNC_NORMAL_ACTIVE;
index 42bad18c66c938be9cf3c0de786be18c77692454..447d7a22e62959992f0ccc11e6885f5fab84a098 100644 (file)
@@ -1084,7 +1084,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
                                   size_t count, loff_t *ppos)
 {
-       if (count) {
+       if (count && capable(CAP_SYS_ADMIN)) {
                char c;
 
                if (get_user(c, buf))
index 2bb4dfc028734079f29a89bfb779bfdc7e0079eb..a7f6e868d6fcd4665d320ebbe9538d5f989ef477 100644 (file)
@@ -3503,7 +3503,7 @@ EXPORT_SYMBOL(tty_devnum);
 
 void tty_default_fops(struct file_operations *fops)
 {
-       *fops = tty_fops;
+       memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
 }
 
 /*
index 3737f55272d2c1184463edc3714f89fb2724730e..7cef44817e9fabbe76874ba00ec5e1d9f5f6d014 100644 (file)
@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
        raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
        tty_ldiscs[disc] = new_ldisc;
        new_ldisc->num = disc;
-       new_ldisc->refcount = 0;
+       atomic_set(&new_ldisc->refcount, 0);
        raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
 
        return ret;
@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
                return -EINVAL;
 
        raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
-       if (tty_ldiscs[disc]->refcount)
+       if (atomic_read(&tty_ldiscs[disc]->refcount))
                ret = -EBUSY;
        else
                tty_ldiscs[disc] = NULL;
@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
        if (ldops) {
                ret = ERR_PTR(-EAGAIN);
                if (try_module_get(ldops->owner)) {
-                       ldops->refcount++;
+                       atomic_inc(&ldops->refcount);
                        ret = ldops;
                }
        }
@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
        unsigned long flags;
 
        raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
-       ldops->refcount--;
+       atomic_dec(&ldops->refcount);
        module_put(ldops->owner);
        raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
 }
index 40b31835f80bc0d8634960a33cc5c3b1ec6d6064..94d92ae19c6c25e8a22eff95fe89fbd88aae6b24 100644 (file)
@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
        unsigned long flags;
 
        spin_lock_irqsave(&port->lock, flags);
-       port->count = 0;
+       atomic_set(&port->count, 0);
        port->flags &= ~ASYNC_NORMAL_ACTIVE;
        tty = port->tty;
        if (tty)
@@ -398,7 +398,7 @@ int tty_port_block_til_ready(struct tty_port *port,
 
        /* The port lock protects the port counts */
        spin_lock_irqsave(&port->lock, flags);
-       port->count--;
+       atomic_dec(&port->count);
        port->blocked_open++;
        spin_unlock_irqrestore(&port->lock, flags);
 
@@ -440,7 +440,7 @@ int tty_port_block_til_ready(struct tty_port *port,
           we must not mess that up further */
        spin_lock_irqsave(&port->lock, flags);
        if (!tty_hung_up_p(filp))
-               port->count++;
+               atomic_inc(&port->count);
        port->blocked_open--;
        if (retval == 0)
                port->flags |= ASYNC_NORMAL_ACTIVE;
@@ -476,19 +476,19 @@ int tty_port_close_start(struct tty_port *port,
                return 0;
 
        spin_lock_irqsave(&port->lock, flags);
-       if (tty->count == 1 && port->count != 1) {
+       if (tty->count == 1 && atomic_read(&port->count) != 1) {
                printk(KERN_WARNING
                    "tty_port_close_start: tty->count = 1 port count = %d.\n",
-                                                               port->count);
-               port->count = 1;
+                                                               atomic_read(&port->count));
+               atomic_set(&port->count, 1);
        }
-       if (--port->count < 0) {
+       if (atomic_dec_return(&port->count) < 0) {
                printk(KERN_WARNING "tty_port_close_start: count = %d\n",
-                                                               port->count);
-               port->count = 0;
+                                                               atomic_read(&port->count));
+               atomic_set(&port->count, 0);
        }
 
-       if (port->count) {
+       if (atomic_read(&port->count)) {
                spin_unlock_irqrestore(&port->lock, flags);
                return 0;
        }
@@ -590,7 +590,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
                                                        struct file *filp)
 {
        spin_lock_irq(&port->lock);
-       ++port->count;
+       atomic_inc(&port->count);
        spin_unlock_irq(&port->lock);
        tty_port_tty_set(port, tty);
 
index 8a89f6e7715db19debaea61781dedc40d05597fc..50b32af36723d0fe8a72a9ba46446d939fe002e7 100644 (file)
@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
             kbd->kbdmode == VC_OFF) &&
             value != KVAL(K_SAK))
                return;         /* SAK is allowed even in raw mode */
+
+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
+       {
+               void *func = fn_handler[value];
+               if (func == fn_show_state || func == fn_show_ptregs ||
+                   func == fn_show_mem)
+                       return;
+       }
+#endif
+
        fn_handler[value](vc);
 }
 
@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
        if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
                return -EFAULT;
 
-       if (!capable(CAP_SYS_TTY_CONFIG))
-               perm = 0;
-
        switch (cmd) {
        case KDGKBENT:
                /* Ensure another thread doesn't free it under us */
@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
                spin_unlock_irqrestore(&kbd_event_lock, flags);
                return put_user(val, &user_kbe->kb_value);
        case KDSKBENT:
+               if (!capable(CAP_SYS_TTY_CONFIG))
+                       perm = 0;
+
                if (!perm)
                        return -EPERM;
                if (!i && v == K_NOSUCHMAP) {
@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
        int i, j, k;
        int ret;
 
-       if (!capable(CAP_SYS_TTY_CONFIG))
-               perm = 0;
-
        kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
        if (!kbs) {
                ret = -ENOMEM;
@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
                kfree(kbs);
                return ((p && *p) ? -EOVERFLOW : 0);
        case KDSKBSENT:
+               if (!capable(CAP_SYS_TTY_CONFIG))
+                       perm = 0;
+
                if (!perm) {
                        ret = -EPERM;
                        goto reterr;
index 6276f13e9e126f8085fdc8949d0d8b784e4df14e..84f2449f3c39661bedcaf28d62a2e382b76777a1 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/kobject.h>
 #include <linux/cdev.h>
 #include <linux/uio_driver.h>
+#include <asm/local.h>
 
 #define UIO_MAX_DEVICES                (1U << MINORBITS)
 
@@ -231,7 +232,7 @@ static ssize_t event_show(struct device *dev,
                          struct device_attribute *attr, char *buf)
 {
        struct uio_device *idev = dev_get_drvdata(dev);
-       return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
+       return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
 }
 static DEVICE_ATTR_RO(event);
 
@@ -393,7 +394,7 @@ void uio_event_notify(struct uio_info *info)
 {
        struct uio_device *idev = info->uio_dev;
 
-       atomic_inc(&idev->event);
+       atomic_inc_unchecked(&idev->event);
        wake_up_interruptible(&idev->wait);
        kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
 }
@@ -446,7 +447,7 @@ static int uio_open(struct inode *inode, struct file *filep)
        }
 
        listener->dev = idev;
-       listener->event_count = atomic_read(&idev->event);
+       listener->event_count = atomic_read_unchecked(&idev->event);
        filep->private_data = listener;
 
        if (idev->info->open) {
@@ -497,7 +498,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
                return -EIO;
 
        poll_wait(filep, &idev->wait, wait);
-       if (listener->event_count != atomic_read(&idev->event))
+       if (listener->event_count != atomic_read_unchecked(&idev->event))
                return POLLIN | POLLRDNORM;
        return 0;
 }
@@ -522,7 +523,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
        do {
                set_current_state(TASK_INTERRUPTIBLE);
 
-               event_count = atomic_read(&idev->event);
+               event_count = atomic_read_unchecked(&idev->event);
                if (event_count != listener->event_count) {
                        if (copy_to_user(buf, &event_count, count))
                                retval = -EFAULT;
@@ -579,9 +580,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
 static int uio_find_mem_index(struct vm_area_struct *vma)
 {
        struct uio_device *idev = vma->vm_private_data;
+       unsigned long size;
 
        if (vma->vm_pgoff < MAX_UIO_MAPS) {
-               if (idev->info->mem[vma->vm_pgoff].size == 0)
+               size = idev->info->mem[vma->vm_pgoff].size;
+               if (size == 0)
+                       return -1;
+               if (vma->vm_end - vma->vm_start > size)
                        return -1;
                return (int)vma->vm_pgoff;
        }
@@ -813,7 +818,7 @@ int __uio_register_device(struct module *owner,
        idev->owner = owner;
        idev->info = info;
        init_waitqueue_head(&idev->wait);
-       atomic_set(&idev->event, 0);
+       atomic_set_unchecked(&idev->event, 0);
 
        ret = uio_get_minor(idev);
        if (ret)
index 813d4d3a51c699a8987c8f95fac0beed20248024..a71934ff55812971b39fd6e221bef6b376d45b7c 100644 (file)
@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
                ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
                if (ret < 2)
                        return -EINVAL;
-               if (index < 0 || index > 0x7f)
+               if (index > 0x7f)
                        return -EINVAL;
                pos += tmp;
 
index dada0146cd7f23f0865d86db487af620cf82e99c..1d0d5175cae7ab71cd0c1733ebcfd001839d4aff 100644 (file)
@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
                if (printk_ratelimit())
                        atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
                                __func__, vpi, vci);
-               atomic_inc(&vcc->stats->rx_err);
+               atomic_inc_unchecked(&vcc->stats->rx_err);
                return;
        }
 
@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
                if (length > ATM_MAX_AAL5_PDU) {
                        atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
                                  __func__, length, vcc);
-                       atomic_inc(&vcc->stats->rx_err);
+                       atomic_inc_unchecked(&vcc->stats->rx_err);
                        goto out;
                }
 
@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
                if (sarb->len < pdu_length) {
                        atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
                                  __func__, pdu_length, sarb->len, vcc);
-                       atomic_inc(&vcc->stats->rx_err);
+                       atomic_inc_unchecked(&vcc->stats->rx_err);
                        goto out;
                }
 
                if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
                        atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
                                  __func__, vcc);
-                       atomic_inc(&vcc->stats->rx_err);
+                       atomic_inc_unchecked(&vcc->stats->rx_err);
                        goto out;
                }
 
@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
                        if (printk_ratelimit())
                                atm_err(instance, "%s: no memory for skb (length: %u)!\n",
                                        __func__, length);
-                       atomic_inc(&vcc->stats->rx_drop);
+                       atomic_inc_unchecked(&vcc->stats->rx_drop);
                        goto out;
                }
 
@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
 
                vcc->push(vcc, skb);
 
-               atomic_inc(&vcc->stats->rx);
+               atomic_inc_unchecked(&vcc->stats->rx);
        out:
                skb_trim(sarb, 0);
        }
@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
                        struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
 
                        usbatm_pop(vcc, skb);
-                       atomic_inc(&vcc->stats->tx);
+                       atomic_inc_unchecked(&vcc->stats->tx);
 
                        skb = skb_dequeue(&instance->sndqueue);
                }
@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
        if (!left--)
                return sprintf(page,
                               "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
-                              atomic_read(&atm_dev->stats.aal5.tx),
-                              atomic_read(&atm_dev->stats.aal5.tx_err),
-                              atomic_read(&atm_dev->stats.aal5.rx),
-                              atomic_read(&atm_dev->stats.aal5.rx_err),
-                              atomic_read(&atm_dev->stats.aal5.rx_drop));
+                              atomic_read_unchecked(&atm_dev->stats.aal5.tx),
+                              atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
+                              atomic_read_unchecked(&atm_dev->stats.aal5.rx),
+                              atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
+                              atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
 
        if (!left--) {
                if (instance->disconnected)
index 2a3bbdf7eb9407568c71c0896615d30fb48ea9cb..91d72cfcc7c48d8577309d07d4db6bf7f14c9b90 100644 (file)
@@ -126,7 +126,7 @@ static const char format_endpt[] =
  * time it gets called.
  */
 static struct device_connect_event {
-       atomic_t count;
+       atomic_unchecked_t count;
        wait_queue_head_t wait;
 } device_event = {
        .count = ATOMIC_INIT(1),
@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
 
 void usbfs_conn_disc_event(void)
 {
-       atomic_add(2, &device_event.count);
+       atomic_add_unchecked(2, &device_event.count);
        wake_up(&device_event.wait);
 }
 
@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
 
        poll_wait(file, &device_event.wait, wait);
 
-       event_count = atomic_read(&device_event.count);
+       event_count = atomic_read_unchecked(&device_event.count);
        if (file->f_version != event_count) {
                file->f_version = event_count;
                return POLLIN | POLLRDNORM;
index e500243803d87b1b3882acb5d37670af48f43f1b..401300f0f36f2f53de2fedb02ad7f6f2c114a7c6 100644 (file)
@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
        struct usb_dev_state *ps = file->private_data;
        struct usb_device *dev = ps->dev;
        ssize_t ret = 0;
-       unsigned len;
+       size_t len;
        loff_t pos;
        int i;
 
@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
        for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
                struct usb_config_descriptor *config =
                        (struct usb_config_descriptor *)dev->rawdescriptors[i];
-               unsigned int length = le16_to_cpu(config->wTotalLength);
+               size_t length = le16_to_cpu(config->wTotalLength);
 
                if (*ppos < pos + length) {
 
                        /* The descriptor may claim to be longer than it
                         * really is.  Here is the actual allocated length. */
-                       unsigned alloclen =
+                       size_t alloclen =
                                le16_to_cpu(dev->config[i].desc.wTotalLength);
 
-                       len = length - (*ppos - pos);
+                       len = length + pos - *ppos;
                        if (len > nbytes)
                                len = nbytes;
 
                        /* Simply don't write (skip over) unallocated parts */
                        if (alloclen > (*ppos - pos)) {
-                               alloclen -= (*ppos - pos);
+                               alloclen = alloclen + pos - *ppos;
                                if (copy_to_user(buf,
                                    dev->rawdescriptors[i] + (*ppos - pos),
                                    min(len, alloclen))) {
index 45a915ccd71c0631836b1579ce4897166d1e4a92..09f9735228c063fed7054d6383ed58390c9cb004 100644 (file)
@@ -1551,7 +1551,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
         */
        usb_get_urb(urb);
        atomic_inc(&urb->use_count);
-       atomic_inc(&urb->dev->urbnum);
+       atomic_inc_unchecked(&urb->dev->urbnum);
        usbmon_urb_submit(&hcd->self, urb);
 
        /* NOTE requirements on root-hub callers (usbfs and the hub
@@ -1578,7 +1578,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
                urb->hcpriv = NULL;
                INIT_LIST_HEAD(&urb->urb_list);
                atomic_dec(&urb->use_count);
-               atomic_dec(&urb->dev->urbnum);
+               atomic_dec_unchecked(&urb->dev->urbnum);
                if (atomic_read(&urb->reject))
                        wake_up(&usb_kill_urb_queue);
                usb_put_urb(urb);
index b4bfa3ac4b12207986972ff040897683817487cc..008f926db83b2ca6f570a6512d4858dfb891700c 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/mutex.h>
 #include <linux/random.h>
 #include <linux/pm_qos.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 #include <asm/byteorder.h>
@@ -4664,6 +4665,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
                        goto done;
                return;
        }
+
+       if (gr_handle_new_usb())
+               goto done;
+
        if (hub_is_superspeed(hub->hdev))
                unit_load = 150;
        else
index f368d2053da534ca975804dfad2bd8ddc77d5fba..0c30ac5e5b9a4b7a4dfc6ea9e929d0484b011e2c 100644 (file)
@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
  * Return: If successful, the number of bytes transferred. Otherwise, a negative
  * error number.
  */
-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
                    __u8 requesttype, __u16 value, __u16 index, void *data,
                    __u16 size, int timeout)
 {
@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
  * If successful, 0. Otherwise a negative error number. The number of actual
  * bytes transferred will be stored in the @actual_length parameter.
  */
-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
                      void *data, int len, int *actual_length, int timeout)
 {
        return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
  * bytes transferred will be stored in the @actual_length parameter.
  *
  */
-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
                 void *data, int len, int *actual_length, int timeout)
 {
        struct urb *urb;
index d26973844a4dea0186f759de848c3baf2442088f..7340cd789948789b4d6e873a542038d7966f98ce 100644 (file)
@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
        struct usb_device *udev;
 
        udev = to_usb_device(dev);
-       return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
+       return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
 }
 static DEVICE_ATTR_RO(urbnum);
 
index b1fb9aef0f5b09b10b412c315345637eb3f50a72..42248850ed2af4f5384ad3217ea09da17a48e1a6 100644 (file)
@@ -431,7 +431,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
        set_dev_node(&dev->dev, dev_to_node(bus->controller));
        dev->state = USB_STATE_ATTACHED;
        dev->lpm_disable_count = 1;
-       atomic_set(&dev->urbnum, 0);
+       atomic_set_unchecked(&dev->urbnum, 0);
 
        INIT_LIST_HEAD(&dev->ep0.urb_list);
        dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
index 8cfc3191be50aee8f9157ba95e84c49446d1ae55..4868255f3d61fa7120a6884f470acee6e5779553 100644 (file)
@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
 
 #ifdef CONFIG_KGDB
 static struct kgdb_io kgdbdbgp_io_ops;
-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
+static struct kgdb_io kgdbdbgp_io_ops_console;
+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
 #else
 #define dbgp_kgdb_mode (0)
 #endif
@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
        .write_char = kgdbdbgp_write_char,
 };
 
+static struct kgdb_io kgdbdbgp_io_ops_console = {
+       .name = "kgdbdbgp",
+       .read_char = kgdbdbgp_read_char,
+       .write_char = kgdbdbgp_write_char,
+       .is_console = 1
+};
+
 static int kgdbdbgp_wait_time;
 
 static int __init kgdbdbgp_parse_config(char *str)
@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
                ptr++;
                kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
        }
-       kgdb_register_io_module(&kgdbdbgp_io_ops);
-       kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
+       if (early_dbgp_console.index != -1)
+               kgdb_register_io_module(&kgdbdbgp_io_ops_console);
+       else
+               kgdb_register_io_module(&kgdbdbgp_io_ops);
 
        return 0;
 }
index e9715845f82e1dc825690c05f8cc08c2d8e41df7..03495ab4cbc8db7784c43260e198eb61df8432d0 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/atomic.h>
+#include <linux/module.h>
 
 #include "u_uac1.h"
 
index 491082aaf1039fa7cdef927a371a5b3514974e16..dfd7d17a5b00751c2903448355c2bf4688bd2d7a 100644 (file)
@@ -729,9 +729,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
                        spin_lock_irq(&port->port_lock);
 
                        /* already open?  Great. */
-                       if (port->port.count) {
+                       if (atomic_read(&port->port.count)) {
                                status = 0;
-                               port->port.count++;
+                               atomic_inc(&port->port.count);
 
                        /* currently opening/closing? wait ... */
                        } else if (port->openclose) {
@@ -790,7 +790,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
        tty->driver_data = port;
        port->port.tty = tty;
 
-       port->port.count = 1;
+       atomic_set(&port->port.count, 1);
        port->openclose = false;
 
        /* if connected, start the I/O stream */
@@ -832,11 +832,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
 
        spin_lock_irq(&port->port_lock);
 
-       if (port->port.count != 1) {
-               if (port->port.count == 0)
+       if (atomic_read(&port->port.count) != 1) {
+               if (atomic_read(&port->port.count) == 0)
                        WARN_ON(1);
                else
-                       --port->port.count;
+                       atomic_dec(&port->port.count);
                goto exit;
        }
 
@@ -846,7 +846,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
         * and sleep if necessary
         */
        port->openclose = true;
-       port->port.count = 0;
+       atomic_set(&port->port.count, 0);
 
        gser = port->port_usb;
        if (gser && gser->disconnect)
@@ -1062,7 +1062,7 @@ static int gs_closed(struct gs_port *port)
        int cond;
 
        spin_lock_irq(&port->port_lock);
-       cond = (port->port.count == 0) && !port->openclose;
+       cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
        spin_unlock_irq(&port->port_lock);
        return cond;
 }
@@ -1205,7 +1205,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
        /* if it's already open, start I/O ... and notify the serial
         * protocol about open/close status (connect/disconnect).
         */
-       if (port->port.count) {
+       if (atomic_read(&port->port.count)) {
                pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
                gs_start_io(port);
                if (gser->connect)
@@ -1252,7 +1252,7 @@ void gserial_disconnect(struct gserial *gser)
 
        port->port_usb = NULL;
        gser->ioport = NULL;
-       if (port->port.count > 0 || port->openclose) {
+       if (atomic_read(&port->port.count) > 0 || port->openclose) {
                wake_up_interruptible(&port->drain_wait);
                if (port->port.tty)
                        tty_hangup(port->port.tty);
@@ -1268,7 +1268,7 @@ void gserial_disconnect(struct gserial *gser)
 
        /* finally, free any unused/unusable I/O buffers */
        spin_lock_irqsave(&port->port_lock, flags);
-       if (port->port.count == 0 && !port->openclose)
+       if (atomic_read(&port->port.count) == 0 && !port->openclose)
                gs_buf_free(&port->port_write_buf);
        gs_free_requests(gser->out, &port->read_pool, NULL);
        gs_free_requests(gser->out, &port->read_queue, NULL);
index 53842a1b947fc34a9dd69b915d1f10c2e647685a..2bef3b6bff2866cdaf0c5784f78a3571349a6593 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/ctype.h>
 #include <linux/random.h>
 #include <linux/syscalls.h>
+#include <linux/module.h>
 
 #include "u_uac1.h"
 
index 118edb7bdca288f2f2adf9f7783f3b54949e870d..7a6415fd3a5a22bd0bc446ce6e20e368635ea138 100644 (file)
@@ -769,7 +769,7 @@ static struct urb *request_single_step_set_feature_urb(
        urb->transfer_flags = URB_DIR_IN;
        usb_get_urb(urb);
        atomic_inc(&urb->use_count);
-       atomic_inc(&urb->dev->urbnum);
+       atomic_inc_unchecked(&urb->dev->urbnum);
        urb->setup_dma = dma_map_single(
                        hcd->self.controller,
                        urb->setup_packet,
@@ -836,7 +836,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
        urb->status = -EINPROGRESS;
        usb_get_urb(urb);
        atomic_inc(&urb->use_count);
-       atomic_inc(&urb->dev->urbnum);
+       atomic_inc_unchecked(&urb->dev->urbnum);
        retval = submit_single_step_set_feature(hcd, urb, 0);
        if (!retval && !wait_for_completion_timeout(&done,
                                                msecs_to_jiffies(2000))) {
index 1db0626c8bf415289fd7e8f0875e15e4aeff404e..49487822af64885383afada0ffc081fe6869c1d8 100644 (file)
@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
        struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
        struct wahc *wa = &hwahc->wa;
        struct device *dev = &wa->usb_iface->dev;
-       u8 mas_le[UWB_NUM_MAS/8];
+       u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
+
+       if (mas_le == NULL)
+               return -ENOMEM;
 
        /* Set the stream index */
        result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
                        WUSB_REQ_SET_WUSB_MAS,
                        USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
                        0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
-                       mas_le, 32, USB_CTRL_SET_TIMEOUT);
+                       mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
        if (result < 0)
                dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
 out:
+       kfree(mas_le);
+
        return result;
 }
 
index b3d245ef46efa7e72a4a4e7fd22ad80029f2a821..99549ed76b91539347f044fdb2423de52828e93e 100644 (file)
@@ -84,7 +84,7 @@ struct appledisplay {
        struct mutex sysfslock;         /* concurrent read and write */
 };
 
-static atomic_t count_displays = ATOMIC_INIT(0);
+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
 static struct workqueue_struct *wq;
 
 static void appledisplay_complete(struct urb *urb)
@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
 
        /* Register backlight device */
        snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
-               atomic_inc_return(&count_displays) - 1);
+               atomic_inc_return_unchecked(&count_displays) - 1);
        memset(&props, 0, sizeof(struct backlight_properties));
        props.type = BACKLIGHT_RAW;
        props.max_brightness = 0xff;
index 29fa1c3d0089bee738ed4f54a8b65d4f82dd0c03..a57b08eebcebfd08fe42d7931c945f3a04ad7ecc 100644 (file)
@@ -125,7 +125,7 @@ static int usb_console_setup(struct console *co, char *options)
 
        info->port = port;
 
-       ++port->port.count;
+       atomic_inc(&port->port.count);
        if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
                if (serial->type->set_termios) {
                        /*
@@ -173,7 +173,7 @@ static int usb_console_setup(struct console *co, char *options)
        }
        /* Now that any required fake tty operations are completed restore
         * the tty port count */
-       --port->port.count;
+       atomic_dec(&port->port.count);
        /* The console is special in terms of closing the device so
         * indicate this port is now acting as a system console. */
        port->port.console = 1;
@@ -186,7 +186,7 @@ static int usb_console_setup(struct console *co, char *options)
  put_tty:
        tty_kref_put(tty);
  reset_open_count:
-       port->port.count = 0;
+       atomic_set(&port->port.count, 0);
        usb_autopm_put_interface(serial->interface);
  error_get_interface:
        usb_serial_put(serial);
@@ -197,7 +197,7 @@ static int usb_console_setup(struct console *co, char *options)
 static void usb_console_write(struct console *co,
                                        const char *buf, unsigned count)
 {
-       static struct usbcons_info *info = &usbcons_info;
+       struct usbcons_info *info = &usbcons_info;
        struct usb_serial_port *port = info->port;
        struct usb_serial *serial;
        int retval = -ENODEV;
index 307e339a9478682dc5b2709710bd5209cfef47ca..6aa97cb9b19296ebbc989d8a988f408fe39d441d 100644 (file)
@@ -63,7 +63,7 @@ struct us_unusual_dev {
        __u8  useProtocol;
        __u8  useTransport;
        int (*initFunction)(struct us_data *);
-};
+} __do_const;
 
 
 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
index a863a98a91ce01afa0e12e8d86eec5b8c696e58d..d272795fc087c1ef6aaa41a77ca1d55da3bd0d43 100644 (file)
@@ -83,7 +83,7 @@ struct vhci_hcd {
        unsigned resuming:1;
        unsigned long re_timeout;
 
-       atomic_t seqnum;
+       atomic_unchecked_t seqnum;
 
        /*
         * NOTE:
index 1ae9d40f96bf72beea9bff470525ff1b8d5e9307..c62604b1ab62f9849b1aa71638b9c051aa160fd7 100644 (file)
@@ -439,7 +439,7 @@ static void vhci_tx_urb(struct urb *urb)
 
        spin_lock(&vdev->priv_lock);
 
-       priv->seqnum = atomic_inc_return(&the_controller->seqnum);
+       priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
        if (priv->seqnum == 0xffff)
                dev_info(&urb->dev->dev, "seqnum max\n");
 
@@ -684,7 +684,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                        return -ENOMEM;
                }
 
-               unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
+               unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
                if (unlink->seqnum == 0xffff)
                        pr_info("seqnum max\n");
 
@@ -888,7 +888,7 @@ static int vhci_start(struct usb_hcd *hcd)
                vdev->rhport = rhport;
        }
 
-       atomic_set(&vhci->seqnum, 0);
+       atomic_set_unchecked(&vhci->seqnum, 0);
        spin_lock_init(&vhci->lock);
 
        hcd->power_budget = 0; /* no limit */
index 00e4a54308e430f283f053865853089566c823d7..d676f8503c6d45146cc9a1803a0592e6ada6ea2c 100644 (file)
@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
        if (!urb) {
                pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
                pr_info("max seqnum %d\n",
-                       atomic_read(&the_controller->seqnum));
+                       atomic_read_unchecked(&the_controller->seqnum));
                usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
                return;
        }
index edc7267157f3d06ec74f5f11767e20e3ee75a8e4..9f65ce299cca6506e6cf0a17eda51441da0c8744 100644 (file)
@@ -240,7 +240,7 @@ struct wahc {
        spinlock_t xfer_list_lock;
        struct work_struct xfer_enqueue_work;
        struct work_struct xfer_error_work;
-       atomic_t xfer_id_count;
+       atomic_unchecked_t xfer_id_count;
 
        kernel_ulong_t  quirks;
 };
@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
        INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
        INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
        wa->dto_in_use = 0;
-       atomic_set(&wa->xfer_id_count, 1);
+       atomic_set_unchecked(&wa->xfer_id_count, 1);
        /* init the buf in URBs */
        for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
                usb_init_urb(&(wa->buf_in_urbs[index]));
index 69af4fd9e07220b29edeeec2b46fbde469d7d2ac..da390d7427ed905292c87985e7a7f4f0534c9315 100644 (file)
@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
  */
 static void wa_xfer_id_init(struct wa_xfer *xfer)
 {
-       xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
+       xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
 }
 
 /* Return the xfer's ID. */
index f018d8d0f975360a091339699d12348c5c93a12b..ccab63f06171ebbc0dd6e1332ecd9da0cf27a8da 100644 (file)
@@ -481,7 +481,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
                return 0;
 
        /* TODO Prevent device auto probing */
-       WARN("Device %s added to live group %d!\n", dev_name(dev),
+       WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
             iommu_group_id(group->iommu_group));
 
        return 0;
index 9484d5652ca5a23e051c54b1b0544ce8868f3876..d415d69dc2378cbc7568bbcdcc53e601770ee761 100644 (file)
@@ -650,10 +650,8 @@ static void handle_rx(struct vhost_net *net)
                        break;
                }
                /* TODO: Should check and handle checksum. */
-
-               hdr.num_buffers = cpu_to_vhost16(vq, headcount);
                if (likely(mergeable) &&
-                   memcpy_toiovecend(nvq->hdr, (void *)&hdr.num_buffers,
+                   memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
                                      offsetof(typeof(hdr), num_buffers),
                                      sizeof hdr.num_buffers)) {
                        vq_err(vq, "Failed num_buffers write");
index 3bb02c60a2f5fe85a4d38e5d884b66f03b8756e9..a01ff3878334847927a1a0e7fa96adeb926f5200 100644 (file)
@@ -551,7 +551,7 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
 static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
 {
        __virtio16 v = 0;
-       int rc = get_user(v, (__force __virtio16 __user *)p);
+       int rc = get_user(v, (__force_user __virtio16 *)p);
        *val = vringh16_to_cpu(vrh, v);
        return rc;
 }
@@ -559,12 +559,12 @@ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio
 static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
 {
        __virtio16 v = cpu_to_vringh16(vrh, val);
-       return put_user(v, (__force __virtio16 __user *)p);
+       return put_user(v, (__force_user __virtio16 *)p);
 }
 
 static inline int copydesc_user(void *dst, const void *src, size_t len)
 {
-       return copy_from_user(dst, (__force void __user *)src, len) ?
+       return copy_from_user(dst, (void __force_user *)src, len) ?
                -EFAULT : 0;
 }
 
@@ -572,19 +572,19 @@ static inline int putused_user(struct vring_used_elem *dst,
                               const struct vring_used_elem *src,
                               unsigned int num)
 {
-       return copy_to_user((__force void __user *)dst, src,
+       return copy_to_user((void __force_user *)dst, src,
                            sizeof(*dst) * num) ? -EFAULT : 0;
 }
 
 static inline int xfer_from_user(void *src, void *dst, size_t len)
 {
-       return copy_from_user(dst, (__force void __user *)src, len) ?
+       return copy_from_user(dst, (void __force_user *)src, len) ?
                -EFAULT : 0;
 }
 
 static inline int xfer_to_user(void *dst, void *src, size_t len)
 {
-       return copy_to_user((__force void __user *)dst, src, len) ?
+       return copy_to_user((void __force_user *)dst, src, len) ?
                -EFAULT : 0;
 }
 
@@ -621,9 +621,9 @@ int vringh_init_user(struct vringh *vrh, u64 features,
        vrh->last_used_idx = 0;
        vrh->vring.num = num;
        /* vring expects kernel addresses, but only used via accessors. */
-       vrh->vring.desc = (__force struct vring_desc *)desc;
-       vrh->vring.avail = (__force struct vring_avail *)avail;
-       vrh->vring.used = (__force struct vring_used *)used;
+       vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
+       vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
+       vrh->vring.used = (__force_kernel struct vring_used *)used;
        return 0;
 }
 EXPORT_SYMBOL(vringh_init_user);
@@ -826,7 +826,7 @@ static inline int getu16_kern(const struct vringh *vrh,
 
 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
 {
-       ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
+       ACCESS_ONCE_RW(*p) = cpu_to_vringh16(vrh, val);
        return 0;
 }
 
index 84a110a719cbd73256c326b13fe19619b6cfde74..96312c3afc079ce0bc13c7d8039c40f857edeedc 100644 (file)
@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
 static unsigned long kb3886bl_flags;
 #define KB3886BL_SUSPENDED     0x01
 
-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
        {
                .ident = "Sahara Touch-iT",
                .matches = {
index 1b0b233b8b39858f10bf54459884d084e858bb36..6f34c2ca65a17bd71870fb00469184db6cd7c363 100644 (file)
@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
                return -ENOSPC;
 
        err = 0;
-       if ((count + p) > fbmemlength) {
+       if (count > (fbmemlength - p)) {
                count = fbmemlength - p;
                err = -ENOSPC;
        }
index aedf2fbf9bf61f1e652f1e1353a1f54535a983a2..47c9aca205b9dfc43c8373f5f366a0146527c8ef 100644 (file)
@@ -149,7 +149,7 @@ enum {
 };
 
 /* Must match above enum */
-static char * const r128_family[] = {
+static const char * const r128_family[] = {
        "AGP",
        "PCI",
        "PRO AGP",
index 37ec09b3fffd2b4204a717953b3d67b2009a3bdd..98f88624225fc915a8b658c0fb6376a3433b843c 100644 (file)
@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
        par->accel_flags = var->accel_flags; /* hack */
 
        if (var->accel_flags) {
-               info->fbops->fb_sync = atyfb_sync;
+               pax_open_kernel();
+               *(void **)&info->fbops->fb_sync = atyfb_sync;
+               pax_close_kernel();
                info->flags &= ~FBINFO_HWACCEL_DISABLED;
        } else {
-               info->fbops->fb_sync = NULL;
+               pax_open_kernel();
+               *(void **)&info->fbops->fb_sync = NULL;
+               pax_close_kernel();
                info->flags |= FBINFO_HWACCEL_DISABLED;
        }
 
index 2fa0317ab3c7d1a89c2cf0b91ee3ef047fe2e10e..4983f2a2979e87555b517c9b02c3be54736475cd 100644 (file)
@@ -8,6 +8,7 @@
 #include "../core/fb_draw.h"
 
 #include <asm/io.h>
+#include <asm/pgtable.h>
 
 #ifdef __sparc__
 #include <asm/fbio.h>
@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
        info->sprite.buf_align = 16;    /* and 64 lines tall. */
        info->sprite.flags = FB_PIXMAP_IO;
 
-       info->fbops->fb_cursor = atyfb_cursor;
+       pax_open_kernel();
+       *(void **)&info->fbops->fb_cursor = atyfb_cursor;
+       pax_close_kernel();
 
        return 0;
 }
index d6cab1fd9a4795da2fe2348c2882bc3e04a6cf30..112f6803b476cab76b451fe1c2a449a1d9b5d941 100644 (file)
@@ -207,7 +207,9 @@ void fb_deferred_io_init(struct fb_info *info)
 
        BUG_ON(!fbdefio);
        mutex_init(&fbdefio->lock);
-       info->fbops->fb_mmap = fb_deferred_io_mmap;
+       pax_open_kernel();
+       *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
+       pax_close_kernel();
        INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
        INIT_LIST_HEAD(&fbdefio->pagelist);
        if (fbdefio->delay == 0) /* set a default of 1 s */
@@ -238,7 +240,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
                page->mapping = NULL;
        }
 
-       info->fbops->fb_mmap = NULL;
+       *(void **)&info->fbops->fb_mmap = NULL;
        mutex_destroy(&fbdefio->lock);
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
index 0705d8883edecc785a72f4ef256b9894e4d0e950..d9429bf1dd83609899b957d4368190276db8473e 100644 (file)
@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
        __u32 data;
        int err;
 
-       err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
+       err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
 
        data = (__u32) (unsigned long) fix->smem_start;
        err |= put_user(data, &fix32->smem_start);
index 42543362f16308b3b6b0fdbffb287e98bf33d14d..282567ee0400b6ffbeebfb927083e555856f084b 100644 (file)
@@ -240,7 +240,7 @@ static uint screen_fb_size;
 static inline int synthvid_send(struct hv_device *hdev,
                                struct synthvid_msg *msg)
 {
-       static atomic64_t request_id = ATOMIC64_INIT(0);
+       static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
        int ret;
 
        msg->pipe_hdr.type = PIPE_MSG_DATA;
@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
 
        ret = vmbus_sendpacket(hdev->channel, msg,
                               msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
-                              atomic64_inc_return(&request_id),
+                              atomic64_inc_return_unchecked(&request_id),
                               VM_PKT_DATA_INBAND, 0);
 
        if (ret)
index 7672d2ea9b357309e5c4f0ffd96ca1eeaf69b63d..b56437f7b210dc6f63e98638a22853071ba325ca 100644 (file)
@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
                }
        }
        printk("ringbuffer lockup!!!\n");
+       printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
        i810_report_error(mmio); 
        par->dev_flags |= LOCKUP;
        info->pixmap.scan_align = 1;
index a01147fdf270f5e15416d163a614f1f3db810051..5d896f879ff9778c7d4eacd02116daeb42dc4466 100644 (file)
@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
 
 #ifdef CONFIG_FB_MATROX_MYSTIQUE
 struct matrox_switch matrox_mystique = {
-       MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
+       .preinit = MGA1064_preinit,
+       .reset = MGA1064_reset,
+       .init = MGA1064_init,
+       .restore = MGA1064_restore,
 };
 EXPORT_SYMBOL(matrox_mystique);
 #endif
 
 #ifdef CONFIG_FB_MATROX_G
 struct matrox_switch matrox_G100 = {
-       MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
+       .preinit = MGAG100_preinit,
+       .reset = MGAG100_reset,
+       .init = MGAG100_init,
+       .restore = MGAG100_restore,
 };
 EXPORT_SYMBOL(matrox_G100);
 #endif
index 195ad7cac1baccd70a0b681832c85bd120209d91..09743fc5b8e4edf8e28cbd88b15946131f3fdda0 100644 (file)
@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
 }
 
 struct matrox_switch matrox_millennium = {
-       Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
+       .preinit = Ti3026_preinit,
+       .reset = Ti3026_reset,
+       .init = Ti3026_init,
+       .restore = Ti3026_restore
 };
 EXPORT_SYMBOL(matrox_millennium);
 #endif
index fe92eed6da70c01bb1a96b07c23f87561d5dac18..106e085e2a95d0ad3d9df4f32af3a8279f968f41 100644 (file)
@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
        struct mb862xxfb_par *par = info->par;
 
        if (info->var.bits_per_pixel == 32) {
-               info->fbops->fb_fillrect = cfb_fillrect;
-               info->fbops->fb_copyarea = cfb_copyarea;
-               info->fbops->fb_imageblit = cfb_imageblit;
+               pax_open_kernel();
+               *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
+               *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
+               *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
+               pax_close_kernel();
        } else {
                outreg(disp, GC_L0EM, 3);
-               info->fbops->fb_fillrect = mb86290fb_fillrect;
-               info->fbops->fb_copyarea = mb86290fb_copyarea;
-               info->fbops->fb_imageblit = mb86290fb_imageblit;
+               pax_open_kernel();
+               *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
+               *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
+               *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
+               pax_close_kernel();
        }
        outreg(draw, GDC_REG_DRAW_BASE, 0);
        outreg(draw, GDC_REG_MODE_MISC, 0x8000);
index def04120467699580abdd4558de05d38f1e91b9a..fed6529e02a618c2aec0e536a7ed5d8e5704a571 100644 (file)
@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
        info->fix.line_length = (info->var.xres_virtual *
                                 info->var.bits_per_pixel) >> 3;
        if (info->var.accel_flags) {
-               info->fbops->fb_imageblit = nvidiafb_imageblit;
-               info->fbops->fb_fillrect = nvidiafb_fillrect;
-               info->fbops->fb_copyarea = nvidiafb_copyarea;
-               info->fbops->fb_sync = nvidiafb_sync;
+               pax_open_kernel();
+               *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
+               *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
+               *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
+               *(void **)&info->fbops->fb_sync = nvidiafb_sync;
+               pax_close_kernel();
                info->pixmap.scan_align = 4;
                info->flags &= ~FBINFO_HWACCEL_DISABLED;
                info->flags |= FBINFO_READS_FAST;
                NVResetGraphics(info);
        } else {
-               info->fbops->fb_imageblit = cfb_imageblit;
-               info->fbops->fb_fillrect = cfb_fillrect;
-               info->fbops->fb_copyarea = cfb_copyarea;
-               info->fbops->fb_sync = NULL;
+               pax_open_kernel();
+               *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
+               *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
+               *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
+               *(void **)&info->fbops->fb_sync = NULL;
+               pax_close_kernel();
                info->pixmap.scan_align = 1;
                info->flags |= FBINFO_HWACCEL_DISABLED;
                info->flags &= ~FBINFO_READS_FAST;
@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
        info->pixmap.size = 8 * 1024;
        info->pixmap.flags = FB_PIXMAP_SYSTEM;
 
-       if (!hwcur)
-           info->fbops->fb_cursor = NULL;
+       if (!hwcur) {
+               pax_open_kernel();
+               *(void **)&info->fbops->fb_cursor = NULL;
+               pax_close_kernel();
+       }
 
        info->var.accel_flags = (!noaccel);
 
index 2412a0dd0c1312baa732296e895d5d9f1a63eea6..294215b136c19b72e884bcc8e7073065f2b91794 100644 (file)
@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
        if (dssdev->name == NULL)
                dssdev->name = dssdev->alias;
 
+       pax_open_kernel();
        if (drv && drv->get_resolution == NULL)
-               drv->get_resolution = omapdss_default_get_resolution;
+               *(void **)&drv->get_resolution = omapdss_default_get_resolution;
        if (drv && drv->get_recommended_bpp == NULL)
-               drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
+               *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
        if (drv && drv->get_timings == NULL)
-               drv->get_timings = omapdss_default_get_timings;
+               *(void **)&drv->get_timings = omapdss_default_get_timings;
+       pax_close_kernel();
 
        mutex_lock(&panel_list_mutex);
        list_add_tail(&dssdev->panel_list, &panel_list);
index 83433cb0dfba42010c4793bd7c8695955c77217f..71e9b986ab656d25f76288dd0cf9c2ff5d611538 100644 (file)
@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
 
        switch(prod_id) {
        case S1D13506_PROD_ID:  /* activate acceleration */
-               s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
-               s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
+               pax_open_kernel();
+               *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
+               *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
+               pax_close_kernel();
                info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
                        FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
                break;
index d3013cd9f976966330cd7b501e86e934f1493794..95b82854023664ed456d609ad16516ddc3c3ce6c 100644 (file)
@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
 }
 
 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
-       lcdc_sys_write_index,
-       lcdc_sys_write_data,
-       lcdc_sys_read_data,
+       .write_index = lcdc_sys_write_index,
+       .write_data = lcdc_sys_write_data,
+       .read_data = lcdc_sys_read_data,
 };
 
 static int sh_mobile_lcdc_sginit(struct fb_info *info,
index 9279e5f6696e24ad84ea11a5e3adf7c1980c9a91..d5f52761f98b9b3c61c9682c7d630fbf73c81545 100644 (file)
@@ -1174,7 +1174,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
                fb_deferred_io_cleanup(info);
                kfree(info->fbdefio);
                info->fbdefio = NULL;
-               info->fbops->fb_mmap = ufx_ops_mmap;
+               pax_open_kernel();
+               *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
+               pax_close_kernel();
        }
 
        pr_debug("released /dev/fb%d user=%d count=%d",
index ff2b8731a2dc6781d1aacb747609cb1a31f05223..626a8d591f504582cff50484ffaf3e53254e1ff2 100644 (file)
@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
                dlfb_urb_completion(urb);
 
 error:
-       atomic_add(bytes_sent, &dev->bytes_sent);
-       atomic_add(bytes_identical, &dev->bytes_identical);
-       atomic_add(width*height*2, &dev->bytes_rendered);
+       atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
+       atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
+       atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
        end_cycles = get_cycles();
-       atomic_add(((unsigned int) ((end_cycles - start_cycles)
+       atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
                    >> 10)), /* Kcycles */
                   &dev->cpu_kcycles_used);
 
@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
                dlfb_urb_completion(urb);
 
 error:
-       atomic_add(bytes_sent, &dev->bytes_sent);
-       atomic_add(bytes_identical, &dev->bytes_identical);
-       atomic_add(bytes_rendered, &dev->bytes_rendered);
+       atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
+       atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
+       atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
        end_cycles = get_cycles();
-       atomic_add(((unsigned int) ((end_cycles - start_cycles)
+       atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
                    >> 10)), /* Kcycles */
                   &dev->cpu_kcycles_used);
 }
@@ -991,7 +991,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
                fb_deferred_io_cleanup(info);
                kfree(info->fbdefio);
                info->fbdefio = NULL;
-               info->fbops->fb_mmap = dlfb_ops_mmap;
+               pax_open_kernel();
+               *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
+               pax_close_kernel();
        }
 
        pr_warn("released /dev/fb%d user=%d count=%d\n",
@@ -1373,7 +1375,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
        struct fb_info *fb_info = dev_get_drvdata(fbdev);
        struct dlfb_data *dev = fb_info->par;
        return snprintf(buf, PAGE_SIZE, "%u\n",
-                       atomic_read(&dev->bytes_rendered));
+                       atomic_read_unchecked(&dev->bytes_rendered));
 }
 
 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
@@ -1381,7 +1383,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
        struct fb_info *fb_info = dev_get_drvdata(fbdev);
        struct dlfb_data *dev = fb_info->par;
        return snprintf(buf, PAGE_SIZE, "%u\n",
-                       atomic_read(&dev->bytes_identical));
+                       atomic_read_unchecked(&dev->bytes_identical));
 }
 
 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
@@ -1389,7 +1391,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
        struct fb_info *fb_info = dev_get_drvdata(fbdev);
        struct dlfb_data *dev = fb_info->par;
        return snprintf(buf, PAGE_SIZE, "%u\n",
-                       atomic_read(&dev->bytes_sent));
+                       atomic_read_unchecked(&dev->bytes_sent));
 }
 
 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
@@ -1397,7 +1399,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
        struct fb_info *fb_info = dev_get_drvdata(fbdev);
        struct dlfb_data *dev = fb_info->par;
        return snprintf(buf, PAGE_SIZE, "%u\n",
-                       atomic_read(&dev->cpu_kcycles_used));
+                       atomic_read_unchecked(&dev->cpu_kcycles_used));
 }
 
 static ssize_t edid_show(
@@ -1457,10 +1459,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
        struct fb_info *fb_info = dev_get_drvdata(fbdev);
        struct dlfb_data *dev = fb_info->par;
 
-       atomic_set(&dev->bytes_rendered, 0);
-       atomic_set(&dev->bytes_identical, 0);
-       atomic_set(&dev->bytes_sent, 0);
-       atomic_set(&dev->cpu_kcycles_used, 0);
+       atomic_set_unchecked(&dev->bytes_rendered, 0);
+       atomic_set_unchecked(&dev->bytes_identical, 0);
+       atomic_set_unchecked(&dev->bytes_sent, 0);
+       atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
 
        return count;
 }
index d32d1c4d1b99f81a1962bb92bf7cc6b3e1ad77ff..46722e6a2bca411fced8aaf722af945b07870ff6 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/io.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <linux/moduleloader.h>
 #include <video/edid.h>
 #include <video/uvesafb.h>
 #ifdef CONFIG_X86
@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
        if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
                par->pmi_setpal = par->ypan = 0;
        } else {
+
+#ifdef CONFIG_PAX_KERNEXEC
+#ifdef CONFIG_MODULES
+               par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
+#endif
+               if (!par->pmi_code) {
+                       par->pmi_setpal = par->ypan = 0;
+                       return 0;
+               }
+#endif
+
                par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
                                                + task->t.regs.edi);
+
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+               pax_open_kernel();
+               memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
+               pax_close_kernel();
+
+               par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
+               par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
+#else
                par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
                par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
+#endif
+
                printk(KERN_INFO "uvesafb: protected mode interface info at "
                                 "%04x:%04x\n",
                                 (u16)task->t.regs.es, (u16)task->t.regs.edi);
@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
        par->ypan = ypan;
 
        if (par->pmi_setpal || par->ypan) {
+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
                if (__supported_pte_mask & _PAGE_NX) {
                        par->pmi_setpal = par->ypan = 0;
                        printk(KERN_WARNING "uvesafb: NX protection is active, "
                                            "better not use the PMI.\n");
-               } else {
+               } else
+#endif
                        uvesafb_vbe_getpmi(task, par);
-               }
        }
 #else
        /* The protected mode interface is not available on non-x86. */
@@ -1452,8 +1476,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
        info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
 
        /* Disable blanking if the user requested so. */
-       if (!blank)
-               info->fbops->fb_blank = NULL;
+       if (!blank) {
+               pax_open_kernel();
+               *(void **)&info->fbops->fb_blank = NULL;
+               pax_close_kernel();
+       }
 
        /*
         * Find out how much IO memory is required for the mode with
@@ -1524,8 +1551,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
        info->flags = FBINFO_FLAG_DEFAULT |
                        (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
 
-       if (!par->ypan)
-               info->fbops->fb_pan_display = NULL;
+       if (!par->ypan) {
+               pax_open_kernel();
+               *(void **)&info->fbops->fb_pan_display = NULL;
+               pax_close_kernel();
+       }
 }
 
 static void uvesafb_init_mtrr(struct fb_info *info)
@@ -1786,6 +1816,11 @@ out_mode:
 out:
        kfree(par->vbe_modes);
 
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+       if (par->pmi_code)
+               module_memfree_exec(par->pmi_code);
+#endif
+
        framebuffer_release(info);
        return err;
 }
@@ -1810,6 +1845,11 @@ static int uvesafb_remove(struct platform_device *dev)
                kfree(par->vbe_state_orig);
                kfree(par->vbe_state_saved);
 
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+               if (par->pmi_code)
+                       module_memfree_exec(par->pmi_code);
+#endif
+
                framebuffer_release(info);
        }
        return 0;
index d79a0ac49fc7d17449b358174464847cdbc5b4a0..2d0c3d4ccb0f8b4561b20c949ae7abf62dd35ca0 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/moduleloader.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/string.h>
@@ -52,8 +53,8 @@ static int   vram_remap;                      /* Set amount of memory to be used */
 static int   vram_total;                       /* Set total amount of memory */
 static int   pmi_setpal __read_mostly = 1;     /* pmi for palette changes ??? */
 static int   ypan       __read_mostly;         /* 0..nothing, 1..ypan, 2..ywrap */
-static void  (*pmi_start)(void) __read_mostly;
-static void  (*pmi_pal)  (void) __read_mostly;
+static void  (*pmi_start)(void) __read_only;
+static void  (*pmi_pal)  (void) __read_only;
 static int   depth      __read_mostly;
 static int   vga_compat __read_mostly;
 /* --------------------------------------------------------------------- */
@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
        unsigned int size_remap;
        unsigned int size_total;
        char *option = NULL;
+       void *pmi_code = NULL;
 
        /* ignore error return of fb_get_options */
        fb_get_options("vesafb", &option);
@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
                size_remap = size_total;
        vesafb_fix.smem_len = size_remap;
 
-#ifndef __i386__
-       screen_info.vesapm_seg = 0;
-#endif
-
        if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
                printk(KERN_WARNING
                       "vesafb: cannot reserve video memory at 0x%lx\n",
@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
        printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
               vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
 
+#ifdef __i386__
+
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+       pmi_code = module_alloc_exec(screen_info.vesapm_size);
+       if (!pmi_code)
+#elif !defined(CONFIG_PAX_KERNEXEC)
+       if (0)
+#endif
+
+#endif
+       screen_info.vesapm_seg = 0;
+
        if (screen_info.vesapm_seg) {
-               printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
-                      screen_info.vesapm_seg,screen_info.vesapm_off);
+               printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
+                      screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
        }
 
        if (screen_info.vesapm_seg < 0xc000)
@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
 
        if (ypan || pmi_setpal) {
                unsigned short *pmi_base;
+
                pmi_base  = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
-               pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
-               pmi_pal   = (void*)((char*)pmi_base + pmi_base[2]);
+
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+               pax_open_kernel();
+               memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
+#else
+               pmi_code  = pmi_base;
+#endif
+
+               pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
+               pmi_pal   = (void*)((char*)pmi_code + pmi_base[2]);
+
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+               pmi_start = ktva_ktla(pmi_start);
+               pmi_pal = ktva_ktla(pmi_pal);
+               pax_close_kernel();
+#endif
+
                printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
                if (pmi_base[3]) {
                        printk(KERN_INFO "vesafb: pmi: ports = ");
@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
        info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
                (ypan ? FBINFO_HWACCEL_YPAN : 0);
 
-       if (!ypan)
-               info->fbops->fb_pan_display = NULL;
+       if (!ypan) {
+               pax_open_kernel();
+               *(void **)&info->fbops->fb_pan_display = NULL;
+               pax_close_kernel();
+       }
 
        if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
                err = -ENOMEM;
@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
        fb_info(info, "%s frame buffer device\n", info->fix.id);
        return 0;
 err:
+
+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+       module_memfree_exec(pmi_code);
+#endif
+
        if (info->screen_base)
                iounmap(info->screen_base);
        framebuffer_release(info);
index 88714ae0d157d83eeb2dbb9151e21676dc5e3df9..16c2e11132fd199db181527b6d0ba2fb70b72a72 100644 (file)
@@ -56,7 +56,7 @@ struct via_clock {
 
        void (*set_engine_pll_state)(u8 state);
        void (*set_engine_pll)(struct via_pll_config config);
-};
+} __no_const;
 
 
 static inline u32 get_pll_internal_frequency(u32 ref_freq,
index 3c14e43b82fefe1d32f591d1b2f61d2cd28d0fa8..263057016d3b538afe91ca7451cda69fe0bae957 100644 (file)
@@ -2,1603 +2,1123 @@ P3
 # Standard 224-color Linux logo
 80 80
 255
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  6   6   6   6   6   6  10  10  10  10  10  10
- 10  10  10   6   6   6   6   6   6   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   6   6   6  10  10  10  14  14  14
- 22  22  22  26  26  26  30  30  30  34  34  34
- 30  30  30  30  30  30  26  26  26  18  18  18
- 14  14  14  10  10  10   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   1   0   0   1   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  6   6   6  14  14  14  26  26  26  42  42  42
- 54  54  54  66  66  66  78  78  78  78  78  78
- 78  78  78  74  74  74  66  66  66  54  54  54
- 42  42  42  26  26  26  18  18  18  10  10  10
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   1   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 22  22  22  42  42  42  66  66  66  86  86  86
- 66  66  66  38  38  38  38  38  38  22  22  22
- 26  26  26  34  34  34  54  54  54  66  66  66
- 86  86  86  70  70  70  46  46  46  26  26  26
- 14  14  14   6   6   6   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   1   0   0   1   0   0   1   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0  10  10  10  26  26  26
- 50  50  50  82  82  82  58  58  58   6   6   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  6   6   6  54  54  54  86  86  86  66  66  66
- 38  38  38  18  18  18   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   6   6   6  22  22  22  50  50  50
- 78  78  78  34  34  34   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   6   6   6  70  70  70
- 78  78  78  46  46  46  22  22  22   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   1   0   0   1   0   0   1   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  6   6   6  18  18  18  42  42  42  82  82  82
- 26  26  26   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6  14  14  14
- 46  46  46  34  34  34   6   6   6   2   2   6
- 42  42  42  78  78  78  42  42  42  18  18  18
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   1   0   0   0   0   0   1   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
- 10  10  10  30  30  30  66  66  66  58  58  58
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6  26  26  26
- 86  86  86 101 101 101  46  46  46  10  10  10
-  2   2   6  58  58  58  70  70  70  34  34  34
- 10  10  10   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   1   0   0   1   0   0   1   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
- 14  14  14  42  42  42  86  86  86  10  10  10
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6  30  30  30
- 94  94  94  94  94  94  58  58  58  26  26  26
-  2   2   6   6   6   6  78  78  78  54  54  54
- 22  22  22   6   6   6   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 22  22  22  62  62  62  62  62  62   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6  26  26  26
- 54  54  54  38  38  38  18  18  18  10  10  10
-  2   2   6   2   2   6  34  34  34  82  82  82
- 38  38  38  14  14  14   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   1   0   0   1   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 30  30  30  78  78  78  30  30  30   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6  10  10  10
- 10  10  10   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6  78  78  78
- 50  50  50  18  18  18   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   1   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 38  38  38  86  86  86  14  14  14   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6  54  54  54
- 66  66  66  26  26  26   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   1   0   0   1   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  14  14  14
- 42  42  42  82  82  82   2   2   6   2   2   6
-  2   2   6   6   6   6  10  10  10   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   6   6   6
- 14  14  14  10  10  10   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6  18  18  18
- 82  82  82  34  34  34  10  10  10   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   1   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  14  14  14
- 46  46  46  86  86  86   2   2   6   2   2   6
-  6   6   6   6   6   6  22  22  22  34  34  34
-  6   6   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6  18  18  18  34  34  34
- 10  10  10  50  50  50  22  22  22   2   2   6
-  2   2   6   2   2   6   2   2   6  10  10  10
- 86  86  86  42  42  42  14  14  14   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   1   0   0   1   0   0   1   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  14  14  14
- 46  46  46  86  86  86   2   2   6   2   2   6
- 38  38  38 116 116 116  94  94  94  22  22  22
- 22  22  22   2   2   6   2   2   6   2   2   6
- 14  14  14  86  86  86 138 138 138 162 162 162
-154 154 154  38  38  38  26  26  26   6   6   6
-  2   2   6   2   2   6   2   2   6   2   2   6
- 86  86  86  46  46  46  14  14  14   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  14  14  14
- 46  46  46  86  86  86   2   2   6  14  14  14
-134 134 134 198 198 198 195 195 195 116 116 116
- 10  10  10   2   2   6   2   2   6   6   6   6
-101  98  89 187 187 187 210 210 210 218 218 218
-214 214 214 134 134 134  14  14  14   6   6   6
-  2   2   6   2   2   6   2   2   6   2   2   6
- 86  86  86  50  50  50  18  18  18   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   1   0   0   0
-  0   0   1   0   0   1   0   0   1   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  14  14  14
- 46  46  46  86  86  86   2   2   6  54  54  54
-218 218 218 195 195 195 226 226 226 246 246 246
- 58  58  58   2   2   6   2   2   6  30  30  30
-210 210 210 253 253 253 174 174 174 123 123 123
-221 221 221 234 234 234  74  74  74   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
- 70  70  70  58  58  58  22  22  22   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  14  14  14
- 46  46  46  82  82  82   2   2   6 106 106 106
-170 170 170  26  26  26  86  86  86 226 226 226
-123 123 123  10  10  10  14  14  14  46  46  46
-231 231 231 190 190 190   6   6   6  70  70  70
- 90  90  90 238 238 238 158 158 158   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
- 70  70  70  58  58  58  22  22  22   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   1   0   0   0
-  0   0   1   0   0   1   0   0   1   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  14  14  14
- 42  42  42  86  86  86   6   6   6 116 116 116
-106 106 106   6   6   6  70  70  70 149 149 149
-128 128 128  18  18  18  38  38  38  54  54  54
-221 221 221 106 106 106   2   2   6  14  14  14
- 46  46  46 190 190 190 198 198 198   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
- 74  74  74  62  62  62  22  22  22   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   1   0   0   0
-  0   0   1   0   0   0   0   0   1   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  14  14  14
- 42  42  42  94  94  94  14  14  14 101 101 101
-128 128 128   2   2   6  18  18  18 116 116 116
-118  98  46 121  92   8 121  92   8  98  78  10
-162 162 162 106 106 106   2   2   6   2   2   6
-  2   2   6 195 195 195 195 195 195   6   6   6
-  2   2   6   2   2   6   2   2   6   2   2   6
- 74  74  74  62  62  62  22  22  22   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   1   0   0   1
-  0   0   1   0   0   0   0   0   1   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 38  38  38  90  90  90  14  14  14  58  58  58
-210 210 210  26  26  26  54  38   6 154 114  10
-226 170  11 236 186  11 225 175  15 184 144  12
-215 174  15 175 146  61  37  26   9   2   2   6
- 70  70  70 246 246 246 138 138 138   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
- 70  70  70  66  66  66  26  26  26   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 38  38  38  86  86  86  14  14  14  10  10  10
-195 195 195 188 164 115 192 133   9 225 175  15
-239 182  13 234 190  10 232 195  16 232 200  30
-245 207  45 241 208  19 232 195  16 184 144  12
-218 194 134 211 206 186  42  42  42   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
- 50  50  50  74  74  74  30  30  30   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 34  34  34  86  86  86  14  14  14   2   2   6
-121  87  25 192 133   9 219 162  10 239 182  13
-236 186  11 232 195  16 241 208  19 244 214  54
-246 218  60 246 218  38 246 215  20 241 208  19
-241 208  19 226 184  13 121  87  25   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
- 50  50  50  82  82  82  34  34  34  10  10  10
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 34  34  34  82  82  82  30  30  30  61  42   6
-180 123   7 206 145  10 230 174  11 239 182  13
-234 190  10 238 202  15 241 208  19 246 218  74
-246 218  38 246 215  20 246 215  20 246 215  20
-226 184  13 215 174  15 184 144  12   6   6   6
-  2   2   6   2   2   6   2   2   6   2   2   6
- 26  26  26  94  94  94  42  42  42  14  14  14
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 30  30  30  78  78  78  50  50  50 104  69   6
-192 133   9 216 158  10 236 178  12 236 186  11
-232 195  16 241 208  19 244 214  54 245 215  43
-246 215  20 246 215  20 241 208  19 198 155  10
-200 144  11 216 158  10 156 118  10   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  6   6   6  90  90  90  54  54  54  18  18  18
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 30  30  30  78  78  78  46  46  46  22  22  22
-137  92   6 210 162  10 239 182  13 238 190  10
-238 202  15 241 208  19 246 215  20 246 215  20
-241 208  19 203 166  17 185 133  11 210 150  10
-216 158  10 210 150  10 102  78  10   2   2   6
-  6   6   6  54  54  54  14  14  14   2   2   6
-  2   2   6  62  62  62  74  74  74  30  30  30
- 10  10  10   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 34  34  34  78  78  78  50  50  50   6   6   6
- 94  70  30 139 102  15 190 146  13 226 184  13
-232 200  30 232 195  16 215 174  15 190 146  13
-168 122  10 192 133   9 210 150  10 213 154  11
-202 150  34 182 157 106 101  98  89   2   2   6
-  2   2   6  78  78  78 116 116 116  58  58  58
-  2   2   6  22  22  22  90  90  90  46  46  46
- 18  18  18   6   6   6   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 38  38  38  86  86  86  50  50  50   6   6   6
-128 128 128 174 154 114 156 107  11 168 122  10
-198 155  10 184 144  12 197 138  11 200 144  11
-206 145  10 206 145  10 197 138  11 188 164 115
-195 195 195 198 198 198 174 174 174  14  14  14
-  2   2   6  22  22  22 116 116 116 116 116 116
- 22  22  22   2   2   6  74  74  74  70  70  70
- 30  30  30  10  10  10   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  18  18  18
- 50  50  50 101 101 101  26  26  26  10  10  10
-138 138 138 190 190 190 174 154 114 156 107  11
-197 138  11 200 144  11 197 138  11 192 133   9
-180 123   7 190 142  34 190 178 144 187 187 187
-202 202 202 221 221 221 214 214 214  66  66  66
-  2   2   6   2   2   6  50  50  50  62  62  62
-  6   6   6   2   2   6  10  10  10  90  90  90
- 50  50  50  18  18  18   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0  10  10  10  34  34  34
- 74  74  74  74  74  74   2   2   6   6   6   6
-144 144 144 198 198 198 190 190 190 178 166 146
-154 121  60 156 107  11 156 107  11 168 124  44
-174 154 114 187 187 187 190 190 190 210 210 210
-246 246 246 253 253 253 253 253 253 182 182 182
-  6   6   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6  62  62  62
- 74  74  74  34  34  34  14  14  14   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0  10  10  10  22  22  22  54  54  54
- 94  94  94  18  18  18   2   2   6  46  46  46
-234 234 234 221 221 221 190 190 190 190 190 190
-190 190 190 187 187 187 187 187 187 190 190 190
-190 190 190 195 195 195 214 214 214 242 242 242
-253 253 253 253 253 253 253 253 253 253 253 253
- 82  82  82   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6  14  14  14
- 86  86  86  54  54  54  22  22  22   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  6   6   6  18  18  18  46  46  46  90  90  90
- 46  46  46  18  18  18   6   6   6 182 182 182
-253 253 253 246 246 246 206 206 206 190 190 190
-190 190 190 190 190 190 190 190 190 190 190 190
-206 206 206 231 231 231 250 250 250 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-202 202 202  14  14  14   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
- 42  42  42  86  86  86  42  42  42  18  18  18
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 14  14  14  38  38  38  74  74  74  66  66  66
-  2   2   6   6   6   6  90  90  90 250 250 250
-253 253 253 253 253 253 238 238 238 198 198 198
-190 190 190 190 190 190 195 195 195 221 221 221
-246 246 246 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253  82  82  82   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6  78  78  78  70  70  70  34  34  34
- 14  14  14   6   6   6   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  14  14  14
- 34  34  34  66  66  66  78  78  78   6   6   6
-  2   2   6  18  18  18 218 218 218 253 253 253
-253 253 253 253 253 253 253 253 253 246 246 246
-226 226 226 231 231 231 246 246 246 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 178 178 178   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6  18  18  18  90  90  90  62  62  62
- 30  30  30  10  10  10   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0  10  10  10  26  26  26
- 58  58  58  90  90  90  18  18  18   2   2   6
-  2   2   6 110 110 110 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-250 250 250 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 231 231 231  18  18  18   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6  18  18  18  94  94  94
- 54  54  54  26  26  26  10  10  10   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   6   6   6  22  22  22  50  50  50
- 90  90  90  26  26  26   2   2   6   2   2   6
- 14  14  14 195 195 195 250 250 250 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-250 250 250 242 242 242  54  54  54   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6  38  38  38
- 86  86  86  50  50  50  22  22  22   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  6   6   6  14  14  14  38  38  38  82  82  82
- 34  34  34   2   2   6   2   2   6   2   2   6
- 42  42  42 195 195 195 246 246 246 253 253 253
-253 253 253 253 253 253 253 253 253 250 250 250
-242 242 242 242 242 242 250 250 250 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 250 250 250 246 246 246 238 238 238
-226 226 226 231 231 231 101 101 101   6   6   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
- 38  38  38  82  82  82  42  42  42  14  14  14
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
- 10  10  10  26  26  26  62  62  62  66  66  66
-  2   2   6   2   2   6   2   2   6   6   6   6
- 70  70  70 170 170 170 206 206 206 234 234 234
-246 246 246 250 250 250 250 250 250 238 238 238
-226 226 226 231 231 231 238 238 238 250 250 250
-250 250 250 250 250 250 246 246 246 231 231 231
-214 214 214 206 206 206 202 202 202 202 202 202
-198 198 198 202 202 202 182 182 182  18  18  18
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6  62  62  62  66  66  66  30  30  30
- 10  10  10   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
- 14  14  14  42  42  42  82  82  82  18  18  18
-  2   2   6   2   2   6   2   2   6  10  10  10
- 94  94  94 182 182 182 218 218 218 242 242 242
-250 250 250 253 253 253 253 253 253 250 250 250
-234 234 234 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 246 246 246
-238 238 238 226 226 226 210 210 210 202 202 202
-195 195 195 195 195 195 210 210 210 158 158 158
-  6   6   6  14  14  14  50  50  50  14  14  14
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   6   6   6  86  86  86  46  46  46
- 18  18  18   6   6   6   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 22  22  22  54  54  54  70  70  70   2   2   6
-  2   2   6  10  10  10   2   2   6  22  22  22
-166 166 166 231 231 231 250 250 250 253 253 253
-253 253 253 253 253 253 253 253 253 250 250 250
-242 242 242 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 246 246 246
-231 231 231 206 206 206 198 198 198 226 226 226
- 94  94  94   2   2   6   6   6   6  38  38  38
- 30  30  30   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6  62  62  62  66  66  66
- 26  26  26  10  10  10   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 30  30  30  74  74  74  50  50  50   2   2   6
- 26  26  26  26  26  26   2   2   6 106 106 106
-238 238 238 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 246 246 246 218 218 218 202 202 202
-210 210 210  14  14  14   2   2   6   2   2   6
- 30  30  30  22  22  22   2   2   6   2   2   6
-  2   2   6   2   2   6  18  18  18  86  86  86
- 42  42  42  14  14  14   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  14  14  14
- 42  42  42  90  90  90  22  22  22   2   2   6
- 42  42  42   2   2   6  18  18  18 218 218 218
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 250 250 250 221 221 221
-218 218 218 101 101 101   2   2   6  14  14  14
- 18  18  18  38  38  38  10  10  10   2   2   6
-  2   2   6   2   2   6   2   2   6  78  78  78
- 58  58  58  22  22  22   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  18  18  18
- 54  54  54  82  82  82   2   2   6  26  26  26
- 22  22  22   2   2   6 123 123 123 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 250 250 250
-238 238 238 198 198 198   6   6   6  38  38  38
- 58  58  58  26  26  26  38  38  38   2   2   6
-  2   2   6   2   2   6   2   2   6  46  46  46
- 78  78  78  30  30  30  10  10  10   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0  10  10  10  30  30  30
- 74  74  74  58  58  58   2   2   6  42  42  42
-  2   2   6  22  22  22 231 231 231 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 250 250 250
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 246 246 246  46  46  46  38  38  38
- 42  42  42  14  14  14  38  38  38  14  14  14
-  2   2   6   2   2   6   2   2   6   6   6   6
- 86  86  86  46  46  46  14  14  14   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   6   6   6  14  14  14  42  42  42
- 90  90  90  18  18  18  18  18  18  26  26  26
-  2   2   6 116 116 116 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 250 250 250 238 238 238
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253  94  94  94   6   6   6
-  2   2   6   2   2   6  10  10  10  34  34  34
-  2   2   6   2   2   6   2   2   6   2   2   6
- 74  74  74  58  58  58  22  22  22   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0  10  10  10  26  26  26  66  66  66
- 82  82  82   2   2   6  38  38  38   6   6   6
- 14  14  14 210 210 210 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 246 246 246 242 242 242
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 144 144 144   2   2   6
-  2   2   6   2   2   6   2   2   6  46  46  46
-  2   2   6   2   2   6   2   2   6   2   2   6
- 42  42  42  74  74  74  30  30  30  10  10  10
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  6   6   6  14  14  14  42  42  42  90  90  90
- 26  26  26   6   6   6  42  42  42   2   2   6
- 74  74  74 250 250 250 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 242 242 242 242 242 242
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 182 182 182   2   2   6
-  2   2   6   2   2   6   2   2   6  46  46  46
-  2   2   6   2   2   6   2   2   6   2   2   6
- 10  10  10  86  86  86  38  38  38  10  10  10
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
- 10  10  10  26  26  26  66  66  66  82  82  82
-  2   2   6  22  22  22  18  18  18   2   2   6
-149 149 149 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 234 234 234 242 242 242
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 206 206 206   2   2   6
-  2   2   6   2   2   6   2   2   6  38  38  38
-  2   2   6   2   2   6   2   2   6   2   2   6
-  6   6   6  86  86  86  46  46  46  14  14  14
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 18  18  18  46  46  46  86  86  86  18  18  18
-  2   2   6  34  34  34  10  10  10   6   6   6
-210 210 210 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 234 234 234 242 242 242
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 221 221 221   6   6   6
-  2   2   6   2   2   6   6   6   6  30  30  30
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6  82  82  82  54  54  54  18  18  18
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 26  26  26  66  66  66  62  62  62   2   2   6
-  2   2   6  38  38  38  10  10  10  26  26  26
-238 238 238 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 231 231 231 238 238 238
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 231 231 231   6   6   6
-  2   2   6   2   2   6  10  10  10  30  30  30
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6  66  66  66  58  58  58  22  22  22
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 38  38  38  78  78  78   6   6   6   2   2   6
-  2   2   6  46  46  46  14  14  14  42  42  42
-246 246 246 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 231 231 231 242 242 242
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 234 234 234  10  10  10
-  2   2   6   2   2   6  22  22  22  14  14  14
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6  66  66  66  62  62  62  22  22  22
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  18  18  18
- 50  50  50  74  74  74   2   2   6   2   2   6
- 14  14  14  70  70  70  34  34  34  62  62  62
-250 250 250 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 231 231 231 246 246 246
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 234 234 234  14  14  14
-  2   2   6   2   2   6  30  30  30   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6  66  66  66  62  62  62  22  22  22
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  18  18  18
- 54  54  54  62  62  62   2   2   6   2   2   6
-  2   2   6  30  30  30  46  46  46  70  70  70
-250 250 250 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 231 231 231 246 246 246
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 226 226 226  10  10  10
-  2   2   6   6   6   6  30  30  30   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6  66  66  66  58  58  58  22  22  22
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  22  22  22
- 58  58  58  62  62  62   2   2   6   2   2   6
-  2   2   6   2   2   6  30  30  30  78  78  78
-250 250 250 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 231 231 231 246 246 246
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 206 206 206   2   2   6
- 22  22  22  34  34  34  18  14   6  22  22  22
- 26  26  26  18  18  18   6   6   6   2   2   6
-  2   2   6  82  82  82  54  54  54  18  18  18
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  26  26  26
- 62  62  62 106 106 106  74  54  14 185 133  11
-210 162  10 121  92   8   6   6   6  62  62  62
-238 238 238 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 231 231 231 246 246 246
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 158 158 158  18  18  18
- 14  14  14   2   2   6   2   2   6   2   2   6
-  6   6   6  18  18  18  66  66  66  38  38  38
-  6   6   6  94  94  94  50  50  50  18  18  18
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 10  10  10  10  10  10  18  18  18  38  38  38
- 78  78  78 142 134 106 216 158  10 242 186  14
-246 190  14 246 190  14 156 118  10  10  10  10
- 90  90  90 238 238 238 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 231 231 231 250 250 250
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 246 230 190
-238 204  91 238 204  91 181 142  44  37  26   9
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6  38  38  38  46  46  46
- 26  26  26 106 106 106  54  54  54  18  18  18
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   6   6   6  14  14  14  22  22  22
- 30  30  30  38  38  38  50  50  50  70  70  70
-106 106 106 190 142  34 226 170  11 242 186  14
-246 190  14 246 190  14 246 190  14 154 114  10
-  6   6   6  74  74  74 226 226 226 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 231 231 231 250 250 250
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 228 184  62
-241 196  14 241 208  19 232 195  16  38  30  10
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   6   6   6  30  30  30  26  26  26
-203 166  17 154 142  90  66  66  66  26  26  26
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  6   6   6  18  18  18  38  38  38  58  58  58
- 78  78  78  86  86  86 101 101 101 123 123 123
-175 146  61 210 150  10 234 174  13 246 186  14
-246 190  14 246 190  14 246 190  14 238 190  10
-102  78  10   2   2   6  46  46  46 198 198 198
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 234 234 234 242 242 242
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 224 178  62
-242 186  14 241 196  14 210 166  10  22  18   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   6   6   6 121  92   8
-238 202  15 232 195  16  82  82  82  34  34  34
- 10  10  10   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
- 14  14  14  38  38  38  70  70  70 154 122  46
-190 142  34 200 144  11 197 138  11 197 138  11
-213 154  11 226 170  11 242 186  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-225 175  15  46  32   6   2   2   6  22  22  22
-158 158 158 250 250 250 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 250 250 250 242 242 242 224 178  62
-239 182  13 236 186  11 213 154  11  46  32   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6  61  42   6 225 175  15
-238 190  10 236 186  11 112 100  78  42  42  42
- 14  14  14   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 22  22  22  54  54  54 154 122  46 213 154  11
-226 170  11 230 174  11 226 170  11 226 170  11
-236 178  12 242 186  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-241 196  14 184 144  12  10  10  10   2   2   6
-  6   6   6 116 116 116 242 242 242 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 231 231 231 198 198 198 214 170  54
-236 178  12 236 178  12 210 150  10 137  92   6
- 18  14   6   2   2   6   2   2   6   2   2   6
-  6   6   6  70  47   6 200 144  11 236 178  12
-239 182  13 239 182  13 124 112  88  58  58  58
- 22  22  22   6   6   6   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 30  30  30  70  70  70 180 133  36 226 170  11
-239 182  13 242 186  14 242 186  14 246 186  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 232 195  16  98  70   6   2   2   6
-  2   2   6   2   2   6  66  66  66 221 221 221
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 206 206 206 198 198 198 214 166  58
-230 174  11 230 174  11 216 158  10 192 133   9
-163 110   8 116  81   8 102  78  10 116  81   8
-167 114   7 197 138  11 226 170  11 239 182  13
-242 186  14 242 186  14 162 146  94  78  78  78
- 34  34  34  14  14  14   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 30  30  30  78  78  78 190 142  34 226 170  11
-239 182  13 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 241 196  14 203 166  17  22  18   6
-  2   2   6   2   2   6   2   2   6  38  38  38
-218 218 218 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-250 250 250 206 206 206 198 198 198 202 162  69
-226 170  11 236 178  12 224 166  10 210 150  10
-200 144  11 197 138  11 192 133   9 197 138  11
-210 150  10 226 170  11 242 186  14 246 190  14
-246 190  14 246 186  14 225 175  15 124 112  88
- 62  62  62  30  30  30  14  14  14   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 30  30  30  78  78  78 174 135  50 224 166  10
-239 182  13 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 241 196  14 139 102  15
-  2   2   6   2   2   6   2   2   6   2   2   6
- 78  78  78 250 250 250 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-250 250 250 214 214 214 198 198 198 190 150  46
-219 162  10 236 178  12 234 174  13 224 166  10
-216 158  10 213 154  11 213 154  11 216 158  10
-226 170  11 239 182  13 246 190  14 246 190  14
-246 190  14 246 190  14 242 186  14 206 162  42
-101 101 101  58  58  58  30  30  30  14  14  14
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 30  30  30  74  74  74 174 135  50 216 158  10
-236 178  12 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 241 196  14 226 184  13
- 61  42   6   2   2   6   2   2   6   2   2   6
- 22  22  22 238 238 238 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 226 226 226 187 187 187 180 133  36
-216 158  10 236 178  12 239 182  13 236 178  12
-230 174  11 226 170  11 226 170  11 230 174  11
-236 178  12 242 186  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 186  14 239 182  13
-206 162  42 106 106 106  66  66  66  34  34  34
- 14  14  14   6   6   6   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 26  26  26  70  70  70 163 133  67 213 154  11
-236 178  12 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 241 196  14
-190 146  13  18  14   6   2   2   6   2   2   6
- 46  46  46 246 246 246 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 221 221 221  86  86  86 156 107  11
-216 158  10 236 178  12 242 186  14 246 186  14
-242 186  14 239 182  13 239 182  13 242 186  14
-242 186  14 246 186  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-242 186  14 225 175  15 142 122  72  66  66  66
- 30  30  30  10  10  10   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 26  26  26  70  70  70 163 133  67 210 150  10
-236 178  12 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-232 195  16 121  92   8  34  34  34 106 106 106
-221 221 221 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-242 242 242  82  82  82  18  14   6 163 110   8
-216 158  10 236 178  12 242 186  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 242 186  14 163 133  67
- 46  46  46  18  18  18   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 30  30  30  78  78  78 163 133  67 210 150  10
-236 178  12 246 186  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-241 196  14 215 174  15 190 178 144 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 218 218 218
- 58  58  58   2   2   6  22  18   6 167 114   7
-216 158  10 236 178  12 246 186  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 186  14 242 186  14 190 150  46
- 54  54  54  22  22  22   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  14  14  14
- 38  38  38  86  86  86 180 133  36 213 154  11
-236 178  12 246 186  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 232 195  16 190 146  13 214 214 214
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 250 250 250 170 170 170  26  26  26
-  2   2   6   2   2   6  37  26   9 163 110   8
-219 162  10 239 182  13 246 186  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 186  14 236 178  12 224 166  10 142 122  72
- 46  46  46  18  18  18   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  18  18  18
- 50  50  50 109 106  95 192 133   9 224 166  10
-242 186  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-242 186  14 226 184  13 210 162  10 142 110  46
-226 226 226 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-198 198 198  66  66  66   2   2   6   2   2   6
-  2   2   6   2   2   6  50  34   6 156 107  11
-219 162  10 239 182  13 246 186  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 242 186  14
-234 174  13 213 154  11 154 122  46  66  66  66
- 30  30  30  10  10  10   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  22  22  22
- 58  58  58 154 121  60 206 145  10 234 174  13
-242 186  14 246 186  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 186  14 236 178  12 210 162  10 163 110   8
- 61  42   6 138 138 138 218 218 218 250 250 250
-253 253 253 253 253 253 253 253 253 250 250 250
-242 242 242 210 210 210 144 144 144  66  66  66
-  6   6   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6  61  42   6 163 110   8
-216 158  10 236 178  12 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 239 182  13 230 174  11 216 158  10
-190 142  34 124 112  88  70  70  70  38  38  38
- 18  18  18   6   6   6   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  22  22  22
- 62  62  62 168 124  44 206 145  10 224 166  10
-236 178  12 239 182  13 242 186  14 242 186  14
-246 186  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 236 178  12 216 158  10 175 118   6
- 80  54   7   2   2   6   6   6   6  30  30  30
- 54  54  54  62  62  62  50  50  50  38  38  38
- 14  14  14   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   6   6   6  80  54   7 167 114   7
-213 154  11 236 178  12 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 242 186  14 239 182  13 239 182  13
-230 174  11 210 150  10 174 135  50 124 112  88
- 82  82  82  54  54  54  34  34  34  18  18  18
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  18  18  18
- 50  50  50 158 118  36 192 133   9 200 144  11
-216 158  10 219 162  10 224 166  10 226 170  11
-230 174  11 236 178  12 239 182  13 239 182  13
-242 186  14 246 186  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 186  14 230 174  11 210 150  10 163 110   8
-104  69   6  10  10  10   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   6   6   6  91  60   6 167 114   7
-206 145  10 230 174  11 242 186  14 246 190  14
-246 190  14 246 190  14 246 186  14 242 186  14
-239 182  13 230 174  11 224 166  10 213 154  11
-180 133  36 124 112  88  86  86  86  58  58  58
- 38  38  38  22  22  22  10  10  10   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  14  14  14
- 34  34  34  70  70  70 138 110  50 158 118  36
-167 114   7 180 123   7 192 133   9 197 138  11
-200 144  11 206 145  10 213 154  11 219 162  10
-224 166  10 230 174  11 239 182  13 242 186  14
-246 186  14 246 186  14 246 186  14 246 186  14
-239 182  13 216 158  10 185 133  11 152  99   6
-104  69   6  18  14   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   6   6   6  80  54   7 152  99   6
-192 133   9 219 162  10 236 178  12 239 182  13
-246 186  14 242 186  14 239 182  13 236 178  12
-224 166  10 206 145  10 192 133   9 154 121  60
- 94  94  94  62  62  62  42  42  42  22  22  22
- 14  14  14   6   6   6   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 18  18  18  34  34  34  58  58  58  78  78  78
-101  98  89 124 112  88 142 110  46 156 107  11
-163 110   8 167 114   7 175 118   6 180 123   7
-185 133  11 197 138  11 210 150  10 219 162  10
-226 170  11 236 178  12 236 178  12 234 174  13
-219 162  10 197 138  11 163 110   8 130  83   6
- 91  60   6  10  10  10   2   2   6   2   2   6
- 18  18  18  38  38  38  38  38  38  38  38  38
- 38  38  38  38  38  38  38  38  38  38  38  38
- 38  38  38  38  38  38  26  26  26   2   2   6
-  2   2   6   6   6   6  70  47   6 137  92   6
-175 118   6 200 144  11 219 162  10 230 174  11
-234 174  13 230 174  11 219 162  10 210 150  10
-192 133   9 163 110   8 124 112  88  82  82  82
- 50  50  50  30  30  30  14  14  14   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  6   6   6  14  14  14  22  22  22  34  34  34
- 42  42  42  58  58  58  74  74  74  86  86  86
-101  98  89 122 102  70 130  98  46 121  87  25
-137  92   6 152  99   6 163 110   8 180 123   7
-185 133  11 197 138  11 206 145  10 200 144  11
-180 123   7 156 107  11 130  83   6 104  69   6
- 50  34   6  54  54  54 110 110 110 101  98  89
- 86  86  86  82  82  82  78  78  78  78  78  78
- 78  78  78  78  78  78  78  78  78  78  78  78
- 78  78  78  82  82  82  86  86  86  94  94  94
-106 106 106 101 101 101  86  66  34 124  80   6
-156 107  11 180 123   7 192 133   9 200 144  11
-206 145  10 200 144  11 192 133   9 175 118   6
-139 102  15 109 106  95  70  70  70  42  42  42
- 22  22  22  10  10  10   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  10  10  10
- 14  14  14  22  22  22  30  30  30  38  38  38
- 50  50  50  62  62  62  74  74  74  90  90  90
-101  98  89 112 100  78 121  87  25 124  80   6
-137  92   6 152  99   6 152  99   6 152  99   6
-138  86   6 124  80   6  98  70   6  86  66  30
-101  98  89  82  82  82  58  58  58  46  46  46
- 38  38  38  34  34  34  34  34  34  34  34  34
- 34  34  34  34  34  34  34  34  34  34  34  34
- 34  34  34  34  34  34  38  38  38  42  42  42
- 54  54  54  82  82  82  94  86  76  91  60   6
-134  86   6 156 107  11 167 114   7 175 118   6
-175 118   6 167 114   7 152  99   6 121  87  25
-101  98  89  62  62  62  34  34  34  18  18  18
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   6   6   6   6   6   6  10  10  10
- 18  18  18  22  22  22  30  30  30  42  42  42
- 50  50  50  66  66  66  86  86  86 101  98  89
-106  86  58  98  70   6 104  69   6 104  69   6
-104  69   6  91  60   6  82  62  34  90  90  90
- 62  62  62  38  38  38  22  22  22  14  14  14
- 10  10  10  10  10  10  10  10  10  10  10  10
- 10  10  10  10  10  10   6   6   6  10  10  10
- 10  10  10  10  10  10  10  10  10  14  14  14
- 22  22  22  42  42  42  70  70  70  89  81  66
- 80  54   7 104  69   6 124  80   6 137  92   6
-134  86   6 116  81   8 100  82  52  86  86  86
- 58  58  58  30  30  30  14  14  14   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   6   6   6  10  10  10  14  14  14
- 18  18  18  26  26  26  38  38  38  54  54  54
- 70  70  70  86  86  86  94  86  76  89  81  66
- 89  81  66  86  86  86  74  74  74  50  50  50
- 30  30  30  14  14  14   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  6   6   6  18  18  18  34  34  34  58  58  58
- 82  82  82  89  81  66  89  81  66  89  81  66
- 94  86  66  94  86  76  74  74  74  50  50  50
- 26  26  26  14  14  14   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  6   6   6   6   6   6  14  14  14  18  18  18
- 30  30  30  38  38  38  46  46  46  54  54  54
- 50  50  50  42  42  42  30  30  30  18  18  18
- 10  10  10   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   6   6   6  14  14  14  26  26  26
- 38  38  38  50  50  50  58  58  58  58  58  58
- 54  54  54  42  42  42  30  30  30  18  18  18
- 10  10  10   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
-  6   6   6  10  10  10  14  14  14  18  18  18
- 18  18  18  14  14  14  10  10  10   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 14  14  14  18  18  18  22  22  22  22  22  22
- 18  18  18  14  14  14  10  10  10   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  3 3 3  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  3 3 3  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  1 1 1  0 0 0
+0 0 0  3 3 3  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  2 1 0  2 1 0  3 2 2
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  2 2 2  0 0 0  3 4 3  26 28 28
+37 38 37  37 38 37  14 17 19  2 2 2  0 0 0  2 2 2
+5 5 5  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  3 3 3  0 0 0  1 1 1  6 6 6
+2 2 2  0 0 0  3 3 3  4 4 4  4 4 4  4 4 4
+4 4 5  3 3 3  1 0 0  0 0 0  1 0 0  0 0 0
+1 1 1  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+2 2 2  0 0 0  0 0 0  14 17 19  60 74 84  137 136 137
+153 152 153  137 136 137  125 124 125  60 73 81  6 6 6  3 1 0
+0 0 0  3 3 3  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  0 0 0  4 4 4  41 54 63  125 124 125
+60 73 81  6 6 6  4 0 0  3 3 3  4 4 4  4 4 4
+4 4 4  0 0 0  6 9 11  41 54 63  41 65 82  22 30 35
+2 2 2  2 1 0  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  5 5 5  5 5 5  2 2 2  0 0 0
+4 0 0  6 6 6  41 54 63  137 136 137  174 174 174  167 166 167
+165 164 165  165 164 165  163 162 163  163 162 163  125 124 125  41 54 63
+1 1 1  0 0 0  0 0 0  3 3 3  5 5 5  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  5 5 5  5 5 5
+3 3 3  2 0 0  4 0 0  60 73 81  156 155 156  167 166 167
+163 162 163  85 115 134  5 7 8  0 0 0  4 4 4  5 5 5
+0 0 0  2 5 5  55 98 126  90 154 193  90 154 193  72 125 159
+37 51 59  2 0 0  1 1 1  4 5 5  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  5 5 5  4 4 4  1 1 1  0 0 0  3 3 3
+37 38 37  125 124 125  163 162 163  174 174 174  158 157 158  158 157 158
+156 155 156  156 155 156  158 157 158  165 164 165  174 174 174  166 165 166
+125 124 125  16 19 21  1 0 0  0 0 0  0 0 0  4 4 4
+5 5 5  5 5 5  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  5 5 5  5 5 5  1 1 1
+0 0 0  0 0 0  37 38 37  153 152 153  174 174 174  158 157 158
+174 174 174  163 162 163  37 38 37  4 3 3  4 0 0  1 1 1
+0 0 0  22 40 52  101 161 196  101 161 196  90 154 193  101 161 196
+64 123 161  14 17 19  0 0 0  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  5 5 5
+5 5 5  2 2 2  0 0 0  4 0 0  24 26 27  85 115 134
+156 155 156  174 174 174  167 166 167  156 155 156  154 153 154  157 156 157
+156 155 156  156 155 156  155 154 155  153 152 153  158 157 158  167 166 167
+174 174 174  156 155 156  60 74 84  16 19 21  0 0 0  0 0 0
+1 1 1  5 5 5  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  5 5 5  6 6 6  3 3 3  0 0 0  4 0 0
+13 16 17  60 73 81  137 136 137  165 164 165  156 155 156  153 152 153
+174 174 174  177 184 187  60 73 81  3 1 0  0 0 0  1 1 2
+22 30 35  64 123 161  136 185 209  90 154 193  90 154 193  90 154 193
+90 154 193  21 29 34  0 0 0  3 2 2  4 4 5  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  5 5 5  3 3 3
+0 0 0  0 0 0  10 13 16  60 74 84  157 156 157  174 174 174
+174 174 174  158 157 158  153 152 153  154 153 154  156 155 156  155 154 155
+156 155 156  155 154 155  154 153 154  157 156 157  154 153 154  153 152 153
+163 162 163  174 174 174  177 184 187  137 136 137  60 73 81  13 16 17
+4 0 0  0 0 0  3 3 3  5 5 5  4 4 4  4 4 4
+5 5 5  4 4 4  1 1 1  0 0 0  3 3 3  41 54 63
+131 129 131  174 174 174  174 174 174  174 174 174  167 166 167  174 174 174
+190 197 201  137 136 137  24 26 27  4 0 0  16 21 25  50 82 103
+90 154 193  136 185 209  90 154 193  101 161 196  101 161 196  101 161 196
+31 91 132  3 6 7  0 0 0  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  2 2 2  0 0 0  4 0 0
+4 0 0  43 57 68  137 136 137  177 184 187  174 174 174  163 162 163
+155 154 155  155 154 155  156 155 156  155 154 155  158 157 158  165 164 165
+167 166 167  166 165 166  163 162 163  157 156 157  155 154 155  155 154 155
+153 152 153  156 155 156  167 166 167  174 174 174  174 174 174  131 129 131
+41 54 63  5 5 5  0 0 0  0 0 0  3 3 3  4 4 4
+1 1 1  0 0 0  1 0 0  26 28 28  125 124 125  174 174 174
+177 184 187  174 174 174  174 174 174  156 155 156  131 129 131  137 136 137
+125 124 125  24 26 27  4 0 0  41 65 82  90 154 193  136 185 209
+136 185 209  101 161 196  53 118 160  37 112 160  90 154 193  34 86 122
+7 12 15  0 0 0  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  3 3 3  0 0 0  0 0 0  5 5 5  37 38 37
+125 124 125  167 166 167  174 174 174  167 166 167  158 157 158  155 154 155
+156 155 156  156 155 156  156 155 156  163 162 163  167 166 167  155 154 155
+137 136 137  153 152 153  156 155 156  165 164 165  163 162 163  156 155 156
+156 155 156  156 155 156  155 154 155  158 157 158  166 165 166  174 174 174
+167 166 167  125 124 125  37 38 37  1 0 0  0 0 0  0 0 0
+0 0 0  24 26 27  60 74 84  158 157 158  174 174 174  174 174 174
+166 165 166  158 157 158  125 124 125  41 54 63  13 16 17  6 6 6
+6 6 6  37 38 37  80 127 157  136 185 209  101 161 196  101 161 196
+90 154 193  28 67 93  6 10 14  13 20 25  13 20 25  6 10 14
+1 1 2  4 3 3  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+1 1 1  1 0 0  4 3 3  37 38 37  60 74 84  153 152 153
+167 166 167  167 166 167  158 157 158  154 153 154  155 154 155  156 155 156
+157 156 157  158 157 158  167 166 167  167 166 167  131 129 131  43 57 68
+26 28 28  37 38 37  60 73 81  131 129 131  165 164 165  166 165 166
+158 157 158  155 154 155  156 155 156  156 155 156  156 155 156  158 157 158
+165 164 165  174 174 174  163 162 163  60 74 84  16 19 21  13 16 17
+60 73 81  131 129 131  174 174 174  174 174 174  167 166 167  165 164 165
+137 136 137  60 73 81  24 26 27  4 0 0  4 0 0  16 19 21
+52 104 138  101 161 196  136 185 209  136 185 209  90 154 193  27 99 146
+13 20 25  4 5 7  2 5 5  4 5 7  1 1 2  0 0 0
+4 4 4  4 4 4  3 3 3  2 2 2  2 2 2  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  3 3 3  0 0 0
+0 0 0  13 16 17  60 73 81  137 136 137  174 174 174  166 165 166
+158 157 158  156 155 156  157 156 157  156 155 156  155 154 155  158 157 158
+167 166 167  174 174 174  153 152 153  60 73 81  16 19 21  4 0 0
+4 0 0  4 0 0  6 6 6  26 28 28  60 74 84  158 157 158
+174 174 174  166 165 166  157 156 157  155 154 155  156 155 156  156 155 156
+155 154 155  158 157 158  167 166 167  167 166 167  131 129 131  125 124 125
+137 136 137  167 166 167  167 166 167  174 174 174  158 157 158  125 124 125
+16 19 21  4 0 0  4 0 0  10 13 16  49 76 92  107 159 188
+136 185 209  136 185 209  90 154 193  26 108 161  22 40 52  6 10 14
+2 3 3  1 1 2  1 1 2  4 4 5  4 4 5  4 4 5
+4 4 5  2 2 1  0 0 0  0 0 0  0 0 0  2 2 2
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  5 5 5  3 3 3  0 0 0  1 0 0  4 0 0
+37 51 59  131 129 131  167 166 167  167 166 167  163 162 163  157 156 157
+157 156 157  155 154 155  153 152 153  157 156 157  167 166 167  174 174 174
+153 152 153  125 124 125  37 38 37  4 0 0  4 0 0  4 0 0
+4 3 3  4 3 3  4 0 0  6 6 6  4 0 0  37 38 37
+125 124 125  174 174 174  174 174 174  165 164 165  156 155 156  154 153 154
+156 155 156  156 155 156  155 154 155  163 162 163  158 157 158  163 162 163
+174 174 174  174 174 174  174 174 174  125 124 125  37 38 37  0 0 0
+4 0 0  6 9 11  41 54 63  90 154 193  136 185 209  146 190 211
+136 185 209  37 112 160  22 40 52  6 10 14  3 6 7  1 1 2
+1 1 2  3 3 3  1 1 2  3 3 3  4 4 4  4 4 4
+2 2 2  2 0 0  16 19 21  37 38 37  24 26 27  0 0 0
+0 0 0  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  5 5 5  5 5 5
+4 4 4  0 0 0  0 0 0  0 0 0  26 28 28  120 125 127
+158 157 158  174 174 174  165 164 165  157 156 157  155 154 155  156 155 156
+153 152 153  153 152 153  167 166 167  174 174 174  174 174 174  125 124 125
+37 38 37  4 0 0  0 0 0  4 0 0  4 3 3  4 4 4
+4 4 4  4 4 4  5 5 5  4 0 0  4 0 0  4 0 0
+4 3 3  43 57 68  137 136 137  174 174 174  174 174 174  165 164 165
+154 153 154  153 152 153  153 152 153  153 152 153  163 162 163  174 174 174
+174 174 174  153 152 153  60 73 81  6 6 6  4 0 0  4 3 3
+32 43 50  80 127 157  136 185 209  146 190 211  146 190 211  90 154 193
+28 67 93  28 67 93  40 71 93  3 6 7  1 1 2  2 5 5
+50 82 103  79 117 143  26 37 45  0 0 0  3 3 3  1 1 1
+0 0 0  41 54 63  137 136 137  174 174 174  153 152 153  60 73 81
+2 0 0  0 0 0
+4 4 4  4 4 4  4 4 4  4 4 4  6 6 6  2 2 2
+0 0 0  2 0 0  24 26 27  60 74 84  153 152 153  174 174 174
+174 174 174  157 156 157  154 153 154  156 155 156  154 153 154  153 152 153
+165 164 165  174 174 174  177 184 187  137 136 137  43 57 68  6 6 6
+4 0 0  2 0 0  3 3 3  5 5 5  5 5 5  4 4 4
+4 4 4  4 4 4  4 4 4  5 5 5  6 6 6  4 3 3
+4 0 0  4 0 0  24 26 27  60 73 81  153 152 153  174 174 174
+174 174 174  158 157 158  158 157 158  174 174 174  174 174 174  158 157 158
+60 74 84  24 26 27  4 0 0  4 0 0  17 23 27  59 113 148
+136 185 209  191 222 234  146 190 211  136 185 209  31 91 132  7 11 13
+22 40 52  101 161 196  90 154 193  6 9 11  3 4 4  43 95 132
+136 185 209  172 205 220  55 98 126  0 0 0  0 0 0  2 0 0
+26 28 28  153 152 153  177 184 187  167 166 167  177 184 187  165 164 165
+37 38 37  0 0 0
+4 4 4  4 4 4  5 5 5  5 5 5  1 1 1  0 0 0
+13 16 17  60 73 81  137 136 137  174 174 174  174 174 174  165 164 165
+153 152 153  153 152 153  155 154 155  154 153 154  158 157 158  174 174 174
+177 184 187  163 162 163  60 73 81  16 19 21  4 0 0  4 0 0
+4 3 3  4 4 4  5 5 5  5 5 5  4 4 4  5 5 5
+5 5 5  5 5 5  5 5 5  4 4 4  4 4 4  5 5 5
+6 6 6  4 0 0  4 0 0  4 0 0  24 26 27  60 74 84
+166 165 166  174 174 174  177 184 187  165 164 165  125 124 125  24 26 27
+4 0 0  4 0 0  5 5 5  50 82 103  136 185 209  172 205 220
+146 190 211  136 185 209  26 108 161  22 40 52  7 12 15  44 81 103
+71 116 144  28 67 93  37 51 59  41 65 82  100 139 164  101 161 196
+90 154 193  90 154 193  28 67 93  0 0 0  0 0 0  26 28 28
+125 124 125  167 166 167  163 162 163  153 152 153  163 162 163  174 174 174
+85 115 134  4 0 0
+4 4 4  5 5 5  4 4 4  1 0 0  4 0 0  34 47 55
+125 124 125  174 174 174  174 174 174  167 166 167  157 156 157  153 152 153
+155 154 155  155 154 155  158 157 158  166 165 166  167 166 167  154 153 154
+125 124 125  26 28 28  4 0 0  4 0 0  4 0 0  5 5 5
+5 5 5  4 4 4  4 4 4  4 4 4  4 4 4  1 1 1
+0 0 0  0 0 0  1 1 1  4 4 4  4 4 4  4 4 4
+5 5 5  5 5 5  4 3 3  4 0 0  4 0 0  6 6 6
+37 38 37  131 129 131  137 136 137  37 38 37  0 0 0  4 0 0
+4 5 5  43 61 72  90 154 193  172 205 220  146 190 211  136 185 209
+90 154 193  28 67 93  13 20 25  43 61 72  71 116 144  44 81 103
+2 5 5  7 11 13  59 113 148  101 161 196  90 154 193  28 67 93
+13 20 25  6 10 14  0 0 0  13 16 17  60 73 81  137 136 137
+166 165 166  158 157 158  156 155 156  154 153 154  167 166 167  174 174 174
+60 73 81  4 0 0
+4 4 4  4 4 4  0 0 0  3 3 3  60 74 84  174 174 174
+174 174 174  167 166 167  163 162 163  155 154 155  157 156 157  155 154 155
+156 155 156  163 162 163  167 166 167  158 157 158  125 124 125  37 38 37
+4 3 3  4 0 0  4 0 0  6 6 6  6 6 6  5 5 5
+4 4 4  4 4 4  4 4 4  1 1 1  0 0 0  2 3 3
+10 13 16  7 11 13  1 0 0  0 0 0  2 2 1  4 4 4
+4 4 4  4 4 4  4 4 4  5 5 5  4 3 3  4 0 0
+4 0 0  7 11 13  13 16 17  4 0 0  3 3 3  34 47 55
+80 127 157  146 190 211  172 205 220  136 185 209  136 185 209  136 185 209
+28 67 93  22 40 52  55 98 126  55 98 126  21 29 34  7 11 13
+50 82 103  101 161 196  101 161 196  35 83 115  13 20 25  2 2 1
+1 1 2  1 1 2  37 51 59  131 129 131  174 174 174  174 174 174
+167 166 167  163 162 163  163 162 163  167 166 167  174 174 174  125 124 125
+16 19 21  4 0 0
+4 4 4  4 0 0  4 0 0  60 74 84  174 174 174  174 174 174
+158 157 158  155 154 155  155 154 155  156 155 156  155 154 155  158 157 158
+167 166 167  165 164 165  131 129 131  60 73 81  13 16 17  4 0 0
+4 0 0  4 3 3  6 6 6  4 3 3  5 5 5  4 4 4
+4 4 4  3 2 2  0 0 0  0 0 0  7 11 13  45 69 86
+80 127 157  71 116 144  43 61 72  7 11 13  0 0 0  1 1 1
+4 3 3  4 4 4  4 4 4  4 4 4  6 6 6  5 5 5
+3 2 2  4 0 0  1 0 0  21 29 34  59 113 148  136 185 209
+146 190 211  136 185 209  136 185 209  136 185 209  136 185 209  136 185 209
+68 124 159  44 81 103  22 40 52  13 16 17  43 61 72  90 154 193
+136 185 209  59 113 148  21 29 34  3 4 3  1 1 1  0 0 0
+24 26 27  125 124 125  163 162 163  174 174 174  166 165 166  165 164 165
+163 162 163  125 124 125  125 124 125  125 124 125  125 124 125  26 28 28
+4 0 0  4 3 3
+3 3 3  0 0 0  24 26 27  153 152 153  177 184 187  158 157 158
+156 155 156  156 155 156  155 154 155  155 154 155  165 164 165  174 174 174
+155 154 155  60 74 84  26 28 28  4 0 0  4 0 0  3 1 0
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 3 3
+2 0 0  0 0 0  0 0 0  32 43 50  72 125 159  101 161 196
+136 185 209  101 161 196  101 161 196  79 117 143  32 43 50  0 0 0
+0 0 0  2 2 2  4 4 4  4 4 4  3 3 3  1 0 0
+0 0 0  4 5 5  49 76 92  101 161 196  146 190 211  146 190 211
+136 185 209  136 185 209  136 185 209  136 185 209  136 185 209  90 154 193
+28 67 93  13 16 17  37 51 59  80 127 157  136 185 209  90 154 193
+22 40 52  6 9 11  3 4 3  2 2 1  16 19 21  60 73 81
+137 136 137  163 162 163  158 157 158  166 165 166  167 166 167  153 152 153
+60 74 84  37 38 37  6 6 6  13 16 17  4 0 0  1 0 0
+3 2 2  4 4 4
+3 2 2  4 0 0  37 38 37  137 136 137  167 166 167  158 157 158
+157 156 157  154 153 154  157 156 157  167 166 167  174 174 174  125 124 125
+37 38 37  4 0 0  4 0 0  4 0 0  4 3 3  4 4 4
+4 4 4  4 4 4  5 5 5  5 5 5  1 1 1  0 0 0
+0 0 0  16 21 25  55 98 126  90 154 193  136 185 209  101 161 196
+101 161 196  101 161 196  136 185 209  136 185 209  101 161 196  55 98 126
+14 17 19  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+22 40 52  90 154 193  146 190 211  146 190 211  136 185 209  136 185 209
+136 185 209  136 185 209  136 185 209  101 161 196  35 83 115  7 11 13
+17 23 27  59 113 148  136 185 209  101 161 196  34 86 122  7 12 15
+2 5 5  3 4 3  6 6 6  60 73 81  131 129 131  163 162 163
+166 165 166  174 174 174  174 174 174  163 162 163  125 124 125  41 54 63
+13 16 17  4 0 0  4 0 0  4 0 0  1 0 0  2 2 2
+4 4 4  4 4 4
+1 1 1  2 1 0  43 57 68  137 136 137  153 152 153  153 152 153
+163 162 163  156 155 156  165 164 165  167 166 167  60 74 84  6 6 6
+4 0 0  4 0 0  5 5 5  4 4 4  4 4 4  4 4 4
+4 5 5  6 6 6  4 3 3  0 0 0  0 0 0  11 15 18
+40 71 93  100 139 164  101 161 196  101 161 196  101 161 196  101 161 196
+101 161 196  101 161 196  101 161 196  101 161 196  136 185 209  136 185 209
+101 161 196  45 69 86  6 6 6  0 0 0  17 23 27  55 98 126
+136 185 209  146 190 211  136 185 209  136 185 209  136 185 209  136 185 209
+136 185 209  136 185 209  90 154 193  22 40 52  7 11 13  50 82 103
+136 185 209  136 185 209  53 118 160  22 40 52  7 11 13  2 5 5
+3 4 3  37 38 37  125 124 125  157 156 157  166 165 166  167 166 167
+174 174 174  174 174 174  137 136 137  60 73 81  4 0 0  4 0 0
+4 0 0  4 0 0  5 5 5  3 3 3  3 3 3  4 4 4
+4 4 4  4 4 4
+4 0 0  4 0 0  41 54 63  137 136 137  125 124 125  131 129 131
+155 154 155  167 166 167  174 174 174  60 74 84  6 6 6  4 0 0
+4 3 3  6 6 6  4 4 4  4 4 4  4 4 4  5 5 5
+4 4 4  1 1 1  0 0 0  3 6 7  41 65 82  72 125 159
+101 161 196  101 161 196  101 161 196  90 154 193  90 154 193  101 161 196
+101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  136 185 209
+136 185 209  136 185 209  80 127 157  55 98 126  101 161 196  146 190 211
+136 185 209  136 185 209  136 185 209  101 161 196  136 185 209  101 161 196
+136 185 209  101 161 196  35 83 115  22 30 35  101 161 196  172 205 220
+90 154 193  28 67 93  7 11 13  2 5 5  3 4 3  13 16 17
+85 115 134  167 166 167  174 174 174  174 174 174  174 174 174  174 174 174
+167 166 167  60 74 84  13 16 17  4 0 0  4 0 0  4 3 3
+6 6 6  5 5 5  4 4 4  5 5 5  4 4 4  5 5 5
+5 5 5  5 5 5
+1 1 1  4 0 0  41 54 63  137 136 137  137 136 137  125 124 125
+131 129 131  167 166 167  157 156 157  37 38 37  6 6 6  4 0 0
+6 6 6  5 5 5  4 4 4  4 4 4  4 5 5  2 2 1
+0 0 0  0 0 0  26 37 45  58 111 146  101 161 196  101 161 196
+101 161 196  90 154 193  90 154 193  90 154 193  101 161 196  101 161 196
+101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  101 161 196
+101 161 196  136 185 209  136 185 209  136 185 209  146 190 211  136 185 209
+136 185 209  101 161 196  136 185 209  136 185 209  101 161 196  136 185 209
+101 161 196  136 185 209  136 185 209  136 185 209  136 185 209  16 89 141
+7 11 13  2 5 5  2 5 5  13 16 17  60 73 81  154 154 154
+174 174 174  174 174 174  174 174 174  174 174 174  163 162 163  125 124 125
+24 26 27  4 0 0  4 0 0  4 0 0  5 5 5  5 5 5
+4 4 4  4 4 4  4 4 4  5 5 5  5 5 5  5 5 5
+5 5 5  4 4 4
+4 0 0  6 6 6  37 38 37  137 136 137  137 136 137  131 129 131
+131 129 131  153 152 153  131 129 131  26 28 28  4 0 0  4 3 3
+6 6 6  4 4 4  4 4 4  4 4 4  0 0 0  0 0 0
+13 20 25  51 88 114  90 154 193  101 161 196  101 161 196  90 154 193
+90 154 193  90 154 193  90 154 193  90 154 193  90 154 193  101 161 196
+101 161 196  101 161 196  101 161 196  101 161 196  136 185 209  101 161 196
+101 161 196  136 185 209  101 161 196  136 185 209  136 185 209  101 161 196
+136 185 209  101 161 196  136 185 209  101 161 196  101 161 196  101 161 196
+136 185 209  136 185 209  136 185 209  37 112 160  21 29 34  5 7 8
+2 5 5  13 16 17  43 57 68  131 129 131  174 174 174  174 174 174
+174 174 174  167 166 167  157 156 157  125 124 125  37 38 37  4 0 0
+4 0 0  4 0 0  5 5 5  5 5 5  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+1 1 1  4 0 0  41 54 63  153 152 153  137 136 137  137 136 137
+137 136 137  153 152 153  125 124 125  24 26 27  4 0 0  3 2 2
+4 4 4  4 4 4  4 3 3  4 0 0  3 6 7  43 61 72
+64 123 161  101 161 196  90 154 193  90 154 193  90 154 193  90 154 193
+90 154 193  90 154 193  90 154 193  90 154 193  101 161 196  90 154 193
+101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  101 161 196
+101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  101 161 196
+136 185 209  101 161 196  101 161 196  136 185 209  136 185 209  101 161 196
+101 161 196  90 154 193  28 67 93  13 16 17  7 11 13  3 6 7
+37 51 59  125 124 125  163 162 163  174 174 174  167 166 167  166 165 166
+167 166 167  131 129 131  60 73 81  4 0 0  4 0 0  4 0 0
+3 3 3  5 5 5  6 6 6  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  4 0 0  41 54 63  137 136 137  153 152 153  137 136 137
+153 152 153  157 156 157  125 124 125  24 26 27  0 0 0  2 2 2
+4 4 4  4 4 4  2 0 0  0 0 0  28 67 93  90 154 193
+90 154 193  90 154 193  90 154 193  90 154 193  64 123 161  90 154 193
+90 154 193  90 154 193  90 154 193  90 154 193  90 154 193  101 161 196
+90 154 193  101 161 196  101 161 196  101 161 196  90 154 193  136 185 209
+101 161 196  101 161 196  136 185 209  101 161 196  136 185 209  101 161 196
+101 161 196  101 161 196  136 185 209  101 161 196  101 161 196  90 154 193
+35 83 115  13 16 17  3 6 7  2 5 5  13 16 17  60 74 84
+154 154 154  166 165 166  165 164 165  158 157 158  163 162 163  157 156 157
+60 74 84  13 16 17  4 0 0  4 0 0  3 2 2  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+1 1 1  4 0 0  41 54 63  157 156 157  155 154 155  137 136 137
+153 152 153  158 157 158  137 136 137  26 28 28  2 0 0  2 2 2
+4 4 4  4 4 4  1 0 0  6 10 14  34 86 122  90 154 193
+64 123 161  90 154 193  64 123 161  90 154 193  90 154 193  90 154 193
+64 123 161  90 154 193  90 154 193  90 154 193  90 154 193  90 154 193
+101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  101 161 196
+101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  101 161 196
+136 185 209  101 161 196  136 185 209  90 154 193  26 108 161  22 40 52
+13 16 17  5 7 8  2 5 5  2 5 5  37 38 37  165 164 165
+174 174 174  163 162 163  154 154 154  165 164 165  167 166 167  60 73 81
+6 6 6  4 0 0  4 0 0  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  6 6 6  41 54 63  156 155 156  158 157 158  153 152 153
+156 155 156  165 164 165  137 136 137  26 28 28  0 0 0  2 2 2
+4 4 5  4 4 4  2 0 0  7 12 15  31 96 139  64 123 161
+90 154 193  64 123 161  90 154 193  90 154 193  64 123 161  90 154 193
+90 154 193  90 154 193  90 154 193  90 154 193  90 154 193  90 154 193
+90 154 193  90 154 193  90 154 193  101 161 196  101 161 196  101 161 196
+101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  136 185 209
+101 161 196  136 185 209  26 108 161  22 40 52  7 11 13  5 7 8
+2 5 5  2 5 5  2 5 5  2 2 1  37 38 37  158 157 158
+174 174 174  154 154 154  156 155 156  167 166 167  165 164 165  37 38 37
+4 0 0  4 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+3 1 0  4 0 0  60 73 81  157 156 157  163 162 163  153 152 153
+158 157 158  167 166 167  137 136 137  26 28 28  2 0 0  2 2 2
+4 5 5  4 4 4  4 0 0  7 12 15  24 86 132  26 108 161
+37 112 160  64 123 161  90 154 193  64 123 161  90 154 193  90 154 193
+90 154 193  90 154 193  90 154 193  90 154 193  90 154 193  90 154 193
+90 154 193  101 161 196  90 154 193  101 161 196  101 161 196  101 161 196
+101 161 196  101 161 196  101 161 196  136 185 209  101 161 196  136 185 209
+90 154 193  35 83 115  13 16 17  13 16 17  7 11 13  3 6 7
+5 7 8  6 6 6  3 4 3  2 2 1  30 32 34  154 154 154
+167 166 167  154 154 154  154 154 154  174 174 174  165 164 165  37 38 37
+6 6 6  4 0 0  6 6 6  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  4 0 0  41 54 63  163 162 163  166 165 166  154 154 154
+163 162 163  174 174 174  137 136 137  26 28 28  0 0 0  2 2 2
+4 5 5  4 4 5  1 1 2  6 10 14  28 67 93  18 97 151
+18 97 151  18 97 151  26 108 161  37 112 160  37 112 160  90 154 193
+64 123 161  90 154 193  90 154 193  90 154 193  90 154 193  101 161 196
+90 154 193  101 161 196  101 161 196  90 154 193  101 161 196  101 161 196
+101 161 196  101 161 196  101 161 196  136 185 209  90 154 193  16 89 141
+13 20 25  7 11 13  5 7 8  5 7 8  2 5 5  4 5 5
+3 4 3  4 5 5  3 4 3  0 0 0  37 38 37  158 157 158
+174 174 174  158 157 158  158 157 158  167 166 167  174 174 174  41 54 63
+4 0 0  3 2 2  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+1 1 1  4 0 0  60 73 81  165 164 165  174 174 174  158 157 158
+167 166 167  174 174 174  153 152 153  26 28 28  2 0 0  2 2 2
+4 5 5  4 4 4  4 0 0  7 12 15  10 87 144  10 87 144
+18 97 151  18 97 151  18 97 151  26 108 161  26 108 161  26 108 161
+26 108 161  37 112 160  53 118 160  90 154 193  90 154 193  90 154 193
+90 154 193  90 154 193  101 161 196  101 161 196  101 161 196  101 161 196
+101 161 196  136 185 209  90 154 193  26 108 161  22 40 52  13 16 17
+7 11 13  3 6 7  5 7 8  5 7 8  2 5 5  4 5 5
+4 5 5  6 6 6  3 4 3  0 0 0  30 32 34  158 157 158
+174 174 174  156 155 156  155 154 155  165 164 165  154 153 154  37 38 37
+4 0 0  4 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  4 0 0  60 73 81  167 166 167  174 174 174  163 162 163
+174 174 174  174 174 174  153 152 153  26 28 28  0 0 0  3 3 3
+5 5 5  4 4 4  1 1 2  7 12 15  28 67 93  18 97 151
+18 97 151  18 97 151  18 97 151  18 97 151  18 97 151  26 108 161
+26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
+90 154 193  26 108 161  90 154 193  90 154 193  90 154 193  101 161 196
+101 161 196  26 108 161  22 40 52  13 16 17  7 11 13  2 5 5
+2 5 5  6 6 6  2 5 5  4 5 5  4 5 5  4 5 5
+3 4 3  5 5 5  3 4 3  2 0 0  30 32 34  137 136 137
+153 152 153  137 136 137  131 129 131  137 136 137  131 129 131  37 38 37
+4 0 0  4 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+1 1 1  4 0 0  60 73 81  167 166 167  174 174 174  166 165 166
+174 174 174  177 184 187  153 152 153  30 32 34  1 0 0  3 3 3
+5 5 5  4 3 3  4 0 0  7 12 15  10 87 144  10 87 144
+18 97 151  18 97 151  18 97 151  26 108 161  26 108 161  26 108 161
+26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
+26 108 161  26 108 161  26 108 161  90 154 193  90 154 193  26 108 161
+35 83 115  13 16 17  7 11 13  5 7 8  3 6 7  5 7 8
+2 5 5  6 6 6  4 5 5  4 5 5  3 4 3  4 5 5
+3 4 3  6 6 6  3 4 3  0 0 0  26 28 28  125 124 125
+131 129 131  125 124 125  125 124 125  131 129 131  131 129 131  37 38 37
+4 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+3 1 0  4 0 0  60 73 81  174 174 174  177 184 187  167 166 167
+174 174 174  177 184 187  153 152 153  30 32 34  0 0 0  3 3 3
+5 5 5  4 4 4  1 1 2  6 10 14  28 67 93  18 97 151
+18 97 151  18 97 151  18 97 151  18 97 151  18 97 151  26 108 161
+26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
+26 108 161  90 154 193  26 108 161  26 108 161  24 86 132  13 20 25
+7 11 13  13 20 25  22 40 52  5 7 8  3 4 3  3 4 3
+4 5 5  3 4 3  4 5 5  3 4 3  4 5 5  3 4 3
+4 4 4  5 5 5  3 3 3  2 0 0  26 28 28  125 124 125
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+1 1 1  4 0 0  60 73 81  174 174 174  177 184 187  174 174 174
+174 174 174  190 197 201  157 156 157  30 32 34  1 0 0  3 3 3
+5 5 5  4 3 3  4 0 0  7 12 15  10 87 144  10 87 144
+18 97 151  19 95 150  19 95 150  18 97 151  18 97 151  26 108 161
+18 97 151  26 108 161  26 108 161  26 108 161  26 108 161  90 154 193
+26 108 161  26 108 161  26 108 161  22 40 52  2 5 5  3 4 3
+28 67 93  37 112 160  34 86 122  2 5 5  3 4 3  3 4 3
+3 4 3  3 4 3  3 4 3  2 2 1  3 4 3  4 4 4
+4 5 5  5 5 5  3 3 3  0 0 0  26 28 28  131 129 131
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  4 0 0  60 73 81  174 174 174  177 184 187  174 174 174
+174 174 174  190 197 201  158 157 158  30 32 34  0 0 0  2 2 2
+5 5 5  4 4 4  1 1 2  6 10 14  28 67 93  18 97 151
+10 87 144  19 95 150  19 95 150  18 97 151  18 97 151  18 97 151
+26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
+18 97 151  22 40 52  2 5 5  2 2 1  22 40 52  26 108 161
+90 154 193  37 112 160  22 40 52  3 4 3  13 20 25  22 30 35
+3 6 7  1 1 1  2 2 2  6 9 11  5 5 5  4 3 3
+4 4 4  5 5 5  3 3 3  2 0 0  26 28 28  131 129 131
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+1 1 1  4 0 0  60 73 81  177 184 187  193 200 203  174 174 174
+177 184 187  193 200 203  163 162 163  30 32 34  4 0 0  2 2 2
+5 5 5  4 3 3  4 0 0  6 10 14  24 86 132  10 87 144
+10 87 144  10 87 144  19 95 150  19 95 150  19 95 150  18 97 151
+26 108 161  26 108 161  26 108 161  90 154 193  26 108 161  28 67 93
+6 10 14  2 5 5  13 20 25  24 86 132  37 112 160  90 154 193
+10 87 144  7 12 15  2 5 5  28 67 93  37 112 160  28 67 93
+2 2 1  7 12 15  35 83 115  28 67 93  3 6 7  1 0 0
+4 4 4  5 5 5  3 3 3  0 0 0  26 28 28  131 129 131
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  4 0 0  60 73 81  174 174 174  190 197 201  174 174 174
+177 184 187  193 200 203  163 162 163  30 32 34  0 0 0  2 2 2
+5 5 5  4 4 4  1 1 2  6 10 14  28 67 93  10 87 144
+10 87 144  16 89 141  19 95 150  10 87 144  26 108 161  26 108 161
+26 108 161  26 108 161  26 108 161  28 67 93  6 10 14  1 1 2
+7 12 15  28 67 93  26 108 161  16 89 141  24 86 132  21 29 34
+3 4 3  21 29 34  37 112 160  37 112 160  27 99 146  21 29 34
+21 29 34  26 108 161  90 154 193  35 83 115  1 1 2  2 0 0
+4 4 4  5 5 5  3 3 3  2 0 0  26 28 28  125 124 125
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+3 1 0  4 0 0  60 73 81  193 200 203  193 200 203  174 174 174
+190 197 201  193 200 203  165 164 165  37 38 37  4 0 0  2 2 2
+5 5 5  4 3 3  4 0 0  6 10 14  24 86 132  10 87 144
+10 87 144  10 87 144  16 89 141  18 97 151  18 97 151  10 87 144
+24 86 132  24 86 132  13 20 25  4 5 7  4 5 7  22 40 52
+18 97 151  37 112 160  26 108 161  7 12 15  1 1 1  0 0 0
+28 67 93  37 112 160  26 108 161  28 67 93  22 40 52  28 67 93
+26 108 161  90 154 193  26 108 161  10 87 144  0 0 0  2 0 0
+4 4 4  5 5 5  3 3 3  0 0 0  26 28 28  131 129 131
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  6 6 6  60 73 81  174 174 174  193 200 203  174 174 174
+190 197 201  193 200 203  165 164 165  30 32 34  0 0 0  2 2 2
+5 5 5  4 4 4  1 1 2  6 10 14  28 67 93  10 87 144
+10 87 144  10 87 144  10 87 144  18 97 151  28 67 93  6 10 14
+0 0 0  1 1 2  4 5 7  13 20 25  16 89 141  26 108 161
+26 108 161  26 108 161  24 86 132  6 9 11  2 3 3  22 40 52
+37 112 160  16 89 141  22 40 52  28 67 93  26 108 161  26 108 161
+90 154 193  26 108 161  26 108 161  28 67 93  1 1 1  4 0 0
+4 4 4  5 5 5  3 3 3  4 0 0  26 28 28  124 126 130
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  4 0 0  60 73 81  193 200 203  193 200 203  174 174 174
+193 200 203  193 200 203  167 166 167  37 38 37  4 0 0  2 2 2
+5 5 5  4 4 4  4 0 0  6 10 14  28 67 93  10 87 144
+10 87 144  10 87 144  18 97 151  10 87 144  13 20 25  4 5 7
+1 1 2  1 1 1  22 40 52  26 108 161  26 108 161  26 108 161
+26 108 161  26 108 161  26 108 161  24 86 132  22 40 52  22 40 52
+22 40 52  22 40 52  10 87 144  26 108 161  26 108 161  26 108 161
+26 108 161  26 108 161  90 154 193  10 87 144  0 0 0  4 0 0
+4 4 4  5 5 5  3 3 3  0 0 0  26 28 28  131 129 131
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  6 6 6  60 73 81  174 174 174  220 221 221  174 174 174
+190 197 201  205 212 215  167 166 167  30 32 34  0 0 0  2 2 2
+5 5 5  4 4 4  1 1 2  6 10 14  28 67 93  10 87 144
+10 87 144  10 87 144  10 87 144  10 87 144  22 40 52  1 1 2
+2 0 0  1 1 2  24 86 132  26 108 161  26 108 161  26 108 161
+26 108 161  19 95 150  16 89 141  10 87 144  22 40 52  22 40 52
+10 87 144  26 108 161  37 112 160  26 108 161  26 108 161  26 108 161
+26 108 161  26 108 161  26 108 161  28 67 93  2 0 0  3 1 0
+4 4 4  5 5 5  3 3 3  2 0 0  26 28 28  131 129 131
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  4 0 0  60 73 81  220 221 221  190 197 201  174 174 174
+193 200 203  193 200 203  174 174 174  37 38 37  4 0 0  2 2 2
+5 5 5  4 4 4  3 2 2  1 1 2  13 20 25  10 87 144
+10 87 144  10 87 144  10 87 144  10 87 144  10 87 144  13 20 25
+13 20 25  22 40 52  10 87 144  18 97 151  18 97 151  26 108 161
+10 87 144  13 20 25  6 10 14  21 29 34  24 86 132  18 97 151
+26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
+26 108 161  90 154 193  18 97 151  13 20 25  0 0 0  4 3 3
+4 4 4  5 5 5  3 3 3  0 0 0  26 28 28  131 129 131
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  6 6 6  60 73 81  174 174 174  220 221 221  174 174 174
+190 197 201  220 221 221  167 166 167  30 32 34  1 0 0  2 2 2
+5 5 5  4 4 4  4 4 5  2 5 5  4 5 7  13 20 25
+28 67 93  10 87 144  10 87 144  10 87 144  10 87 144  10 87 144
+10 87 144  10 87 144  18 97 151  10 87 144  18 97 151  18 97 151
+28 67 93  2 3 3  0 0 0  28 67 93  26 108 161  26 108 161
+26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
+26 108 161  10 87 144  13 20 25  1 1 2  3 2 2  4 4 4
+4 4 4  5 5 5  3 3 3  2 0 0  26 28 28  131 129 131
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  4 0 0  60 73 81  220 221 221  190 197 201  174 174 174
+193 200 203  193 200 203  174 174 174  26 28 28  4 0 0  4 3 3
+5 5 5  4 4 4  4 4 4  4 4 5  1 1 2  2 5 5
+4 5 7  22 40 52  10 87 144  10 87 144  18 97 151  10 87 144
+10 87 144  10 87 144  10 87 144  10 87 144  10 87 144  18 97 151
+10 87 144  28 67 93  22 40 52  10 87 144  26 108 161  18 97 151
+18 97 151  18 97 151  26 108 161  26 108 161  26 108 161  26 108 161
+22 40 52  1 1 2  0 0 0  2 3 3  4 4 4  4 4 4
+4 4 4  5 5 5  4 4 4  0 0 0  26 28 28  131 129 131
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  6 6 6  60 73 81  174 174 174  220 221 221  174 174 174
+190 197 201  220 221 221  190 197 201  41 54 63  4 0 0  2 2 2
+6 6 6  4 4 4  4 4 4  4 4 5  4 4 5  3 3 3
+1 1 2  1 1 2  6 10 14  22 40 52  10 87 144  18 97 151
+18 97 151  10 87 144  10 87 144  10 87 144  18 97 151  10 87 144
+10 87 144  18 97 151  26 108 161  18 97 151  18 97 151  10 87 144
+26 108 161  26 108 161  26 108 161  10 87 144  28 67 93  6 10 14
+1 1 2  1 1 2  4 3 3  4 4 5  4 4 4  4 4 4
+5 5 5  5 5 5  1 1 1  4 0 0  37 51 59  137 136 137
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  4 0 0  60 73 81  220 221 221  193 200 203  174 174 174
+193 200 203  193 200 203  220 221 221  137 136 137  13 16 17  4 0 0
+2 2 2  4 4 4  4 4 4  4 4 4  4 4 4  4 4 5
+4 4 5  4 3 3  1 1 2  4 5 7  13 20 25  28 67 93
+10 87 144  10 87 144  10 87 144  10 87 144  10 87 144  10 87 144
+10 87 144  18 97 151  18 97 151  10 87 144  18 97 151  26 108 161
+26 108 161  18 97 151  28 67 93  6 10 14  0 0 0  0 0 0
+2 3 3  4 5 5  4 4 5  4 4 4  4 4 4  5 5 5
+3 3 3  1 1 1  0 0 0  16 19 21  125 124 125  137 136 137
+131 129 131  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  6 6 6  60 73 81  174 174 174  220 221 221  174 174 174
+193 200 203  190 197 201  220 221 221  220 221 221  153 152 153  30 32 34
+0 0 0  0 0 0  2 2 2  4 4 4  4 4 4  4 4 4
+4 4 4  4 5 5  4 5 7  1 1 2  1 1 2  4 5 7
+13 20 25  28 67 93  10 87 144  18 97 151  10 87 144  10 87 144
+10 87 144  10 87 144  10 87 144  18 97 151  26 108 161  18 97 151
+28 67 93  7 12 15  0 0 0  0 0 0  2 2 1  4 4 4
+4 5 5  4 5 5  4 4 4  4 4 4  3 3 3  0 0 0
+0 0 0  0 0 0  37 38 37  125 124 125  158 157 158  131 129 131
+125 124 125  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 3 3  4 0 0  41 54 63  193 200 203  220 221 221  174 174 174
+193 200 203  193 200 203  193 200 203  220 221 221  244 246 246  193 200 203
+120 125 127  5 5 5  1 0 0  0 0 0  1 1 1  4 4 4
+4 4 4  4 4 4  4 5 5  4 5 5  4 4 5  1 1 2
+4 5 7  4 5 7  22 40 52  10 87 144  10 87 144  10 87 144
+10 87 144  10 87 144  18 97 151  10 87 144  10 87 144  13 20 25
+4 5 7  2 3 3  1 1 2  4 4 4  4 5 5  4 4 4
+4 4 4  4 4 4  4 4 4  1 1 1  0 0 0  1 1 2
+24 26 27  60 74 84  153 152 153  163 162 163  137 136 137  125 124 125
+125 124 125  125 124 125  125 124 125  137 136 137  125 124 125  26 28 28
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  6 6 6  26 28 28  156 155 156  220 221 221  220 221 221
+174 174 174  193 200 203  193 200 203  193 200 203  205 212 215  220 221 221
+220 221 221  167 166 167  60 73 81  7 11 13  0 0 0  0 0 0
+3 3 3  4 4 4  4 4 4  4 4 4  4 4 5  4 4 5
+4 4 5  1 1 2  1 1 2  4 5 7  22 40 52  10 87 144
+10 87 144  10 87 144  10 87 144  22 40 52  4 5 7  1 1 2
+1 1 2  4 4 5  4 4 4  4 4 4  4 4 4  4 4 4
+5 5 5  2 2 2  0 0 0  4 0 0  16 19 21  60 73 81
+137 136 137  167 166 167  158 157 158  137 136 137  131 129 131  131 129 131
+125 124 125  125 124 125  131 129 131  155 154 155  60 74 84  5 7 8
+0 0 0  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+5 5 5  4 0 0  4 0 0  60 73 81  193 200 203  220 221 221
+193 200 203  193 200 203  193 200 203  193 200 203  205 212 215  220 221 221
+220 221 221  220 221 221  220 221 221  137 136 137  43 57 68  6 6 6
+4 0 0  1 1 1  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 5  4 4 5  3 2 2  1 1 2  2 5 5  13 20 25
+22 40 52  22 40 52  13 20 25  2 3 3  1 1 2  3 3 3
+4 5 7  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+1 1 1  0 0 0  2 3 3  41 54 63  131 129 131  166 165 166
+166 165 166  155 154 155  153 152 153  137 136 137  137 136 137  125 124 125
+125 124 125  137 136 137  137 136 137  125 124 125  37 38 37  4 3 3
+4 3 3  5 5 5  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 3 3  6 6 6  6 6 6  13 16 17  60 73 81  167 166 167
+220 221 221  220 221 221  220 221 221  193 200 203  193 200 203  193 200 203
+205 212 215  220 221 221  220 221 221  244 246 246  205 212 215  125 124 125
+24 26 27  0 0 0  0 0 0  2 2 2  5 5 5  5 5 5
+4 4 4  4 4 4  4 4 4  4 4 5  1 1 2  4 5 7
+4 5 7  4 5 7  1 1 2  3 2 2  4 4 5  4 4 4
+4 4 4  4 4 4  5 5 5  4 4 4  0 0 0  0 0 0
+2 0 0  26 28 28  125 124 125  174 174 174  174 174 174  166 165 166
+156 155 156  153 152 153  137 136 137  137 136 137  131 129 131  137 136 137
+137 136 137  137 136 137  60 74 84  30 32 34  4 0 0  4 0 0
+5 5 5  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+5 5 5  6 6 6  4 0 0  4 0 0  6 6 6  26 28 28
+125 124 125  174 174 174  220 221 221  220 221 221  220 221 221  193 200 203
+205 212 215  220 221 221  205 212 215  220 221 221  220 221 221  244 246 246
+193 200 203  60 74 84  13 16 17  4 0 0  0 0 0  3 3 3
+5 5 5  5 5 5  4 4 4  4 4 4  4 4 5  3 3 3
+1 1 2  3 3 3  4 4 5  4 4 5  4 4 4  4 4 4
+5 5 5  5 5 5  2 2 2  0 0 0  0 0 0  13 16 17
+60 74 84  174 174 174  193 200 203  174 174 174  167 166 167  163 162 163
+153 152 153  153 152 153  137 136 137  137 136 137  153 152 153  137 136 137
+125 124 125  41 54 63  24 26 27  4 0 0  4 0 0  5 5 5
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 3 3  6 6 6  6 6 6  6 6 6  6 6 6  6 6 6
+6 6 6  37 38 37  131 129 131  220 221 221  220 221 221  220 221 221
+193 200 203  193 200 203  220 221 221  205 212 215  220 221 221  244 246 246
+244 246 246  244 246 246  174 174 174  41 54 63  0 0 0  0 0 0
+0 0 0  4 4 4  5 5 5  5 5 5  4 4 4  4 4 5
+4 4 5  4 4 5  4 4 4  4 4 4  6 6 6  6 6 6
+3 3 3  0 0 0  2 0 0  13 16 17  60 73 81  156 155 156
+220 221 221  193 200 203  174 174 174  165 164 165  163 162 163  154 153 154
+153 152 153  153 152 153  158 157 158  163 162 163  137 136 137  60 73 81
+13 16 17  4 0 0  4 0 0  4 3 3  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+5 5 5  4 3 3  4 3 3  6 6 6  6 6 6  6 6 6
+6 6 6  6 6 6  6 6 6  37 38 37  167 166 167  244 246 246
+244 246 246  220 221 221  205 212 215  205 212 215  220 221 221  193 200 203
+220 221 221  244 246 246  244 246 246  244 246 246  137 136 137  37 38 37
+3 2 2  0 0 0  1 1 1  5 5 5  5 5 5  4 4 4
+4 4 4  4 4 4  4 4 4  5 5 5  4 4 4  1 1 1
+0 0 0  5 5 5  43 57 68  153 152 153  193 200 203  220 221 221
+177 184 187  174 174 174  167 166 167  166 165 166  158 157 158  157 156 157
+158 157 158  166 165 166  156 155 156  85 115 134  13 16 17  4 0 0
+4 0 0  4 0 0  5 5 5  5 5 5  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+5 5 5  4 3 3  6 6 6  6 6 6  4 0 0  6 6 6
+6 6 6  6 6 6  6 6 6  6 6 6  13 16 17  60 73 81
+177 184 187  220 221 221  220 221 221  220 221 221  205 212 215  220 221 221
+220 221 221  205 212 215  220 221 221  244 246 246  244 246 246  205 212 215
+125 124 125  30 32 34  0 0 0  0 0 0  2 2 2  5 5 5
+4 4 4  4 4 4  4 4 4  1 1 1  0 0 0  1 0 0
+37 38 37  131 129 131  205 212 215  220 221 221  193 200 203  174 174 174
+174 174 174  174 174 174  167 166 167  165 164 165  166 165 166  167 166 167
+158 157 158  125 124 125  37 38 37  4 0 0  4 0 0  4 0 0
+4 3 3  5 5 5  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  5 5 5  4 3 3  4 3 3  6 6 6  6 6 6
+4 0 0  6 6 6  6 6 6  6 6 6  6 6 6  6 6 6
+26 28 28  125 124 125  205 212 215  220 221 221  220 221 221  220 221 221
+205 212 215  220 221 221  205 212 215  220 221 221  220 221 221  244 246 246
+244 246 246  190 197 201  60 74 84  16 19 21  4 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  16 19 21  120 125 127
+177 184 187  220 221 221  205 212 215  177 184 187  174 174 174  177 184 187
+174 174 174  174 174 174  167 166 167  174 174 174  166 165 166  137 136 137
+60 73 81  13 16 17  4 0 0  4 0 0  4 3 3  6 6 6
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+5 5 5  4 3 3  5 5 5  4 3 3  6 6 6  4 0 0
+6 6 6  6 6 6  4 0 0  6 6 6  4 0 0  6 6 6
+6 6 6  6 6 6  37 38 37  137 136 137  193 200 203  220 221 221
+220 221 221  205 212 215  220 221 221  205 212 215  205 212 215  220 221 221
+220 221 221  220 221 221  244 246 246  166 165 166  43 57 68  2 2 2
+0 0 0  4 0 0  16 19 21  60 73 81  157 156 157  202 210 214
+220 221 221  193 200 203  177 184 187  177 184 187  177 184 187  174 174 174
+174 174 174  174 174 174  174 174 174  157 156 157  60 74 84  24 26 27
+4 0 0  4 0 0  4 0 0  6 6 6  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  5 5 5  4 3 3  5 5 5  6 6 6
+6 6 6  4 0 0  6 6 6  6 6 6  6 6 6  4 0 0
+4 0 0  4 0 0  6 6 6  24 26 27  60 73 81  167 166 167
+220 221 221  220 221 221  220 221 221  205 212 215  205 212 215  205 212 215
+205 212 215  220 221 221  220 221 221  220 221 221  205 212 215  137 136 137
+60 74 84  125 124 125  137 136 137  190 197 201  220 221 221  193 200 203
+177 184 187  177 184 187  177 184 187  174 174 174  174 174 174  177 184 187
+190 197 201  174 174 174  125 124 125  37 38 37  6 6 6  4 0 0
+4 0 0  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  5 5 5  5 5 5  4 3 3  6 6 6
+4 0 0  6 6 6  6 6 6  6 6 6  4 0 0  6 6 6
+6 6 6  6 6 6  4 0 0  4 0 0  6 6 6  6 6 6
+125 124 125  193 200 203  244 246 246  220 221 221  205 212 215  205 212 215
+205 212 215  193 200 203  205 212 215  205 212 215  220 221 221  220 221 221
+193 200 203  193 200 203  205 212 215  193 200 203  193 200 203  177 184 187
+190 197 201  190 197 201  174 174 174  190 197 201  193 200 203  190 197 201
+153 152 153  60 73 81  4 0 0  4 0 0  4 0 0  3 2 2
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  5 5 5  4 3 3
+6 6 6  4 3 3  4 3 3  4 3 3  6 6 6  6 6 6
+4 0 0  6 6 6  6 6 6  6 6 6  4 0 0  4 0 0
+4 0 0  26 28 28  131 129 131  220 221 221  244 246 246  220 221 221
+205 212 215  193 200 203  205 212 215  193 200 203  193 200 203  205 212 215
+220 221 221  193 200 203  193 200 203  193 200 203  190 197 201  174 174 174
+174 174 174  190 197 201  193 200 203  193 200 203  167 166 167  125 124 125
+6 6 6  4 0 0  4 0 0  4 3 3  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  5 5 5
+5 5 5  4 3 3  5 5 5  6 6 6  4 3 3  5 5 5
+6 6 6  6 6 6  4 0 0  6 6 6  6 6 6  6 6 6
+4 0 0  4 0 0  6 6 6  41 54 63  158 157 158  220 221 221
+220 221 221  220 221 221  193 200 203  193 200 203  193 200 203  190 197 201
+190 197 201  190 197 201  190 197 201  190 197 201  174 174 174  193 200 203
+193 200 203  220 221 221  174 174 174  125 124 125  37 38 37  4 0 0
+4 0 0  4 3 3  6 6 6  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  5 5 5  4 3 3  4 3 3  4 3 3  5 5 5
+4 3 3  6 6 6  5 5 5  4 3 3  6 6 6  6 6 6
+6 6 6  6 6 6  4 0 0  4 0 0  13 16 17  60 73 81
+174 174 174  220 221 221  220 221 221  205 212 215  190 197 201  174 174 174
+193 200 203  174 174 174  190 197 201  174 174 174  193 200 203  220 221 221
+193 200 203  131 129 131  37 38 37  6 6 6  4 0 0  4 0 0
+6 6 6  6 6 6  4 3 3  5 5 5  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  5 5 5  5 5 5  5 5 5
+5 5 5  4 3 3  4 3 3  5 5 5  4 3 3  4 3 3
+5 5 5  6 6 6  6 6 6  4 0 0  6 6 6  6 6 6
+6 6 6  125 124 125  174 174 174  220 221 221  220 221 221  193 200 203
+193 200 203  193 200 203  193 200 203  193 200 203  220 221 221  158 157 158
+60 73 81  6 6 6  4 0 0  4 0 0  5 5 5  6 6 6
+5 5 5  5 5 5  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  5 5 5  5 5 5  4 3 3  5 5 5  4 3 3
+5 5 5  5 5 5  6 6 6  6 6 6  4 0 0  4 0 0
+4 0 0  4 0 0  26 28 28  125 124 125  174 174 174  193 200 203
+193 200 203  174 174 174  193 200 203  167 166 167  125 124 125  6 6 6
+6 6 6  6 6 6  4 0 0  6 6 6  6 6 6  5 5 5
+4 3 3  5 5 5  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  5 5 5
+4 3 3  6 6 6  4 0 0  6 6 6  6 6 6  6 6 6
+6 6 6  4 0 0  4 0 0  6 6 6  37 38 37  125 124 125
+153 152 153  131 129 131  125 124 125  37 38 37  6 6 6  6 6 6
+6 6 6  4 0 0  6 6 6  6 6 6  4 3 3  5 5 5
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  5 5 5  5 5 5  4 3 3  5 5 5  4 3 3
+6 6 6  6 6 6  4 0 0  4 0 0  6 6 6  6 6 6
+24 26 27  24 26 27  6 6 6  6 6 6  6 6 6  4 0 0
+6 6 6  6 6 6  4 0 0  6 6 6  5 5 5  4 3 3
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  5 5 5  4 3 3  5 5 5  6 6 6
+4 0 0  6 6 6  6 6 6  6 6 6  6 6 6  6 6 6
+6 6 6  6 6 6  6 6 6  4 0 0  6 6 6  6 6 6
+4 0 0  6 6 6  6 6 6  4 3 3  5 5 5  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  5 5 5  4 3 3  5 5 5
+5 5 5  5 5 5  4 0 0  6 6 6  4 0 0  6 6 6
+6 6 6  6 6 6  6 6 6  4 0 0  6 6 6  4 0 0
+6 6 6  4 3 3  5 5 5  4 3 3  5 5 5  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  5 5 5
+4 3 3  6 6 6  4 3 3  6 6 6  6 6 6  6 6 6
+4 0 0  6 6 6  4 0 0  6 6 6  6 6 6  6 6 6
+6 6 6  4 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  5 5 5  4 3 3  5 5 5  4 0 0  6 6 6
+6 6 6  4 0 0  6 6 6  6 6 6  4 0 0  6 6 6
+4 3 3  5 5 5  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  5 5 5  4 3 3  5 5 5  6 6 6  4 3 3
+4 3 3  6 6 6  6 6 6  4 3 3  6 6 6  4 3 3
+5 5 5  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  5 5 5  4 3 3  6 6 6
+5 5 5  4 3 3  4 3 3  4 3 3  5 5 5  5 5 5
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  5 5 5  4 3 3
+5 5 5  4 3 3  5 5 5  5 5 5  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
index fef20dbc6a5c2dfada86792f4eea7e7a631085e8..d28b1aba63bb8e9c151055a9b1fe8a2e69fa42bb 100644 (file)
@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
 static int xsd_kva_open(struct inode *inode, struct file *file)
 {
        file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+                                              NULL);
+#else
                                               xen_store_interface);
+#endif
+
        if (!file->private_data)
                return -ENOMEM;
        return 0;
index eb14e055ea83e8509e7ea6ae569e3c1966d3b896..5156de7df23f71802a29c34212f2ae5831286b74 100644 (file)
@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
 
        retval = v9fs_file_write_internal(inode,
                                          v9inode->writeback_fid,
-                                         (__force const char __user *)buffer,
+                                         (const char __force_user *)buffer,
                                          len, &offset, 0);
        if (retval > 0)
                retval = 0;
index 9ee5343d48849d85c1404538ce3f5433ad460d6a..5165e3c763e83aed5e1d07e33a0d7dc1bcf1746a 100644 (file)
@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
 void
 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
 {
-       char *s = nd_get_link(nd);
+       const char *s = nd_get_link(nd);
 
        p9_debug(P9_DEBUG_VFS, " %pd %s\n",
                 dentry, IS_ERR(s) ? "<error>" : s);
index c055d56ec63d04e1773909b7291627d8ee2b3193..a46f4f5fbd5787360e8d560b2b826d28c8ca9105 100644 (file)
@@ -106,7 +106,7 @@ config HAVE_AOUT
 
 config BINFMT_AOUT
        tristate "Kernel support for a.out and ECOFF binaries"
-       depends on HAVE_AOUT
+       depends on HAVE_AOUT && BROKEN
        ---help---
          A.out (Assembler.OUTput) is a set of formats for libraries and
          executables used in the earliest versions of UNIX.  Linux used
index 8a1d38ef0fc203678883493a672c0f638f94a107..300a14ebf2e1842de9e18730d47421ce60775461 100644 (file)
@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
        struct afs_vnode *vnode;
        struct super_block *sb;
        struct inode *inode;
-       static atomic_t afs_autocell_ino;
+       static atomic_unchecked_t afs_autocell_ino;
 
        _enter("{%x:%u},%*.*s,",
               AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
        data.fid.unique = 0;
        data.fid.vnode = 0;
 
-       inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
+       inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
                             afs_iget5_autocell_test, afs_iget5_set,
                             &data);
        if (!inode) {
index c428871f10934a87aa4b5a7038b8c0519b6bc80a..3f3041baf8174a8a37029d1f743a2dab292f8492 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -413,7 +413,7 @@ static int aio_setup_ring(struct kioctx *ctx)
        size += sizeof(struct io_event) * nr_events;
 
        nr_pages = PFN_UP(size);
-       if (nr_pages < 0)
+       if (nr_pages <= 0)
                return -EINVAL;
 
        file = aio_private_file(ctx, nr_pages);
index 6530ced19697d49a9189fa289c9112187448fee3..4a827e2d882c1db7b50da7c6c3cd8003a20d8a06 100644 (file)
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
                unsigned long limit;
 
                limit = rlimit(RLIMIT_FSIZE);
+               gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
                if (limit != RLIM_INFINITY && offset > limit)
                        goto out_sig;
                if (offset > inode->i_sb->s_maxbytes)
index 116fd38ee472c74f243c05428969c2bd1bbe85e2..c04182dacb69e07ea27ea8dd5ef294c8c5a29d21 100644 (file)
@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
 {
        unsigned long sigpipe, flags;
        mm_segment_t fs;
-       const char *data = (const char *)addr;
+       const char __user *data = (const char __force_user *)addr;
        ssize_t wr = 0;
 
        sigpipe = sigismember(&current->pending.signal, SIGPIPE);
@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
        return 1;
 }
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
+#endif
+
 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
                enum autofs_notify notify)
 {
@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
 
        /* If this is a direct mount request create a dummy name */
        if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+               /* this name does get written to userland via autofs4_write() */
+               qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
+#else
                qstr.len = sprintf(name, "%p", dentry);
+#endif
        else {
                qstr.len = autofs4_getpath(sbi, dentry, &name);
                if (!qstr.len) {
index 27223878ba9fc9b9ba354a887837653ecdd170f4..56059b57661de2ce3b49a6fe3849a08b7eede9ef 100644 (file)
@@ -11,7 +11,7 @@
 
 #include <asm/byteorder.h>
 
-static inline u64
+static inline u64 __intentional_overflow(-1)
 fs64_to_cpu(const struct super_block *sb, fs64 n)
 {
        if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
                return (__force fs64)cpu_to_be64(n);
 }
 
-static inline u32
+static inline u32 __intentional_overflow(-1)
 fs32_to_cpu(const struct super_block *sb, fs32 n)
 {
        if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
                return (__force fs32)cpu_to_be32(n);
 }
 
-static inline u16
+static inline u16 __intentional_overflow(-1)
 fs16_to_cpu(const struct super_block *sb, fs16 n)
 {
        if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
index 4c556680fa749fa388710eb09b7b822e07a6c977..eeae15071ec95d6153bb8e0cce8ceca78faaeb9d 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/string.h>
 #include <linux/fs.h>
 #include <linux/file.h>
+#include <linux/security.h>
 #include <linux/stat.h>
 #include <linux/fcntl.h>
 #include <linux/ptrace.h>
@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
 #endif
 #       define START_STACK(u)   ((void __user *)u.start_stack)
 
+       memset(&dump, 0, sizeof(dump));
+
        fs = get_fs();
        set_fs(KERNEL_DS);
        has_dumped = 1;
@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
 
 /* If the size of the dump file exceeds the rlimit, then see what would happen
    if we wrote the stack, but not the data area.  */
+       gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
        if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
                dump.u_dsize = 0;
 
 /* Make sure we have enough room to write the stack and data areas. */
+       gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
        if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
                dump.u_ssize = 0;
 
@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
        rlim = rlimit(RLIMIT_DATA);
        if (rlim >= RLIM_INFINITY)
                rlim = ~0;
+
+       gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
        if (ex.a_data + ex.a_bss > rlim)
                return -ENOMEM;
 
@@ -261,6 +268,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
 
        install_exec_creds(bprm);
 
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+       current->mm->pax_flags = 0UL;
+#endif
+
+#ifdef CONFIG_PAX_PAGEEXEC
+       if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
+               current->mm->pax_flags |= MF_PAX_PAGEEXEC;
+
+#ifdef CONFIG_PAX_EMUTRAMP
+               if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
+                       current->mm->pax_flags |= MF_PAX_EMUTRAMP;
+#endif
+
+#ifdef CONFIG_PAX_MPROTECT
+               if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
+                       current->mm->pax_flags |= MF_PAX_MPROTECT;
+#endif
+
+       }
+#endif
+
        if (N_MAGIC(ex) == OMAGIC) {
                unsigned long text_addr, map_size;
                loff_t pos;
@@ -312,7 +340,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
                        return error;
 
                error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
-                               PROT_READ | PROT_WRITE | PROT_EXEC,
+                               PROT_READ | PROT_WRITE,
                                MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
                                fd_offset + ex.a_text);
                if (error != N_DATADDR(ex))
index 995986b8e36b8f3fd8529582c50e545d9b26322e..dcc4ef201d9378e45dee37a7e3bda8b5c6ef0fe0 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/utsname.h>
 #include <linux/coredump.h>
 #include <linux/sched.h>
+#include <linux/xattr.h>
 #include <asm/uaccess.h>
 #include <asm/param.h>
 #include <asm/page.h>
@@ -47,7 +48,7 @@
 
 static int load_elf_binary(struct linux_binprm *bprm);
 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
-                               int, int, unsigned long);
+                               int, int, unsigned long) __intentional_overflow(-1);
 
 #ifdef CONFIG_USELIB
 static int load_elf_library(struct file *);
@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
 #define elf_core_dump  NULL
 #endif
 
+#ifdef CONFIG_PAX_MPROTECT
+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
+#endif
+
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
+static void elf_handle_mmap(struct file *file);
+#endif
+
 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
 #define ELF_MIN_ALIGN  ELF_EXEC_PAGESIZE
 #else
@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
        .load_binary    = load_elf_binary,
        .load_shlib     = load_elf_library,
        .core_dump      = elf_core_dump,
+
+#ifdef CONFIG_PAX_MPROTECT
+       .handle_mprotect= elf_handle_mprotect,
+#endif
+
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
+       .handle_mmap    = elf_handle_mmap,
+#endif
+
        .min_coredump   = ELF_EXEC_PAGESIZE,
 };
 
@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
 
 static int set_brk(unsigned long start, unsigned long end)
 {
+       unsigned long e = end;
+
        start = ELF_PAGEALIGN(start);
        end = ELF_PAGEALIGN(end);
        if (end > start) {
@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
                if (BAD_ADDR(addr))
                        return addr;
        }
-       current->mm->start_brk = current->mm->brk = end;
+       current->mm->start_brk = current->mm->brk = e;
        return 0;
 }
 
@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
        elf_addr_t __user *u_rand_bytes;
        const char *k_platform = ELF_PLATFORM;
        const char *k_base_platform = ELF_BASE_PLATFORM;
-       unsigned char k_rand_bytes[16];
+       u32 k_rand_bytes[4];
        int items;
        elf_addr_t *elf_info;
        int ei_index = 0;
        const struct cred *cred = current_cred();
        struct vm_area_struct *vma;
+       unsigned long saved_auxv[AT_VECTOR_SIZE];
 
        /*
         * In some cases (e.g. Hyper-Threading), we want to avoid L1
@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
         * Generate 16 random bytes for userspace PRNG seeding.
         */
        get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
-       u_rand_bytes = (elf_addr_t __user *)
-                      STACK_ALLOC(p, sizeof(k_rand_bytes));
+       prandom_seed(k_rand_bytes[0] ^ prandom_u32());
+       prandom_seed(k_rand_bytes[1] ^ prandom_u32());
+       prandom_seed(k_rand_bytes[2] ^ prandom_u32());
+       prandom_seed(k_rand_bytes[3] ^ prandom_u32());
+       p = STACK_ROUND(p, sizeof(k_rand_bytes));
+       u_rand_bytes = (elf_addr_t __user *) p;
        if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
                return -EFAULT;
 
@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
                return -EFAULT;
        current->mm->env_end = p;
 
+       memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
+
        /* Put the elf_info on the stack in the right place.  */
        sp = (elf_addr_t __user *)envp + 1;
-       if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
+       if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
                return -EFAULT;
        return 0;
 }
@@ -514,14 +541,14 @@ static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
    an ELF header */
 
 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
-               struct file *interpreter, unsigned long *interp_map_addr,
+               struct file *interpreter,
                unsigned long no_base, struct elf_phdr *interp_elf_phdata)
 {
        struct elf_phdr *eppnt;
-       unsigned long load_addr = 0;
+       unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
        int load_addr_set = 0;
        unsigned long last_bss = 0, elf_bss = 0;
-       unsigned long error = ~0UL;
+       unsigned long error = -EINVAL;
        unsigned long total_size;
        int i;
 
@@ -541,6 +568,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
                goto out;
        }
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
+               pax_task_size = SEGMEXEC_TASK_SIZE;
+#endif
+
        eppnt = interp_elf_phdata;
        for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
                if (eppnt->p_type == PT_LOAD) {
@@ -564,8 +596,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
                        map_addr = elf_map(interpreter, load_addr + vaddr,
                                        eppnt, elf_prot, elf_type, total_size);
                        total_size = 0;
-                       if (!*interp_map_addr)
-                               *interp_map_addr = map_addr;
                        error = map_addr;
                        if (BAD_ADDR(map_addr))
                                goto out;
@@ -584,8 +614,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
                        k = load_addr + eppnt->p_vaddr;
                        if (BAD_ADDR(k) ||
                            eppnt->p_filesz > eppnt->p_memsz ||
-                           eppnt->p_memsz > TASK_SIZE ||
-                           TASK_SIZE - eppnt->p_memsz < k) {
+                           eppnt->p_memsz > pax_task_size ||
+                           pax_task_size - eppnt->p_memsz < k) {
                                error = -ENOMEM;
                                goto out;
                        }
@@ -624,9 +654,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
                elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
 
                /* Map the last of the bss segment */
-               error = vm_brk(elf_bss, last_bss - elf_bss);
-               if (BAD_ADDR(error))
-                       goto out;
+               if (last_bss > elf_bss) {
+                       error = vm_brk(elf_bss, last_bss - elf_bss);
+                       if (BAD_ADDR(error))
+                               goto out;
+               }
        }
 
        error = load_addr;
@@ -634,6 +666,336 @@ out:
        return error;
 }
 
+#ifdef CONFIG_PAX_PT_PAX_FLAGS
+#ifdef CONFIG_PAX_SOFTMODE
+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
+{
+       unsigned long pax_flags = 0UL;
+
+#ifdef CONFIG_PAX_PAGEEXEC
+       if (elf_phdata->p_flags & PF_PAGEEXEC)
+               pax_flags |= MF_PAX_PAGEEXEC;
+#endif
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (elf_phdata->p_flags & PF_SEGMEXEC)
+               pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
+#ifdef CONFIG_PAX_EMUTRAMP
+       if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
+               pax_flags |= MF_PAX_EMUTRAMP;
+#endif
+
+#ifdef CONFIG_PAX_MPROTECT
+       if (elf_phdata->p_flags & PF_MPROTECT)
+               pax_flags |= MF_PAX_MPROTECT;
+#endif
+
+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
+       if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
+               pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
+       return pax_flags;
+}
+#endif
+
+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
+{
+       unsigned long pax_flags = 0UL;
+
+#ifdef CONFIG_PAX_PAGEEXEC
+       if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
+               pax_flags |= MF_PAX_PAGEEXEC;
+#endif
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
+               pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
+#ifdef CONFIG_PAX_EMUTRAMP
+       if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
+               pax_flags |= MF_PAX_EMUTRAMP;
+#endif
+
+#ifdef CONFIG_PAX_MPROTECT
+       if (!(elf_phdata->p_flags & PF_NOMPROTECT))
+               pax_flags |= MF_PAX_MPROTECT;
+#endif
+
+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
+       if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
+               pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
+       return pax_flags;
+}
+#endif
+
+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
+#ifdef CONFIG_PAX_SOFTMODE
+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
+{
+       unsigned long pax_flags = 0UL;
+
+#ifdef CONFIG_PAX_PAGEEXEC
+       if (pax_flags_softmode & MF_PAX_PAGEEXEC)
+               pax_flags |= MF_PAX_PAGEEXEC;
+#endif
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (pax_flags_softmode & MF_PAX_SEGMEXEC)
+               pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
+#ifdef CONFIG_PAX_EMUTRAMP
+       if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
+               pax_flags |= MF_PAX_EMUTRAMP;
+#endif
+
+#ifdef CONFIG_PAX_MPROTECT
+       if (pax_flags_softmode & MF_PAX_MPROTECT)
+               pax_flags |= MF_PAX_MPROTECT;
+#endif
+
+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
+       if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
+               pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
+       return pax_flags;
+}
+#endif
+
+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
+{
+       unsigned long pax_flags = 0UL;
+
+#ifdef CONFIG_PAX_PAGEEXEC
+       if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
+               pax_flags |= MF_PAX_PAGEEXEC;
+#endif
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
+               pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
+#ifdef CONFIG_PAX_EMUTRAMP
+       if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
+               pax_flags |= MF_PAX_EMUTRAMP;
+#endif
+
+#ifdef CONFIG_PAX_MPROTECT
+       if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
+               pax_flags |= MF_PAX_MPROTECT;
+#endif
+
+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
+       if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
+               pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
+       return pax_flags;
+}
+#endif
+
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+static unsigned long pax_parse_defaults(void)
+{
+       unsigned long pax_flags = 0UL;
+
+#ifdef CONFIG_PAX_SOFTMODE
+       if (pax_softmode)
+               return pax_flags;
+#endif
+
+#ifdef CONFIG_PAX_PAGEEXEC
+       pax_flags |= MF_PAX_PAGEEXEC;
+#endif
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
+#ifdef CONFIG_PAX_MPROTECT
+       pax_flags |= MF_PAX_MPROTECT;
+#endif
+
+#ifdef CONFIG_PAX_RANDMMAP
+       if (randomize_va_space)
+               pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
+       return pax_flags;
+}
+
+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
+{
+       unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
+
+#ifdef CONFIG_PAX_EI_PAX
+
+#ifdef CONFIG_PAX_SOFTMODE
+       if (pax_softmode)
+               return pax_flags;
+#endif
+
+       pax_flags = 0UL;
+
+#ifdef CONFIG_PAX_PAGEEXEC
+       if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
+               pax_flags |= MF_PAX_PAGEEXEC;
+#endif
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
+               pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
+#ifdef CONFIG_PAX_EMUTRAMP
+       if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
+               pax_flags |= MF_PAX_EMUTRAMP;
+#endif
+
+#ifdef CONFIG_PAX_MPROTECT
+       if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
+               pax_flags |= MF_PAX_MPROTECT;
+#endif
+
+#ifdef CONFIG_PAX_ASLR
+       if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
+               pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
+#endif
+
+       return pax_flags;
+
+}
+
+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
+{
+
+#ifdef CONFIG_PAX_PT_PAX_FLAGS
+       unsigned long i;
+
+       for (i = 0UL; i < elf_ex->e_phnum; i++)
+               if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
+                       if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
+                           ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
+                           ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
+                           ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
+                           ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
+                               return PAX_PARSE_FLAGS_FALLBACK;
+
+#ifdef CONFIG_PAX_SOFTMODE
+                       if (pax_softmode)
+                               return pax_parse_pt_pax_softmode(&elf_phdata[i]);
+                       else
+#endif
+
+                               return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
+                       break;
+               }
+#endif
+
+       return PAX_PARSE_FLAGS_FALLBACK;
+}
+
+static unsigned long pax_parse_xattr_pax(struct file * const file)
+{
+
+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
+       ssize_t xattr_size, i;
+       unsigned char xattr_value[sizeof("pemrs") - 1];
+       unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
+
+       xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
+       if (xattr_size < 0 || xattr_size > sizeof xattr_value)
+               return PAX_PARSE_FLAGS_FALLBACK;
+
+       for (i = 0; i < xattr_size; i++)
+               switch (xattr_value[i]) {
+               default:
+                       return PAX_PARSE_FLAGS_FALLBACK;
+
+#define parse_flag(option1, option2, flag)                     \
+               case option1:                                   \
+                       if (pax_flags_hardmode & MF_PAX_##flag) \
+                               return PAX_PARSE_FLAGS_FALLBACK;\
+                       pax_flags_hardmode |= MF_PAX_##flag;    \
+                       break;                                  \
+               case option2:                                   \
+                       if (pax_flags_softmode & MF_PAX_##flag) \
+                               return PAX_PARSE_FLAGS_FALLBACK;\
+                       pax_flags_softmode |= MF_PAX_##flag;    \
+                       break;
+
+               parse_flag('p', 'P', PAGEEXEC);
+               parse_flag('e', 'E', EMUTRAMP);
+               parse_flag('m', 'M', MPROTECT);
+               parse_flag('r', 'R', RANDMMAP);
+               parse_flag('s', 'S', SEGMEXEC);
+
+#undef parse_flag
+               }
+
+       if (pax_flags_hardmode & pax_flags_softmode)
+               return PAX_PARSE_FLAGS_FALLBACK;
+
+#ifdef CONFIG_PAX_SOFTMODE
+       if (pax_softmode)
+               return pax_parse_xattr_pax_softmode(pax_flags_softmode);
+       else
+#endif
+
+               return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
+#else
+       return PAX_PARSE_FLAGS_FALLBACK;
+#endif
+
+}
+
+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
+{
+       unsigned long pax_flags, ei_pax_flags,  pt_pax_flags, xattr_pax_flags;
+
+       pax_flags = pax_parse_defaults();
+       ei_pax_flags = pax_parse_ei_pax(elf_ex);
+       pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
+       xattr_pax_flags = pax_parse_xattr_pax(file);
+
+       if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
+           xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
+           pt_pax_flags != xattr_pax_flags)
+               return -EINVAL;
+       if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
+               pax_flags = xattr_pax_flags;
+       else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
+               pax_flags = pt_pax_flags;
+       else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
+               pax_flags = ei_pax_flags;
+
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
+       if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
+               if ((__supported_pte_mask & _PAGE_NX))
+                       pax_flags &= ~MF_PAX_SEGMEXEC;
+               else
+                       pax_flags &= ~MF_PAX_PAGEEXEC;
+       }
+#endif
+
+       if (0 > pax_check_flags(&pax_flags))
+               return -EINVAL;
+
+       current->mm->pax_flags = pax_flags;
+       return 0;
+}
+#endif
+
 /*
  * These are the functions used to load ELF style executables and shared
  * libraries.  There is no binary dependent code anywhere else.
@@ -647,6 +1009,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
 {
        unsigned long random_variable = 0;
 
+#ifdef CONFIG_PAX_RANDUSTACK
+       if (current->mm->pax_flags & MF_PAX_RANDMMAP)
+               return stack_top - current->mm->delta_stack;
+#endif
+
        if ((current->flags & PF_RANDOMIZE) &&
                !(current->personality & ADDR_NO_RANDOMIZE)) {
                random_variable = (unsigned long) get_random_int();
@@ -666,7 +1033,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
        unsigned long load_addr = 0, load_bias = 0;
        int load_addr_set = 0;
        char * elf_interpreter = NULL;
-       unsigned long error;
+       unsigned long error = 0;
        struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
        unsigned long elf_bss, elf_brk;
        int retval, i;
@@ -681,6 +1048,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
                struct elfhdr interp_elf_ex;
        } *loc;
        struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
+       unsigned long pax_task_size;
 
        loc = kmalloc(sizeof(*loc), GFP_KERNEL);
        if (!loc) {
@@ -839,6 +1207,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
        /* Do this immediately, since STACK_TOP as used in setup_arg_pages
           may depend on the personality.  */
        SET_PERSONALITY2(loc->elf_ex, &arch_state);
+
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+       current->mm->pax_flags = 0UL;
+#endif
+
+#ifdef CONFIG_PAX_DLRESOLVE
+       current->mm->call_dl_resolve = 0UL;
+#endif
+
+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
+       current->mm->call_syscall = 0UL;
+#endif
+
+#ifdef CONFIG_PAX_ASLR
+       current->mm->delta_mmap = 0UL;
+       current->mm->delta_stack = 0UL;
+#endif
+
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+       if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
+               send_sig(SIGKILL, current, 0);
+               goto out_free_dentry;
+       }
+#endif
+
+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
+       pax_set_initial_flags(bprm);
+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
+       if (pax_set_initial_flags_func)
+               (pax_set_initial_flags_func)(bprm);
+#endif
+
+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
+       if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
+               current->mm->context.user_cs_limit = PAGE_SIZE;
+               current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
+       }
+#endif
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
+               current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
+               current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
+               pax_task_size = SEGMEXEC_TASK_SIZE;
+               current->mm->def_flags |= VM_NOHUGEPAGE;
+       } else
+#endif
+
+       pax_task_size = TASK_SIZE;
+
+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
+       if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
+               set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
+               put_cpu();
+       }
+#endif
+
+#ifdef CONFIG_PAX_ASLR
+       if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
+               current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
+               current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
+       }
+#endif
+
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+       if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
+               executable_stack = EXSTACK_DISABLE_X;
+               current->personality &= ~READ_IMPLIES_EXEC;
+       } else
+#endif
+
        if (elf_read_implies_exec(loc->elf_ex, executable_stack))
                current->personality |= READ_IMPLIES_EXEC;
 
@@ -924,6 +1363,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
 #else
                        load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
 #endif
+
+#ifdef CONFIG_PAX_RANDMMAP
+                       /* PaX: randomize base address at the default exe base if requested */
+                       if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
+#ifdef CONFIG_SPARC64
+                               load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
+#else
+                               load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
+#endif
+                               load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
+                               elf_flags |= MAP_FIXED;
+                       }
+#endif
+
                }
 
                error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
@@ -955,9 +1408,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
                 * allowed task size. Note that p_filesz must always be
                 * <= p_memsz so it is only necessary to check p_memsz.
                 */
-               if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
-                   elf_ppnt->p_memsz > TASK_SIZE ||
-                   TASK_SIZE - elf_ppnt->p_memsz < k) {
+               if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
+                   elf_ppnt->p_memsz > pax_task_size ||
+                   pax_task_size - elf_ppnt->p_memsz < k) {
                        /* set_brk can never work. Avoid overflows. */
                        retval = -EINVAL;
                        goto out_free_dentry;
@@ -993,16 +1446,43 @@ static int load_elf_binary(struct linux_binprm *bprm)
        if (retval)
                goto out_free_dentry;
        if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
-               retval = -EFAULT; /* Nobody gets to see this, but.. */
-               goto out_free_dentry;
+               /*
+                * This bss-zeroing can fail if the ELF
+                * file specifies odd protections. So
+                * we don't check the return value
+                */
        }
 
-       if (elf_interpreter) {
-               unsigned long interp_map_addr = 0;
+#ifdef CONFIG_PAX_RANDMMAP
+       if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
+               unsigned long start, size, flags;
+               vm_flags_t vm_flags;
 
+               start = ELF_PAGEALIGN(elf_brk);
+               size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
+               flags = MAP_FIXED | MAP_PRIVATE;
+               vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
+
+               down_write(&current->mm->mmap_sem);
+               start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
+               retval = -ENOMEM;
+               if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
+//                     if (current->personality & ADDR_NO_RANDOMIZE)
+//                             vm_flags |= VM_READ | VM_MAYREAD;
+                       start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
+                       retval = IS_ERR_VALUE(start) ? start : 0;
+               }
+               up_write(&current->mm->mmap_sem);
+               if (retval == 0)
+                       retval = set_brk(start + size, start + size + PAGE_SIZE);
+               if (retval < 0)
+                       goto out_free_dentry;
+       }
+#endif
+
+       if (elf_interpreter) {
                elf_entry = load_elf_interp(&loc->interp_elf_ex,
                                            interpreter,
-                                           &interp_map_addr,
                                            load_bias, interp_elf_phdata);
                if (!IS_ERR((void *)elf_entry)) {
                        /*
@@ -1230,7 +1710,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
  * Decide what to dump of a segment, part, all or none.
  */
 static unsigned long vma_dump_size(struct vm_area_struct *vma,
-                                  unsigned long mm_flags)
+                                  unsigned long mm_flags, long signr)
 {
 #define FILTER(type)   (mm_flags & (1UL << MMF_DUMP_##type))
 
@@ -1268,7 +1748,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
        if (vma->vm_file == NULL)
                return 0;
 
-       if (FILTER(MAPPED_PRIVATE))
+       if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
                goto whole;
 
        /*
@@ -1475,9 +1955,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
 {
        elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
        int i = 0;
-       do
+       do {
                i += 2;
-       while (auxv[i - 2] != AT_NULL);
+       while (auxv[i - 2] != AT_NULL);
        fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
 }
 
@@ -1486,7 +1966,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
 {
        mm_segment_t old_fs = get_fs();
        set_fs(KERNEL_DS);
-       copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
+       copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
        set_fs(old_fs);
        fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
 }
@@ -2206,7 +2686,7 @@ static int elf_core_dump(struct coredump_params *cprm)
                        vma = next_vma(vma, gate_vma)) {
                unsigned long dump_size;
 
-               dump_size = vma_dump_size(vma, cprm->mm_flags);
+               dump_size = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
                vma_filesz[i++] = dump_size;
                vma_data_size += dump_size;
        }
@@ -2314,6 +2794,167 @@ out:
 
 #endif         /* CONFIG_ELF_CORE */
 
+#ifdef CONFIG_PAX_MPROTECT
+/* PaX: non-PIC ELF libraries need relocations on their executable segments
+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
+ * we'll remove VM_MAYWRITE for good on RELRO segments.
+ *
+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
+ * basis because we want to allow the common case and not the special ones.
+ */
+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
+{
+       struct elfhdr elf_h;
+       struct elf_phdr elf_p;
+       unsigned long i;
+       unsigned long oldflags;
+       bool is_textrel_rw, is_textrel_rx, is_relro;
+
+       if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
+               return;
+
+       oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
+       newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
+
+#ifdef CONFIG_PAX_ELFRELOCS
+       /* possible TEXTREL */
+       is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
+       is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
+#else
+       is_textrel_rw = false;
+       is_textrel_rx = false;
+#endif
+
+       /* possible RELRO */
+       is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
+
+       if (!is_textrel_rw && !is_textrel_rx && !is_relro)
+               return;
+
+       if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
+           memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
+
+#ifdef CONFIG_PAX_ETEXECRELOCS
+           ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
+#else
+           ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
+#endif
+
+           (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
+           !elf_check_arch(&elf_h) ||
+           elf_h.e_phentsize != sizeof(struct elf_phdr) ||
+           elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
+               return;
+
+       for (i = 0UL; i < elf_h.e_phnum; i++) {
+               if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
+                       return;
+               switch (elf_p.p_type) {
+               case PT_DYNAMIC:
+                       if (!is_textrel_rw && !is_textrel_rx)
+                               continue;
+                       i = 0UL;
+                       while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
+                               elf_dyn dyn;
+
+                               if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
+                                       break;
+                               if (dyn.d_tag == DT_NULL)
+                                       break;
+                               if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
+                                       gr_log_textrel(vma);
+                                       if (is_textrel_rw)
+                                               vma->vm_flags |= VM_MAYWRITE;
+                                       else
+                                               /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
+                                               vma->vm_flags &= ~VM_MAYWRITE;
+                                       break;
+                               }
+                               i++;
+                       }
+                       is_textrel_rw = false;
+                       is_textrel_rx = false;
+                       continue;
+
+               case PT_GNU_RELRO:
+                       if (!is_relro)
+                               continue;
+                       if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
+                               vma->vm_flags &= ~VM_MAYWRITE;
+                       is_relro = false;
+                       continue;
+
+#ifdef CONFIG_PAX_PT_PAX_FLAGS
+               case PT_PAX_FLAGS: {
+                       const char *msg_mprotect = "", *msg_emutramp = "";
+                       char *buffer_lib, *buffer_exe;
+
+                       if (elf_p.p_flags & PF_NOMPROTECT)
+                               msg_mprotect = "MPROTECT disabled";
+
+#ifdef CONFIG_PAX_EMUTRAMP
+                       if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
+                               msg_emutramp = "EMUTRAMP enabled";
+#endif
+
+                       if (!msg_mprotect[0] && !msg_emutramp[0])
+                               continue;
+
+                       if (!printk_ratelimit())
+                               continue;
+
+                       buffer_lib = (char *)__get_free_page(GFP_KERNEL);
+                       buffer_exe = (char *)__get_free_page(GFP_KERNEL);
+                       if (buffer_lib && buffer_exe) {
+                               char *path_lib, *path_exe;
+
+                               path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
+                               path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
+
+                               pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
+                                       (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
+
+                       }
+                       free_page((unsigned long)buffer_exe);
+                       free_page((unsigned long)buffer_lib);
+                       continue;
+               }
+#endif
+
+               }
+       }
+}
+#endif
+
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
+
+extern int grsec_enable_log_rwxmaps;
+
+static void elf_handle_mmap(struct file *file)
+{
+       struct elfhdr elf_h;
+       struct elf_phdr elf_p;
+       unsigned long i;
+
+       if (!grsec_enable_log_rwxmaps)
+               return;
+
+       if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
+           memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
+           (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
+           elf_h.e_phentsize != sizeof(struct elf_phdr) ||
+           elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
+               return;
+
+       for (i = 0UL; i < elf_h.e_phnum; i++) {
+               if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
+                       return;
+               if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
+                       gr_log_ptgnustack(file);
+       }
+}
+#endif
+
 static int __init init_elf_binfmt(void)
 {
        register_binfmt(&elf_format);
index b48c41bf0f86755200808c1b33a82ce6c8066d71..e070416e54376387df790959adbcd6e7880069a2 100644 (file)
@@ -703,7 +703,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
        else if (bdev->bd_contains == bdev)
                return true;     /* is a whole device which isn't held */
 
-       else if (whole->bd_holder == bd_may_claim)
+       else if (whole->bd_holder == (void *)bd_may_claim)
                return true;     /* is a partition of a device that is being partitioned */
        else if (whole->bd_holder != NULL)
                return false;    /* is a partition of a held device */
index f54511dd287ebcb29cda0377c952fca694b52c39..58acdec5eccf1e8c5f9d6e60d68f2358ce5e808d 100644 (file)
@@ -1173,9 +1173,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                free_extent_buffer(buf);
                add_root_to_dirty_list(root);
        } else {
-               if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
-                       parent_start = parent->start;
-               else
+               if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
+                       if (parent)
+                               parent_start = parent->start;
+                       else
+                               parent_start = 0;
+               } else
                        parent_start = 0;
 
                WARN_ON(trans->transid != btrfs_header_generation(parent));
index de4e70fb3cbbd4a5c28d13f1fe3aec16733ed49f..b41dc456370af8ee947a00dfbbdf655c7955aefd 100644 (file)
@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
 
 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
 {
-       int seq = atomic_inc_return(&delayed_root->items_seq);
+       int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
        if ((atomic_dec_return(&delayed_root->items) <
            BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
            waitqueue_active(&delayed_root->wait))
@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
 
 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
 {
-       int val = atomic_read(&delayed_root->items_seq);
+       int val = atomic_read_unchecked(&delayed_root->items_seq);
 
        if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
                return 1;
@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
                int seq;
                int ret;
 
-               seq = atomic_read(&delayed_root->items_seq);
+               seq = atomic_read_unchecked(&delayed_root->items_seq);
 
                ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
                if (ret)
index f70119f254216583f3c7317085ec209eceb03d0f..ab5894d782071c9a1a85fbb0da5a0c9f54e8a368 100644 (file)
@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
         */
        struct list_head prepare_list;
        atomic_t items;         /* for delayed items */
-       atomic_t items_seq;     /* for delayed items */
+       atomic_unchecked_t items_seq;   /* for delayed items */
        int nodes;              /* for delayed nodes */
        wait_queue_head_t wait;
 };
@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
                                struct btrfs_delayed_root *delayed_root)
 {
        atomic_set(&delayed_root->items, 0);
-       atomic_set(&delayed_root->items_seq, 0);
+       atomic_set_unchecked(&delayed_root->items_seq, 0);
        delayed_root->nodes = 0;
        spin_lock_init(&delayed_root->lock);
        init_waitqueue_head(&delayed_root->wait);
index d49fe8a0f6b5c9ada112830f6f27a8eafe202c92..2e11037792eb878d81311057fcd39a648b47ac60 100644 (file)
@@ -3925,9 +3925,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
        for (i = 0; i < num_types; i++) {
                struct btrfs_space_info *tmp;
 
+               /* Don't copy in more than we allocated */
                if (!slot_count)
                        break;
 
+               slot_count--;
+
                info = NULL;
                rcu_read_lock();
                list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
@@ -3949,10 +3952,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
                                memcpy(dest, &space, sizeof(space));
                                dest++;
                                space_args.total_spaces++;
-                               slot_count--;
                        }
-                       if (!slot_count)
-                               break;
                }
                up_read(&info->groups_sem);
        }
index 6f49b2872a6454330bac0ef912be3d0152e2ef4f..483410fe8ed0fc8b321e9de683d0d8085bf47c97 100644 (file)
@@ -271,7 +271,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
                           function, line, errstr);
                return;
        }
-       ACCESS_ONCE(trans->transaction->aborted) = errno;
+       ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
        /* Wake up anybody who may be waiting on this transaction */
        wake_up(&root->fs_info->transaction_wait);
        wake_up(&root->fs_info->transaction_blocked_wait);
index 92db3f648df40cc5d1fe3428a89348efffe45776..898a5619833d805c94a0e0cbafe88cf8c752ffbc 100644 (file)
@@ -472,7 +472,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
        for (set = 0; set < FEAT_MAX; set++) {
                int i;
                struct attribute *attrs[2];
-               struct attribute_group agroup = {
+               attribute_group_no_const agroup = {
                        .name = "features",
                        .attrs = attrs,
                };
index 2299bfde39eec666fe1b0876733746a76673f5a5..4098e72a9878fe201d8f3c86896efdaaa9e2116a 100644 (file)
@@ -463,7 +463,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
         * extent entry.
         */
        use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
-       cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
+       pax_open_kernel();
+       *(void **)&cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
+       pax_close_kernel();
 
        /*
         * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
@@ -870,7 +872,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
        if (ret)
                return ret;
 
-       cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
+       pax_open_kernel();
+       *(void **)&cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
+       pax_close_kernel();
        __btrfs_remove_free_space_cache(cache->free_space_ctl);
 
        return 0;
index 154990c26dcbc9d63508228b9125182b7168b80e..d0cf6995aa1e0fd1a91d09a0c04631b5f1489c30 100644 (file)
@@ -43,7 +43,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
                                             struct btrfs_trans_handle *trans)
 {
-       ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
+       ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
 }
 
 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
index 20805db2c98774a8cbe3242d37c63f370149f1a9..2e8fc696c8fa20525a1a85318f25c54ed6cb6699 100644 (file)
@@ -3417,7 +3417,7 @@ void __init buffer_init(void)
        bh_cachep = kmem_cache_create("buffer_head",
                        sizeof(struct buffer_head), 0,
                                (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
-                               SLAB_MEM_SPREAD),
+                               SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
                                NULL);
 
        /*
index fbb08e97438d36eb0bc783386bf68d1521fb9a81..0fda76490d2cc15816c2bec2077a0e09851784c0 100644 (file)
@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
               args);
 
        /* start by checking things over */
-       ASSERT(cache->fstop_percent >= 0 &&
-              cache->fstop_percent < cache->fcull_percent &&
+       ASSERT(cache->fstop_percent < cache->fcull_percent &&
               cache->fcull_percent < cache->frun_percent &&
               cache->frun_percent  < 100);
 
-       ASSERT(cache->bstop_percent >= 0 &&
-              cache->bstop_percent < cache->bcull_percent &&
+       ASSERT(cache->bstop_percent < cache->bcull_percent &&
               cache->bcull_percent < cache->brun_percent &&
               cache->brun_percent  < 100);
 
index ce1b115dcc28bc2968009e5e8182004da99cf29e..4a6852c4291d17104d0112a71146bd18db638627 100644 (file)
@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
        if (n > buflen)
                return -EMSGSIZE;
 
-       if (copy_to_user(_buffer, buffer, n) != 0)
+       if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
                return -EFAULT;
 
        return n;
@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
        if (test_bit(CACHEFILES_DEAD, &cache->flags))
                return -EIO;
 
-       if (datalen < 0 || datalen > PAGE_SIZE - 1)
+       if (datalen > PAGE_SIZE - 1)
                return -EOPNOTSUPP;
 
        /* drag the command string into the kernel so we can parse it */
@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
        if (args[0] != '%' || args[1] != '\0')
                return -EINVAL;
 
-       if (fstop < 0 || fstop >= cache->fcull_percent)
+       if (fstop >= cache->fcull_percent)
                return cachefiles_daemon_range_error(cache, args);
 
        cache->fstop_percent = fstop;
@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
        if (args[0] != '%' || args[1] != '\0')
                return -EINVAL;
 
-       if (bstop < 0 || bstop >= cache->bcull_percent)
+       if (bstop >= cache->bcull_percent)
                return cachefiles_daemon_range_error(cache, args);
 
        cache->bstop_percent = bstop;
index 8c52472d2efa4ec745821e52d9deaa14bd440515..c4e3a69de716b4c7e3d5ab0283aaea3d0deaeb95 100644 (file)
@@ -66,7 +66,7 @@ struct cachefiles_cache {
        wait_queue_head_t               daemon_pollwq;  /* poll waitqueue for daemon */
        struct rb_root                  active_nodes;   /* active nodes (can't be culled) */
        rwlock_t                        active_lock;    /* lock for active_nodes */
-       atomic_t                        gravecounter;   /* graveyard uniquifier */
+       atomic_unchecked_t              gravecounter;   /* graveyard uniquifier */
        unsigned                        frun_percent;   /* when to stop culling (% files) */
        unsigned                        fcull_percent;  /* when to start culling (% files) */
        unsigned                        fstop_percent;  /* when to stop allocating (% files) */
@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
  * proc.c
  */
 #ifdef CONFIG_CACHEFILES_HISTOGRAM
-extern atomic_t cachefiles_lookup_histogram[HZ];
-extern atomic_t cachefiles_mkdir_histogram[HZ];
-extern atomic_t cachefiles_create_histogram[HZ];
+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
 
 extern int __init cachefiles_proc_init(void);
 extern void cachefiles_proc_cleanup(void);
 static inline
-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
 {
        unsigned long jif = jiffies - start_jif;
        if (jif >= HZ)
                jif = HZ - 1;
-       atomic_inc(&histogram[jif]);
+       atomic_inc_unchecked(&histogram[jif]);
 }
 
 #else
index 7f8e83f9d74eb87712db9e8fdc7da95fcccaefdb..8951aa45a80d8ea295bf8fc3ea6f7607dd7a8bb5 100644 (file)
@@ -309,7 +309,7 @@ try_again:
        /* first step is to make up a grave dentry in the graveyard */
        sprintf(nbuffer, "%08x%08x",
                (uint32_t) get_seconds(),
-               (uint32_t) atomic_inc_return(&cache->gravecounter));
+               (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
 
        /* do the multiway lock magic */
        trap = lock_rename(cache->graveyard, dir);
index eccd33941199c1a006ab790f9cbc687e15779e14..4c1d995288aaad5a6bca8856272ff81255427a37 100644 (file)
@@ -14,9 +14,9 @@
 #include <linux/seq_file.h>
 #include "internal.h"
 
-atomic_t cachefiles_lookup_histogram[HZ];
-atomic_t cachefiles_mkdir_histogram[HZ];
-atomic_t cachefiles_create_histogram[HZ];
+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
+atomic_unchecked_t cachefiles_create_histogram[HZ];
 
 /*
  * display the latency histogram
@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
                return 0;
        default:
                index = (unsigned long) v - 3;
-               x = atomic_read(&cachefiles_lookup_histogram[index]);
-               y = atomic_read(&cachefiles_mkdir_histogram[index]);
-               z = atomic_read(&cachefiles_create_histogram[index]);
+               x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
+               y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
+               z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
                if (x == 0 && y == 0 && z == 0)
                        return 0;
 
index c241603764fdc560ae72cea19410b49c15e860d2..56bae60357572b9ae6dff98620e56719f1931bcc 100644 (file)
@@ -129,6 +129,8 @@ static int __dcache_readdir(struct file *file,  struct dir_context *ctx,
        struct dentry *dentry, *last;
        struct ceph_dentry_info *di;
        int err = 0;
+       char d_name[DNAME_INLINE_LEN];
+       const unsigned char *name;
 
        /* claim ref on last dentry we returned */
        last = fi->dentry;
@@ -192,7 +194,12 @@ more:
 
        dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
             dentry, dentry, dentry->d_inode);
-       if (!dir_emit(ctx, dentry->d_name.name,
+       name = dentry->d_name.name;
+       if (name == dentry->d_iname) {
+               memcpy(d_name, name, dentry->d_name.len);
+               name = d_name;
+       }
+       if (!dir_emit(ctx, name,
                      dentry->d_name.len,
                      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
                      dentry->d_inode->i_mode >> 12)) {
@@ -250,7 +257,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        unsigned frag = fpos_frag(ctx->pos);
-       int off = fpos_off(ctx->pos);
+       unsigned int off = fpos_off(ctx->pos);
        int err;
        u32 ftype;
        struct ceph_mds_reply_info_parsed *rinfo;
index 50f06cddc94b7da66031a5fcefad40d6c180902d..c7eba3e62b61405d5fa2efc51956c05254871fae 100644 (file)
@@ -896,7 +896,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
 /*
  * construct our own bdi so we can control readahead, etc.
  */
-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
 
 static int ceph_register_bdi(struct super_block *sb,
                             struct ceph_fs_client *fsc)
@@ -913,7 +913,7 @@ static int ceph_register_bdi(struct super_block *sb,
                        default_backing_dev_info.ra_pages;
 
        err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
-                          atomic_long_inc_return(&bdi_seq));
+                          atomic_long_inc_return_unchecked(&bdi_seq));
        if (!err)
                sb->s_bdi = &fsc->backing_dev_info;
        return err;
index 7febcf2475c5ab675c04dfd2fddaa3ed574522a0..62a57215baa72968fe91df290226c34454dffd95 100644 (file)
@@ -269,8 +269,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
 
        if (strtobool(&c, &bv) == 0) {
 #ifdef CONFIG_CIFS_STATS2
-               atomic_set(&totBufAllocCount, 0);
-               atomic_set(&totSmBufAllocCount, 0);
+               atomic_set_unchecked(&totBufAllocCount, 0);
+               atomic_set_unchecked(&totSmBufAllocCount, 0);
 #endif /* CONFIG_CIFS_STATS2 */
                spin_lock(&cifs_tcp_ses_lock);
                list_for_each(tmp1, &cifs_tcp_ses_list) {
@@ -283,7 +283,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
                                        tcon = list_entry(tmp3,
                                                          struct cifs_tcon,
                                                          tcon_list);
-                                       atomic_set(&tcon->num_smbs_sent, 0);
+                                       atomic_set_unchecked(&tcon->num_smbs_sent, 0);
                                        if (server->ops->clear_stats)
                                                server->ops->clear_stats(tcon);
                                }
@@ -315,8 +315,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
                        smBufAllocCount.counter, cifs_min_small);
 #ifdef CONFIG_CIFS_STATS2
        seq_printf(m, "Total Large %d Small %d Allocations\n",
-                               atomic_read(&totBufAllocCount),
-                               atomic_read(&totSmBufAllocCount));
+                               atomic_read_unchecked(&totBufAllocCount),
+                               atomic_read_unchecked(&totSmBufAllocCount));
 #endif /* CONFIG_CIFS_STATS2 */
 
        seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
@@ -345,7 +345,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
                                if (tcon->need_reconnect)
                                        seq_puts(m, "\tDISCONNECTED ");
                                seq_printf(m, "\nSMBs: %d",
-                                          atomic_read(&tcon->num_smbs_sent));
+                                          atomic_read_unchecked(&tcon->num_smbs_sent));
                                if (server->ops->print_stats)
                                        server->ops->print_stats(m, tcon);
                        }
index d72fe37f5420e03c7677e86fe2e26d478277d1ae..ded5511f8efbfb6df7dcd1edfb67fbb5b692ca4a 100644 (file)
@@ -1092,7 +1092,7 @@ cifs_init_request_bufs(void)
 */
        cifs_req_cachep = kmem_cache_create("cifs_request",
                                            CIFSMaxBufSize + max_hdr_size, 0,
-                                           SLAB_HWCACHE_ALIGN, NULL);
+                                           SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
        if (cifs_req_cachep == NULL)
                return -ENOMEM;
 
@@ -1119,7 +1119,7 @@ cifs_init_request_bufs(void)
        efficient to alloc 1 per page off the slab compared to 17K (5page)
        alloc of large cifs buffers even when page debugging is on */
        cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
-                       MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
+                       MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
                        NULL);
        if (cifs_sm_req_cachep == NULL) {
                mempool_destroy(cifs_req_poolp);
@@ -1204,8 +1204,8 @@ init_cifs(void)
        atomic_set(&bufAllocCount, 0);
        atomic_set(&smBufAllocCount, 0);
 #ifdef CONFIG_CIFS_STATS2
-       atomic_set(&totBufAllocCount, 0);
-       atomic_set(&totSmBufAllocCount, 0);
+       atomic_set_unchecked(&totBufAllocCount, 0);
+       atomic_set_unchecked(&totSmBufAllocCount, 0);
 #endif /* CONFIG_CIFS_STATS2 */
 
        atomic_set(&midCount, 0);
index 22b289a3b1c4d3e12727cc0a005456fa9b295a00..bbbba0826b1cc45c5c64189443416fbc0d35c130 100644 (file)
@@ -823,35 +823,35 @@ struct cifs_tcon {
        __u16 Flags;            /* optional support bits */
        enum statusEnum tidStatus;
 #ifdef CONFIG_CIFS_STATS
-       atomic_t num_smbs_sent;
+       atomic_unchecked_t num_smbs_sent;
        union {
                struct {
-                       atomic_t num_writes;
-                       atomic_t num_reads;
-                       atomic_t num_flushes;
-                       atomic_t num_oplock_brks;
-                       atomic_t num_opens;
-                       atomic_t num_closes;
-                       atomic_t num_deletes;
-                       atomic_t num_mkdirs;
-                       atomic_t num_posixopens;
-                       atomic_t num_posixmkdirs;
-                       atomic_t num_rmdirs;
-                       atomic_t num_renames;
-                       atomic_t num_t2renames;
-                       atomic_t num_ffirst;
-                       atomic_t num_fnext;
-                       atomic_t num_fclose;
-                       atomic_t num_hardlinks;
-                       atomic_t num_symlinks;
-                       atomic_t num_locks;
-                       atomic_t num_acl_get;
-                       atomic_t num_acl_set;
+                       atomic_unchecked_t num_writes;
+                       atomic_unchecked_t num_reads;
+                       atomic_unchecked_t num_flushes;
+                       atomic_unchecked_t num_oplock_brks;
+                       atomic_unchecked_t num_opens;
+                       atomic_unchecked_t num_closes;
+                       atomic_unchecked_t num_deletes;
+                       atomic_unchecked_t num_mkdirs;
+                       atomic_unchecked_t num_posixopens;
+                       atomic_unchecked_t num_posixmkdirs;
+                       atomic_unchecked_t num_rmdirs;
+                       atomic_unchecked_t num_renames;
+                       atomic_unchecked_t num_t2renames;
+                       atomic_unchecked_t num_ffirst;
+                       atomic_unchecked_t num_fnext;
+                       atomic_unchecked_t num_fclose;
+                       atomic_unchecked_t num_hardlinks;
+                       atomic_unchecked_t num_symlinks;
+                       atomic_unchecked_t num_locks;
+                       atomic_unchecked_t num_acl_get;
+                       atomic_unchecked_t num_acl_set;
                } cifs_stats;
 #ifdef CONFIG_CIFS_SMB2
                struct {
-                       atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
-                       atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
+                       atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
+                       atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
                } smb2_stats;
 #endif /* CONFIG_CIFS_SMB2 */
        } stats;
@@ -1198,7 +1198,7 @@ convert_delimiter(char *path, char delim)
 }
 
 #ifdef CONFIG_CIFS_STATS
-#define cifs_stats_inc atomic_inc
+#define cifs_stats_inc atomic_inc_unchecked
 
 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
                                            unsigned int bytes)
@@ -1565,8 +1565,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
 /* Various Debug counters */
 GLOBAL_EXTERN atomic_t bufAllocCount;    /* current number allocated  */
 #ifdef CONFIG_CIFS_STATS2
-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
 #endif
 GLOBAL_EXTERN atomic_t smBufAllocCount;
 GLOBAL_EXTERN atomic_t midCount;
index 74f12877493ac6c3f87792192b8aa8c1190f3c05..7ef02377f87762501346955ee7b52491b72b7e11 100644 (file)
@@ -2060,10 +2060,14 @@ static int cifs_writepages(struct address_space *mapping,
                index = mapping->writeback_index; /* Start from prev offset */
                end = -1;
        } else {
-               index = wbc->range_start >> PAGE_CACHE_SHIFT;
-               end = wbc->range_end >> PAGE_CACHE_SHIFT;
-               if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+               if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
                        range_whole = true;
+                       index = 0;
+                       end = ULONG_MAX;
+               } else {
+                       index = wbc->range_start >> PAGE_CACHE_SHIFT;
+                       end = wbc->range_end >> PAGE_CACHE_SHIFT;
+               }
                scanned = true;
        }
        server = cifs_sb_master_tcon(cifs_sb)->ses->server;
index 337946355b29db5c71632f66a354295311e9f3e3..3af418af9af191c643f6e9333de2bd4b8ff987d1 100644 (file)
@@ -170,7 +170,7 @@ cifs_buf_get(void)
                memset(ret_buf, 0, buf_size + 3);
                atomic_inc(&bufAllocCount);
 #ifdef CONFIG_CIFS_STATS2
-               atomic_inc(&totBufAllocCount);
+               atomic_inc_unchecked(&totBufAllocCount);
 #endif /* CONFIG_CIFS_STATS2 */
        }
 
@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
        /*      memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
                atomic_inc(&smBufAllocCount);
 #ifdef CONFIG_CIFS_STATS2
-               atomic_inc(&totSmBufAllocCount);
+               atomic_inc_unchecked(&totSmBufAllocCount);
 #endif /* CONFIG_CIFS_STATS2 */
 
        }
index d2979036a4c72756e91f9fa6c454337585c06800..1cb751651be951e0dae8b0248daba9a53e337784 100644 (file)
@@ -622,27 +622,27 @@ static void
 cifs_clear_stats(struct cifs_tcon *tcon)
 {
 #ifdef CONFIG_CIFS_STATS
-       atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
-       atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
-       atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
-       atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
-       atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
-       atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
-       atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
-       atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
-       atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
-       atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
-       atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
-       atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
-       atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
-       atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
-       atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
-       atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
-       atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
-       atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
-       atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
-       atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
-       atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
+       atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
 #endif
 }
 
@@ -651,36 +651,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
 {
 #ifdef CONFIG_CIFS_STATS
        seq_printf(m, " Oplocks breaks: %d",
-                  atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
+                  atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
        seq_printf(m, "\nReads:  %d Bytes: %llu",
-                  atomic_read(&tcon->stats.cifs_stats.num_reads),
+                  atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
                   (long long)(tcon->bytes_read));
        seq_printf(m, "\nWrites: %d Bytes: %llu",
-                  atomic_read(&tcon->stats.cifs_stats.num_writes),
+                  atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
                   (long long)(tcon->bytes_written));
        seq_printf(m, "\nFlushes: %d",
-                  atomic_read(&tcon->stats.cifs_stats.num_flushes));
+                  atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
        seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
-                  atomic_read(&tcon->stats.cifs_stats.num_locks),
-                  atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
-                  atomic_read(&tcon->stats.cifs_stats.num_symlinks));
+                  atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
+                  atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
+                  atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
        seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
-                  atomic_read(&tcon->stats.cifs_stats.num_opens),
-                  atomic_read(&tcon->stats.cifs_stats.num_closes),
-                  atomic_read(&tcon->stats.cifs_stats.num_deletes));
+                  atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
+                  atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
+                  atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
        seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
-                  atomic_read(&tcon->stats.cifs_stats.num_posixopens),
-                  atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
+                  atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
+                  atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
        seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
-                  atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
-                  atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
+                  atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
+                  atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
        seq_printf(m, "\nRenames: %d T2 Renames %d",
-                  atomic_read(&tcon->stats.cifs_stats.num_renames),
-                  atomic_read(&tcon->stats.cifs_stats.num_t2renames));
+                  atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
+                  atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
        seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
-                  atomic_read(&tcon->stats.cifs_stats.num_ffirst),
-                  atomic_read(&tcon->stats.cifs_stats.num_fnext),
-                  atomic_read(&tcon->stats.cifs_stats.num_fclose));
+                  atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
+                  atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
+                  atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
 #endif
 }
 
index 96b5d40a2ece611b27ed19668cc4b7b665605113..e5db0c1d3d96af23010d53dde5adf4404699be2c 100644 (file)
@@ -418,8 +418,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
 #ifdef CONFIG_CIFS_STATS
        int i;
        for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
-               atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
-               atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
+               atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
+               atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
        }
 #endif
 }
@@ -459,65 +459,65 @@ static void
 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
 {
 #ifdef CONFIG_CIFS_STATS
-       atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
-       atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
+       atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
+       atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
        seq_printf(m, "\nNegotiates: %d sent %d failed",
-                  atomic_read(&sent[SMB2_NEGOTIATE_HE]),
-                  atomic_read(&failed[SMB2_NEGOTIATE_HE]));
+                  atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
+                  atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
        seq_printf(m, "\nSessionSetups: %d sent %d failed",
-                  atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
-                  atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
+                  atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
+                  atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
        seq_printf(m, "\nLogoffs: %d sent %d failed",
-                  atomic_read(&sent[SMB2_LOGOFF_HE]),
-                  atomic_read(&failed[SMB2_LOGOFF_HE]));
+                  atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
+                  atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
        seq_printf(m, "\nTreeConnects: %d sent %d failed",
-                  atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
-                  atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
+                  atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
+                  atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
        seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
-                  atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
-                  atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
+                  atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
+                  atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
        seq_printf(m, "\nCreates: %d sent %d failed",
-                  atomic_read(&sent[SMB2_CREATE_HE]),
-                  atomic_read(&failed[SMB2_CREATE_HE]));
+                  atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
+                  atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
        seq_printf(m, "\nCloses: %d sent %d failed",
-                  atomic_read(&sent[SMB2_CLOSE_HE]),
-                  atomic_read(&failed[SMB2_CLOSE_HE]));
+                  atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
+                  atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
        seq_printf(m, "\nFlushes: %d sent %d failed",
-                  atomic_read(&sent[SMB2_FLUSH_HE]),
-                  atomic_read(&failed[SMB2_FLUSH_HE]));
+                  atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
+                  atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
        seq_printf(m, "\nReads: %d sent %d failed",
-                  atomic_read(&sent[SMB2_READ_HE]),
-                  atomic_read(&failed[SMB2_READ_HE]));
+                  atomic_read_unchecked(&sent[SMB2_READ_HE]),
+                  atomic_read_unchecked(&failed[SMB2_READ_HE]));
        seq_printf(m, "\nWrites: %d sent %d failed",
-                  atomic_read(&sent[SMB2_WRITE_HE]),
-                  atomic_read(&failed[SMB2_WRITE_HE]));
+                  atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
+                  atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
        seq_printf(m, "\nLocks: %d sent %d failed",
-                  atomic_read(&sent[SMB2_LOCK_HE]),
-                  atomic_read(&failed[SMB2_LOCK_HE]));
+                  atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
+                  atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
        seq_printf(m, "\nIOCTLs: %d sent %d failed",
-                  atomic_read(&sent[SMB2_IOCTL_HE]),
-                  atomic_read(&failed[SMB2_IOCTL_HE]));
+                  atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
+                  atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
        seq_printf(m, "\nCancels: %d sent %d failed",
-                  atomic_read(&sent[SMB2_CANCEL_HE]),
-                  atomic_read(&failed[SMB2_CANCEL_HE]));
+                  atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
+                  atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
        seq_printf(m, "\nEchos: %d sent %d failed",
-                  atomic_read(&sent[SMB2_ECHO_HE]),
-                  atomic_read(&failed[SMB2_ECHO_HE]));
+                  atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
+                  atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
        seq_printf(m, "\nQueryDirectories: %d sent %d failed",
-                  atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
-                  atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
+                  atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
+                  atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
        seq_printf(m, "\nChangeNotifies: %d sent %d failed",
-                  atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
-                  atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
+                  atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
+                  atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
        seq_printf(m, "\nQueryInfos: %d sent %d failed",
-                  atomic_read(&sent[SMB2_QUERY_INFO_HE]),
-                  atomic_read(&failed[SMB2_QUERY_INFO_HE]));
+                  atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
+                  atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
        seq_printf(m, "\nSetInfos: %d sent %d failed",
-                  atomic_read(&sent[SMB2_SET_INFO_HE]),
-                  atomic_read(&failed[SMB2_SET_INFO_HE]));
+                  atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
+                  atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
        seq_printf(m, "\nOplockBreaks: %d sent %d failed",
-                  atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
-                  atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
+                  atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
+                  atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
 #endif
 }
 
index 3417340bf89e677fe0c46bf98cf922dd39d29a3a..b94239007cdf86e57e2061f393e21209e9b8f872 100644 (file)
@@ -2144,8 +2144,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
        default:
                cifs_dbg(VFS, "info level %u isn't supported\n",
                         srch_inf->info_level);
-               rc = -EINVAL;
-               goto qdir_exit;
+               return -EINVAL;
        }
 
        req->FileIndex = cpu_to_le32(index);
index 46ee6f238985a86186493cd22a245f4baa7638e0..89a9e7f6d9ae24efc58c4ac9f30fef9abf86c8fc 100644 (file)
@@ -24,7 +24,7 @@
 #include "coda_linux.h"
 #include "coda_cache.h"
 
-static atomic_t permission_epoch = ATOMIC_INIT(0);
+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
 
 /* replace or extend an acl cache hit */
 void coda_cache_enter(struct inode *inode, int mask)
@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
        struct coda_inode_info *cii = ITOC(inode);
 
        spin_lock(&cii->c_lock);
-       cii->c_cached_epoch = atomic_read(&permission_epoch);
+       cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
        if (!uid_eq(cii->c_uid, current_fsuid())) {
                cii->c_uid = current_fsuid();
                 cii->c_cached_perm = mask;
@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
 {
        struct coda_inode_info *cii = ITOC(inode);
        spin_lock(&cii->c_lock);
-       cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
+       cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
        spin_unlock(&cii->c_lock);
 }
 
 /* remove all acl caches */
 void coda_cache_clear_all(struct super_block *sb)
 {
-       atomic_inc(&permission_epoch);
+       atomic_inc_unchecked(&permission_epoch);
 }
 
 
@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
        spin_lock(&cii->c_lock);
        hit = (mask & cii->c_cached_perm) == mask &&
            uid_eq(cii->c_uid, current_fsuid()) &&
-           cii->c_cached_epoch == atomic_read(&permission_epoch);
+           cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
        spin_unlock(&cii->c_lock);
 
        return hit;
index 6fd272d455e4deb1112c277fc564324fd9d7b3ba..dd34ba23f506965c3d80bc734892be597524d13e 100644 (file)
@@ -54,7 +54,7 @@
 #include <asm/ioctls.h>
 #include "internal.h"
 
-int compat_log = 1;
+int compat_log = 0;
 
 int compat_printk(const char *fmt, ...)
 {
@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
 
        set_fs(KERNEL_DS);
        /* The __user pointer cast is valid because of the set_fs() */
-       ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
+       ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
        set_fs(oldfs);
        /* truncating is ok because it's a user address */
        if (!ret)
@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
                goto out;
 
        ret = -EINVAL;
-       if (nr_segs > UIO_MAXIOV || nr_segs < 0)
+       if (nr_segs > UIO_MAXIOV)
                goto out;
        if (nr_segs > fast_segs) {
                ret = -ENOMEM;
@@ -844,6 +844,7 @@ struct compat_old_linux_dirent {
 struct compat_readdir_callback {
        struct dir_context ctx;
        struct compat_old_linux_dirent __user *dirent;
+       struct file * file;
        int result;
 };
 
@@ -863,6 +864,10 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
                buf->result = -EOVERFLOW;
                return -EOVERFLOW;
        }
+
+       if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
+               return 0;
+
        buf->result++;
        dirent = buf->dirent;
        if (!access_ok(VERIFY_WRITE, dirent,
@@ -894,6 +899,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
        if (!f.file)
                return -EBADF;
 
+       buf.file = f.file;
        error = iterate_dir(f.file, &buf.ctx);
        if (buf.result)
                error = buf.result;
@@ -913,6 +919,7 @@ struct compat_getdents_callback {
        struct dir_context ctx;
        struct compat_linux_dirent __user *current_dir;
        struct compat_linux_dirent __user *previous;
+       struct file * file;
        int count;
        int error;
 };
@@ -935,6 +942,10 @@ static int compat_filldir(struct dir_context *ctx, const char *name, int namlen,
                buf->error = -EOVERFLOW;
                return -EOVERFLOW;
        }
+
+       if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
+               return 0;
+
        dirent = buf->previous;
        if (dirent) {
                if (__put_user(offset, &dirent->d_off))
@@ -980,6 +991,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
        if (!f.file)
                return -EBADF;
 
+       buf.file = f.file;
        error = iterate_dir(f.file, &buf.ctx);
        if (error >= 0)
                error = buf.error;
@@ -1000,6 +1012,7 @@ struct compat_getdents_callback64 {
        struct dir_context ctx;
        struct linux_dirent64 __user *current_dir;
        struct linux_dirent64 __user *previous;
+       struct file * file;
        int count;
        int error;
 };
@@ -1018,6 +1031,10 @@ static int compat_filldir64(struct dir_context *ctx, const char *name,
        buf->error = -EINVAL;   /* only used if we fail.. */
        if (reclen > buf->count)
                return -EINVAL;
+
+       if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
+               return 0;
+
        dirent = buf->previous;
 
        if (dirent) {
@@ -1067,6 +1084,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
        if (!f.file)
                return -EBADF;
 
+       buf.file = f.file;
        error = iterate_dir(f.file, &buf.ctx);
        if (error >= 0)
                error = buf.error;
index 4d24d17bcfc1dc3ecd917f12f2a889da1a506e24..4f8c09e003839945c260587c311a0ba0df9511bc 100644 (file)
 #undef elf_phdr
 #undef elf_shdr
 #undef elf_note
+#undef elf_dyn
 #undef elf_addr_t
 #define elfhdr         elf32_hdr
 #define elf_phdr       elf32_phdr
 #define elf_shdr       elf32_shdr
 #define elf_note       elf32_note
+#define elf_dyn                Elf32_Dyn
 #define elf_addr_t     Elf32_Addr
 
 /*
index afec6450450ff08e6be4c1cd7a05dbc293ef0a76..9c656207c0b8675b43487ebe75be9ba333019697 100644 (file)
@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
                        return -EFAULT;
                 if (__get_user(udata, &ss32->iomem_base))
                        return -EFAULT;
-                ss.iomem_base = compat_ptr(udata);
+                ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
                 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
                    __get_user(ss.port_high, &ss32->port_high))
                        return -EFAULT;
@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
        for (i = 0; i < nmsgs; i++) {
                if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
                        return -EFAULT;
-               if (get_user(datap, &umsgs[i].buf) ||
-                   put_user(compat_ptr(datap), &tmsgs[i].buf))
+               if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
+                   put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
                        return -EFAULT;
        }
        return sys_ioctl(fd, cmd, (unsigned long)tdata);
@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
            copy_in_user(&p->l_len,     &p32->l_len,    sizeof(s64)) ||
            copy_in_user(&p->l_sysid,   &p32->l_sysid,  sizeof(s32)) ||
            copy_in_user(&p->l_pid,     &p32->l_pid,    sizeof(u32)) ||
-           copy_in_user(&p->l_pad,     &p32->l_pad,    4*sizeof(u32)))
+           copy_in_user(p->l_pad,      p32->l_pad,     4*sizeof(u32)))
                return -EFAULT;
 
        return ioctl_preallocate(file, p);
@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
 {
        unsigned int a, b;
-       a = *(unsigned int *)p;
-       b = *(unsigned int *)q;
+       a = *(const unsigned int *)p;
+       b = *(const unsigned int *)q;
        if (a > b)
                return 1;
        if (a < b)
index c9c298bd3058924b8fed471ccdebabeef2feeb88..544d1006a185e60113ec90555b82b642e49417f5 100644 (file)
@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
        }
        for (p = q->next; p != &parent_sd->s_children; p = p->next) {
                struct configfs_dirent *next;
-               const char *name;
+               const unsigned char * name;
+               char d_name[sizeof(next->s_dentry->d_iname)];
                int len;
                struct inode *inode = NULL;
 
@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
                        continue;
 
                name = configfs_get_name(next);
-               len = strlen(name);
+               if (next->s_dentry && name == next->s_dentry->d_iname) {
+                       len =  next->s_dentry->d_name.len;
+                       memcpy(d_name, name, len);
+                       name = d_name;
+               } else
+                       len = strlen(name);
 
                /*
                 * We'll have a dentry and an inode for
index b5c86ffd5033420523934c7153080d8cdc605bea..0dac2628bf49e919aee289a9dcce78cd974f390f 100644 (file)
@@ -450,8 +450,8 @@ static void wait_for_dump_helpers(struct file *file)
        struct pipe_inode_info *pipe = file->private_data;
 
        pipe_lock(pipe);
-       pipe->readers++;
-       pipe->writers--;
+       atomic_inc(&pipe->readers);
+       atomic_dec(&pipe->writers);
        wake_up_interruptible_sync(&pipe->wait);
        kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        pipe_unlock(pipe);
@@ -460,11 +460,11 @@ static void wait_for_dump_helpers(struct file *file)
         * We actually want wait_event_freezable() but then we need
         * to clear TIF_SIGPENDING and improve dump_interrupted().
         */
-       wait_event_interruptible(pipe->wait, pipe->readers == 1);
+       wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
 
        pipe_lock(pipe);
-       pipe->readers--;
-       pipe->writers++;
+       atomic_dec(&pipe->readers);
+       atomic_inc(&pipe->writers);
        pipe_unlock(pipe);
 }
 
@@ -511,7 +511,9 @@ void do_coredump(const siginfo_t *siginfo)
        struct files_struct *displaced;
        bool need_nonrelative = false;
        bool core_dumped = false;
-       static atomic_t core_dump_count = ATOMIC_INIT(0);
+       static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
+       long signr = siginfo->si_signo;
+       int dumpable;
        struct coredump_params cprm = {
                .siginfo = siginfo,
                .regs = signal_pt_regs(),
@@ -524,12 +526,17 @@ void do_coredump(const siginfo_t *siginfo)
                .mm_flags = mm->flags,
        };
 
-       audit_core_dumps(siginfo->si_signo);
+       audit_core_dumps(signr);
+
+       dumpable = __get_dumpable(cprm.mm_flags);
+
+       if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
+               gr_handle_brute_attach(dumpable);
 
        binfmt = mm->binfmt;
        if (!binfmt || !binfmt->core_dump)
                goto fail;
-       if (!__get_dumpable(cprm.mm_flags))
+       if (!dumpable)
                goto fail;
 
        cred = prepare_creds();
@@ -548,7 +555,7 @@ void do_coredump(const siginfo_t *siginfo)
                need_nonrelative = true;
        }
 
-       retval = coredump_wait(siginfo->si_signo, &core_state);
+       retval = coredump_wait(signr, &core_state);
        if (retval < 0)
                goto fail_creds;
 
@@ -591,7 +598,7 @@ void do_coredump(const siginfo_t *siginfo)
                }
                cprm.limit = RLIM_INFINITY;
 
-               dump_count = atomic_inc_return(&core_dump_count);
+               dump_count = atomic_inc_return_unchecked(&core_dump_count);
                if (core_pipe_limit && (core_pipe_limit < dump_count)) {
                        printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
                               task_tgid_vnr(current), current->comm);
@@ -623,6 +630,8 @@ void do_coredump(const siginfo_t *siginfo)
        } else {
                struct inode *inode;
 
+               gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
+
                if (cprm.limit < binfmt->min_coredump)
                        goto fail_unlock;
 
@@ -681,7 +690,7 @@ close_fail:
                filp_close(cprm.file, NULL);
 fail_dropcount:
        if (ispipe)
-               atomic_dec(&core_dump_count);
+               atomic_dec_unchecked(&core_dump_count);
 fail_unlock:
        kfree(cn.corename);
        coredump_finish(mm, core_dumped);
@@ -702,6 +711,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
        struct file *file = cprm->file;
        loff_t pos = file->f_pos;
        ssize_t n;
+
+       gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
        if (cprm->written + nr > cprm->limit)
                return 0;
        while (nr) {
index e368d4f412f97ed11039e3329c77358b3fc1fea0..b40ba595aa0553c1af8ab5fe6a50ffdae56f60db 100644 (file)
@@ -508,7 +508,7 @@ static void __dentry_kill(struct dentry *dentry)
         * dentry_iput drops the locks, at which point nobody (except
         * transient RCU lookups) can reach this dentry.
         */
-       BUG_ON((int)dentry->d_lockref.count > 0);
+       BUG_ON((int)__lockref_read(&dentry->d_lockref) > 0);
        this_cpu_dec(nr_dentry);
        if (dentry->d_op && dentry->d_op->d_release)
                dentry->d_op->d_release(dentry);
@@ -561,7 +561,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
        struct dentry *parent = dentry->d_parent;
        if (IS_ROOT(dentry))
                return NULL;
-       if (unlikely((int)dentry->d_lockref.count < 0))
+       if (unlikely((int)__lockref_read(&dentry->d_lockref) < 0))
                return NULL;
        if (likely(spin_trylock(&parent->d_lock)))
                return parent;
@@ -638,7 +638,7 @@ repeat:
                dentry->d_flags |= DCACHE_REFERENCED;
        dentry_lru_add(dentry);
 
-       dentry->d_lockref.count--;
+       __lockref_dec(&dentry->d_lockref);
        spin_unlock(&dentry->d_lock);
        return;
 
@@ -653,7 +653,7 @@ EXPORT_SYMBOL(dput);
 /* This must be called with d_lock held */
 static inline void __dget_dlock(struct dentry *dentry)
 {
-       dentry->d_lockref.count++;
+       __lockref_inc(&dentry->d_lockref);
 }
 
 static inline void __dget(struct dentry *dentry)
@@ -694,8 +694,8 @@ repeat:
                goto repeat;
        }
        rcu_read_unlock();
-       BUG_ON(!ret->d_lockref.count);
-       ret->d_lockref.count++;
+       BUG_ON(!__lockref_read(&ret->d_lockref));
+       __lockref_inc(&ret->d_lockref);
        spin_unlock(&ret->d_lock);
        return ret;
 }
@@ -773,9 +773,9 @@ restart:
        spin_lock(&inode->i_lock);
        hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
                spin_lock(&dentry->d_lock);
-               if (!dentry->d_lockref.count) {
+               if (!__lockref_read(&dentry->d_lockref)) {
                        struct dentry *parent = lock_parent(dentry);
-                       if (likely(!dentry->d_lockref.count)) {
+                       if (likely(!__lockref_read(&dentry->d_lockref))) {
                                __dentry_kill(dentry);
                                dput(parent);
                                goto restart;
@@ -810,7 +810,7 @@ static void shrink_dentry_list(struct list_head *list)
                 * We found an inuse dentry which was not removed from
                 * the LRU because of laziness during lookup. Do not free it.
                 */
-               if ((int)dentry->d_lockref.count > 0) {
+               if ((int)__lockref_read(&dentry->d_lockref) > 0) {
                        spin_unlock(&dentry->d_lock);
                        if (parent)
                                spin_unlock(&parent->d_lock);
@@ -848,8 +848,8 @@ static void shrink_dentry_list(struct list_head *list)
                dentry = parent;
                while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
                        parent = lock_parent(dentry);
-                       if (dentry->d_lockref.count != 1) {
-                               dentry->d_lockref.count--;
+                       if (__lockref_read(&dentry->d_lockref) != 1) {
+                               __lockref_inc(&dentry->d_lockref);
                                spin_unlock(&dentry->d_lock);
                                if (parent)
                                        spin_unlock(&parent->d_lock);
@@ -889,7 +889,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
         * counts, just remove them from the LRU. Otherwise give them
         * another pass through the LRU.
         */
-       if (dentry->d_lockref.count) {
+       if (__lockref_read(&dentry->d_lockref) > 0) {
                d_lru_isolate(dentry);
                spin_unlock(&dentry->d_lock);
                return LRU_REMOVED;
@@ -1225,7 +1225,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
        } else {
                if (dentry->d_flags & DCACHE_LRU_LIST)
                        d_lru_del(dentry);
-               if (!dentry->d_lockref.count) {
+               if (!__lockref_read(&dentry->d_lockref)) {
                        d_shrink_add(dentry, &data->dispose);
                        data->found++;
                }
@@ -1273,7 +1273,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
                return D_WALK_CONTINUE;
 
        /* root with refcount 1 is fine */
-       if (dentry == _data && dentry->d_lockref.count == 1)
+       if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
                return D_WALK_CONTINUE;
 
        printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
@@ -1282,7 +1282,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
                       dentry->d_inode ?
                       dentry->d_inode->i_ino : 0UL,
                       dentry,
-                      dentry->d_lockref.count,
+                      __lockref_read(&dentry->d_lockref),
                       dentry->d_sb->s_type->name,
                       dentry->d_sb->s_id);
        WARN_ON(1);
@@ -1423,7 +1423,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
        dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
        if (name->len > DNAME_INLINE_LEN-1) {
                size_t size = offsetof(struct external_name, name[1]);
-               struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
+               struct external_name *p = kmalloc(round_up(size + name->len, sizeof(unsigned long)), GFP_KERNEL);
                if (!p) {
                        kmem_cache_free(dentry_cache, dentry); 
                        return NULL;
@@ -1443,7 +1443,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
        smp_wmb();
        dentry->d_name.name = dname;
 
-       dentry->d_lockref.count = 1;
+       __lockref_set(&dentry->d_lockref, 1);
        dentry->d_flags = 0;
        spin_lock_init(&dentry->d_lock);
        seqcount_init(&dentry->d_seq);
@@ -1452,6 +1452,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
        dentry->d_sb = sb;
        dentry->d_op = NULL;
        dentry->d_fsdata = NULL;
+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
+       atomic_set(&dentry->chroot_refcnt, 0);
+#endif
        INIT_HLIST_BL_NODE(&dentry->d_hash);
        INIT_LIST_HEAD(&dentry->d_lru);
        INIT_LIST_HEAD(&dentry->d_subdirs);
@@ -2151,7 +2154,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
                                goto next;
                }
 
-               dentry->d_lockref.count++;
+               __lockref_inc(&dentry->d_lockref);
                found = dentry;
                spin_unlock(&dentry->d_lock);
                break;
@@ -2250,7 +2253,7 @@ again:
        spin_lock(&dentry->d_lock);
        inode = dentry->d_inode;
        isdir = S_ISDIR(inode->i_mode);
-       if (dentry->d_lockref.count == 1) {
+       if (__lockref_read(&dentry->d_lockref) == 1) {
                if (!spin_trylock(&inode->i_lock)) {
                        spin_unlock(&dentry->d_lock);
                        cpu_relax();
@@ -3203,7 +3206,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
 
                if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
                        dentry->d_flags |= DCACHE_GENOCIDE;
-                       dentry->d_lockref.count--;
+                       __lockref_dec(&dentry->d_lockref);
                }
        }
        return D_WALK_CONTINUE;
@@ -3319,7 +3322,8 @@ void __init vfs_caches_init(unsigned long mempages)
        mempages -= reserve;
 
        names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
+                       SLAB_NO_SANITIZE, NULL);
 
        dcache_init();
        inode_init();
index 6f0ce531e2211a42535b0027196f56440b2da63d..780f4f862c7674a1bccd3814a562a47b8ca9eb4d 100644 (file)
@@ -425,7 +425,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
  */
 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
 {
+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
+       return __create_file(name, S_IFDIR | S_IRWXU,
+#else
        return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
+#endif
                                   parent, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(debugfs_create_dir);
index 1686dc2da9fd7627df7fea9035262ca900b5676e..9611c501932258dd6a2ce3f140492fd8885e4bd8 100644 (file)
@@ -664,7 +664,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
        old_fs = get_fs();
        set_fs(get_ds());
        rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
-                                                  (char __user *)lower_buf,
+                                                  (char __force_user *)lower_buf,
                                                   PATH_MAX);
        set_fs(old_fs);
        if (rc < 0)
index e4141f2574953893377939f2362a673a62a609b9..d8263e837adac817cebf1ea944b2d5ff899c5ddf 100644 (file)
@@ -304,7 +304,7 @@ check_list:
                goto out_unlock_msg_ctx;
        i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
        if (msg_ctx->msg) {
-               if (copy_to_user(&buf[i], packet_length, packet_length_size))
+               if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
                        goto out_unlock_msg_ctx;
                i += packet_length_size;
                if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
index ad8798e26be9f6e40d15607d0971bc2143611fcc..5f872c904df45e509486b9ddb236aa54dd7163aa 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
 #include <linux/pipe_fs_i.h>
 #include <linux/oom.h>
 #include <linux/compat.h>
+#include <linux/random.h>
+#include <linux/seq_file.h>
+#include <linux/coredump.h>
+#include <linux/mman.h>
+
+#ifdef CONFIG_PAX_REFCOUNT
+#include <linux/kallsyms.h>
+#include <linux/kdebug.h>
+#endif
+
+#include <trace/events/fs.h>
 
 #include <asm/uaccess.h>
+#include <asm/sections.h>
 #include <asm/mmu_context.h>
 #include <asm/tlb.h>
 
 
 #include <trace/events/sched.h>
 
+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
+{
+       pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
+}
+#endif
+
+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
+EXPORT_SYMBOL(pax_set_initial_flags_func);
+#endif
+
 int suid_dumpable = 0;
 
 static LIST_HEAD(formats);
 static DEFINE_RWLOCK(binfmt_lock);
 
+extern int gr_process_kernel_exec_ban(void);
+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
+
 void __register_binfmt(struct linux_binfmt * fmt, int insert)
 {
        BUG_ON(!fmt);
        if (WARN_ON(!fmt->load_binary))
                return;
        write_lock(&binfmt_lock);
-       insert ? list_add(&fmt->lh, &formats) :
-                list_add_tail(&fmt->lh, &formats);
+       insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
+                pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
        write_unlock(&binfmt_lock);
 }
 
@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
 void unregister_binfmt(struct linux_binfmt * fmt)
 {
        write_lock(&binfmt_lock);
-       list_del(&fmt->lh);
+       pax_list_del((struct list_head *)&fmt->lh);
        write_unlock(&binfmt_lock);
 }
 
@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
 {
        struct page *page;
-       int ret;
 
-#ifdef CONFIG_STACK_GROWSUP
-       if (write) {
-               ret = expand_downwards(bprm->vma, pos);
-               if (ret < 0)
-                       return NULL;
-       }
-#endif
-       ret = get_user_pages(current, bprm->mm, pos,
-                       1, write, 1, &page, NULL);
-       if (ret <= 0)
+       if (0 > expand_downwards(bprm->vma, pos))
+               return NULL;
+       if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
                return NULL;
 
        if (write) {
@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                if (size <= ARG_MAX)
                        return page;
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+               // only allow 512KB for argv+env on suid/sgid binaries
+               // to prevent easy ASLR exhaustion
+               if (((!uid_eq(bprm->cred->euid, current_euid())) ||
+                    (!gid_eq(bprm->cred->egid, current_egid()))) &&
+                   (size > (512 * 1024))) {
+                       put_page(page);
+                       return NULL;
+               }
+#endif
+
                /*
                 * Limit to 1/4-th the stack size for the argv+env strings.
                 * This ensures that:
@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
        vma->vm_end = STACK_TOP_MAX;
        vma->vm_start = vma->vm_end - PAGE_SIZE;
        vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
+#endif
+
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        INIT_LIST_HEAD(&vma->anon_vma_chain);
 
@@ -280,6 +315,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
        arch_bprm_mm_init(mm, vma);
        up_write(&mm->mmap_sem);
        bprm->p = vma->vm_end - sizeof(void *);
+
+#ifdef CONFIG_PAX_RANDUSTACK
+       if (randomize_va_space)
+               bprm->p ^= prandom_u32() & ~PAGE_MASK;
+#endif
+
        return 0;
 err:
        up_write(&mm->mmap_sem);
@@ -396,7 +437,7 @@ struct user_arg_ptr {
        } ptr;
 };
 
-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
 {
        const char __user *native;
 
@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
                compat_uptr_t compat;
 
                if (get_user(compat, argv.ptr.compat + nr))
-                       return ERR_PTR(-EFAULT);
+                       return (const char __force_user *)ERR_PTR(-EFAULT);
 
                return compat_ptr(compat);
        }
 #endif
 
        if (get_user(native, argv.ptr.native + nr))
-               return ERR_PTR(-EFAULT);
+               return (const char __force_user *)ERR_PTR(-EFAULT);
 
        return native;
 }
@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
                        if (!p)
                                break;
 
-                       if (IS_ERR(p))
+                       if (IS_ERR((const char __force_kernel *)p))
                                return -EFAULT;
 
                        if (i >= max)
@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
 
                ret = -EFAULT;
                str = get_user_arg_ptr(argv, argc);
-               if (IS_ERR(str))
+               if (IS_ERR((const char __force_kernel *)str))
                        goto out;
 
                len = strnlen_user(str, MAX_ARG_STRLEN);
@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
        int r;
        mm_segment_t oldfs = get_fs();
        struct user_arg_ptr argv = {
-               .ptr.native = (const char __user *const  __user *)__argv,
+               .ptr.native = (const char __user * const __force_user *)__argv,
        };
 
        set_fs(KERNEL_DS);
@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
        unsigned long new_end = old_end - shift;
        struct mmu_gather tlb;
 
-       BUG_ON(new_start > new_end);
+       if (new_start >= new_end || new_start < mmap_min_addr)
+               return -ENOMEM;
 
        /*
         * ensure there are no vmas between where we want to go
@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
        if (vma != find_vma(mm, new_start))
                return -EFAULT;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       BUG_ON(pax_find_mirror_vma(vma));
+#endif
+
        /*
         * cover the whole range: [new_start, old_end)
         */
@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
        stack_top = arch_align_stack(stack_top);
        stack_top = PAGE_ALIGN(stack_top);
 
-       if (unlikely(stack_top < mmap_min_addr) ||
-           unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
-               return -ENOMEM;
-
        stack_shift = vma->vm_end - stack_top;
 
        bprm->p -= stack_shift;
@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
        bprm->exec -= stack_shift;
 
        down_write(&mm->mmap_sem);
+
+       /* Move stack pages down in memory. */
+       if (stack_shift) {
+               ret = shift_arg_pages(vma, stack_shift);
+               if (ret)
+                       goto out_unlock;
+       }
+
        vm_flags = VM_STACK_FLAGS;
 
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+       if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
+               vm_flags &= ~VM_EXEC;
+
+#ifdef CONFIG_PAX_MPROTECT
+               if (mm->pax_flags & MF_PAX_MPROTECT)
+                       vm_flags &= ~VM_MAYEXEC;
+#endif
+
+       }
+#endif
+
        /*
         * Adjust stack execute permissions; explicitly enable for
         * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
                goto out_unlock;
        BUG_ON(prev != vma);
 
-       /* Move stack pages down in memory. */
-       if (stack_shift) {
-               ret = shift_arg_pages(vma, stack_shift);
-               if (ret)
-                       goto out_unlock;
-       }
-
        /* mprotect_fixup is overkill to remove the temporary stack flags */
        vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
 
@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
 #endif
        current->mm->start_stack = bprm->p;
        ret = expand_stack(vma, stack_base);
+
+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
+       if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
+               unsigned long size;
+               vm_flags_t vm_flags;
+
+               size = STACK_TOP - vma->vm_end;
+               vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
+
+               ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
+
+#ifdef CONFIG_X86
+               if (!ret) {
+                       size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
+                       ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
+               }
+#endif
+
+       }
+#endif
+
        if (ret)
                ret = -EFAULT;
 
@@ -781,8 +857,10 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
        if (err)
                goto exit;
 
-       if (name->name[0] != '\0')
+       if (name->name[0] != '\0') {
                fsnotify_open(file);
+               trace_open_exec(name->name);
+       }
 
 out:
        return file;
@@ -809,7 +887,7 @@ int kernel_read(struct file *file, loff_t offset,
        old_fs = get_fs();
        set_fs(get_ds());
        /* The cast to a user pointer is valid due to the set_fs() */
-       result = vfs_read(file, (void __user *)addr, count, &pos);
+       result = vfs_read(file, (void __force_user *)addr, count, &pos);
        set_fs(old_fs);
        return result;
 }
@@ -854,6 +932,7 @@ static int exec_mmap(struct mm_struct *mm)
        tsk->mm = mm;
        tsk->active_mm = mm;
        activate_mm(active_mm, mm);
+       populate_stack();
        tsk->mm->vmacache_seqnum = 0;
        vmacache_flush(tsk);
        task_unlock(tsk);
@@ -1252,7 +1331,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
        }
        rcu_read_unlock();
 
-       if (p->fs->users > n_fs)
+       if (atomic_read(&p->fs->users) > n_fs)
                bprm->unsafe |= LSM_UNSAFE_SHARE;
        else
                p->fs->in_exec = 1;
@@ -1433,6 +1512,31 @@ static int exec_binprm(struct linux_binprm *bprm)
        return ret;
 }
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+static DEFINE_PER_CPU(u64, exec_counter);
+static int __init init_exec_counters(void)
+{
+       unsigned int cpu;
+
+       for_each_possible_cpu(cpu) {
+               per_cpu(exec_counter, cpu) = (u64)cpu;
+       }
+
+       return 0;
+}
+early_initcall(init_exec_counters);
+static inline void increment_exec_counter(void)
+{
+       BUILD_BUG_ON(NR_CPUS > (1 << 16));
+       current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
+}
+#else
+static inline void increment_exec_counter(void) {}
+#endif
+
+extern void gr_handle_exec_args(struct linux_binprm *bprm,
+                               struct user_arg_ptr argv);
+
 /*
  * sys_execve() executes a new program.
  */
@@ -1441,6 +1545,11 @@ static int do_execveat_common(int fd, struct filename *filename,
                              struct user_arg_ptr envp,
                              int flags)
 {
+#ifdef CONFIG_GRKERNSEC
+       struct file *old_exec_file;
+       struct acl_subject_label *old_acl;
+       struct rlimit old_rlim[RLIM_NLIMITS];
+#endif
        char *pathbuf = NULL;
        struct linux_binprm *bprm;
        struct file *file;
@@ -1450,6 +1559,8 @@ static int do_execveat_common(int fd, struct filename *filename,
        if (IS_ERR(filename))
                return PTR_ERR(filename);
 
+       gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
+
        /*
         * We move the actual failure in case of RLIMIT_NPROC excess from
         * set*uid() to execve() because too many poorly written programs
@@ -1487,6 +1598,11 @@ static int do_execveat_common(int fd, struct filename *filename,
        if (IS_ERR(file))
                goto out_unmark;
 
+       if (gr_ptrace_readexec(file, bprm->unsafe)) {
+               retval = -EPERM;
+               goto out_unmark;
+       }
+
        sched_exec();
 
        bprm->file = file;
@@ -1513,6 +1629,11 @@ static int do_execveat_common(int fd, struct filename *filename,
        }
        bprm->interp = bprm->filename;
 
+       if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
+               retval = -EACCES;
+               goto out_unmark;
+       }
+
        retval = bprm_mm_init(bprm);
        if (retval)
                goto out_unmark;
@@ -1529,24 +1650,70 @@ static int do_execveat_common(int fd, struct filename *filename,
        if (retval < 0)
                goto out;
 
+#ifdef CONFIG_GRKERNSEC
+       old_acl = current->acl;
+       memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
+       old_exec_file = current->exec_file;
+       get_file(file);
+       current->exec_file = file;
+#endif
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+       /* limit suid stack to 8MB
+        * we saved the old limits above and will restore them if this exec fails
+        */
+       if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
+           (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
+               current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
+#endif
+
+       if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
+               retval = -EPERM;
+               goto out_fail;
+       }
+
+       if (!gr_tpe_allow(file)) {
+               retval = -EACCES;
+               goto out_fail;
+       }
+
+       if (gr_check_crash_exec(file)) {
+               retval = -EACCES;
+               goto out_fail;
+       }
+
+       retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
+                                       bprm->unsafe);
+       if (retval < 0)
+               goto out_fail;
+
        retval = copy_strings_kernel(1, &bprm->filename, bprm);
        if (retval < 0)
-               goto out;
+               goto out_fail;
 
        bprm->exec = bprm->p;
        retval = copy_strings(bprm->envc, envp, bprm);
        if (retval < 0)
-               goto out;
+               goto out_fail;
 
        retval = copy_strings(bprm->argc, argv, bprm);
        if (retval < 0)
-               goto out;
+               goto out_fail;
+
+       gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
+
+       gr_handle_exec_args(bprm, argv);
 
        retval = exec_binprm(bprm);
        if (retval < 0)
-               goto out;
+               goto out_fail;
+#ifdef CONFIG_GRKERNSEC
+       if (old_exec_file)
+               fput(old_exec_file);
+#endif
 
        /* execve succeeded */
+
+       increment_exec_counter();
        current->fs->in_exec = 0;
        current->in_execve = 0;
        acct_update_integrals(current);
@@ -1558,6 +1725,14 @@ static int do_execveat_common(int fd, struct filename *filename,
                put_files_struct(displaced);
        return retval;
 
+out_fail:
+#ifdef CONFIG_GRKERNSEC
+       current->acl = old_acl;
+       memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
+       fput(current->exec_file);
+       current->exec_file = old_exec_file;
+#endif
+
 out:
        if (bprm->mm) {
                acct_arg_size(bprm, 0);
@@ -1704,3 +1879,312 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
                                  argv, envp, flags);
 }
 #endif
+
+int pax_check_flags(unsigned long *flags)
+{
+       int retval = 0;
+
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
+       if (*flags & MF_PAX_SEGMEXEC)
+       {
+               *flags &= ~MF_PAX_SEGMEXEC;
+       retval = -EINVAL;
+       }
+#endif
+
+       if ((*flags & MF_PAX_PAGEEXEC)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+           &&  (*flags & MF_PAX_SEGMEXEC)
+#endif
+
+          )
+       {
+               *flags &= ~MF_PAX_PAGEEXEC;
+               retval = -EINVAL;
+       }
+
+       if ((*flags & MF_PAX_MPROTECT)
+
+#ifdef CONFIG_PAX_MPROTECT
+           && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
+#endif
+
+          )
+       {
+               *flags &= ~MF_PAX_MPROTECT;
+       retval = -EINVAL;
+       }
+
+       if ((*flags & MF_PAX_EMUTRAMP)
+
+#ifdef CONFIG_PAX_EMUTRAMP
+           && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
+#endif
+
+          )
+       {
+               *flags &= ~MF_PAX_EMUTRAMP;
+               retval = -EINVAL;
+       }
+
+       return retval;
+}
+
+EXPORT_SYMBOL(pax_check_flags);
+
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+char *pax_get_path(const struct path *path, char *buf, int buflen)
+{
+       char *pathname = d_path(path, buf, buflen);
+
+       if (IS_ERR(pathname))
+               goto toolong;
+
+       pathname = mangle_path(buf, pathname, "\t\n\\");
+       if (!pathname)
+               goto toolong;
+
+       *pathname = 0;
+       return buf;
+
+toolong:
+       return "<path too long>";
+}
+EXPORT_SYMBOL(pax_get_path);
+
+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
+{
+       struct task_struct *tsk = current;
+       struct mm_struct *mm = current->mm;
+       char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
+       char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
+       char *path_exec = NULL;
+       char *path_fault = NULL;
+       unsigned long start = 0UL, end = 0UL, offset = 0UL;
+       siginfo_t info = { };
+
+       if (buffer_exec && buffer_fault) {
+               struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
+
+               down_read(&mm->mmap_sem);
+               vma = mm->mmap;
+               while (vma && (!vma_exec || !vma_fault)) {
+                       if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
+                               vma_exec = vma;
+                       if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
+                               vma_fault = vma;
+                       vma = vma->vm_next;
+               }
+               if (vma_exec)
+                       path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
+               if (vma_fault) {
+                       start = vma_fault->vm_start;
+                       end = vma_fault->vm_end;
+                       offset = vma_fault->vm_pgoff << PAGE_SHIFT;
+                       if (vma_fault->vm_file)
+                               path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
+                       else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
+                               path_fault = "<heap>";
+                       else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
+                               path_fault = "<stack>";
+                       else
+                               path_fault = "<anonymous mapping>";
+               }
+               up_read(&mm->mmap_sem);
+       }
+       if (tsk->signal->curr_ip)
+               printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
+       else
+               printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
+       printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
+                       from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
+       free_page((unsigned long)buffer_exec);
+       free_page((unsigned long)buffer_fault);
+       pax_report_insns(regs, pc, sp);
+       info.si_signo = SIGKILL;
+       info.si_errno = 0;
+       info.si_code = SI_KERNEL;
+       info.si_pid = 0;
+       info.si_uid = 0;
+       do_coredump(&info);
+}
+#endif
+
+#ifdef CONFIG_PAX_REFCOUNT
+void pax_report_refcount_overflow(struct pt_regs *regs)
+{
+       if (current->signal->curr_ip)
+               printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
+                               &current->signal->curr_ip, current->comm, task_pid_nr(current),
+                               from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
+       else
+               printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
+                               from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
+       print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
+       preempt_disable();
+       show_regs(regs);
+       preempt_enable();
+       force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
+}
+#endif
+
+#ifdef CONFIG_PAX_USERCOPY
+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
+static noinline int check_stack_object(const void *obj, unsigned long len)
+{
+       const void * const stack = task_stack_page(current);
+       const void * const stackend = stack + THREAD_SIZE;
+
+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
+       const void *frame = NULL;
+       const void *oldframe;
+#endif
+
+       if (obj + len < obj)
+               return -1;
+
+       if (obj + len <= stack || stackend <= obj)
+               return 0;
+
+       if (obj < stack || stackend < obj + len)
+               return -1;
+
+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
+       oldframe = __builtin_frame_address(1);
+       if (oldframe)
+               frame = __builtin_frame_address(2);
+       /*
+         low ----------------------------------------------> high
+         [saved bp][saved ip][args][local vars][saved bp][saved ip]
+                             ^----------------^
+                         allow copies only within here
+       */
+       while (stack <= frame && frame < stackend) {
+               /* if obj + len extends past the last frame, this
+                  check won't pass and the next frame will be 0,
+                  causing us to bail out and correctly report
+                  the copy as invalid
+               */
+               if (obj + len <= frame)
+                       return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
+               oldframe = frame;
+               frame = *(const void * const *)frame;
+       }
+       return -1;
+#else
+       return 1;
+#endif
+}
+
+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
+{
+       if (current->signal->curr_ip)
+               printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
+                       &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
+       else
+               printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
+                       to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
+       dump_stack();
+       gr_handle_kernel_exploit();
+       do_group_exit(SIGKILL);
+}
+#endif
+
+#ifdef CONFIG_PAX_USERCOPY
+
+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
+{
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+       unsigned long textlow = ktla_ktva((unsigned long)_stext);
+#ifdef CONFIG_MODULES
+       unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
+#else
+       unsigned long texthigh = ktla_ktva((unsigned long)_etext);
+#endif
+
+#else
+       unsigned long textlow = (unsigned long)_stext;
+       unsigned long texthigh = (unsigned long)_etext;
+
+#ifdef CONFIG_X86_64
+       /* check against linear mapping as well */
+       if (high > (unsigned long)__va(__pa(textlow)) &&
+           low < (unsigned long)__va(__pa(texthigh)))
+               return true;
+#endif
+
+#endif
+
+       if (high <= textlow || low >= texthigh)
+               return false;
+       else
+               return true;
+}
+#endif
+
+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
+{
+#ifdef CONFIG_PAX_USERCOPY
+       const char *type;
+#endif
+
+#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_X86_64)
+       unsigned long stackstart = (unsigned long)task_stack_page(current);
+       unsigned long currentsp = (unsigned long)&stackstart;
+       if (unlikely((currentsp < stackstart + 512 ||
+                    currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
+               BUG();
+#endif
+
+#ifndef CONFIG_PAX_USERCOPY_DEBUG
+       if (const_size)
+               return;
+#endif
+
+#ifdef CONFIG_PAX_USERCOPY
+       if (!n)
+               return;
+
+       type = check_heap_object(ptr, n);
+       if (!type) {
+               int ret = check_stack_object(ptr, n);
+               if (ret == 1 || ret == 2)
+                       return;
+               if (ret == 0) {
+                       if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
+                               type = "<kernel text>";
+                       else
+                               return;
+               } else
+                       type = "<process stack>";
+       }
+
+       pax_report_usercopy(ptr, n, to_user, type);
+#endif
+
+}
+EXPORT_SYMBOL(__check_object_size);
+
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+void pax_track_stack(void)
+{
+       unsigned long sp = (unsigned long)&sp;
+       if (sp < current_thread_info()->lowest_stack &&
+           sp >= (unsigned long)task_stack_page(current) + 2 * sizeof(unsigned long))
+               current_thread_info()->lowest_stack = sp;
+       if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
+               BUG();
+}
+EXPORT_SYMBOL(pax_track_stack);
+#endif
+
+#ifdef CONFIG_PAX_SIZE_OVERFLOW
+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
+{
+       printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
+       dump_stack();
+       do_group_exit(SIGKILL);
+}
+EXPORT_SYMBOL(report_size_overflow);
+#endif
index 9f9992b37924a43eb4da2c56ecb9b65f68d628a2..8b5941103cca35793f44e402b609982b68ed9774 100644 (file)
@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
 
        free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
        root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
-       if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
+       if (free_blocks < root_blocks + 1 &&
                !uid_eq(sbi->s_resuid, current_fsuid()) &&
                (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
-                !in_group_p (sbi->s_resgid))) {
+                !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
                return 0;
        }
        return 1;
index ae55fddc26a9d46f5ad7c3c2910150b9557ee235..5e64c2766dff339c369bdd81e91e337019ff4c44 100644 (file)
@@ -268,10 +268,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
 #ifdef CONFIG_EXT2_FS_XATTR
        if (test_opt(sb, XATTR_USER))
                seq_puts(seq, ",user_xattr");
-       if (!test_opt(sb, XATTR_USER) &&
-           (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
+       if (!test_opt(sb, XATTR_USER))
                seq_puts(seq, ",nouser_xattr");
-       }
 #endif
 
 #ifdef CONFIG_EXT2_FS_POSIX_ACL
@@ -850,8 +848,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
        if (def_mount_opts & EXT2_DEFM_UID16)
                set_opt(sbi->s_mount_opt, NO_UID32);
 #ifdef CONFIG_EXT2_FS_XATTR
-       if (def_mount_opts & EXT2_DEFM_XATTR_USER)
-               set_opt(sbi->s_mount_opt, XATTR_USER);
+       /* always enable user xattrs */
+       set_opt(sbi->s_mount_opt, XATTR_USER);
 #endif
 #ifdef CONFIG_EXT2_FS_POSIX_ACL
        if (def_mount_opts & EXT2_DEFM_ACL)
index 91426141c33a32a1bd37bb95fa2414da5bcbad92..97484facee75b5e35ffb55998281ced3ed393dd4 100644 (file)
@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
        struct buffer_head *bh = NULL;
        struct ext2_xattr_entry *entry;
        char *end;
-       size_t rest = buffer_size;
+       size_t rest = buffer_size, total_size = 0;
        int error;
 
        ea_idebug(inode, "buffer=%p, buffer_size=%ld",
@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
                                buffer += size;
                        }
                        rest -= size;
+                       total_size += size;
                }
        }
-       error = buffer_size - rest;  /* total size */
+       error = total_size;
 
 cleanup:
        brelse(bh);
index 158b5d4ce0670f31aff64638274b8ae6849655d4..2432610743f4c6035bda211d128acf3aea169f8e 100644 (file)
@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
 
        free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
        root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
-       if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
+       if (free_blocks < root_blocks + 1 &&
                !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
                (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
-                !in_group_p (sbi->s_resgid))) {
+                !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
                return 0;
        }
        return 1;
index 9b4e7d750d4fdf23571e4d95a816349d78530856..048d025df90e883cdf3e01a6d78a687e092b6ace 100644 (file)
@@ -653,10 +653,8 @@ static int ext3_show_options(struct seq_file *seq, struct dentry *root)
 #ifdef CONFIG_EXT3_FS_XATTR
        if (test_opt(sb, XATTR_USER))
                seq_puts(seq, ",user_xattr");
-       if (!test_opt(sb, XATTR_USER) &&
-           (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
+       if (!test_opt(sb, XATTR_USER))
                seq_puts(seq, ",nouser_xattr");
-       }
 #endif
 #ifdef CONFIG_EXT3_FS_POSIX_ACL
        if (test_opt(sb, POSIX_ACL))
@@ -1758,8 +1756,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
        if (def_mount_opts & EXT3_DEFM_UID16)
                set_opt(sbi->s_mount_opt, NO_UID32);
 #ifdef CONFIG_EXT3_FS_XATTR
-       if (def_mount_opts & EXT3_DEFM_XATTR_USER)
-               set_opt(sbi->s_mount_opt, XATTR_USER);
+       /* always enable user xattrs */
+       set_opt(sbi->s_mount_opt, XATTR_USER);
 #endif
 #ifdef CONFIG_EXT3_FS_POSIX_ACL
        if (def_mount_opts & EXT3_DEFM_ACL)
index c6874be6d58b41f02bcaa3b93cf182845bd85bec..f8a6ae82213cda5fa7aaed03991e382b7ee33374 100644 (file)
@@ -330,7 +330,7 @@ static int
 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
                        char *buffer, size_t buffer_size)
 {
-       size_t rest = buffer_size;
+       size_t rest = buffer_size, total_size = 0;
 
        for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
                const struct xattr_handler *handler =
@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
                                buffer += size;
                        }
                        rest -= size;
+                       total_size += size;
                }
        }
-       return buffer_size - rest;
+       return total_size;
 }
 
 static int
index 83a6f497c4e0e6803345d4b69bde90504ffd02ce..d4e4d03bebaf0ddfb1a9d3d92fed1247f2411272 100644 (file)
@@ -557,8 +557,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
        /* Hm, nope.  Are (enough) root reserved clusters available? */
        if (uid_eq(sbi->s_resuid, current_fsuid()) ||
            (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
-           capable(CAP_SYS_RESOURCE) ||
-           (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
+           (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
+           capable_nolog(CAP_SYS_RESOURCE)) {
 
                if (free_clusters >= (nclusters + dirty_clusters +
                                      resv_clusters))
index a75fba67bb1f197ba83143484a2a93de0b0dabe9..8235fcacaf427cc47d5db735e2a3aec8ecb1d712 100644 (file)
@@ -1274,19 +1274,19 @@ struct ext4_sb_info {
        unsigned long s_mb_last_start;
 
        /* stats for buddy allocator */
-       atomic_t s_bal_reqs;    /* number of reqs with len > 1 */
-       atomic_t s_bal_success; /* we found long enough chunks */
-       atomic_t s_bal_allocated;       /* in blocks */
-       atomic_t s_bal_ex_scanned;      /* total extents scanned */
-       atomic_t s_bal_goals;   /* goal hits */
-       atomic_t s_bal_breaks;  /* too long searches */
-       atomic_t s_bal_2orders; /* 2^order hits */
+       atomic_unchecked_t s_bal_reqs;  /* number of reqs with len > 1 */
+       atomic_unchecked_t s_bal_success;       /* we found long enough chunks */
+       atomic_unchecked_t s_bal_allocated;     /* in blocks */
+       atomic_unchecked_t s_bal_ex_scanned;    /* total extents scanned */
+       atomic_unchecked_t s_bal_goals; /* goal hits */
+       atomic_unchecked_t s_bal_breaks;        /* too long searches */
+       atomic_unchecked_t s_bal_2orders;       /* 2^order hits */
        spinlock_t s_bal_lock;
        unsigned long s_mb_buddies_generated;
        unsigned long long s_mb_generation_time;
-       atomic_t s_mb_lost_chunks;
-       atomic_t s_mb_preallocated;
-       atomic_t s_mb_discarded;
+       atomic_unchecked_t s_mb_lost_chunks;
+       atomic_unchecked_t s_mb_preallocated;
+       atomic_unchecked_t s_mb_discarded;
        atomic_t s_lock_busy;
 
        /* locality groups */
index 8d1e60214ef0a88af4ef88ee82db2b79f29382d4..abf497b5c5a1898fde034c0e1b15b645fc3f3430 100644 (file)
@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
                BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
 
                if (EXT4_SB(sb)->s_mb_stats)
-                       atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
+                       atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
 
                break;
        }
@@ -2211,7 +2211,7 @@ repeat:
                        ac->ac_status = AC_STATUS_CONTINUE;
                        ac->ac_flags |= EXT4_MB_HINT_FIRST;
                        cr = 3;
-                       atomic_inc(&sbi->s_mb_lost_chunks);
+                       atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
                        goto repeat;
                }
        }
@@ -2716,25 +2716,25 @@ int ext4_mb_release(struct super_block *sb)
        if (sbi->s_mb_stats) {
                ext4_msg(sb, KERN_INFO,
                       "mballoc: %u blocks %u reqs (%u success)",
-                               atomic_read(&sbi->s_bal_allocated),
-                               atomic_read(&sbi->s_bal_reqs),
-                               atomic_read(&sbi->s_bal_success));
+                               atomic_read_unchecked(&sbi->s_bal_allocated),
+                               atomic_read_unchecked(&sbi->s_bal_reqs),
+                               atomic_read_unchecked(&sbi->s_bal_success));
                ext4_msg(sb, KERN_INFO,
                      "mballoc: %u extents scanned, %u goal hits, "
                                "%u 2^N hits, %u breaks, %u lost",
-                               atomic_read(&sbi->s_bal_ex_scanned),
-                               atomic_read(&sbi->s_bal_goals),
-                               atomic_read(&sbi->s_bal_2orders),
-                               atomic_read(&sbi->s_bal_breaks),
-                               atomic_read(&sbi->s_mb_lost_chunks));
+                               atomic_read_unchecked(&sbi->s_bal_ex_scanned),
+                               atomic_read_unchecked(&sbi->s_bal_goals),
+                               atomic_read_unchecked(&sbi->s_bal_2orders),
+                               atomic_read_unchecked(&sbi->s_bal_breaks),
+                               atomic_read_unchecked(&sbi->s_mb_lost_chunks));
                ext4_msg(sb, KERN_INFO,
                       "mballoc: %lu generated and it took %Lu",
                                sbi->s_mb_buddies_generated,
                                sbi->s_mb_generation_time);
                ext4_msg(sb, KERN_INFO,
                       "mballoc: %u preallocated, %u discarded",
-                               atomic_read(&sbi->s_mb_preallocated),
-                               atomic_read(&sbi->s_mb_discarded));
+                               atomic_read_unchecked(&sbi->s_mb_preallocated),
+                               atomic_read_unchecked(&sbi->s_mb_discarded));
        }
 
        free_percpu(sbi->s_locality_groups);
@@ -3190,16 +3190,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
 
        if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
-               atomic_inc(&sbi->s_bal_reqs);
-               atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
+               atomic_inc_unchecked(&sbi->s_bal_reqs);
+               atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
                if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
-                       atomic_inc(&sbi->s_bal_success);
-               atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
+                       atomic_inc_unchecked(&sbi->s_bal_success);
+               atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
                if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
                                ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
-                       atomic_inc(&sbi->s_bal_goals);
+                       atomic_inc_unchecked(&sbi->s_bal_goals);
                if (ac->ac_found > sbi->s_mb_max_to_scan)
-                       atomic_inc(&sbi->s_bal_breaks);
+                       atomic_inc_unchecked(&sbi->s_bal_breaks);
        }
 
        if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
@@ -3626,7 +3626,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
        trace_ext4_mb_new_inode_pa(ac, pa);
 
        ext4_mb_use_inode_pa(ac, pa);
-       atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
+       atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
 
        ei = EXT4_I(ac->ac_inode);
        grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
@@ -3686,7 +3686,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
        trace_ext4_mb_new_group_pa(ac, pa);
 
        ext4_mb_use_group_pa(ac, pa);
-       atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
+       atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
 
        grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
        lg = ac->ac_lg;
@@ -3775,7 +3775,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
                 * from the bitmap and continue.
                 */
        }
-       atomic_add(free, &sbi->s_mb_discarded);
+       atomic_add_unchecked(free, &sbi->s_mb_discarded);
 
        return err;
 }
@@ -3793,7 +3793,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
        ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
        BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
        mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
-       atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
+       atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
        trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
 
        return 0;
index 8313ca3324ec96a1c3413d0041817a5f1726e4f4..8a37d08a30e374cb8fa62d066fdab468af85ea00 100644 (file)
@@ -111,7 +111,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
                    const char *function, unsigned int line, const char *msg)
 {
-       __ext4_warning(sb, function, line, msg);
+       __ext4_warning(sb, function, line, "%s", msg);
        __ext4_warning(sb, function, line,
                       "MMP failure info: last update time: %llu, last update "
                       "node: %s, last update device: %s\n",
index 8a8ec6293b195f16623e716342463979427b3156..1b02de57e0603ee3e6b11e4b82a43b8da7721974 100644 (file)
@@ -413,7 +413,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
 
        ext4_debug("mark blocks [%llu/%u] used\n", block, count);
        for (count2 = count; count > 0; count -= count2, block += count2) {
-               ext4_fsblk_t start;
+               ext4_fsblk_t start, diff;
                struct buffer_head *bh;
                ext4_group_t group;
                int err;
@@ -422,10 +422,6 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
                start = ext4_group_first_block_no(sb, group);
                group -= flex_gd->groups[0].group;
 
-               count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
-               if (count2 > count)
-                       count2 = count;
-
                if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
                        BUG_ON(flex_gd->count > 1);
                        continue;
@@ -443,9 +439,15 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
                err = ext4_journal_get_write_access(handle, bh);
                if (err)
                        return err;
+
+               diff = block - start;
+               count2 = EXT4_BLOCKS_PER_GROUP(sb) - diff;
+               if (count2 > count)
+                       count2 = count;
+
                ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
-                          block - start, count2);
-               ext4_set_bits(bh->b_data, block - start, count2);
+                          diff, count2);
+               ext4_set_bits(bh->b_data, diff, count2);
 
                err = ext4_handle_dirty_metadata(handle, NULL, bh);
                if (unlikely(err))
index fc29b2c91bef37c5d75256892b60f5fe1f7854b3..6c8b255fecf7ac733274b569abf1adbbfbf57c1c 100644 (file)
@@ -1252,7 +1252,7 @@ static ext4_fsblk_t get_sb_block(void **data)
 }
 
 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
        "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
 
 #ifdef CONFIG_QUOTA
@@ -2440,7 +2440,7 @@ struct ext4_attr {
                int offset;
                int deprecated_val;
        } u;
-};
+} __do_const;
 
 static int parse_strtoull(const char *buf,
                unsigned long long max, unsigned long long *value)
index 1e09fc77395ce0c7cc20496161f09cf0d6eefdbd..0400dd463b654a6b73287aef648995b315c88435 100644 (file)
@@ -399,7 +399,7 @@ static int
 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
                        char *buffer, size_t buffer_size)
 {
-       size_t rest = buffer_size;
+       size_t rest = buffer_size, total_size = 0;
 
        for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
                const struct xattr_handler *handler =
@@ -416,9 +416,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
                                buffer += size;
                        }
                        rest -= size;
+                       total_size += size;
                }
        }
-       return buffer_size - rest;
+       return total_size;
 }
 
 static int
index ee85cd4e136abbff33409fb018343028d21578e2..9dd0d20677ca0c5e54dbe616099aba9505319d93 100644 (file)
@@ -102,6 +102,10 @@ void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
                int force)
 {
        security_file_set_fowner(filp);
+       if (gr_handle_chroot_fowner(pid, type))
+               return;
+       if (gr_check_protected_task_fowner(pid, type))
+               return;
        f_modown(filp, pid, type, force);
 }
 EXPORT_SYMBOL(__f_setown);
index 999ff5c3cab0edacd585447132180d5c35554e3c..2281df9f11b17425c0ae0c74d6ec26adb32b6645 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/fs_struct.h>
 #include <linux/fsnotify.h>
 #include <linux/personality.h>
+#include <linux/grsecurity.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 #include "mount.h"
@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
        } else
                retval = 0;
        /* copy the mount id */
-       if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
-                        sizeof(*mnt_id)) ||
+       if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
            copy_to_user(ufh, handle,
                         sizeof(struct file_handle) + handle_bytes))
                retval = -EFAULT;
@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
         * the directory. Ideally we would like CAP_DAC_SEARCH.
         * But we don't have that
         */
-       if (!capable(CAP_DAC_READ_SEARCH)) {
+       if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
                retval = -EPERM;
                goto out_err;
        }
@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
                goto out_err;
        }
        /* copy the full handle */
-       if (copy_from_user(handle, ufh,
-                          sizeof(struct file_handle) +
+       *handle = f_handle;
+       if (copy_from_user(&handle->f_handle,
+                          &ufh->f_handle,
                           f_handle.handle_bytes)) {
                retval = -EFAULT;
                goto out_handle;
index ee738ea028fadab5d742445c6e48c151cb53f612..f6c156297c86c6c88100be3be045385fa6d9bea6 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/file.h>
+#include <linux/security.h>
 #include <linux/fdtable.h>
 #include <linux/bitops.h>
 #include <linux/interrupt.h>
@@ -139,7 +140,7 @@ out:
  * Return <0 error code on error; 1 on successful completion.
  * The files->file_lock should be held on entry, and will be held on exit.
  */
-static int expand_fdtable(struct files_struct *files, int nr)
+static int expand_fdtable(struct files_struct *files, unsigned int nr)
        __releases(files->file_lock)
        __acquires(files->file_lock)
 {
@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
  * expanded and execution may have blocked.
  * The files->file_lock should be held on entry, and will be held on exit.
  */
-static int expand_files(struct files_struct *files, int nr)
+static int expand_files(struct files_struct *files, unsigned int nr)
 {
        struct fdtable *fdt;
 
@@ -800,6 +801,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
        if (!file)
                return __close_fd(files, fd);
 
+       gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
        if (fd >= rlimit(RLIMIT_NOFILE))
                return -EBADF;
 
@@ -826,6 +828,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
        if (unlikely(oldfd == newfd))
                return -EINVAL;
 
+       gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
        if (newfd >= rlimit(RLIMIT_NOFILE))
                return -EBADF;
 
@@ -881,6 +884,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
 {
        int err;
+       gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
        if (from >= rlimit(RLIMIT_NOFILE))
                return -EINVAL;
        err = alloc_fd(from, flags);
index 5797d45a78cb29fddcff617a8fc03139ed609369..7d7d79a1ed3872d79bd55c417d73ca6a0342d722 100644 (file)
@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
        int len = dot ? dot - name : strlen(name);
 
        fs = __get_fs_type(name, len);
+#ifdef CONFIG_GRKERNSEC_MODHARDEN
+       if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
+#else
        if (!fs && (request_module("fs-%.*s", len, name) == 0))
+#endif
                fs = __get_fs_type(name, len);
 
        if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
index 7dca743b2ce1c8796155a14c3d3a83023eed3de4..2f2786d64c86df2baf3fc8c63bba34f3741c558f 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/path.h>
 #include <linux/slab.h>
 #include <linux/fs_struct.h>
+#include <linux/grsecurity.h>
 #include "internal.h"
 
 /*
@@ -15,14 +16,18 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
        struct path old_root;
 
        path_get(path);
+       gr_inc_chroot_refcnts(path->dentry, path->mnt);
        spin_lock(&fs->lock);
        write_seqcount_begin(&fs->seq);
        old_root = fs->root;
        fs->root = *path;
+       gr_set_chroot_entries(current, path);
        write_seqcount_end(&fs->seq);
        spin_unlock(&fs->lock);
-       if (old_root.dentry)
+       if (old_root.dentry) {
+               gr_dec_chroot_refcnts(old_root.dentry, old_root.mnt);
                path_put(&old_root);
+       }
 }
 
 /*
@@ -67,6 +72,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
                        int hits = 0;
                        spin_lock(&fs->lock);
                        write_seqcount_begin(&fs->seq);
+                       /* this root replacement is only done by pivot_root,
+                          leave grsec's chroot tagging alone for this task
+                          so that a pivoted root isn't treated as a chroot
+                       */
                        hits += replace_path(&fs->root, old_root, new_root);
                        hits += replace_path(&fs->pwd, old_root, new_root);
                        write_seqcount_end(&fs->seq);
@@ -85,6 +94,7 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
 
 void free_fs_struct(struct fs_struct *fs)
 {
+       gr_dec_chroot_refcnts(fs->root.dentry, fs->root.mnt);
        path_put(&fs->root);
        path_put(&fs->pwd);
        kmem_cache_free(fs_cachep, fs);
@@ -99,7 +109,8 @@ void exit_fs(struct task_struct *tsk)
                task_lock(tsk);
                spin_lock(&fs->lock);
                tsk->fs = NULL;
-               kill = !--fs->users;
+               gr_clear_chroot_entries(tsk);
+               kill = !atomic_dec_return(&fs->users);
                spin_unlock(&fs->lock);
                task_unlock(tsk);
                if (kill)
@@ -112,7 +123,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
        struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
        /* We don't need to lock fs - think why ;-) */
        if (fs) {
-               fs->users = 1;
+               atomic_set(&fs->users, 1);
                fs->in_exec = 0;
                spin_lock_init(&fs->lock);
                seqcount_init(&fs->seq);
@@ -121,6 +132,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
                spin_lock(&old->lock);
                fs->root = old->root;
                path_get(&fs->root);
+               /* instead of calling gr_set_chroot_entries here,
+                  we call it from every caller of this function
+               */
                fs->pwd = old->pwd;
                path_get(&fs->pwd);
                spin_unlock(&old->lock);
@@ -139,8 +153,9 @@ int unshare_fs_struct(void)
 
        task_lock(current);
        spin_lock(&fs->lock);
-       kill = !--fs->users;
+       kill = !atomic_dec_return(&fs->users);
        current->fs = new_fs;
+       gr_set_chroot_entries(current, &new_fs->root);
        spin_unlock(&fs->lock);
        task_unlock(current);
 
@@ -153,13 +168,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
 
 int current_umask(void)
 {
-       return current->fs->umask;
+       return current->fs->umask | gr_acl_umask();
 }
 EXPORT_SYMBOL(current_umask);
 
 /* to be mentioned only in INIT_TASK */
 struct fs_struct init_fs = {
-       .users          = 1,
+       .users          = ATOMIC_INIT(1),
        .lock           = __SPIN_LOCK_UNLOCKED(init_fs.lock),
        .seq            = SEQCNT_ZERO(init_fs.seq),
        .umask          = 0022,
index 89acec742e0bfdb2b1aab30e31991655462f05d9..a575262e91204754aa5f3198a3cd1b996983f1d8 100644 (file)
@@ -19,7 +19,7 @@
 
 struct kmem_cache *fscache_cookie_jar;
 
-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
 
 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
 static int fscache_alloc_object(struct fscache_cache *cache,
@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
               parent ? (char *) parent->def->name : "<no-parent>",
               def->name, netfs_data, enable);
 
-       fscache_stat(&fscache_n_acquires);
+       fscache_stat_unchecked(&fscache_n_acquires);
 
        /* if there's no parent cookie, then we don't create one here either */
        if (!parent) {
-               fscache_stat(&fscache_n_acquires_null);
+               fscache_stat_unchecked(&fscache_n_acquires_null);
                _leave(" [no parent]");
                return NULL;
        }
@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
        /* allocate and initialise a cookie */
        cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
        if (!cookie) {
-               fscache_stat(&fscache_n_acquires_oom);
+               fscache_stat_unchecked(&fscache_n_acquires_oom);
                _leave(" [ENOMEM]");
                return NULL;
        }
@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
 
        switch (cookie->def->type) {
        case FSCACHE_COOKIE_TYPE_INDEX:
-               fscache_stat(&fscache_n_cookie_index);
+               fscache_stat_unchecked(&fscache_n_cookie_index);
                break;
        case FSCACHE_COOKIE_TYPE_DATAFILE:
-               fscache_stat(&fscache_n_cookie_data);
+               fscache_stat_unchecked(&fscache_n_cookie_data);
                break;
        default:
-               fscache_stat(&fscache_n_cookie_special);
+               fscache_stat_unchecked(&fscache_n_cookie_special);
                break;
        }
 
@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
                        } else {
                                atomic_dec(&parent->n_children);
                                __fscache_cookie_put(cookie);
-                               fscache_stat(&fscache_n_acquires_nobufs);
+                               fscache_stat_unchecked(&fscache_n_acquires_nobufs);
                                _leave(" = NULL");
                                return NULL;
                        }
@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
                }
        }
 
-       fscache_stat(&fscache_n_acquires_ok);
+       fscache_stat_unchecked(&fscache_n_acquires_ok);
        _leave(" = %p", cookie);
        return cookie;
 }
@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
        cache = fscache_select_cache_for_object(cookie->parent);
        if (!cache) {
                up_read(&fscache_addremove_sem);
-               fscache_stat(&fscache_n_acquires_no_cache);
+               fscache_stat_unchecked(&fscache_n_acquires_no_cache);
                _leave(" = -ENOMEDIUM [no cache]");
                return -ENOMEDIUM;
        }
@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
        object = cache->ops->alloc_object(cache, cookie);
        fscache_stat_d(&fscache_n_cop_alloc_object);
        if (IS_ERR(object)) {
-               fscache_stat(&fscache_n_object_no_alloc);
+               fscache_stat_unchecked(&fscache_n_object_no_alloc);
                ret = PTR_ERR(object);
                goto error;
        }
 
-       fscache_stat(&fscache_n_object_alloc);
+       fscache_stat_unchecked(&fscache_n_object_alloc);
 
-       object->debug_id = atomic_inc_return(&fscache_object_debug_id);
+       object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
 
        _debug("ALLOC OBJ%x: %s {%lx}",
               object->debug_id, cookie->def->name, object->events);
@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
 
        _enter("{%s}", cookie->def->name);
 
-       fscache_stat(&fscache_n_invalidates);
+       fscache_stat_unchecked(&fscache_n_invalidates);
 
        /* Only permit invalidation of data files.  Invalidating an index will
         * require the caller to release all its attachments to the tree rooted
@@ -476,10 +476,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
 {
        struct fscache_object *object;
 
-       fscache_stat(&fscache_n_updates);
+       fscache_stat_unchecked(&fscache_n_updates);
 
        if (!cookie) {
-               fscache_stat(&fscache_n_updates_null);
+               fscache_stat_unchecked(&fscache_n_updates_null);
                _leave(" [no cookie]");
                return;
        }
@@ -580,12 +580,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
  */
 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
 {
-       fscache_stat(&fscache_n_relinquishes);
+       fscache_stat_unchecked(&fscache_n_relinquishes);
        if (retire)
-               fscache_stat(&fscache_n_relinquishes_retire);
+               fscache_stat_unchecked(&fscache_n_relinquishes_retire);
 
        if (!cookie) {
-               fscache_stat(&fscache_n_relinquishes_null);
+               fscache_stat_unchecked(&fscache_n_relinquishes_null);
                _leave(" [no cookie]");
                return;
        }
@@ -686,7 +686,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
        if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
                goto inconsistent;
 
-       op->debug_id = atomic_inc_return(&fscache_op_debug_id);
+       op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
 
        __fscache_use_cookie(cookie);
        if (fscache_submit_op(object, op) < 0)
index 7872a62ef30c1e019b8cc111143b722394a8b23f..d91b19fd63d80ea50cb09174ddef6603df28a873 100644 (file)
@@ -137,8 +137,8 @@ extern void fscache_operation_gc(struct work_struct *);
 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
 extern int fscache_wait_for_operation_activation(struct fscache_object *,
                                                 struct fscache_operation *,
-                                                atomic_t *,
-                                                atomic_t *,
+                                                atomic_unchecked_t *,
+                                                atomic_unchecked_t *,
                                                 void (*)(struct fscache_operation *));
 extern void fscache_invalidate_writes(struct fscache_cookie *);
 
@@ -157,101 +157,101 @@ extern void fscache_proc_cleanup(void);
  * stats.c
  */
 #ifdef CONFIG_FSCACHE_STATS
-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
-
-extern atomic_t fscache_n_op_pend;
-extern atomic_t fscache_n_op_run;
-extern atomic_t fscache_n_op_enqueue;
-extern atomic_t fscache_n_op_deferred_release;
-extern atomic_t fscache_n_op_release;
-extern atomic_t fscache_n_op_gc;
-extern atomic_t fscache_n_op_cancelled;
-extern atomic_t fscache_n_op_rejected;
-
-extern atomic_t fscache_n_attr_changed;
-extern atomic_t fscache_n_attr_changed_ok;
-extern atomic_t fscache_n_attr_changed_nobufs;
-extern atomic_t fscache_n_attr_changed_nomem;
-extern atomic_t fscache_n_attr_changed_calls;
-
-extern atomic_t fscache_n_allocs;
-extern atomic_t fscache_n_allocs_ok;
-extern atomic_t fscache_n_allocs_wait;
-extern atomic_t fscache_n_allocs_nobufs;
-extern atomic_t fscache_n_allocs_intr;
-extern atomic_t fscache_n_allocs_object_dead;
-extern atomic_t fscache_n_alloc_ops;
-extern atomic_t fscache_n_alloc_op_waits;
-
-extern atomic_t fscache_n_retrievals;
-extern atomic_t fscache_n_retrievals_ok;
-extern atomic_t fscache_n_retrievals_wait;
-extern atomic_t fscache_n_retrievals_nodata;
-extern atomic_t fscache_n_retrievals_nobufs;
-extern atomic_t fscache_n_retrievals_intr;
-extern atomic_t fscache_n_retrievals_nomem;
-extern atomic_t fscache_n_retrievals_object_dead;
-extern atomic_t fscache_n_retrieval_ops;
-extern atomic_t fscache_n_retrieval_op_waits;
-
-extern atomic_t fscache_n_stores;
-extern atomic_t fscache_n_stores_ok;
-extern atomic_t fscache_n_stores_again;
-extern atomic_t fscache_n_stores_nobufs;
-extern atomic_t fscache_n_stores_oom;
-extern atomic_t fscache_n_store_ops;
-extern atomic_t fscache_n_store_calls;
-extern atomic_t fscache_n_store_pages;
-extern atomic_t fscache_n_store_radix_deletes;
-extern atomic_t fscache_n_store_pages_over_limit;
-
-extern atomic_t fscache_n_store_vmscan_not_storing;
-extern atomic_t fscache_n_store_vmscan_gone;
-extern atomic_t fscache_n_store_vmscan_busy;
-extern atomic_t fscache_n_store_vmscan_cancelled;
-extern atomic_t fscache_n_store_vmscan_wait;
-
-extern atomic_t fscache_n_marks;
-extern atomic_t fscache_n_uncaches;
-
-extern atomic_t fscache_n_acquires;
-extern atomic_t fscache_n_acquires_null;
-extern atomic_t fscache_n_acquires_no_cache;
-extern atomic_t fscache_n_acquires_ok;
-extern atomic_t fscache_n_acquires_nobufs;
-extern atomic_t fscache_n_acquires_oom;
-
-extern atomic_t fscache_n_invalidates;
-extern atomic_t fscache_n_invalidates_run;
-
-extern atomic_t fscache_n_updates;
-extern atomic_t fscache_n_updates_null;
-extern atomic_t fscache_n_updates_run;
-
-extern atomic_t fscache_n_relinquishes;
-extern atomic_t fscache_n_relinquishes_null;
-extern atomic_t fscache_n_relinquishes_waitcrt;
-extern atomic_t fscache_n_relinquishes_retire;
-
-extern atomic_t fscache_n_cookie_index;
-extern atomic_t fscache_n_cookie_data;
-extern atomic_t fscache_n_cookie_special;
-
-extern atomic_t fscache_n_object_alloc;
-extern atomic_t fscache_n_object_no_alloc;
-extern atomic_t fscache_n_object_lookups;
-extern atomic_t fscache_n_object_lookups_negative;
-extern atomic_t fscache_n_object_lookups_positive;
-extern atomic_t fscache_n_object_lookups_timed_out;
-extern atomic_t fscache_n_object_created;
-extern atomic_t fscache_n_object_avail;
-extern atomic_t fscache_n_object_dead;
-
-extern atomic_t fscache_n_checkaux_none;
-extern atomic_t fscache_n_checkaux_okay;
-extern atomic_t fscache_n_checkaux_update;
-extern atomic_t fscache_n_checkaux_obsolete;
+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
+
+extern atomic_unchecked_t fscache_n_op_pend;
+extern atomic_unchecked_t fscache_n_op_run;
+extern atomic_unchecked_t fscache_n_op_enqueue;
+extern atomic_unchecked_t fscache_n_op_deferred_release;
+extern atomic_unchecked_t fscache_n_op_release;
+extern atomic_unchecked_t fscache_n_op_gc;
+extern atomic_unchecked_t fscache_n_op_cancelled;
+extern atomic_unchecked_t fscache_n_op_rejected;
+
+extern atomic_unchecked_t fscache_n_attr_changed;
+extern atomic_unchecked_t fscache_n_attr_changed_ok;
+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
+extern atomic_unchecked_t fscache_n_attr_changed_calls;
+
+extern atomic_unchecked_t fscache_n_allocs;
+extern atomic_unchecked_t fscache_n_allocs_ok;
+extern atomic_unchecked_t fscache_n_allocs_wait;
+extern atomic_unchecked_t fscache_n_allocs_nobufs;
+extern atomic_unchecked_t fscache_n_allocs_intr;
+extern atomic_unchecked_t fscache_n_allocs_object_dead;
+extern atomic_unchecked_t fscache_n_alloc_ops;
+extern atomic_unchecked_t fscache_n_alloc_op_waits;
+
+extern atomic_unchecked_t fscache_n_retrievals;
+extern atomic_unchecked_t fscache_n_retrievals_ok;
+extern atomic_unchecked_t fscache_n_retrievals_wait;
+extern atomic_unchecked_t fscache_n_retrievals_nodata;
+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
+extern atomic_unchecked_t fscache_n_retrievals_intr;
+extern atomic_unchecked_t fscache_n_retrievals_nomem;
+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
+extern atomic_unchecked_t fscache_n_retrieval_ops;
+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
+
+extern atomic_unchecked_t fscache_n_stores;
+extern atomic_unchecked_t fscache_n_stores_ok;
+extern atomic_unchecked_t fscache_n_stores_again;
+extern atomic_unchecked_t fscache_n_stores_nobufs;
+extern atomic_unchecked_t fscache_n_stores_oom;
+extern atomic_unchecked_t fscache_n_store_ops;
+extern atomic_unchecked_t fscache_n_store_calls;
+extern atomic_unchecked_t fscache_n_store_pages;
+extern atomic_unchecked_t fscache_n_store_radix_deletes;
+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
+
+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
+
+extern atomic_unchecked_t fscache_n_marks;
+extern atomic_unchecked_t fscache_n_uncaches;
+
+extern atomic_unchecked_t fscache_n_acquires;
+extern atomic_unchecked_t fscache_n_acquires_null;
+extern atomic_unchecked_t fscache_n_acquires_no_cache;
+extern atomic_unchecked_t fscache_n_acquires_ok;
+extern atomic_unchecked_t fscache_n_acquires_nobufs;
+extern atomic_unchecked_t fscache_n_acquires_oom;
+
+extern atomic_unchecked_t fscache_n_invalidates;
+extern atomic_unchecked_t fscache_n_invalidates_run;
+
+extern atomic_unchecked_t fscache_n_updates;
+extern atomic_unchecked_t fscache_n_updates_null;
+extern atomic_unchecked_t fscache_n_updates_run;
+
+extern atomic_unchecked_t fscache_n_relinquishes;
+extern atomic_unchecked_t fscache_n_relinquishes_null;
+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
+extern atomic_unchecked_t fscache_n_relinquishes_retire;
+
+extern atomic_unchecked_t fscache_n_cookie_index;
+extern atomic_unchecked_t fscache_n_cookie_data;
+extern atomic_unchecked_t fscache_n_cookie_special;
+
+extern atomic_unchecked_t fscache_n_object_alloc;
+extern atomic_unchecked_t fscache_n_object_no_alloc;
+extern atomic_unchecked_t fscache_n_object_lookups;
+extern atomic_unchecked_t fscache_n_object_lookups_negative;
+extern atomic_unchecked_t fscache_n_object_lookups_positive;
+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
+extern atomic_unchecked_t fscache_n_object_created;
+extern atomic_unchecked_t fscache_n_object_avail;
+extern atomic_unchecked_t fscache_n_object_dead;
+
+extern atomic_unchecked_t fscache_n_checkaux_none;
+extern atomic_unchecked_t fscache_n_checkaux_okay;
+extern atomic_unchecked_t fscache_n_checkaux_update;
+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
 
 extern atomic_t fscache_n_cop_alloc_object;
 extern atomic_t fscache_n_cop_lookup_object;
@@ -276,6 +276,11 @@ static inline void fscache_stat(atomic_t *stat)
        atomic_inc(stat);
 }
 
+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
+{
+       atomic_inc_unchecked(stat);
+}
+
 static inline void fscache_stat_d(atomic_t *stat)
 {
        atomic_dec(stat);
@@ -288,6 +293,7 @@ extern const struct file_operations fscache_stats_fops;
 
 #define __fscache_stat(stat) (NULL)
 #define fscache_stat(stat) do {} while (0)
+#define fscache_stat_unchecked(stat) do {} while (0)
 #define fscache_stat_d(stat) do {} while (0)
 #endif
 
index da032daf0e0d7562f40e5a050685829af68869ac..0076ce722a4cf6519ff3a4e461080458d2ca7702 100644 (file)
@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
        _debug("LOOKUP \"%s\" in \"%s\"",
               cookie->def->name, object->cache->tag->name);
 
-       fscache_stat(&fscache_n_object_lookups);
+       fscache_stat_unchecked(&fscache_n_object_lookups);
        fscache_stat(&fscache_n_cop_lookup_object);
        ret = object->cache->ops->lookup_object(object);
        fscache_stat_d(&fscache_n_cop_lookup_object);
@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
        if (ret == -ETIMEDOUT) {
                /* probably stuck behind another object, so move this one to
                 * the back of the queue */
-               fscache_stat(&fscache_n_object_lookups_timed_out);
+               fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
                _leave(" [timeout]");
                return NO_TRANSIT;
        }
@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
        _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
 
        if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
-               fscache_stat(&fscache_n_object_lookups_negative);
+               fscache_stat_unchecked(&fscache_n_object_lookups_negative);
 
                /* Allow write requests to begin stacking up and read requests to begin
                 * returning ENODATA.
@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
        /* if we were still looking up, then we must have a positive lookup
         * result, in which case there may be data available */
        if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
-               fscache_stat(&fscache_n_object_lookups_positive);
+               fscache_stat_unchecked(&fscache_n_object_lookups_positive);
 
                /* We do (presumably) have data */
                clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
                clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
                wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
        } else {
-               fscache_stat(&fscache_n_object_created);
+               fscache_stat_unchecked(&fscache_n_object_created);
        }
 
        set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
        fscache_stat_d(&fscache_n_cop_lookup_complete);
 
        fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
-       fscache_stat(&fscache_n_object_avail);
+       fscache_stat_unchecked(&fscache_n_object_avail);
 
        _leave("");
        return transit_to(JUMPSTART_DEPS);
@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
 
        /* this just shifts the object release to the work processor */
        fscache_put_object(object);
-       fscache_stat(&fscache_n_object_dead);
+       fscache_stat_unchecked(&fscache_n_object_dead);
 
        _leave("");
        return transit_to(OBJECT_DEAD);
@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
        enum fscache_checkaux result;
 
        if (!object->cookie->def->check_aux) {
-               fscache_stat(&fscache_n_checkaux_none);
+               fscache_stat_unchecked(&fscache_n_checkaux_none);
                return FSCACHE_CHECKAUX_OKAY;
        }
 
@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
        switch (result) {
                /* entry okay as is */
        case FSCACHE_CHECKAUX_OKAY:
-               fscache_stat(&fscache_n_checkaux_okay);
+               fscache_stat_unchecked(&fscache_n_checkaux_okay);
                break;
 
                /* entry requires update */
        case FSCACHE_CHECKAUX_NEEDS_UPDATE:
-               fscache_stat(&fscache_n_checkaux_update);
+               fscache_stat_unchecked(&fscache_n_checkaux_update);
                break;
 
                /* entry requires deletion */
        case FSCACHE_CHECKAUX_OBSOLETE:
-               fscache_stat(&fscache_n_checkaux_obsolete);
+               fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
                break;
 
        default:
@@ -993,7 +993,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
 {
        const struct fscache_state *s;
 
-       fscache_stat(&fscache_n_invalidates_run);
+       fscache_stat_unchecked(&fscache_n_invalidates_run);
        fscache_stat(&fscache_n_cop_invalidate_object);
        s = _fscache_invalidate_object(object, event);
        fscache_stat_d(&fscache_n_cop_invalidate_object);
@@ -1008,7 +1008,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
 {
        _enter("{OBJ%x},%d", object->debug_id, event);
 
-       fscache_stat(&fscache_n_updates_run);
+       fscache_stat_unchecked(&fscache_n_updates_run);
        fscache_stat(&fscache_n_cop_update_object);
        object->cache->ops->update_object(object);
        fscache_stat_d(&fscache_n_cop_update_object);
index e7b87a0e5185b53ac304d50b46daf14d8149356c..a85d47a74d3373f22d4d26048cc573a6019077c2 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/slab.h>
 #include "internal.h"
 
-atomic_t fscache_op_debug_id;
+atomic_unchecked_t fscache_op_debug_id;
 EXPORT_SYMBOL(fscache_op_debug_id);
 
 /**
@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
        ASSERTCMP(atomic_read(&op->usage), >, 0);
        ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
 
-       fscache_stat(&fscache_n_op_enqueue);
+       fscache_stat_unchecked(&fscache_n_op_enqueue);
        switch (op->flags & FSCACHE_OP_TYPE) {
        case FSCACHE_OP_ASYNC:
                _debug("queue async");
@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
                wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
        if (op->processor)
                fscache_enqueue_operation(op);
-       fscache_stat(&fscache_n_op_run);
+       fscache_stat_unchecked(&fscache_n_op_run);
 }
 
 /*
@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
                if (object->n_in_progress > 0) {
                        atomic_inc(&op->usage);
                        list_add_tail(&op->pend_link, &object->pending_ops);
-                       fscache_stat(&fscache_n_op_pend);
+                       fscache_stat_unchecked(&fscache_n_op_pend);
                } else if (!list_empty(&object->pending_ops)) {
                        atomic_inc(&op->usage);
                        list_add_tail(&op->pend_link, &object->pending_ops);
-                       fscache_stat(&fscache_n_op_pend);
+                       fscache_stat_unchecked(&fscache_n_op_pend);
                        fscache_start_operations(object);
                } else {
                        ASSERTCMP(object->n_in_progress, ==, 0);
@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
                object->n_exclusive++;  /* reads and writes must wait */
                atomic_inc(&op->usage);
                list_add_tail(&op->pend_link, &object->pending_ops);
-               fscache_stat(&fscache_n_op_pend);
+               fscache_stat_unchecked(&fscache_n_op_pend);
                ret = 0;
        } else {
                /* If we're in any other state, there must have been an I/O
@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
                if (object->n_exclusive > 0) {
                        atomic_inc(&op->usage);
                        list_add_tail(&op->pend_link, &object->pending_ops);
-                       fscache_stat(&fscache_n_op_pend);
+                       fscache_stat_unchecked(&fscache_n_op_pend);
                } else if (!list_empty(&object->pending_ops)) {
                        atomic_inc(&op->usage);
                        list_add_tail(&op->pend_link, &object->pending_ops);
-                       fscache_stat(&fscache_n_op_pend);
+                       fscache_stat_unchecked(&fscache_n_op_pend);
                        fscache_start_operations(object);
                } else {
                        ASSERTCMP(object->n_exclusive, ==, 0);
@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
                object->n_ops++;
                atomic_inc(&op->usage);
                list_add_tail(&op->pend_link, &object->pending_ops);
-               fscache_stat(&fscache_n_op_pend);
+               fscache_stat_unchecked(&fscache_n_op_pend);
                ret = 0;
        } else if (fscache_object_is_dying(object)) {
-               fscache_stat(&fscache_n_op_rejected);
+               fscache_stat_unchecked(&fscache_n_op_rejected);
                op->state = FSCACHE_OP_ST_CANCELLED;
                ret = -ENOBUFS;
        } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
        ret = -EBUSY;
        if (op->state == FSCACHE_OP_ST_PENDING) {
                ASSERT(!list_empty(&op->pend_link));
-               fscache_stat(&fscache_n_op_cancelled);
+               fscache_stat_unchecked(&fscache_n_op_cancelled);
                list_del_init(&op->pend_link);
                if (do_cancel)
                        do_cancel(op);
@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
        while (!list_empty(&object->pending_ops)) {
                op = list_entry(object->pending_ops.next,
                                struct fscache_operation, pend_link);
-               fscache_stat(&fscache_n_op_cancelled);
+               fscache_stat_unchecked(&fscache_n_op_cancelled);
                list_del_init(&op->pend_link);
 
                ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
                    op->state, ==, FSCACHE_OP_ST_CANCELLED);
        op->state = FSCACHE_OP_ST_DEAD;
 
-       fscache_stat(&fscache_n_op_release);
+       fscache_stat_unchecked(&fscache_n_op_release);
 
        if (op->release) {
                op->release(op);
@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
         * lock, and defer it otherwise */
        if (!spin_trylock(&object->lock)) {
                _debug("defer put");
-               fscache_stat(&fscache_n_op_deferred_release);
+               fscache_stat_unchecked(&fscache_n_op_deferred_release);
 
                cache = object->cache;
                spin_lock(&cache->op_gc_list_lock);
@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
 
                _debug("GC DEFERRED REL OBJ%x OP%x",
                       object->debug_id, op->debug_id);
-               fscache_stat(&fscache_n_op_gc);
+               fscache_stat_unchecked(&fscache_n_op_gc);
 
                ASSERTCMP(atomic_read(&op->usage), ==, 0);
                ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
index de33b3fccca650da99ee850597d5aef437b3a6fe..8be4d29656ecc353d609b46063b7f43d3e5b3416 100644 (file)
@@ -74,7 +74,7 @@ try_again:
        val = radix_tree_lookup(&cookie->stores, page->index);
        if (!val) {
                rcu_read_unlock();
-               fscache_stat(&fscache_n_store_vmscan_not_storing);
+               fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
                __fscache_uncache_page(cookie, page);
                return true;
        }
@@ -104,11 +104,11 @@ try_again:
        spin_unlock(&cookie->stores_lock);
 
        if (xpage) {
-               fscache_stat(&fscache_n_store_vmscan_cancelled);
-               fscache_stat(&fscache_n_store_radix_deletes);
+               fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
+               fscache_stat_unchecked(&fscache_n_store_radix_deletes);
                ASSERTCMP(xpage, ==, page);
        } else {
-               fscache_stat(&fscache_n_store_vmscan_gone);
+               fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
        }
 
        wake_up_bit(&cookie->flags, 0);
@@ -123,11 +123,11 @@ page_busy:
         * sleeping on memory allocation, so we may need to impose a timeout
         * too. */
        if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
-               fscache_stat(&fscache_n_store_vmscan_busy);
+               fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
                return false;
        }
 
-       fscache_stat(&fscache_n_store_vmscan_wait);
+       fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
        if (!release_page_wait_timeout(cookie, page))
                _debug("fscache writeout timeout page: %p{%lx}",
                        page, page->index);
@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
                                     FSCACHE_COOKIE_STORING_TAG);
                if (!radix_tree_tag_get(&cookie->stores, page->index,
                                        FSCACHE_COOKIE_PENDING_TAG)) {
-                       fscache_stat(&fscache_n_store_radix_deletes);
+                       fscache_stat_unchecked(&fscache_n_store_radix_deletes);
                        xpage = radix_tree_delete(&cookie->stores, page->index);
                }
                spin_unlock(&cookie->stores_lock);
@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
 
        _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
 
-       fscache_stat(&fscache_n_attr_changed_calls);
+       fscache_stat_unchecked(&fscache_n_attr_changed_calls);
 
        if (fscache_object_is_active(object)) {
                fscache_stat(&fscache_n_cop_attr_changed);
@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
 
        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
 
-       fscache_stat(&fscache_n_attr_changed);
+       fscache_stat_unchecked(&fscache_n_attr_changed);
 
        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op) {
-               fscache_stat(&fscache_n_attr_changed_nomem);
+               fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }
@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
        if (fscache_submit_exclusive_op(object, op) < 0)
                goto nobufs_dec;
        spin_unlock(&cookie->lock);
-       fscache_stat(&fscache_n_attr_changed_ok);
+       fscache_stat_unchecked(&fscache_n_attr_changed_ok);
        fscache_put_operation(op);
        _leave(" = 0");
        return 0;
@@ -242,7 +242,7 @@ nobufs:
        kfree(op);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
-       fscache_stat(&fscache_n_attr_changed_nobufs);
+       fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
        _leave(" = %d", -ENOBUFS);
        return -ENOBUFS;
 }
@@ -281,7 +281,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
        /* allocate a retrieval operation and attempt to submit it */
        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op) {
-               fscache_stat(&fscache_n_retrievals_nomem);
+               fscache_stat_unchecked(&fscache_n_retrievals_nomem);
                return NULL;
        }
 
@@ -311,12 +311,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
                return 0;
        }
 
-       fscache_stat(&fscache_n_retrievals_wait);
+       fscache_stat_unchecked(&fscache_n_retrievals_wait);
 
        jif = jiffies;
        if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
                        TASK_INTERRUPTIBLE) != 0) {
-               fscache_stat(&fscache_n_retrievals_intr);
+               fscache_stat_unchecked(&fscache_n_retrievals_intr);
                _leave(" = -ERESTARTSYS");
                return -ERESTARTSYS;
        }
@@ -345,8 +345,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
  */
 int fscache_wait_for_operation_activation(struct fscache_object *object,
                                          struct fscache_operation *op,
-                                         atomic_t *stat_op_waits,
-                                         atomic_t *stat_object_dead,
+                                         atomic_unchecked_t *stat_op_waits,
+                                         atomic_unchecked_t *stat_object_dead,
                                          void (*do_cancel)(struct fscache_operation *))
 {
        int ret;
@@ -356,7 +356,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
 
        _debug(">>> WT");
        if (stat_op_waits)
-               fscache_stat(stat_op_waits);
+               fscache_stat_unchecked(stat_op_waits);
        if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
                        TASK_INTERRUPTIBLE) != 0) {
                ret = fscache_cancel_op(op, do_cancel);
@@ -373,7 +373,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
 check_if_dead:
        if (op->state == FSCACHE_OP_ST_CANCELLED) {
                if (stat_object_dead)
-                       fscache_stat(stat_object_dead);
+                       fscache_stat_unchecked(stat_object_dead);
                _leave(" = -ENOBUFS [cancelled]");
                return -ENOBUFS;
        }
@@ -381,7 +381,7 @@ check_if_dead:
                pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
                fscache_cancel_op(op, do_cancel);
                if (stat_object_dead)
-                       fscache_stat(stat_object_dead);
+                       fscache_stat_unchecked(stat_object_dead);
                return -ENOBUFS;
        }
        return 0;
@@ -409,7 +409,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
 
        _enter("%p,%p,,,", cookie, page);
 
-       fscache_stat(&fscache_n_retrievals);
+       fscache_stat_unchecked(&fscache_n_retrievals);
 
        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
@@ -451,7 +451,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);
 
-       fscache_stat(&fscache_n_retrieval_ops);
+       fscache_stat_unchecked(&fscache_n_retrieval_ops);
 
        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
@@ -482,15 +482,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
 
 error:
        if (ret == -ENOMEM)
-               fscache_stat(&fscache_n_retrievals_nomem);
+               fscache_stat_unchecked(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
-               fscache_stat(&fscache_n_retrievals_intr);
+               fscache_stat_unchecked(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
-               fscache_stat(&fscache_n_retrievals_nodata);
+               fscache_stat_unchecked(&fscache_n_retrievals_nodata);
        else if (ret < 0)
-               fscache_stat(&fscache_n_retrievals_nobufs);
+               fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
        else
-               fscache_stat(&fscache_n_retrievals_ok);
+               fscache_stat_unchecked(&fscache_n_retrievals_ok);
 
        fscache_put_retrieval(op);
        _leave(" = %d", ret);
@@ -505,7 +505,7 @@ nobufs_unlock:
                __fscache_wake_unused_cookie(cookie);
        kfree(op);
 nobufs:
-       fscache_stat(&fscache_n_retrievals_nobufs);
+       fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
 }
@@ -544,7 +544,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
 
        _enter("%p,,%d,,,", cookie, *nr_pages);
 
-       fscache_stat(&fscache_n_retrievals);
+       fscache_stat_unchecked(&fscache_n_retrievals);
 
        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
@@ -582,7 +582,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);
 
-       fscache_stat(&fscache_n_retrieval_ops);
+       fscache_stat_unchecked(&fscache_n_retrieval_ops);
 
        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
@@ -613,15 +613,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
 
 error:
        if (ret == -ENOMEM)
-               fscache_stat(&fscache_n_retrievals_nomem);
+               fscache_stat_unchecked(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
-               fscache_stat(&fscache_n_retrievals_intr);
+               fscache_stat_unchecked(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
-               fscache_stat(&fscache_n_retrievals_nodata);
+               fscache_stat_unchecked(&fscache_n_retrievals_nodata);
        else if (ret < 0)
-               fscache_stat(&fscache_n_retrievals_nobufs);
+               fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
        else
-               fscache_stat(&fscache_n_retrievals_ok);
+               fscache_stat_unchecked(&fscache_n_retrievals_ok);
 
        fscache_put_retrieval(op);
        _leave(" = %d", ret);
@@ -636,7 +636,7 @@ nobufs_unlock:
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
 nobufs:
-       fscache_stat(&fscache_n_retrievals_nobufs);
+       fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
 }
@@ -661,7 +661,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
 
        _enter("%p,%p,,,", cookie, page);
 
-       fscache_stat(&fscache_n_allocs);
+       fscache_stat_unchecked(&fscache_n_allocs);
 
        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
@@ -695,7 +695,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);
 
-       fscache_stat(&fscache_n_alloc_ops);
+       fscache_stat_unchecked(&fscache_n_alloc_ops);
 
        ret = fscache_wait_for_operation_activation(
                object, &op->op,
@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
 
 error:
        if (ret == -ERESTARTSYS)
-               fscache_stat(&fscache_n_allocs_intr);
+               fscache_stat_unchecked(&fscache_n_allocs_intr);
        else if (ret < 0)
-               fscache_stat(&fscache_n_allocs_nobufs);
+               fscache_stat_unchecked(&fscache_n_allocs_nobufs);
        else
-               fscache_stat(&fscache_n_allocs_ok);
+               fscache_stat_unchecked(&fscache_n_allocs_ok);
 
        fscache_put_retrieval(op);
        _leave(" = %d", ret);
@@ -730,7 +730,7 @@ nobufs_unlock:
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
 nobufs:
-       fscache_stat(&fscache_n_allocs_nobufs);
+       fscache_stat_unchecked(&fscache_n_allocs_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
 }
@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
 
        spin_lock(&cookie->stores_lock);
 
-       fscache_stat(&fscache_n_store_calls);
+       fscache_stat_unchecked(&fscache_n_store_calls);
 
        /* find a page to store */
        page = NULL;
@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
        page = results[0];
        _debug("gang %d [%lx]", n, page->index);
        if (page->index > op->store_limit) {
-               fscache_stat(&fscache_n_store_pages_over_limit);
+               fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
                goto superseded;
        }
 
@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
 
-       fscache_stat(&fscache_n_store_pages);
+       fscache_stat_unchecked(&fscache_n_store_pages);
        fscache_stat(&fscache_n_cop_write_page);
        ret = object->cache->ops->write_page(op, page);
        fscache_stat_d(&fscache_n_cop_write_page);
@@ -933,7 +933,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERT(PageFsCache(page));
 
-       fscache_stat(&fscache_n_stores);
+       fscache_stat_unchecked(&fscache_n_stores);
 
        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
@@ -992,7 +992,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
 
-       op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
+       op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
        op->store_limit = object->store_limit;
 
        __fscache_use_cookie(cookie);
@@ -1001,8 +1001,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
-       fscache_stat(&fscache_n_store_ops);
-       fscache_stat(&fscache_n_stores_ok);
+       fscache_stat_unchecked(&fscache_n_store_ops);
+       fscache_stat_unchecked(&fscache_n_stores_ok);
 
        /* the work queue now carries its own ref on the object */
        fscache_put_operation(&op->op);
@@ -1010,14 +1010,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
        return 0;
 
 already_queued:
-       fscache_stat(&fscache_n_stores_again);
+       fscache_stat_unchecked(&fscache_n_stores_again);
 already_pending:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
-       fscache_stat(&fscache_n_stores_ok);
+       fscache_stat_unchecked(&fscache_n_stores_ok);
        _leave(" = 0");
        return 0;
 
@@ -1039,14 +1039,14 @@ nobufs:
        kfree(op);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
-       fscache_stat(&fscache_n_stores_nobufs);
+       fscache_stat_unchecked(&fscache_n_stores_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
 
 nomem_free:
        kfree(op);
 nomem:
-       fscache_stat(&fscache_n_stores_oom);
+       fscache_stat_unchecked(&fscache_n_stores_oom);
        _leave(" = -ENOMEM");
        return -ENOMEM;
 }
@@ -1064,7 +1064,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);
 
-       fscache_stat(&fscache_n_uncaches);
+       fscache_stat_unchecked(&fscache_n_uncaches);
 
        /* cache withdrawal may beat us to it */
        if (!PageFsCache(page))
@@ -1115,7 +1115,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
        struct fscache_cookie *cookie = op->op.object->cookie;
 
 #ifdef CONFIG_FSCACHE_STATS
-       atomic_inc(&fscache_n_marks);
+       atomic_inc_unchecked(&fscache_n_marks);
 #endif
 
        _debug("- mark %p{%lx}", page, page->index);
index 40d13c70ef518e492565ae59a86ee758244cdd65..ddf52b9a70a1ad395f3d4a905e1eaf4894d4d8db 100644 (file)
 /*
  * operation counters
  */
-atomic_t fscache_n_op_pend;
-atomic_t fscache_n_op_run;
-atomic_t fscache_n_op_enqueue;
-atomic_t fscache_n_op_requeue;
-atomic_t fscache_n_op_deferred_release;
-atomic_t fscache_n_op_release;
-atomic_t fscache_n_op_gc;
-atomic_t fscache_n_op_cancelled;
-atomic_t fscache_n_op_rejected;
-
-atomic_t fscache_n_attr_changed;
-atomic_t fscache_n_attr_changed_ok;
-atomic_t fscache_n_attr_changed_nobufs;
-atomic_t fscache_n_attr_changed_nomem;
-atomic_t fscache_n_attr_changed_calls;
-
-atomic_t fscache_n_allocs;
-atomic_t fscache_n_allocs_ok;
-atomic_t fscache_n_allocs_wait;
-atomic_t fscache_n_allocs_nobufs;
-atomic_t fscache_n_allocs_intr;
-atomic_t fscache_n_allocs_object_dead;
-atomic_t fscache_n_alloc_ops;
-atomic_t fscache_n_alloc_op_waits;
-
-atomic_t fscache_n_retrievals;
-atomic_t fscache_n_retrievals_ok;
-atomic_t fscache_n_retrievals_wait;
-atomic_t fscache_n_retrievals_nodata;
-atomic_t fscache_n_retrievals_nobufs;
-atomic_t fscache_n_retrievals_intr;
-atomic_t fscache_n_retrievals_nomem;
-atomic_t fscache_n_retrievals_object_dead;
-atomic_t fscache_n_retrieval_ops;
-atomic_t fscache_n_retrieval_op_waits;
-
-atomic_t fscache_n_stores;
-atomic_t fscache_n_stores_ok;
-atomic_t fscache_n_stores_again;
-atomic_t fscache_n_stores_nobufs;
-atomic_t fscache_n_stores_oom;
-atomic_t fscache_n_store_ops;
-atomic_t fscache_n_store_calls;
-atomic_t fscache_n_store_pages;
-atomic_t fscache_n_store_radix_deletes;
-atomic_t fscache_n_store_pages_over_limit;
-
-atomic_t fscache_n_store_vmscan_not_storing;
-atomic_t fscache_n_store_vmscan_gone;
-atomic_t fscache_n_store_vmscan_busy;
-atomic_t fscache_n_store_vmscan_cancelled;
-atomic_t fscache_n_store_vmscan_wait;
-
-atomic_t fscache_n_marks;
-atomic_t fscache_n_uncaches;
-
-atomic_t fscache_n_acquires;
-atomic_t fscache_n_acquires_null;
-atomic_t fscache_n_acquires_no_cache;
-atomic_t fscache_n_acquires_ok;
-atomic_t fscache_n_acquires_nobufs;
-atomic_t fscache_n_acquires_oom;
-
-atomic_t fscache_n_invalidates;
-atomic_t fscache_n_invalidates_run;
-
-atomic_t fscache_n_updates;
-atomic_t fscache_n_updates_null;
-atomic_t fscache_n_updates_run;
-
-atomic_t fscache_n_relinquishes;
-atomic_t fscache_n_relinquishes_null;
-atomic_t fscache_n_relinquishes_waitcrt;
-atomic_t fscache_n_relinquishes_retire;
-
-atomic_t fscache_n_cookie_index;
-atomic_t fscache_n_cookie_data;
-atomic_t fscache_n_cookie_special;
-
-atomic_t fscache_n_object_alloc;
-atomic_t fscache_n_object_no_alloc;
-atomic_t fscache_n_object_lookups;
-atomic_t fscache_n_object_lookups_negative;
-atomic_t fscache_n_object_lookups_positive;
-atomic_t fscache_n_object_lookups_timed_out;
-atomic_t fscache_n_object_created;
-atomic_t fscache_n_object_avail;
-atomic_t fscache_n_object_dead;
-
-atomic_t fscache_n_checkaux_none;
-atomic_t fscache_n_checkaux_okay;
-atomic_t fscache_n_checkaux_update;
-atomic_t fscache_n_checkaux_obsolete;
+atomic_unchecked_t fscache_n_op_pend;
+atomic_unchecked_t fscache_n_op_run;
+atomic_unchecked_t fscache_n_op_enqueue;
+atomic_unchecked_t fscache_n_op_requeue;
+atomic_unchecked_t fscache_n_op_deferred_release;
+atomic_unchecked_t fscache_n_op_release;
+atomic_unchecked_t fscache_n_op_gc;
+atomic_unchecked_t fscache_n_op_cancelled;
+atomic_unchecked_t fscache_n_op_rejected;
+
+atomic_unchecked_t fscache_n_attr_changed;
+atomic_unchecked_t fscache_n_attr_changed_ok;
+atomic_unchecked_t fscache_n_attr_changed_nobufs;
+atomic_unchecked_t fscache_n_attr_changed_nomem;
+atomic_unchecked_t fscache_n_attr_changed_calls;
+
+atomic_unchecked_t fscache_n_allocs;
+atomic_unchecked_t fscache_n_allocs_ok;
+atomic_unchecked_t fscache_n_allocs_wait;
+atomic_unchecked_t fscache_n_allocs_nobufs;
+atomic_unchecked_t fscache_n_allocs_intr;
+atomic_unchecked_t fscache_n_allocs_object_dead;
+atomic_unchecked_t fscache_n_alloc_ops;
+atomic_unchecked_t fscache_n_alloc_op_waits;
+
+atomic_unchecked_t fscache_n_retrievals;
+atomic_unchecked_t fscache_n_retrievals_ok;
+atomic_unchecked_t fscache_n_retrievals_wait;
+atomic_unchecked_t fscache_n_retrievals_nodata;
+atomic_unchecked_t fscache_n_retrievals_nobufs;
+atomic_unchecked_t fscache_n_retrievals_intr;
+atomic_unchecked_t fscache_n_retrievals_nomem;
+atomic_unchecked_t fscache_n_retrievals_object_dead;
+atomic_unchecked_t fscache_n_retrieval_ops;
+atomic_unchecked_t fscache_n_retrieval_op_waits;
+
+atomic_unchecked_t fscache_n_stores;
+atomic_unchecked_t fscache_n_stores_ok;
+atomic_unchecked_t fscache_n_stores_again;
+atomic_unchecked_t fscache_n_stores_nobufs;
+atomic_unchecked_t fscache_n_stores_oom;
+atomic_unchecked_t fscache_n_store_ops;
+atomic_unchecked_t fscache_n_store_calls;
+atomic_unchecked_t fscache_n_store_pages;
+atomic_unchecked_t fscache_n_store_radix_deletes;
+atomic_unchecked_t fscache_n_store_pages_over_limit;
+
+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
+atomic_unchecked_t fscache_n_store_vmscan_gone;
+atomic_unchecked_t fscache_n_store_vmscan_busy;
+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
+atomic_unchecked_t fscache_n_store_vmscan_wait;
+
+atomic_unchecked_t fscache_n_marks;
+atomic_unchecked_t fscache_n_uncaches;
+
+atomic_unchecked_t fscache_n_acquires;
+atomic_unchecked_t fscache_n_acquires_null;
+atomic_unchecked_t fscache_n_acquires_no_cache;
+atomic_unchecked_t fscache_n_acquires_ok;
+atomic_unchecked_t fscache_n_acquires_nobufs;
+atomic_unchecked_t fscache_n_acquires_oom;
+
+atomic_unchecked_t fscache_n_invalidates;
+atomic_unchecked_t fscache_n_invalidates_run;
+
+atomic_unchecked_t fscache_n_updates;
+atomic_unchecked_t fscache_n_updates_null;
+atomic_unchecked_t fscache_n_updates_run;
+
+atomic_unchecked_t fscache_n_relinquishes;
+atomic_unchecked_t fscache_n_relinquishes_null;
+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
+atomic_unchecked_t fscache_n_relinquishes_retire;
+
+atomic_unchecked_t fscache_n_cookie_index;
+atomic_unchecked_t fscache_n_cookie_data;
+atomic_unchecked_t fscache_n_cookie_special;
+
+atomic_unchecked_t fscache_n_object_alloc;
+atomic_unchecked_t fscache_n_object_no_alloc;
+atomic_unchecked_t fscache_n_object_lookups;
+atomic_unchecked_t fscache_n_object_lookups_negative;
+atomic_unchecked_t fscache_n_object_lookups_positive;
+atomic_unchecked_t fscache_n_object_lookups_timed_out;
+atomic_unchecked_t fscache_n_object_created;
+atomic_unchecked_t fscache_n_object_avail;
+atomic_unchecked_t fscache_n_object_dead;
+
+atomic_unchecked_t fscache_n_checkaux_none;
+atomic_unchecked_t fscache_n_checkaux_okay;
+atomic_unchecked_t fscache_n_checkaux_update;
+atomic_unchecked_t fscache_n_checkaux_obsolete;
 
 atomic_t fscache_n_cop_alloc_object;
 atomic_t fscache_n_cop_lookup_object;
@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
        seq_puts(m, "FS-Cache statistics\n");
 
        seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
-                  atomic_read(&fscache_n_cookie_index),
-                  atomic_read(&fscache_n_cookie_data),
-                  atomic_read(&fscache_n_cookie_special));
+                  atomic_read_unchecked(&fscache_n_cookie_index),
+                  atomic_read_unchecked(&fscache_n_cookie_data),
+                  atomic_read_unchecked(&fscache_n_cookie_special));
 
        seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
-                  atomic_read(&fscache_n_object_alloc),
-                  atomic_read(&fscache_n_object_no_alloc),
-                  atomic_read(&fscache_n_object_avail),
-                  atomic_read(&fscache_n_object_dead));
+                  atomic_read_unchecked(&fscache_n_object_alloc),
+                  atomic_read_unchecked(&fscache_n_object_no_alloc),
+                  atomic_read_unchecked(&fscache_n_object_avail),
+                  atomic_read_unchecked(&fscache_n_object_dead));
        seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
-                  atomic_read(&fscache_n_checkaux_none),
-                  atomic_read(&fscache_n_checkaux_okay),
-                  atomic_read(&fscache_n_checkaux_update),
-                  atomic_read(&fscache_n_checkaux_obsolete));
+                  atomic_read_unchecked(&fscache_n_checkaux_none),
+                  atomic_read_unchecked(&fscache_n_checkaux_okay),
+                  atomic_read_unchecked(&fscache_n_checkaux_update),
+                  atomic_read_unchecked(&fscache_n_checkaux_obsolete));
 
        seq_printf(m, "Pages  : mrk=%u unc=%u\n",
-                  atomic_read(&fscache_n_marks),
-                  atomic_read(&fscache_n_uncaches));
+                  atomic_read_unchecked(&fscache_n_marks),
+                  atomic_read_unchecked(&fscache_n_uncaches));
 
        seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
                   " oom=%u\n",
-                  atomic_read(&fscache_n_acquires),
-                  atomic_read(&fscache_n_acquires_null),
-                  atomic_read(&fscache_n_acquires_no_cache),
-                  atomic_read(&fscache_n_acquires_ok),
-                  atomic_read(&fscache_n_acquires_nobufs),
-                  atomic_read(&fscache_n_acquires_oom));
+                  atomic_read_unchecked(&fscache_n_acquires),
+                  atomic_read_unchecked(&fscache_n_acquires_null),
+                  atomic_read_unchecked(&fscache_n_acquires_no_cache),
+                  atomic_read_unchecked(&fscache_n_acquires_ok),
+                  atomic_read_unchecked(&fscache_n_acquires_nobufs),
+                  atomic_read_unchecked(&fscache_n_acquires_oom));
 
        seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
-                  atomic_read(&fscache_n_object_lookups),
-                  atomic_read(&fscache_n_object_lookups_negative),
-                  atomic_read(&fscache_n_object_lookups_positive),
-                  atomic_read(&fscache_n_object_created),
-                  atomic_read(&fscache_n_object_lookups_timed_out));
+                  atomic_read_unchecked(&fscache_n_object_lookups),
+                  atomic_read_unchecked(&fscache_n_object_lookups_negative),
+                  atomic_read_unchecked(&fscache_n_object_lookups_positive),
+                  atomic_read_unchecked(&fscache_n_object_created),
+                  atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
 
        seq_printf(m, "Invals : n=%u run=%u\n",
-                  atomic_read(&fscache_n_invalidates),
-                  atomic_read(&fscache_n_invalidates_run));
+                  atomic_read_unchecked(&fscache_n_invalidates),
+                  atomic_read_unchecked(&fscache_n_invalidates_run));
 
        seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
-                  atomic_read(&fscache_n_updates),
-                  atomic_read(&fscache_n_updates_null),
-                  atomic_read(&fscache_n_updates_run));
+                  atomic_read_unchecked(&fscache_n_updates),
+                  atomic_read_unchecked(&fscache_n_updates_null),
+                  atomic_read_unchecked(&fscache_n_updates_run));
 
        seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
-                  atomic_read(&fscache_n_relinquishes),
-                  atomic_read(&fscache_n_relinquishes_null),
-                  atomic_read(&fscache_n_relinquishes_waitcrt),
-                  atomic_read(&fscache_n_relinquishes_retire));
+                  atomic_read_unchecked(&fscache_n_relinquishes),
+                  atomic_read_unchecked(&fscache_n_relinquishes_null),
+                  atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
+                  atomic_read_unchecked(&fscache_n_relinquishes_retire));
 
        seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
-                  atomic_read(&fscache_n_attr_changed),
-                  atomic_read(&fscache_n_attr_changed_ok),
-                  atomic_read(&fscache_n_attr_changed_nobufs),
-                  atomic_read(&fscache_n_attr_changed_nomem),
-                  atomic_read(&fscache_n_attr_changed_calls));
+                  atomic_read_unchecked(&fscache_n_attr_changed),
+                  atomic_read_unchecked(&fscache_n_attr_changed_ok),
+                  atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
+                  atomic_read_unchecked(&fscache_n_attr_changed_nomem),
+                  atomic_read_unchecked(&fscache_n_attr_changed_calls));
 
        seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
-                  atomic_read(&fscache_n_allocs),
-                  atomic_read(&fscache_n_allocs_ok),
-                  atomic_read(&fscache_n_allocs_wait),
-                  atomic_read(&fscache_n_allocs_nobufs),
-                  atomic_read(&fscache_n_allocs_intr));
+                  atomic_read_unchecked(&fscache_n_allocs),
+                  atomic_read_unchecked(&fscache_n_allocs_ok),
+                  atomic_read_unchecked(&fscache_n_allocs_wait),
+                  atomic_read_unchecked(&fscache_n_allocs_nobufs),
+                  atomic_read_unchecked(&fscache_n_allocs_intr));
        seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
-                  atomic_read(&fscache_n_alloc_ops),
-                  atomic_read(&fscache_n_alloc_op_waits),
-                  atomic_read(&fscache_n_allocs_object_dead));
+                  atomic_read_unchecked(&fscache_n_alloc_ops),
+                  atomic_read_unchecked(&fscache_n_alloc_op_waits),
+                  atomic_read_unchecked(&fscache_n_allocs_object_dead));
 
        seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
                   " int=%u oom=%u\n",
-                  atomic_read(&fscache_n_retrievals),
-                  atomic_read(&fscache_n_retrievals_ok),
-                  atomic_read(&fscache_n_retrievals_wait),
-                  atomic_read(&fscache_n_retrievals_nodata),
-                  atomic_read(&fscache_n_retrievals_nobufs),
-                  atomic_read(&fscache_n_retrievals_intr),
-                  atomic_read(&fscache_n_retrievals_nomem));
+                  atomic_read_unchecked(&fscache_n_retrievals),
+                  atomic_read_unchecked(&fscache_n_retrievals_ok),
+                  atomic_read_unchecked(&fscache_n_retrievals_wait),
+                  atomic_read_unchecked(&fscache_n_retrievals_nodata),
+                  atomic_read_unchecked(&fscache_n_retrievals_nobufs),
+                  atomic_read_unchecked(&fscache_n_retrievals_intr),
+                  atomic_read_unchecked(&fscache_n_retrievals_nomem));
        seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
-                  atomic_read(&fscache_n_retrieval_ops),
-                  atomic_read(&fscache_n_retrieval_op_waits),
-                  atomic_read(&fscache_n_retrievals_object_dead));
+                  atomic_read_unchecked(&fscache_n_retrieval_ops),
+                  atomic_read_unchecked(&fscache_n_retrieval_op_waits),
+                  atomic_read_unchecked(&fscache_n_retrievals_object_dead));
 
        seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
-                  atomic_read(&fscache_n_stores),
-                  atomic_read(&fscache_n_stores_ok),
-                  atomic_read(&fscache_n_stores_again),
-                  atomic_read(&fscache_n_stores_nobufs),
-                  atomic_read(&fscache_n_stores_oom));
+                  atomic_read_unchecked(&fscache_n_stores),
+                  atomic_read_unchecked(&fscache_n_stores_ok),
+                  atomic_read_unchecked(&fscache_n_stores_again),
+                  atomic_read_unchecked(&fscache_n_stores_nobufs),
+                  atomic_read_unchecked(&fscache_n_stores_oom));
        seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
-                  atomic_read(&fscache_n_store_ops),
-                  atomic_read(&fscache_n_store_calls),
-                  atomic_read(&fscache_n_store_pages),
-                  atomic_read(&fscache_n_store_radix_deletes),
-                  atomic_read(&fscache_n_store_pages_over_limit));
+                  atomic_read_unchecked(&fscache_n_store_ops),
+                  atomic_read_unchecked(&fscache_n_store_calls),
+                  atomic_read_unchecked(&fscache_n_store_pages),
+                  atomic_read_unchecked(&fscache_n_store_radix_deletes),
+                  atomic_read_unchecked(&fscache_n_store_pages_over_limit));
 
        seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
-                  atomic_read(&fscache_n_store_vmscan_not_storing),
-                  atomic_read(&fscache_n_store_vmscan_gone),
-                  atomic_read(&fscache_n_store_vmscan_busy),
-                  atomic_read(&fscache_n_store_vmscan_cancelled),
-                  atomic_read(&fscache_n_store_vmscan_wait));
+                  atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
+                  atomic_read_unchecked(&fscache_n_store_vmscan_gone),
+                  atomic_read_unchecked(&fscache_n_store_vmscan_busy),
+                  atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
+                  atomic_read_unchecked(&fscache_n_store_vmscan_wait));
 
        seq_printf(m, "Ops    : pend=%u run=%u enq=%u can=%u rej=%u\n",
-                  atomic_read(&fscache_n_op_pend),
-                  atomic_read(&fscache_n_op_run),
-                  atomic_read(&fscache_n_op_enqueue),
-                  atomic_read(&fscache_n_op_cancelled),
-                  atomic_read(&fscache_n_op_rejected));
+                  atomic_read_unchecked(&fscache_n_op_pend),
+                  atomic_read_unchecked(&fscache_n_op_run),
+                  atomic_read_unchecked(&fscache_n_op_enqueue),
+                  atomic_read_unchecked(&fscache_n_op_cancelled),
+                  atomic_read_unchecked(&fscache_n_op_rejected));
        seq_printf(m, "Ops    : dfr=%u rel=%u gc=%u\n",
-                  atomic_read(&fscache_n_op_deferred_release),
-                  atomic_read(&fscache_n_op_release),
-                  atomic_read(&fscache_n_op_gc));
+                  atomic_read_unchecked(&fscache_n_op_deferred_release),
+                  atomic_read_unchecked(&fscache_n_op_release),
+                  atomic_read_unchecked(&fscache_n_op_gc));
 
        seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
                   atomic_read(&fscache_n_cop_alloc_object),
index 28d0c7abba1c2fa7748d3b1c2874b855427b3897..04816b76aa4e283fdefca23b85da3c9068f789f1 100644 (file)
@@ -611,10 +611,12 @@ static int __init cuse_init(void)
                INIT_LIST_HEAD(&cuse_conntbl[i]);
 
        /* inherit and extend fuse_dev_operations */
-       cuse_channel_fops               = fuse_dev_operations;
-       cuse_channel_fops.owner         = THIS_MODULE;
-       cuse_channel_fops.open          = cuse_channel_open;
-       cuse_channel_fops.release       = cuse_channel_release;
+       pax_open_kernel();
+       memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
+       *(void **)&cuse_channel_fops.owner      = THIS_MODULE;
+       *(void **)&cuse_channel_fops.open       = cuse_channel_open;
+       *(void **)&cuse_channel_fops.release    = cuse_channel_release;
+       pax_close_kernel();
 
        cuse_class = class_create(THIS_MODULE, "cuse");
        if (IS_ERR(cuse_class))
index ed19a7d622fa35decaa08b10e83b8bdee8712419..91e9a4cb0fd62ea3c2ffe6b5c7e7842f43a0fd90 100644 (file)
@@ -1394,7 +1394,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
        ret = 0;
        pipe_lock(pipe);
 
-       if (!pipe->readers) {
+       if (!atomic_read(&pipe->readers)) {
                send_sig(SIGPIPE, current, 0);
                if (!ret)
                        ret = -EPIPE;
@@ -1423,7 +1423,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
                page_nr++;
                ret += buf->len;
 
-               if (pipe->files)
+               if (atomic_read(&pipe->files))
                        do_wakeup = 1;
        }
 
index 08e7b1a9d5d0edaca8b94ef386d9200078958df3..d91c6ee6ddc2ca1ba7a9fbf6b00e1c994a1cf579 100644 (file)
@@ -1394,7 +1394,7 @@ static char *read_link(struct dentry *dentry)
        return link;
 }
 
-static void free_link(char *link)
+static void free_link(const char *link)
 {
        if (!IS_ERR(link))
                free_page((unsigned long) link);
index fd62cae0fdcb66db03712d419c25014312112546..3494dfa3bc79e1688a892f2f4fd82ce4c57d1f87 100644 (file)
@@ -908,7 +908,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
 
 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
 {
-       char *s = nd_get_link(nd);
+       const char *s = nd_get_link(nd);
        if (!IS_ERR(s))
                __putname(s);
 }
index 5eba47f593f8f888855c6b83428455f8b46af670..d353c2219ae82069afc581c571bba9057e57571e 100644 (file)
@@ -154,6 +154,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct hstate *h = hstate_file(file);
+       unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
        struct vm_unmapped_area_info info;
 
        if (len & ~huge_page_mask(h))
@@ -167,17 +168,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                return addr;
        }
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+               if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
                        return addr;
        }
 
        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_PAX_RANDMMAP
+       if (mm->pax_flags & MF_PAX_RANDMMAP)
+               info.low_limit += mm->delta_mmap;
+#endif
+
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
@@ -919,7 +929,7 @@ static struct file_system_type hugetlbfs_fs_type = {
 };
 MODULE_ALIAS_FS("hugetlbfs");
 
-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
 
 static int can_do_hugetlb_shm(void)
 {
index aa149e7262acffff13db5707eeb0c0144add985c..46f1f65fb839cf387eff9756d077239594ad85e7 100644 (file)
@@ -842,16 +842,20 @@ unsigned int get_next_ino(void)
        unsigned int *p = &get_cpu_var(last_ino);
        unsigned int res = *p;
 
+start:
+
 #ifdef CONFIG_SMP
        if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
-               static atomic_t shared_last_ino;
-               int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
+               static atomic_unchecked_t shared_last_ino;
+               int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
 
                res = next - LAST_INO_BATCH;
        }
 #endif
 
-       *p = ++res;
+       if (unlikely(!++res))
+               goto start;     /* never zero */
+       *p = res;
        put_cpu_var(last_ino);
        return res;
 }
index 4a6cf289be248cbfaedc60dea23afc84fcbbbf44..d3a29d3dc063a9b1e3f87e5654371ae813a337f2 100644 (file)
@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
                struct jffs2_unknown_node marker = {
                        .magic =        cpu_to_je16(JFFS2_MAGIC_BITMASK),
                        .nodetype =     cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
-                       .totlen =       cpu_to_je32(c->cleanmarker_size)
+                       .totlen =       cpu_to_je32(c->cleanmarker_size),
+                       .hdr_crc =      cpu_to_je32(0)
                };
 
                jffs2_prealloc_raw_node_refs(c, jeb, 1);
index 09ed55190ee2c077524e38a11b4fe41c00f51ac9..45684f8bf3915478f9d5013b4b408278efd11a08 100644 (file)
@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
 {
        .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
        .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
-       .totlen = constant_cpu_to_je32(8)
+       .totlen = constant_cpu_to_je32(8),
+       .hdr_crc = constant_cpu_to_je32(0)
 };
 
 /*
index 16c3a9556634ddf7b294bf85bb73c2a5025258b3..e9cb75d56250bce7cc13813664977ec19e3856be 100644 (file)
@@ -902,7 +902,7 @@ static int __init init_jfs_fs(void)
 
        jfs_inode_cachep =
            kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
-                           SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
+                           SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
                            init_once);
        if (jfs_inode_cachep == NULL)
                return -ENOMEM;
index 2d881b381d2b787bbb2ff40b151e7496c0abafae..fe1ac77cfd3854bff639bd1ba63b9e8e70be066d 100644 (file)
@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
  *
  *     Returns 31 bit hash of ns + name (so it fits in an off_t )
  */
-static unsigned int kernfs_name_hash(const char *name, const void *ns)
+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
 {
        unsigned long hash = init_name_hash();
        unsigned int len = strlen(name);
@@ -833,6 +833,12 @@ static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
        ret = scops->mkdir(parent, dentry->d_name.name, mode);
 
        kernfs_put_active(parent);
+
+       if (!ret) {
+               struct dentry *dentry_ret = kernfs_iop_lookup(dir, dentry, 0);
+               ret = PTR_ERR_OR_ZERO(dentry_ret);
+       }
+
        return ret;
 }
 
index ddc9f9612f168f026ea71aa4f4aac41bf6860369..4e450adadc710f90975909c996934994a9e71462 100644 (file)
@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
 
 struct kernfs_open_node {
        atomic_t                refcnt;
-       atomic_t                event;
+       atomic_unchecked_t      event;
        wait_queue_head_t       poll;
        struct list_head        files; /* goes through kernfs_open_file.list */
 };
@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
 {
        struct kernfs_open_file *of = sf->private;
 
-       of->event = atomic_read(&of->kn->attr.open->event);
+       of->event = atomic_read_unchecked(&of->kn->attr.open->event);
 
        return of->kn->attr.ops->seq_show(sf, v);
 }
@@ -271,7 +271,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
 {
        struct kernfs_open_file *of = kernfs_of(file);
        const struct kernfs_ops *ops;
-       size_t len;
+       ssize_t len;
        char *buf;
 
        if (of->atomic_write_len) {
@@ -384,12 +384,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
        return ret;
 }
 
-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
-                            void *buf, int len, int write)
+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
+                            void *buf, size_t len, int write)
 {
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
-       int ret;
+       ssize_t ret;
 
        if (!of->vm_ops)
                return -EINVAL;
@@ -568,7 +568,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
                return -ENOMEM;
 
        atomic_set(&new_on->refcnt, 0);
-       atomic_set(&new_on->event, 1);
+       atomic_set_unchecked(&new_on->event, 1);
        init_waitqueue_head(&new_on->poll);
        INIT_LIST_HEAD(&new_on->files);
        goto retry;
@@ -792,7 +792,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
 
        kernfs_put_active(kn);
 
-       if (of->event != atomic_read(&on->event))
+       if (of->event != atomic_read_unchecked(&on->event))
                goto trigger;
 
        return DEFAULT_POLLMASK;
@@ -823,7 +823,7 @@ repeat:
 
        on = kn->attr.open;
        if (on) {
-               atomic_inc(&on->event);
+               atomic_inc_unchecked(&on->event);
                wake_up_interruptible(&on->poll);
        }
 
index 8a198898e39afd3ffde994cee7d732dcdfa8bdcd..4c3069a14a1eadd0440cd3a7b7f8abc20f89f1bf 100644 (file)
@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
                                void *cookie)
 {
-       char *page = nd_get_link(nd);
+       const char *page = nd_get_link(nd);
        if (!IS_ERR(page))
                free_page((unsigned long)page);
 }
index 005843ce5dbd532d683b1c94e16f765b8585d236..06c4191daf2bde4a22db02e6c15ae01a152ef2a9 100644 (file)
@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
 
        for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
                struct dentry *next = list_entry(p, struct dentry, d_child);
+               char d_name[sizeof(next->d_iname)];
+               const unsigned char *name;
+
                spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
                if (!simple_positive(next)) {
                        spin_unlock(&next->d_lock);
@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
 
                spin_unlock(&next->d_lock);
                spin_unlock(&dentry->d_lock);
-               if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
+               name = next->d_name.name;
+               if (name == next->d_iname) {
+                       memcpy(d_name, name, next->d_name.len);
+                       name = d_name;
+               }
+               if (!dir_emit(ctx, name, next->d_name.len,
                              next->d_inode->i_ino, dt_type(next->d_inode)))
                        return 0;
                spin_lock(&dentry->d_lock);
@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
                                void *cookie)
 {
-       char *s = nd_get_link(nd);
+       const char *s = nd_get_link(nd);
        if (!IS_ERR(s))
                kfree(s);
 }
index acd3947163497b802544a632aa02512c37b88d9d..1f896e276767dd4e0fa2d6fcf515b684101520aa 100644 (file)
@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
 /*
  * Cookie counter for NLM requests
  */
-static atomic_t        nlm_cookie = ATOMIC_INIT(0x1234);
+static atomic_unchecked_t      nlm_cookie = ATOMIC_INIT(0x1234);
 
 void nlmclnt_next_cookie(struct nlm_cookie *c)
 {
-       u32     cookie = atomic_inc_return(&nlm_cookie);
+       u32     cookie = atomic_inc_return_unchecked(&nlm_cookie);
 
        memcpy(c->data, &cookie, 4);
        c->len=4;
index 59e2f905e4ffea324dbf44faf1b666974adc6c23..bd69071de9dddc442e2872bb37217e402c1e2341 100644 (file)
@@ -2374,7 +2374,7 @@ void locks_remove_file(struct file *filp)
        locks_remove_posix(filp, filp);
 
        if (filp->f_op->flock) {
-               struct file_lock fl = {
+               struct file_lock flock = {
                        .fl_owner = filp,
                        .fl_pid = current->tgid,
                        .fl_file = filp,
@@ -2382,9 +2382,9 @@ void locks_remove_file(struct file *filp)
                        .fl_type = F_UNLCK,
                        .fl_end = OFFSET_MAX,
                };
-               filp->f_op->flock(filp, F_SETLKW, &fl);
-               if (fl.fl_ops && fl.fl_ops->fl_release_private)
-                       fl.fl_ops->fl_release_private(&fl);
+               filp->f_op->flock(filp, F_SETLKW, &flock);
+               if (flock.fl_ops && flock.fl_ops->fl_release_private)
+                       flock.fl_ops->fl_release_private(&flock);
        }
 
        spin_lock(&inode->i_lock);
index 0ad6f760ce521b93a2847c501e579d1c4a440334..a04c1468084e4db6a1b4a8e1ecb63a33a27a299f 100644 (file)
@@ -12,7 +12,7 @@ struct mnt_namespace {
        u64                     seq;    /* Sequence number to prevent loops */
        wait_queue_head_t poll;
        u64 event;
-};
+} __randomize_layout;
 
 struct mnt_pcp {
        int mnt_count;
@@ -63,7 +63,7 @@ struct mount {
        int mnt_expiry_mark;            /* true if marked for expiry */
        struct hlist_head mnt_pins;
        struct path mnt_ex_mountpoint;
-};
+} __randomize_layout;
 
 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
 
index bc35b02883bb968812a7ec7cb62f4e1ff6e88de3..7ed1f1d410377a500bb74cb5d68ba9c8198eca95 100644 (file)
@@ -331,16 +331,31 @@ int generic_permission(struct inode *inode, int mask)
        if (ret != -EACCES)
                return ret;
 
+#ifdef CONFIG_GRKERNSEC
+       /* we'll block if we have to log due to a denied capability use */
+       if (mask & MAY_NOT_BLOCK)
+               return -ECHILD;
+#endif
+
        if (S_ISDIR(inode->i_mode)) {
                /* DACs are overridable for directories */
-               if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
-                       return 0;
                if (!(mask & MAY_WRITE))
-                       if (capable_wrt_inode_uidgid(inode,
-                                                    CAP_DAC_READ_SEARCH))
+                       if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
+                           capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
                                return 0;
+               if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
+                       return 0;
                return -EACCES;
        }
+       /*
+        * Searching includes executable on directories, else just read.
+        */
+       mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
+       if (mask == MAY_READ)
+               if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
+                   capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
+                       return 0;
+
        /*
         * Read/write DACs are always overridable.
         * Executable DACs are overridable when there is
@@ -350,14 +365,6 @@ int generic_permission(struct inode *inode, int mask)
                if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
                        return 0;
 
-       /*
-        * Searching includes executable on directories, else just read.
-        */
-       mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
-       if (mask == MAY_READ)
-               if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
-                       return 0;
-
        return -EACCES;
 }
 EXPORT_SYMBOL(generic_permission);
@@ -497,7 +504,7 @@ struct nameidata {
        int             last_type;
        unsigned        depth;
        struct file     *base;
-       char *saved_names[MAX_NESTED_LINKS + 1];
+       const char *saved_names[MAX_NESTED_LINKS + 1];
 };
 
 /*
@@ -708,13 +715,13 @@ void nd_jump_link(struct nameidata *nd, struct path *path)
        nd->flags |= LOOKUP_JUMPED;
 }
 
-void nd_set_link(struct nameidata *nd, char *path)
+void nd_set_link(struct nameidata *nd, const char *path)
 {
        nd->saved_names[nd->depth] = path;
 }
 EXPORT_SYMBOL(nd_set_link);
 
-char *nd_get_link(struct nameidata *nd)
+const char *nd_get_link(const struct nameidata *nd)
 {
        return nd->saved_names[nd->depth];
 }
@@ -849,7 +856,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
 {
        struct dentry *dentry = link->dentry;
        int error;
-       char *s;
+       const char *s;
 
        BUG_ON(nd->flags & LOOKUP_RCU);
 
@@ -870,6 +877,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
        if (error)
                goto out_put_nd_path;
 
+       if (gr_handle_follow_link(dentry->d_parent->d_inode,
+                                 dentry->d_inode, dentry, nd->path.mnt)) {
+               error = -EACCES;
+               goto out_put_nd_path;
+       }       
+
        nd->last_type = LAST_BIND;
        *p = dentry->d_inode->i_op->follow_link(dentry, nd);
        error = PTR_ERR(*p);
@@ -1633,6 +1646,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
                if (res)
                        break;
                res = walk_component(nd, path, LOOKUP_FOLLOW);
+               if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
+                       res = -EACCES;
                put_link(nd, &link, cookie);
        } while (res > 0);
 
@@ -1705,7 +1720,7 @@ EXPORT_SYMBOL(full_name_hash);
 static inline u64 hash_name(const char *name)
 {
        unsigned long a, b, adata, bdata, mask, hash, len;
-       const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+       static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
 
        hash = a = 0;
        len = -sizeof(unsigned long);
@@ -2000,6 +2015,8 @@ static int path_lookupat(int dfd, const char *name,
                        if (err)
                                break;
                        err = lookup_last(nd, &path);
+                       if (!err && gr_handle_symlink_owner(&link, nd->inode))
+                               err = -EACCES;
                        put_link(nd, &link, cookie);
                }
        }
@@ -2007,6 +2024,13 @@ static int path_lookupat(int dfd, const char *name,
        if (!err)
                err = complete_walk(nd);
 
+       if (!err && !(nd->flags & LOOKUP_PARENT)) {
+               if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
+                       path_put(&nd->path);
+                       err = -ENOENT;
+               }
+       }
+
        if (!err && nd->flags & LOOKUP_DIRECTORY) {
                if (!d_can_lookup(nd->path.dentry)) {
                        path_put(&nd->path);
@@ -2028,8 +2052,15 @@ static int filename_lookup(int dfd, struct filename *name,
                retval = path_lookupat(dfd, name->name,
                                                flags | LOOKUP_REVAL, nd);
 
-       if (likely(!retval))
+       if (likely(!retval)) {
                audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
+               if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
+                       if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
+                               path_put(&nd->path);
+                               return -ENOENT;
+                       }
+               }
+       }
        return retval;
 }
 
@@ -2595,6 +2626,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
        if (flag & O_NOATIME && !inode_owner_or_capable(inode))
                return -EPERM;
 
+       if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
+               return -EPERM;
+       if (gr_handle_rawio(inode))
+               return -EPERM;
+       if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
+               return -EACCES;
+
        return 0;
 }
 
@@ -2826,7 +2864,7 @@ looked_up:
  * cleared otherwise prior to returning.
  */
 static int lookup_open(struct nameidata *nd, struct path *path,
-                       struct file *file,
+                       struct path *link, struct file *file,
                        const struct open_flags *op,
                        bool got_write, int *opened)
 {
@@ -2861,6 +2899,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
        /* Negative dentry, just create the file */
        if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
                umode_t mode = op->mode;
+
+               if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
+                       error = -EACCES;
+                       goto out_dput;
+               }
+
+               if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
+                       error = -EACCES;
+                       goto out_dput;
+               }
+
                if (!IS_POSIXACL(dir->d_inode))
                        mode &= ~current_umask();
                /*
@@ -2882,6 +2931,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
                                   nd->flags & LOOKUP_EXCL);
                if (error)
                        goto out_dput;
+               else
+                       gr_handle_create(dentry, nd->path.mnt);
        }
 out_no_open:
        path->dentry = dentry;
@@ -2896,7 +2947,7 @@ out_dput:
 /*
  * Handle the last step of open()
  */
-static int do_last(struct nameidata *nd, struct path *path,
+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
                   struct file *file, const struct open_flags *op,
                   int *opened, struct filename *name)
 {
@@ -2946,6 +2997,15 @@ static int do_last(struct nameidata *nd, struct path *path,
                if (error)
                        return error;
 
+               if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
+                       error = -ENOENT;
+                       goto out;
+               }
+               if (link && gr_handle_symlink_owner(link, nd->inode)) {
+                       error = -EACCES;
+                       goto out;
+               }
+
                audit_inode(name, dir, LOOKUP_PARENT);
                error = -EISDIR;
                /* trailing slashes? */
@@ -2965,7 +3025,7 @@ retry_lookup:
                 */
        }
        mutex_lock(&dir->d_inode->i_mutex);
-       error = lookup_open(nd, path, file, op, got_write, opened);
+       error = lookup_open(nd, path, link, file, op, got_write, opened);
        mutex_unlock(&dir->d_inode->i_mutex);
 
        if (error <= 0) {
@@ -2989,11 +3049,28 @@ retry_lookup:
                goto finish_open_created;
        }
 
+       if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
+               error = -ENOENT;
+               goto exit_dput;
+       }
+       if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
+               error = -EACCES;
+               goto exit_dput;
+       }
+
        /*
         * create/update audit record if it already exists.
         */
-       if (d_is_positive(path->dentry))
+       if (d_is_positive(path->dentry)) {
+               /* only check if O_CREAT is specified, all other checks need to go
+                  into may_open */
+               if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
+                       error = -EACCES;
+                       goto exit_dput;
+               }
+
                audit_inode(name, path->dentry, 0);
+       }
 
        /*
         * If atomic_open() acquired write access it is dropped now due to
@@ -3034,6 +3111,11 @@ finish_lookup:
                        }
                }
                BUG_ON(inode != path->dentry->d_inode);
+               /* if we're resolving a symlink to another symlink */
+               if (link && gr_handle_symlink_owner(link, inode)) {
+                       error = -EACCES;
+                       goto out;
+               }
                return 1;
        }
 
@@ -3053,7 +3135,18 @@ finish_open:
                path_put(&save_parent);
                return error;
        }
+
+       if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
+               error = -ENOENT;
+               goto out;
+       }
+       if (link && gr_handle_symlink_owner(link, nd->inode)) {
+               error = -EACCES;
+               goto out;
+       }
+
        audit_inode(name, nd->path.dentry, 0);
+
        error = -EISDIR;
        if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
                goto out;
@@ -3214,7 +3307,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
        if (unlikely(error))
                goto out;
 
-       error = do_last(nd, &path, file, op, &opened, pathname);
+       error = do_last(nd, &path, NULL, file, op, &opened, pathname);
        while (unlikely(error > 0)) { /* trailing symlink */
                struct path link = path;
                void *cookie;
@@ -3232,7 +3325,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
                error = follow_link(&link, nd, &cookie);
                if (unlikely(error))
                        break;
-               error = do_last(nd, &path, file, op, &opened, pathname);
+               error = do_last(nd, &path, &link, file, op, &opened, pathname);
                put_link(nd, &link, cookie);
        }
 out:
@@ -3329,9 +3422,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
                goto unlock;
 
        error = -EEXIST;
-       if (d_is_positive(dentry))
+       if (d_is_positive(dentry)) {
+               if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
+                       error = -ENOENT;
                goto fail;
-
+       }
        /*
         * Special case - lookup gave negative, but... we had foo/bar/
         * From the vfs_mknod() POV we just have a negative dentry -
@@ -3383,6 +3478,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
 }
 EXPORT_SYMBOL(user_path_create);
 
+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
+{
+       struct filename *tmp = getname(pathname);
+       struct dentry *res;
+       if (IS_ERR(tmp))
+               return ERR_CAST(tmp);
+       res = kern_path_create(dfd, tmp->name, path, lookup_flags);
+       if (IS_ERR(res))
+               putname(tmp);
+       else
+               *to = tmp;
+       return res;
+}
+
 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
 {
        int error = may_create(dir, dentry);
@@ -3446,6 +3555,17 @@ retry:
 
        if (!IS_POSIXACL(path.dentry->d_inode))
                mode &= ~current_umask();
+
+       if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
+               error = -EPERM;
+               goto out;
+       }
+
+       if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
+               error = -EACCES;
+               goto out;
+       }
+
        error = security_path_mknod(&path, dentry, mode, dev);
        if (error)
                goto out;
@@ -3461,6 +3581,8 @@ retry:
                        error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
                        break;
        }
+       if (!error)
+               gr_handle_create(dentry, path.mnt);
 out:
        done_path_create(&path, dentry);
        if (retry_estale(error, lookup_flags)) {
@@ -3515,9 +3637,16 @@ retry:
 
        if (!IS_POSIXACL(path.dentry->d_inode))
                mode &= ~current_umask();
+       if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
+               error = -EACCES;
+               goto out;
+       }
        error = security_path_mkdir(&path, dentry, mode);
        if (!error)
                error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
+       if (!error)
+               gr_handle_create(dentry, path.mnt);
+out:
        done_path_create(&path, dentry);
        if (retry_estale(error, lookup_flags)) {
                lookup_flags |= LOOKUP_REVAL;
@@ -3601,6 +3730,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
        struct filename *name;
        struct dentry *dentry;
        struct nameidata nd;
+       u64 saved_ino = 0;
+       dev_t saved_dev = 0;
        unsigned int lookup_flags = 0;
 retry:
        name = user_path_parent(dfd, pathname, &nd, lookup_flags);
@@ -3633,10 +3764,21 @@ retry:
                error = -ENOENT;
                goto exit3;
        }
+
+       saved_ino = gr_get_ino_from_dentry(dentry);
+       saved_dev = gr_get_dev_from_dentry(dentry);
+
+       if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
+               error = -EACCES;
+               goto exit3;
+       }
+
        error = security_path_rmdir(&nd.path, dentry);
        if (error)
                goto exit3;
        error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
+       if (!error && (saved_dev || saved_ino))
+               gr_handle_delete(saved_ino, saved_dev);
 exit3:
        dput(dentry);
 exit2:
@@ -3729,6 +3871,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
        struct nameidata nd;
        struct inode *inode = NULL;
        struct inode *delegated_inode = NULL;
+       u64 saved_ino = 0;
+       dev_t saved_dev = 0;
        unsigned int lookup_flags = 0;
 retry:
        name = user_path_parent(dfd, pathname, &nd, lookup_flags);
@@ -3755,10 +3899,22 @@ retry_deleg:
                if (d_is_negative(dentry))
                        goto slashes;
                ihold(inode);
+
+               if (inode->i_nlink <= 1) {
+                       saved_ino = gr_get_ino_from_dentry(dentry);
+                       saved_dev = gr_get_dev_from_dentry(dentry);
+               }
+               if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
+                       error = -EACCES;
+                       goto exit2;
+               }
+
                error = security_path_unlink(&nd.path, dentry);
                if (error)
                        goto exit2;
                error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
+               if (!error && (saved_ino || saved_dev))
+                       gr_handle_delete(saved_ino, saved_dev);
 exit2:
                dput(dentry);
        }
@@ -3847,9 +4003,17 @@ retry:
        if (IS_ERR(dentry))
                goto out_putname;
 
+       if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
+               error = -EACCES;
+               goto out;
+       }
+
        error = security_path_symlink(&path, dentry, from->name);
        if (!error)
                error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
+       if (!error)
+               gr_handle_create(dentry, path.mnt);
+out:
        done_path_create(&path, dentry);
        if (retry_estale(error, lookup_flags)) {
                lookup_flags |= LOOKUP_REVAL;
@@ -3953,6 +4117,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
        struct dentry *new_dentry;
        struct path old_path, new_path;
        struct inode *delegated_inode = NULL;
+       struct filename *to = NULL;
        int how = 0;
        int error;
 
@@ -3976,7 +4141,7 @@ retry:
        if (error)
                return error;
 
-       new_dentry = user_path_create(newdfd, newname, &new_path,
+       new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
                                        (how & LOOKUP_REVAL));
        error = PTR_ERR(new_dentry);
        if (IS_ERR(new_dentry))
@@ -3988,11 +4153,28 @@ retry:
        error = may_linkat(&old_path);
        if (unlikely(error))
                goto out_dput;
+
+       if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
+                              old_path.dentry->d_inode,
+                              old_path.dentry->d_inode->i_mode, to)) {
+               error = -EACCES;
+               goto out_dput;
+       }
+
+       if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
+                               old_path.dentry, old_path.mnt, to)) {
+               error = -EACCES;
+               goto out_dput;
+       }
+
        error = security_path_link(old_path.dentry, &new_path, new_dentry);
        if (error)
                goto out_dput;
        error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
+       if (!error)
+               gr_handle_create(new_dentry, new_path.mnt);
 out_dput:
+       putname(to);
        done_path_create(&new_path, new_dentry);
        if (delegated_inode) {
                error = break_deleg_wait(&delegated_inode);
@@ -4308,6 +4490,20 @@ retry_deleg:
        if (new_dentry == trap)
                goto exit5;
 
+       if (gr_bad_chroot_rename(old_dentry, oldnd.path.mnt, new_dentry, newnd.path.mnt)) {
+               /* use EXDEV error to cause 'mv' to switch to an alternative
+                * method for usability
+                */
+               error = -EXDEV;
+               goto exit5;
+       }
+
+       error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
+                                    old_dentry, old_dir->d_inode, oldnd.path.mnt,
+                                    to, flags);
+       if (error)
+               goto exit5;
+
        error = security_path_rename(&oldnd.path, old_dentry,
                                     &newnd.path, new_dentry, flags);
        if (error)
@@ -4315,6 +4511,9 @@ retry_deleg:
        error = vfs_rename(old_dir->d_inode, old_dentry,
                           new_dir->d_inode, new_dentry,
                           &delegated_inode, flags);
+       if (!error)
+               gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
+                                new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
 exit5:
        dput(new_dentry);
 exit4:
@@ -4371,14 +4570,24 @@ EXPORT_SYMBOL(vfs_whiteout);
 
 int readlink_copy(char __user *buffer, int buflen, const char *link)
 {
+       char tmpbuf[64];
+       const char *newlink;
        int len = PTR_ERR(link);
+
        if (IS_ERR(link))
                goto out;
 
        len = strlen(link);
        if (len > (unsigned) buflen)
                len = buflen;
-       if (copy_to_user(buffer, link, len))
+
+       if (len < sizeof(tmpbuf)) {
+               memcpy(tmpbuf, link, len);
+               newlink = tmpbuf;
+       } else
+               newlink = link;
+
+       if (copy_to_user(buffer, newlink, len))
                len = -EFAULT;
 out:
        return len;
index cd1e9681a0cf5d0bdfd64d84ae90c34bd10f265b..e64ff1604b1fc6481b915028c41b152ec2c473bd 100644 (file)
@@ -1448,6 +1448,9 @@ static int do_umount(struct mount *mnt, int flags)
                if (!(sb->s_flags & MS_RDONLY))
                        retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
                up_write(&sb->s_umount);
+
+               gr_log_remount(mnt->mnt_devname, retval);
+
                return retval;
        }
 
@@ -1470,6 +1473,9 @@ static int do_umount(struct mount *mnt, int flags)
        }
        unlock_mount_hash();
        namespace_unlock();
+
+       gr_log_unmount(mnt->mnt_devname, retval);
+
        return retval;
 }
 
@@ -1520,7 +1526,7 @@ static inline bool may_mount(void)
  * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
  */
 
-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
 {
        struct path path;
        struct mount *mnt;
@@ -1565,7 +1571,7 @@ out:
 /*
  *     The 2.0 compatible umount. No flags.
  */
-SYSCALL_DEFINE1(oldumount, char __user *, name)
+SYSCALL_DEFINE1(oldumount, const char __user *, name)
 {
        return sys_umount(name, 0);
 }
@@ -2631,6 +2637,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
                   MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
                   MS_STRICTATIME);
 
+       if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
+               retval = -EPERM;
+               goto dput_out;
+       }
+
+       if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
+               retval = -EPERM;
+               goto dput_out;
+       }
+
        if (flags & MS_REMOUNT)
                retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
                                    data_page);
@@ -2644,7 +2660,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
                retval = do_new_mount(&path, type_page, flags, mnt_flags,
                                      dev_name, data_page);
 dput_out:
+       gr_log_mount(dev_name, &path, retval);
+
        path_put(&path);
+
        return retval;
 }
 
@@ -2662,7 +2681,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
  * number incrementing at 10Ghz will take 12,427 years to wrap which
  * is effectively never, so we can ignore the possibility.
  */
-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
 
 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
 {
@@ -2678,7 +2697,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
                return ERR_PTR(ret);
        }
        new_ns->ns.ops = &mntns_operations;
-       new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
+       new_ns->seq = atomic64_add_return_unchecked(1, &mnt_ns_seq);
        atomic_set(&new_ns->count, 1);
        new_ns->root = NULL;
        INIT_LIST_HEAD(&new_ns->list);
@@ -2688,7 +2707,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
        return new_ns;
 }
 
-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
                struct user_namespace *user_ns, struct fs_struct *new_fs)
 {
        struct mnt_namespace *new_ns;
@@ -2809,8 +2828,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
 }
 EXPORT_SYMBOL(mount_subtree);
 
-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
-               char __user *, type, unsigned long, flags, void __user *, data)
+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
+               const char __user *, type, unsigned long, flags, void __user *, data)
 {
        int ret;
        char *kernel_type;
@@ -2916,6 +2935,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
        if (error)
                goto out2;
 
+       if (gr_handle_chroot_pivot()) {
+               error = -EPERM;
+               goto out2;
+       }
+
        get_fs_root(current->fs, &root);
        old_mp = lock_mount(&old);
        error = PTR_ERR(old_mp);
@@ -3190,7 +3214,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
            !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
                return -EPERM;
 
-       if (fs->users != 1)
+       if (atomic_read(&fs->users) != 1)
                return -EINVAL;
 
        get_mnt_ns(mnt_ns);
index 02f8d09e119f4a0bd9d965b994871ff2abc4e210..a5c25d13f37c82a21a7cfe4e16491d109f934d1b 100644 (file)
@@ -51,7 +51,7 @@ struct callback_op {
        callback_decode_arg_t decode_args;
        callback_encode_res_t encode_res;
        long res_maxsize;
-};
+} __do_const;
 
 static struct callback_op callback_ops[];
 
index 2211f6ba873628485fcabf874adf0c3c985a6d84..30d0950eb7a99b398c4c874ff7221976cd0c8d61 100644 (file)
@@ -1234,16 +1234,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
        return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
 }
 
-static atomic_long_t nfs_attr_generation_counter;
+static atomic_long_unchecked_t nfs_attr_generation_counter;
 
 static unsigned long nfs_read_attr_generation_counter(void)
 {
-       return atomic_long_read(&nfs_attr_generation_counter);
+       return atomic_long_read_unchecked(&nfs_attr_generation_counter);
 }
 
 unsigned long nfs_inc_attr_generation_counter(void)
 {
-       return atomic_long_inc_return(&nfs_attr_generation_counter);
+       return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
 }
 
 void nfs_fattr_init(struct nfs_fattr *fattr)
index ac71d13c69ef374020002be3cdcca0e4939d54b3..a2e590ab431ba74ac3fe09c4bfba7873061e17ab 100644 (file)
@@ -1237,7 +1237,7 @@ struct nfsd4_operation {
        nfsd4op_rsize op_rsize_bop;
        stateid_getter op_get_currentstateid;
        stateid_setter op_set_currentstateid;
-};
+} __do_const;
 
 static struct nfsd4_operation nfsd4_ops[];
 
index 15f7b73e0c0fd8e49012134ca2d4afe83671012c..00e230b8076d757ce9fa59f16eaf0c43e996e949 100644 (file)
@@ -1560,7 +1560,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
 
 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
 
-static nfsd4_dec nfsd4_dec_ops[] = {
+static const nfsd4_dec nfsd4_dec_ops[] = {
        [OP_ACCESS]             = (nfsd4_dec)nfsd4_decode_access,
        [OP_CLOSE]              = (nfsd4_dec)nfsd4_decode_close,
        [OP_COMMIT]             = (nfsd4_dec)nfsd4_decode_commit,
index 83a9694ec485b0593e3de847464243640fcd7186..6b7f928fd0d76730926382c416fe666fadf4a0d4 100644 (file)
@@ -537,7 +537,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
        struct kvec     *resv = &rqstp->rq_res.head[0], *cachv;
        u32             hash;
        struct nfsd_drc_bucket *b;
-       int             len;
+       long            len;
        size_t          bufsize = 0;
 
        if (!rp)
@@ -546,11 +546,14 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
        hash = nfsd_cache_hash(rp->c_xid);
        b = &drc_hashtbl[hash];
 
-       len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
-       len >>= 2;
+       if (statp) {
+               len = (char*)statp - (char*)resv->iov_base;
+               len = resv->iov_len - len;
+               len >>= 2;
+       }
 
        /* Don't cache excessive amounts of data and XDR failures */
-       if (!statp || len > (256 >> 2)) {
+       if (!statp || len > (256 >> 2) || len < 0) {
                nfsd_reply_cache_free(b, rp);
                return;
        }
@@ -558,7 +561,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
        switch (cachetype) {
        case RC_REPLSTAT:
                if (len != 1)
-                       printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
+                       printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
                rp->c_replstat = *statp;
                break;
        case RC_REPLBUFF:
index 5685c679dd93d4371626de7d6107a9ac66a98093..73029ef37680a156e61ce497f5acc851acf809ef 100644 (file)
@@ -893,7 +893,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
 
        oldfs = get_fs();
        set_fs(KERNEL_DS);
-       host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
+       host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
        set_fs(oldfs);
        return nfsd_finish_read(file, count, host_err);
 }
@@ -980,7 +980,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
 
        /* Write the data. */
        oldfs = get_fs(); set_fs(KERNEL_DS);
-       host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
+       host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
        set_fs(oldfs);
        if (host_err < 0)
                goto out_nfserr;
@@ -1525,7 +1525,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
         */
 
        oldfs = get_fs(); set_fs(KERNEL_DS);
-       host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
+       host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
        set_fs(oldfs);
 
        if (host_err < 0)
index 52ccd34b1e792370597b77f7d228c4d79bf96600..7a6b20240cb6bbaa474935a8121156810f4ac510 100644 (file)
@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
 
 int __register_nls(struct nls_table *nls, struct module *owner)
 {
-       struct nls_table ** tmp = &tables;
+       struct nls_table *tmp = tables;
 
        if (nls->next)
                return -EBUSY;
 
-       nls->owner = owner;
+       pax_open_kernel();
+       *(void **)&nls->owner = owner;
+       pax_close_kernel();
        spin_lock(&nls_lock);
-       while (*tmp) {
-               if (nls == *tmp) {
+       while (tmp) {
+               if (nls == tmp) {
                        spin_unlock(&nls_lock);
                        return -EBUSY;
                }
-               tmp = &(*tmp)->next;
+               tmp = tmp->next;
        }
-       nls->next = tables;
+       pax_open_kernel();
+       *(struct nls_table **)&nls->next = tables;
+       pax_close_kernel();
        tables = nls;
        spin_unlock(&nls_lock);
        return 0;       
@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
 
 int unregister_nls(struct nls_table * nls)
 {
-       struct nls_table ** tmp = &tables;
+       struct nls_table * const * tmp = &tables;
 
        spin_lock(&nls_lock);
        while (*tmp) {
                if (nls == *tmp) {
-                       *tmp = nls->next;
+                       pax_open_kernel();
+                       *(struct nls_table **)tmp = nls->next;
+                       pax_close_kernel();
                        spin_unlock(&nls_lock);
                        return 0;
                }
@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
        return -EINVAL;
 }
 
-static struct nls_table *find_nls(char *charset)
+static struct nls_table *find_nls(const char *charset)
 {
        struct nls_table *nls;
        spin_lock(&nls_lock);
@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
        return nls;
 }
 
-struct nls_table *load_nls(char *charset)
+struct nls_table *load_nls(const char *charset)
 {
        return try_then_request_module(find_nls(charset), "nls_%s", charset);
 }
index 162b3f160353c6ab076644079c861a2b4f1fd736..6076a7c6084ddc659a6c1f0484cec3c62b487641 100644 (file)
@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
        p_nls = load_nls("cp932");
 
        if (p_nls) {
-               table.charset2upper = p_nls->charset2upper;
-               table.charset2lower = p_nls->charset2lower;
+               pax_open_kernel();
+               *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
+               *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
+               pax_close_kernel();
                return register_nls(&table);
        }
 
index a80a741a8676dc5a6ead28c9a584169050fc3c79..7b96e1b06d0cb5265098e6f7ac7678d073dd7885 100644 (file)
@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
        p_nls = load_nls("koi8-u");
 
        if (p_nls) {
-               table.charset2upper = p_nls->charset2upper;
-               table.charset2lower = p_nls->charset2lower;
+               pax_open_kernel();
+               *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
+               *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
+               pax_close_kernel();
                return register_nls(&table);
        }
 
index bff8567aa42d1b04cd85e6f2863de3b1f4d9a2db..83281c61a31c1cdf5c4fd1faf89b284d148c0b0d 100644 (file)
@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
 
        fd = fanotify_event_metadata.fd;
        ret = -EFAULT;
-       if (copy_to_user(buf, &fanotify_event_metadata,
-                        fanotify_event_metadata.event_len))
+       if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
+           copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
                goto out_close_fd;
 
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
index a95d8e037aebe24ba36421861d3abaad6f7dfc89..a91a5fdb58b6965d5ac92d88e94e78620b1bc4e1 100644 (file)
@@ -48,7 +48,7 @@
 #include <linux/fsnotify_backend.h>
 #include "fsnotify.h"
 
-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
 
 /**
  * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
  */
 u32 fsnotify_get_cookie(void)
 {
-       return atomic_inc_return(&fsnotify_sync_cookie);
+       return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
 }
 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
 
index 9e38dafa3bc78ec71acc7acec733d9cd52c4f40a..5727cae8e3ae61b940201c00e45c7aef0882bbb6 100644 (file)
@@ -1310,7 +1310,7 @@ find_next_index_buffer:
        ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
                        ~(s64)(ndir->itype.index.block_size - 1)));
        /* Bounds checks. */
-       if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
+       if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
                ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
                                "inode 0x%lx or driver bug.", vdir->i_ino);
                goto err_out;
index 643faa44f22b0b0a97b493cbbce0db6f745670d2..ef9027ec74f3b53ffc9ce8ca48e85ed195d04cab 100644 (file)
@@ -1280,7 +1280,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
        char *addr;
        size_t total = 0;
        unsigned len;
-       int left;
+       unsigned left;
 
        do {
                len = PAGE_CACHE_SIZE - ofs;
index 9e1e112074fb7cd8c7d5de10b43ac60af686017b..241a52ad355b207d26782741452a2f046625ef09 100644 (file)
@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
                if (!silent)
                        ntfs_error(sb, "Primary boot sector is invalid.");
        } else if (!silent)
-               ntfs_error(sb, read_err_str, "primary");
+               ntfs_error(sb, read_err_str, "%s", "primary");
        if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
                if (bh_primary)
                        brelse(bh_primary);
@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
                        goto hotfix_primary_boot_sector;
                brelse(bh_backup);
        } else if (!silent)
-               ntfs_error(sb, read_err_str, "backup");
+               ntfs_error(sb, read_err_str, "%s", "backup");
        /* Try to read NT3.51- backup boot sector. */
        if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
                if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
                                        "sector.");
                brelse(bh_backup);
        } else if (!silent)
-               ntfs_error(sb, read_err_str, "backup");
+               ntfs_error(sb, read_err_str, "%s", "backup");
        /* We failed. Cleanup and return. */
        if (bh_primary)
                brelse(bh_primary);
index 0440134556216d4b12a00168253b9b9446ff7312..d52c93adbc9cf08ecd73ce659db39652f29afc01 100644 (file)
@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
                goto bail;
        }
 
-       atomic_inc(&osb->alloc_stats.moves);
+       atomic_inc_unchecked(&osb->alloc_stats.moves);
 
 bail:
        if (handle)
index 7d6b7d09045292887c7f0411c1a6672cf81f3db6..5fb529a86c14940374200df265460b2f8b6516a4 100644 (file)
@@ -242,11 +242,11 @@ enum ocfs2_vol_state
 
 struct ocfs2_alloc_stats
 {
-       atomic_t moves;
-       atomic_t local_data;
-       atomic_t bitmap_data;
-       atomic_t bg_allocs;
-       atomic_t bg_extends;
+       atomic_unchecked_t moves;
+       atomic_unchecked_t local_data;
+       atomic_unchecked_t bitmap_data;
+       atomic_unchecked_t bg_allocs;
+       atomic_unchecked_t bg_extends;
 };
 
 enum ocfs2_local_alloc_state
index 0cb889a17ae14db8ffc7e0688543be132a126ea6..6a26b2487ddcf9016146ae782fafeadb5a2f9093 100644 (file)
@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
                                mlog_errno(status);
                        goto bail;
                }
-               atomic_inc(&osb->alloc_stats.bg_extends);
+               atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
 
                /* You should never ask for this much metadata */
                BUG_ON(bits_wanted >
@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
                mlog_errno(status);
                goto bail;
        }
-       atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+       atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
 
        *suballoc_loc = res.sr_bg_blkno;
        *suballoc_bit_start = res.sr_bit_offset;
@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
        trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
                                           res->sr_bits);
 
-       atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+       atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
 
        BUG_ON(res->sr_bits != 1);
 
@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
                mlog_errno(status);
                goto bail;
        }
-       atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+       atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
 
        BUG_ON(res.sr_bits != 1);
 
@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
                                                      cluster_start,
                                                      num_clusters);
                if (!status)
-                       atomic_inc(&osb->alloc_stats.local_data);
+                       atomic_inc_unchecked(&osb->alloc_stats.local_data);
        } else {
                if (min_clusters > (osb->bitmap_cpg - 1)) {
                        /* The only paths asking for contiguousness
@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
                                ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
                                                                 res.sr_bg_blkno,
                                                                 res.sr_bit_offset);
-                       atomic_inc(&osb->alloc_stats.bitmap_data);
+                       atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
                        *num_clusters = res.sr_bits;
                }
        }
index 83723179e1eccf6ec37edae769841285de12999a..ec86e79b273679456678bd38d0eaba95c53b5ee3 100644 (file)
@@ -306,11 +306,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
                        "%10s => GlobalAllocs: %d  LocalAllocs: %d  "
                        "SubAllocs: %d  LAWinMoves: %d  SAExtends: %d\n",
                        "Stats",
-                       atomic_read(&osb->alloc_stats.bitmap_data),
-                       atomic_read(&osb->alloc_stats.local_data),
-                       atomic_read(&osb->alloc_stats.bg_allocs),
-                       atomic_read(&osb->alloc_stats.moves),
-                       atomic_read(&osb->alloc_stats.bg_extends));
+                       atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
+                       atomic_read_unchecked(&osb->alloc_stats.local_data),
+                       atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
+                       atomic_read_unchecked(&osb->alloc_stats.moves),
+                       atomic_read_unchecked(&osb->alloc_stats.bg_extends));
 
        out += snprintf(buf + out, len - out,
                        "%10s => State: %u  Descriptor: %llu  Size: %u bits  "
@@ -2113,11 +2113,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
 
        mutex_init(&osb->system_file_mutex);
 
-       atomic_set(&osb->alloc_stats.moves, 0);
-       atomic_set(&osb->alloc_stats.local_data, 0);
-       atomic_set(&osb->alloc_stats.bitmap_data, 0);
-       atomic_set(&osb->alloc_stats.bg_allocs, 0);
-       atomic_set(&osb->alloc_stats.bg_extends, 0);
+       atomic_set_unchecked(&osb->alloc_stats.moves, 0);
+       atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
+       atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
+       atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
+       atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
 
        /* Copy the blockcheck stats from the superblock probe */
        osb->osb_ecc_stats = *stats;
index 813be037b412907e040fe2b94861cfcbf14fc91f..781941d3c045febc1384321e251845e257f72fee 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -32,6 +32,8 @@
 #include <linux/dnotify.h>
 #include <linux/compat.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/fs.h>
 #include "internal.h"
 
 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
        error = locks_verify_truncate(inode, NULL, length);
        if (!error)
                error = security_path_truncate(path);
+       if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
+               error = -EACCES;
        if (!error)
                error = do_truncate(path->dentry, length, 0, NULL);
 
@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
        error = locks_verify_truncate(inode, f.file, length);
        if (!error)
                error = security_path_truncate(&f.file->f_path);
+       if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
+               error = -EACCES;
        if (!error)
                error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
        sb_end_write(inode->i_sb);
@@ -392,6 +398,9 @@ retry:
        if (__mnt_is_readonly(path.mnt))
                res = -EROFS;
 
+       if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
+               res = -EACCES;
+
 out_path_release:
        path_put(&path);
        if (retry_estale(res, lookup_flags)) {
@@ -423,6 +432,8 @@ retry:
        if (error)
                goto dput_and_out;
 
+       gr_log_chdir(path.dentry, path.mnt);
+
        set_fs_pwd(current->fs, &path);
 
 dput_and_out:
@@ -452,6 +463,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
                goto out_putf;
 
        error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
+
+       if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
+               error = -EPERM;
+
+       if (!error)
+               gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
+
        if (!error)
                set_fs_pwd(current->fs, &f.file->f_path);
 out_putf:
@@ -481,7 +499,13 @@ retry:
        if (error)
                goto dput_and_out;
 
+       if (gr_handle_chroot_chroot(path.dentry, path.mnt))
+               goto dput_and_out;
+
        set_fs_root(current->fs, &path);
+
+       gr_handle_chroot_chdir(&path);
+
        error = 0;
 dput_and_out:
        path_put(&path);
@@ -505,6 +529,16 @@ static int chmod_common(struct path *path, umode_t mode)
                return error;
 retry_deleg:
        mutex_lock(&inode->i_mutex);
+
+       if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
+               error = -EACCES;
+               goto out_unlock;
+       }
+       if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
+               error = -EACCES;
+               goto out_unlock;
+       }
+
        error = security_path_chmod(path, mode);
        if (error)
                goto out_unlock;
@@ -570,6 +604,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
        uid = make_kuid(current_user_ns(), user);
        gid = make_kgid(current_user_ns(), group);
 
+       if (!gr_acl_handle_chown(path->dentry, path->mnt))
+               return -EACCES;
+
        newattrs.ia_valid =  ATTR_CTIME;
        if (user != (uid_t) -1) {
                if (!uid_valid(uid))
@@ -1014,6 +1051,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
                } else {
                        fsnotify_open(f);
                        fd_install(fd, f);
+                       trace_do_sys_open(tmp->name, flags, mode);
                }
        }
        putname(tmp);
index 21981e58e2a634c09b9ebb9b327860d849fb6b53..3d5f55c4dcce3cadbcffac1f370cd4aa0ffc0ad7 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
 
 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
 {
-       if (pipe->files)
+       if (atomic_read(&pipe->files))
                mutex_lock_nested(&pipe->mutex, subclass);
 }
 
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
 
 void pipe_unlock(struct pipe_inode_info *pipe)
 {
-       if (pipe->files)
+       if (atomic_read(&pipe->files))
                mutex_unlock(&pipe->mutex);
 }
 EXPORT_SYMBOL(pipe_unlock);
@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
                }
                if (bufs)       /* More to do? */
                        continue;
-               if (!pipe->writers)
+               if (!atomic_read(&pipe->writers))
                        break;
-               if (!pipe->waiting_writers) {
+               if (!atomic_read(&pipe->waiting_writers)) {
                        /* syscall merging: Usually we must not sleep
                         * if O_NONBLOCK is set, or if we got some data.
                         * But if a writer sleeps in kernel space, then
@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
 
        __pipe_lock(pipe);
 
-       if (!pipe->readers) {
+       if (!atomic_read(&pipe->readers)) {
                send_sig(SIGPIPE, current, 0);
                ret = -EPIPE;
                goto out;
@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
        for (;;) {
                int bufs;
 
-               if (!pipe->readers) {
+               if (!atomic_read(&pipe->readers)) {
                        send_sig(SIGPIPE, current, 0);
                        if (!ret)
                                ret = -EPIPE;
@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
                        kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                        do_wakeup = 0;
                }
-               pipe->waiting_writers++;
+               atomic_inc(&pipe->waiting_writers);
                pipe_wait(pipe);
-               pipe->waiting_writers--;
+               atomic_dec(&pipe->waiting_writers);
        }
 out:
        __pipe_unlock(pipe);
@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
        mask = 0;
        if (filp->f_mode & FMODE_READ) {
                mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
-               if (!pipe->writers && filp->f_version != pipe->w_counter)
+               if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
                        mask |= POLLHUP;
        }
 
@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
                 * Most Unices do not set POLLERR for FIFOs but on Linux they
                 * behave exactly like pipes for poll().
                 */
-               if (!pipe->readers)
+               if (!atomic_read(&pipe->readers))
                        mask |= POLLERR;
        }
 
@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
        int kill = 0;
 
        spin_lock(&inode->i_lock);
-       if (!--pipe->files) {
+       if (atomic_dec_and_test(&pipe->files)) {
                inode->i_pipe = NULL;
                kill = 1;
        }
@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
 
        __pipe_lock(pipe);
        if (file->f_mode & FMODE_READ)
-               pipe->readers--;
+               atomic_dec(&pipe->readers);
        if (file->f_mode & FMODE_WRITE)
-               pipe->writers--;
+               atomic_dec(&pipe->writers);
 
-       if (pipe->readers || pipe->writers) {
+       if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
                wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
        kfree(pipe);
 }
 
-static struct vfsmount *pipe_mnt __read_mostly;
+struct vfsmount *pipe_mnt __read_mostly;
 
 /*
  * pipefs_dname() is called from d_path().
@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
                goto fail_iput;
 
        inode->i_pipe = pipe;
-       pipe->files = 2;
-       pipe->readers = pipe->writers = 1;
+       atomic_set(&pipe->files, 2);
+       atomic_set(&pipe->readers, 1);
+       atomic_set(&pipe->writers, 1);
        inode->i_fop = &pipefifo_fops;
 
        /*
@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
        spin_lock(&inode->i_lock);
        if (inode->i_pipe) {
                pipe = inode->i_pipe;
-               pipe->files++;
+               atomic_inc(&pipe->files);
                spin_unlock(&inode->i_lock);
        } else {
                spin_unlock(&inode->i_lock);
                pipe = alloc_pipe_info();
                if (!pipe)
                        return -ENOMEM;
-               pipe->files = 1;
+               atomic_set(&pipe->files, 1);
                spin_lock(&inode->i_lock);
                if (unlikely(inode->i_pipe)) {
-                       inode->i_pipe->files++;
+                       atomic_inc(&inode->i_pipe->files);
                        spin_unlock(&inode->i_lock);
                        free_pipe_info(pipe);
                        pipe = inode->i_pipe;
@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
         *  opened, even when there is no process writing the FIFO.
         */
                pipe->r_counter++;
-               if (pipe->readers++ == 0)
+               if (atomic_inc_return(&pipe->readers) == 1)
                        wake_up_partner(pipe);
 
-               if (!is_pipe && !pipe->writers) {
+               if (!is_pipe && !atomic_read(&pipe->writers)) {
                        if ((filp->f_flags & O_NONBLOCK)) {
                                /* suppress POLLHUP until we have
                                 * seen a writer */
@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
         *  errno=ENXIO when there is no process reading the FIFO.
         */
                ret = -ENXIO;
-               if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
+               if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
                        goto err;
 
                pipe->w_counter++;
-               if (!pipe->writers++)
+               if (atomic_inc_return(&pipe->writers) == 1)
                        wake_up_partner(pipe);
 
-               if (!is_pipe && !pipe->readers) {
+               if (!is_pipe && !atomic_read(&pipe->readers)) {
                        if (wait_for_partner(pipe, &pipe->r_counter))
                                goto err_wr;
                }
@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
         *  the process can at least talk to itself.
         */
 
-               pipe->readers++;
-               pipe->writers++;
+               atomic_inc(&pipe->readers);
+               atomic_inc(&pipe->writers);
                pipe->r_counter++;
                pipe->w_counter++;
-               if (pipe->readers == 1 || pipe->writers == 1)
+               if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
                        wake_up_partner(pipe);
                break;
 
@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
        return 0;
 
 err_rd:
-       if (!--pipe->readers)
+       if (atomic_dec_and_test(&pipe->readers))
                wake_up_interruptible(&pipe->wait);
        ret = -ERESTARTSYS;
        goto err;
 
 err_wr:
-       if (!--pipe->writers)
+       if (atomic_dec_and_test(&pipe->writers))
                wake_up_interruptible(&pipe->wait);
        ret = -ERESTARTSYS;
        goto err;
index 0855f772cd41599d6c1d1091e7da616d32cccf53..6787d50905c9ce68c3b3fc04cf94eaea322094e6 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/xattr.h>
 #include <linux/export.h>
 #include <linux/user_namespace.h>
+#include <linux/grsecurity.h>
 
 struct posix_acl **acl_by_type(struct inode *inode, int type)
 {
@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
                }
        }
         if (mode_p)
-                *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
+                *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
         return not_equiv;
 }
 EXPORT_SYMBOL(posix_acl_equiv_mode);
@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
                mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
        }
 
-       *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
+       *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
         return not_equiv;
 }
 
@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
        struct posix_acl *clone = posix_acl_clone(*acl, gfp);
        int err = -ENOMEM;
        if (clone) {
+               *mode_p &= ~gr_acl_umask();
+
                err = posix_acl_create_masq(clone, mode_p);
                if (err < 0) {
                        posix_acl_release(clone);
@@ -659,11 +662,12 @@ struct posix_acl *
 posix_acl_from_xattr(struct user_namespace *user_ns,
                     const void *value, size_t size)
 {
-       posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
-       posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
+       const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
+       const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
        int count;
        struct posix_acl *acl;
        struct posix_acl_entry *acl_e;
+       umode_t umask = gr_acl_umask();
 
        if (!value)
                return NULL;
@@ -689,12 +693,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
 
                switch(acl_e->e_tag) {
                        case ACL_USER_OBJ:
+                               acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
+                               break;
                        case ACL_GROUP_OBJ:
                        case ACL_MASK:
+                               acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
+                               break;
                        case ACL_OTHER:
+                               acl_e->e_perm &= ~(umask & S_IRWXO);
                                break;
 
                        case ACL_USER:
+                               acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
                                acl_e->e_uid =
                                        make_kuid(user_ns,
                                                  le32_to_cpu(entry->e_id));
@@ -702,6 +712,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
                                        goto fail;
                                break;
                        case ACL_GROUP:
+                               acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
                                acl_e->e_gid =
                                        make_kgid(user_ns,
                                                  le32_to_cpu(entry->e_id));
index 2183fcf41d5933d576724a4016cccd3adb4ce645..3c32a98537d02423e05eeee5bebba60525ac31d8 100644 (file)
@@ -30,7 +30,7 @@ config PROC_FS
 
 config PROC_KCORE
        bool "/proc/kcore support" if !ARM
-       depends on PROC_FS && MMU
+       depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
        help
          Provides a virtual ELF core file of the live kernel.  This can
          be read with gdb and other ELF tools.  No modifications can be
@@ -38,8 +38,8 @@ config PROC_KCORE
 
 config PROC_VMCORE
        bool "/proc/vmcore support"
-       depends on PROC_FS && CRASH_DUMP
-       default y
+       depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
+       default n
         help
         Exports the dump image of crashed kernel in ELF format.
 
@@ -63,8 +63,8 @@ config PROC_SYSCTL
          limited in memory.
 
 config PROC_PAGE_MONITOR
-       default y
-       depends on PROC_FS && MMU
+       default n
+       depends on PROC_FS && MMU && !GRKERNSEC
        bool "Enable /proc page monitoring" if EXPERT
        help
          Various /proc files exist to monitor process memory utilization:
index bd117d065b822fff63c4b9a1fce8b7704d49d3f9..e6872d796ffc4908f29643dc23944b447aa04fdd 100644 (file)
@@ -60,6 +60,7 @@
 #include <linux/tty.h>
 #include <linux/string.h>
 #include <linux/mman.h>
+#include <linux/grsecurity.h>
 #include <linux/proc_fs.h>
 #include <linux/ioport.h>
 #include <linux/uaccess.h>
@@ -344,6 +345,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
        seq_putc(m, '\n');
 }
 
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+static inline void task_pax(struct seq_file *m, struct task_struct *p)
+{
+       if (p->mm)
+               seq_printf(m, "PaX:\t%c%c%c%c%c\n",
+                          p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
+                          p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
+                          p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
+                          p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
+                          p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
+       else
+               seq_printf(m, "PaX:\t-----\n");
+}
+#endif
+
 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
                        struct pid *pid, struct task_struct *task)
 {
@@ -362,9 +378,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
        task_cpus_allowed(m, task);
        cpuset_task_status_allowed(m, task);
        task_context_switch_counts(m, task);
+
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+       task_pax(m, task);
+#endif
+
+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
+       task_grsec_rbac(m, task);
+#endif
+
        return 0;
 }
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
+                            (_mm->pax_flags & MF_PAX_RANDMMAP || \
+                             _mm->pax_flags & MF_PAX_SEGMEXEC))
+#endif
+
 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                        struct pid *pid, struct task_struct *task, int whole)
 {
@@ -386,6 +417,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
        char tcomm[sizeof(task->comm)];
        unsigned long flags;
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+       if (current->exec_id != m->exec_id) {
+               gr_log_badprocpid("stat");
+               return 0;
+       }
+#endif
+
        state = *get_task_state(task);
        vsize = eip = esp = 0;
        permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
@@ -456,6 +494,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                gtime = task_gtime(task);
        }
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+       if (PAX_RAND_FLAGS(mm)) {
+               eip = 0;
+               esp = 0;
+               wchan = 0;
+       }
+#endif
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+       wchan = 0;
+       eip =0;
+       esp =0;
+#endif
+
        /* scale priority and nice values from timeslices to -20..20 */
        /* to make it look like a "normal" Unix priority/nice value  */
        priority = task_prio(task);
@@ -487,9 +538,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
        seq_put_decimal_ull(m, ' ', vsize);
        seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
        seq_put_decimal_ull(m, ' ', rsslim);
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+       seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
+       seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
+       seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
+#else
        seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
        seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
        seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
+#endif
        seq_put_decimal_ull(m, ' ', esp);
        seq_put_decimal_ull(m, ' ', eip);
        /* The signal information here is obsolete.
@@ -511,7 +568,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
        seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
        seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
 
-       if (mm && permitted) {
+       if (mm && permitted
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+               && !PAX_RAND_FLAGS(mm)
+#endif
+          ) {
                seq_put_decimal_ull(m, ' ', mm->start_data);
                seq_put_decimal_ull(m, ' ', mm->end_data);
                seq_put_decimal_ull(m, ' ', mm->start_brk);
@@ -549,8 +610,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
                        struct pid *pid, struct task_struct *task)
 {
        unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
-       struct mm_struct *mm = get_task_mm(task);
+       struct mm_struct *mm;
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+       if (current->exec_id != m->exec_id) {
+               gr_log_badprocpid("statm");
+               return 0;
+       }
+#endif
+       mm = get_task_mm(task);
        if (mm) {
                size = task_statm(mm, &shared, &text, &data, &resident);
                mmput(mm);
@@ -573,6 +641,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
        return 0;
 }
 
+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
+int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
+{
+       unsigned long flags;
+       u32 curr_ip = 0;
+
+       if (lock_task_sighand(task, &flags)) {
+               curr_ip = task->signal->curr_ip;
+               unlock_task_sighand(task, &flags);
+       }
+       return seq_printf(m, "%pI4\n", &curr_ip);
+}
+#endif
+
 #ifdef CONFIG_CHECKPOINT_RESTORE
 static struct pid *
 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
index 3f3d7aeb071257a15afaa9a65f88642e307f80f3..68de1091864700d0af50daa6d02113962495e068 100644 (file)
@@ -113,6 +113,14 @@ struct pid_entry {
        union proc_op op;
 };
 
+struct getdents_callback {
+       struct linux_dirent __user * current_dir;
+       struct linux_dirent __user * previous;
+       struct file * file;
+       int count;
+       int error;
+};
+
 #define NOD(NAME, MODE, IOP, FOP, OP) {                        \
        .name = (NAME),                                 \
        .len  = sizeof(NAME) - 1,                       \
@@ -208,12 +216,28 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
        return 0;
 }
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
+                            (_mm->pax_flags & MF_PAX_RANDMMAP || \
+                             _mm->pax_flags & MF_PAX_SEGMEXEC))
+#endif
+
 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
                         struct pid *pid, struct task_struct *task)
 {
        struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
        if (mm && !IS_ERR(mm)) {
                unsigned int nwords = 0;
+
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+               /* allow if we're currently ptracing this task */
+               if (PAX_RAND_FLAGS(mm) &&
+                   (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
+                       mmput(mm);
+                       return 0;
+               }
+#endif
+
                do {
                        nwords += 2;
                } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
@@ -225,7 +249,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
 }
 
 
-#ifdef CONFIG_KALLSYMS
+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
 /*
  * Provides a wchan file via kallsyms in a proper one-value-per-file format.
  * Returns the resolved symbol.  If that fails, simply return the address.
@@ -265,7 +289,7 @@ static void unlock_trace(struct task_struct *task)
        mutex_unlock(&task->signal->cred_guard_mutex);
 }
 
-#ifdef CONFIG_STACKTRACE
+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
 
 #define MAX_STACK_TRACE_DEPTH  64
 
@@ -456,7 +480,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
        return 0;
 }
 
-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
                            struct pid *pid, struct task_struct *task)
 {
@@ -486,7 +510,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
 /************************************************************************/
 
 /* permission checks */
-static int proc_fd_access_allowed(struct inode *inode)
+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
 {
        struct task_struct *task;
        int allowed = 0;
@@ -496,7 +520,10 @@ static int proc_fd_access_allowed(struct inode *inode)
         */
        task = get_proc_task(inode);
        if (task) {
-               allowed = ptrace_may_access(task, PTRACE_MODE_READ);
+               if (log)
+                       allowed = ptrace_may_access(task, PTRACE_MODE_READ);
+               else
+                       allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
                put_task_struct(task);
        }
        return allowed;
@@ -527,10 +554,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
                                 struct task_struct *task,
                                 int hide_pid_min)
 {
+       if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
+               return false;
+
+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+       rcu_read_lock();
+       {
+               const struct cred *tmpcred = current_cred();
+               const struct cred *cred = __task_cred(task);
+
+               if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+                       || in_group_p(grsec_proc_gid)
+#endif
+               ) {
+                       rcu_read_unlock();
+                       return true;
+               }
+       }
+       rcu_read_unlock();
+
+       if (!pid->hide_pid)
+               return false;
+#endif
+
        if (pid->hide_pid < hide_pid_min)
                return true;
        if (in_group_p(pid->pid_gid))
                return true;
+
        return ptrace_may_access(task, PTRACE_MODE_READ);
 }
 
@@ -548,7 +600,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
        put_task_struct(task);
 
        if (!has_perms) {
+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+               {
+#else
                if (pid->hide_pid == 2) {
+#endif
                        /*
                         * Let's make getdents(), stat(), and open()
                         * consistent with each other.  If a process
@@ -609,6 +665,10 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
 
        if (task) {
                mm = mm_access(task, mode);
+               if (!IS_ERR_OR_NULL(mm) && gr_acl_handle_procpidmem(task)) {
+                       mmput(mm);
+                       mm = ERR_PTR(-EPERM);
+               }
                put_task_struct(task);
 
                if (!IS_ERR_OR_NULL(mm)) {
@@ -630,6 +690,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
                return PTR_ERR(mm);
 
        file->private_data = mm;
+
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+       file->f_version = current->exec_id;
+#endif
+
        return 0;
 }
 
@@ -651,6 +716,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
        ssize_t copied;
        char *page;
 
+#ifdef CONFIG_GRKERNSEC
+       if (write)
+               return -EPERM;
+#endif
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+       if (file->f_version != current->exec_id) {
+               gr_log_badprocpid("mem");
+               return 0;
+       }
+#endif
+
        if (!mm)
                return 0;
 
@@ -663,7 +739,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
                goto free;
 
        while (count > 0) {
-               int this_len = min_t(int, count, PAGE_SIZE);
+               ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
 
                if (write && copy_from_user(page, buf, this_len)) {
                        copied = -EFAULT;
@@ -755,6 +831,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
        if (!mm)
                return 0;
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+       if (file->f_version != current->exec_id) {
+               gr_log_badprocpid("environ");
+               return 0;
+       }
+#endif
+
        page = (char *)__get_free_page(GFP_TEMPORARY);
        if (!page)
                return -ENOMEM;
@@ -764,7 +847,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
                goto free;
        while (count > 0) {
                size_t this_len, max_len;
-               int retval;
+               ssize_t retval;
 
                if (src >= (mm->env_end - mm->env_start))
                        break;
@@ -1378,7 +1461,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
        int error = -EACCES;
 
        /* Are we allowed to snoop on the tasks file descriptors? */
-       if (!proc_fd_access_allowed(inode))
+       if (!proc_fd_access_allowed(inode, 0))
                goto out;
 
        error = PROC_I(inode)->op.proc_get_link(dentry, &path);
@@ -1422,8 +1505,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
        struct path path;
 
        /* Are we allowed to snoop on the tasks file descriptors? */
-       if (!proc_fd_access_allowed(inode))
-               goto out;
+       /* logging this is needed for learning on chromium to work properly,
+          but we don't want to flood the logs from 'ps' which does a readlink
+          on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
+          CAP_SYS_PTRACE as it's not necessary for its basic functionality
+        */
+       if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
+               if (!proc_fd_access_allowed(inode,0))
+                       goto out;
+       } else {
+               if (!proc_fd_access_allowed(inode,1))
+                       goto out;
+       }
 
        error = PROC_I(inode)->op.proc_get_link(dentry, &path);
        if (error)
@@ -1473,7 +1566,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
                rcu_read_lock();
                cred = __task_cred(task);
                inode->i_uid = cred->euid;
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+               inode->i_gid = grsec_proc_gid;
+#else
                inode->i_gid = cred->egid;
+#endif
                rcu_read_unlock();
        }
        security_task_to_inode(task, inode);
@@ -1509,10 +1606,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
                        return -ENOENT;
                }
                if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+                   (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+                   (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
+#endif
                    task_dumpable(task)) {
                        cred = __task_cred(task);
                        stat->uid = cred->euid;
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+                       stat->gid = grsec_proc_gid;
+#else
                        stat->gid = cred->egid;
+#endif
                }
        }
        rcu_read_unlock();
@@ -1550,11 +1656,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
 
        if (task) {
                if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+                   (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+                   (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
+#endif
                    task_dumpable(task)) {
                        rcu_read_lock();
                        cred = __task_cred(task);
                        inode->i_uid = cred->euid;
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+                       inode->i_gid = grsec_proc_gid;
+#else
                        inode->i_gid = cred->egid;
+#endif
                        rcu_read_unlock();
                } else {
                        inode->i_uid = GLOBAL_ROOT_UID;
@@ -2085,6 +2200,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
        if (!task)
                goto out_no_task;
 
+       if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
+               goto out;
+
        /*
         * Yes, it does not scale. And it should not. Don't add
         * new entries into /proc/<tgid>/ without very good reasons.
@@ -2115,6 +2233,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
        if (!task)
                return -ENOENT;
 
+       if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
+               goto out;
+
        if (!dir_emit_dots(file, ctx))
                goto out;
 
@@ -2557,7 +2678,7 @@ static const struct pid_entry tgid_base_stuff[] = {
        REG("autogroup",  S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
 #endif
        REG("comm",      S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
        ONE("syscall",    S_IRUSR, proc_pid_syscall),
 #endif
        ONE("cmdline",    S_IRUGO, proc_pid_cmdline),
@@ -2582,10 +2703,10 @@ static const struct pid_entry tgid_base_stuff[] = {
 #ifdef CONFIG_SECURITY
        DIR("attr",       S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
 #endif
-#ifdef CONFIG_KALLSYMS
+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
        ONE("wchan",      S_IRUGO, proc_pid_wchan),
 #endif
-#ifdef CONFIG_STACKTRACE
+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
        ONE("stack",      S_IRUSR, proc_pid_stack),
 #endif
 #ifdef CONFIG_SCHEDSTATS
@@ -2619,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
 #ifdef CONFIG_HARDWALL
        ONE("hardwall",   S_IRUGO, proc_pid_hardwall),
 #endif
+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
+       ONE("ipaddr",     S_IRUSR, proc_pid_ipaddr),
+#endif
 #ifdef CONFIG_USER_NS
        REG("uid_map",    S_IRUGO|S_IWUSR, proc_uid_map_operations),
        REG("gid_map",    S_IRUGO|S_IWUSR, proc_gid_map_operations),
@@ -2751,7 +2875,14 @@ static int proc_pid_instantiate(struct inode *dir,
        if (!inode)
                goto out;
 
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+       inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+       inode->i_gid = grsec_proc_gid;
+       inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
+#else
        inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
+#endif
        inode->i_op = &proc_tgid_base_inode_operations;
        inode->i_fop = &proc_tgid_base_operations;
        inode->i_flags|=S_IMMUTABLE;
@@ -2789,7 +2920,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
        if (!task)
                goto out;
 
+       if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
+               goto out_put_task;
+
        result = proc_pid_instantiate(dir, dentry, task, NULL);
+out_put_task:
        put_task_struct(task);
 out:
        return ERR_PTR(result);
@@ -2903,7 +3038,7 @@ static const struct pid_entry tid_base_stuff[] = {
        REG("sched",     S_IRUGO|S_IWUSR, proc_pid_sched_operations),
 #endif
        REG("comm",      S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
        ONE("syscall",   S_IRUSR, proc_pid_syscall),
 #endif
        ONE("cmdline",   S_IRUGO, proc_pid_cmdline),
@@ -2930,10 +3065,10 @@ static const struct pid_entry tid_base_stuff[] = {
 #ifdef CONFIG_SECURITY
        DIR("attr",      S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
 #endif
-#ifdef CONFIG_KALLSYMS
+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
        ONE("wchan",     S_IRUGO, proc_pid_wchan),
 #endif
-#ifdef CONFIG_STACKTRACE
+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
        ONE("stack",      S_IRUSR, proc_pid_stack),
 #endif
 #ifdef CONFIG_SCHEDSTATS
index cbd82dff7e81aeecadacb219ba96ec01ae4f5202..c0407d23e87916b17aab1e07683a4bf62774f032 100644 (file)
@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
 
 static int __init proc_cmdline_init(void)
 {
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+       proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
+#else
        proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
+#endif
        return 0;
 }
 fs_initcall(proc_cmdline_init);
index 50493edc30e56e157a6f37407cce84ff60a05de2..248166b06966830d4a8525ef45b4d692e5b49de5 100644 (file)
@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
 
 static int __init proc_devices_init(void)
 {
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+       proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
+#else
        proc_create("devices", 0, NULL, &proc_devinfo_operations);
+#endif
        return 0;
 }
 fs_initcall(proc_devices_init);
index 8e5ad83b629aad2ebe8f1d1bc01c764147f69be1..1f07a8c4639a1ea6f31a50edf86a2c3f663ab066 100644 (file)
@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
        if (!task)
                return -ENOENT;
 
-       files = get_files_struct(task);
+       if (!gr_acl_handle_procpidmem(task))
+               files = get_files_struct(task);
        put_task_struct(task);
 
        if (files) {
@@ -284,11 +285,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
  */
 int proc_fd_permission(struct inode *inode, int mask)
 {
+       struct task_struct *task;
        int rv = generic_permission(inode, mask);
-       if (rv == 0)
-               return 0;
+
        if (task_tgid(current) == proc_pid(inode))
                rv = 0;
+
+       task = get_proc_task(inode);
+       if (task == NULL)
+               return rv;
+
+       if (gr_acl_handle_procpidmem(task))
+               rv = -EACCES;
+
+       put_task_struct(task);
+
        return rv;
 }
 
index b502bba0f9fddc4c36b2f7093c71ea654ee17e3b..849e2164018ddb013ce8b3be53dfeb0c61dbcd53 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/bitops.h>
 #include <linux/spinlock.h>
 #include <linux/completion.h>
+#include <linux/grsecurity.h>
 #include <asm/uaccess.h>
 
 #include "internal.h"
@@ -253,6 +254,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
        return proc_lookup_de(PDE(dir), dir, dentry);
 }
 
+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
+               unsigned int flags)
+{
+       if (gr_proc_is_restricted())
+               return ERR_PTR(-EACCES);
+
+       return proc_lookup_de(PDE(dir), dir, dentry);
+}
+
 /*
  * This returns non-zero if at EOF, so that the /proc
  * root directory can use this and check if it should
@@ -310,6 +320,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
        return proc_readdir_de(PDE(inode), file, ctx);
 }
 
+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
+{
+       struct inode *inode = file_inode(file);
+
+       if (gr_proc_is_restricted())
+               return -EACCES;
+
+       return proc_readdir_de(PDE(inode), file, ctx);
+}
+
 /*
  * These are the generic /proc directory operations. They
  * use the in-memory "struct proc_dir_entry" tree to parse
@@ -321,6 +341,12 @@ static const struct file_operations proc_dir_operations = {
        .iterate                = proc_readdir,
 };
 
+static const struct file_operations proc_dir_restricted_operations = {
+       .llseek                 = generic_file_llseek,
+       .read                   = generic_read_dir,
+       .iterate                = proc_readdir_restrict,
+};
+
 /*
  * proc directories can do almost nothing..
  */
@@ -330,6 +356,12 @@ static const struct inode_operations proc_dir_inode_operations = {
        .setattr        = proc_notify_change,
 };
 
+static const struct inode_operations proc_dir_restricted_inode_operations = {
+       .lookup         = proc_lookup_restrict,
+       .getattr        = proc_getattr,
+       .setattr        = proc_notify_change,
+};
+
 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
 {
        int ret;
@@ -339,8 +371,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
                return ret;
 
        if (S_ISDIR(dp->mode)) {
-               dp->proc_fops = &proc_dir_operations;
-               dp->proc_iops = &proc_dir_inode_operations;
+               if (dp->restricted) {
+                       dp->proc_fops = &proc_dir_restricted_operations;
+                       dp->proc_iops = &proc_dir_restricted_inode_operations;
+               } else {
+                       dp->proc_fops = &proc_dir_operations;
+                       dp->proc_iops = &proc_dir_inode_operations;
+               }
                dir->nlink++;
        } else if (S_ISLNK(dp->mode)) {
                dp->proc_iops = &proc_link_inode_operations;
@@ -453,6 +490,27 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
 }
 EXPORT_SYMBOL_GPL(proc_mkdir_data);
 
+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
+               struct proc_dir_entry *parent, void *data)
+{
+       struct proc_dir_entry *ent;
+
+       if (mode == 0)
+               mode = S_IRUGO | S_IXUGO;
+
+       ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
+       if (ent) {
+               ent->data = data;
+               ent->restricted = 1;
+               if (proc_register(parent, ent) < 0) {
+                       kfree(ent);
+                       ent = NULL;
+               }
+       }
+       return ent;
+}
+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
+
 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
                                       struct proc_dir_entry *parent)
 {
@@ -467,6 +525,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
 }
 EXPORT_SYMBOL(proc_mkdir);
 
+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
+               struct proc_dir_entry *parent)
+{
+       return proc_mkdir_data_restrict(name, 0, parent, NULL);
+}
+EXPORT_SYMBOL(proc_mkdir_restrict);
+
 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
                                        struct proc_dir_entry *parent,
                                        const struct file_operations *proc_fops,
index 3b0f8384ab216e32733033af4cbf6954a495f27b..a0e0f63e5c17d344ac5e506b12e1067298ee37dc 100644 (file)
 #include <linux/mount.h>
 #include <linux/magic.h>
 #include <linux/namei.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 
 #include "internal.h"
 
+#ifdef CONFIG_PROC_SYSCTL
+extern const struct inode_operations proc_sys_inode_operations;
+extern const struct inode_operations proc_sys_dir_operations;
+#endif
+
 static void proc_evict_inode(struct inode *inode)
 {
        struct proc_dir_entry *de;
@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
                RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
                sysctl_head_put(head);
        }
+
+#ifdef CONFIG_PROC_SYSCTL
+       if (inode->i_op == &proc_sys_inode_operations ||
+           inode->i_op == &proc_sys_dir_operations)
+               gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
+#endif
+
 }
 
 static struct kmem_cache * proc_inode_cachep;
@@ -426,7 +439,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
                if (de->mode) {
                        inode->i_mode = de->mode;
                        inode->i_uid = de->uid;
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+                       inode->i_gid = grsec_proc_gid;
+#else
                        inode->i_gid = de->gid;
+#endif
                }
                if (de->size)
                        inode->i_size = de->size;
index c835b94c0cd3afec0bea4017ca8bacd63b32ff8e..c9e01a36279c06ffe29b415128b228031f35970a 100644 (file)
@@ -47,9 +47,10 @@ struct proc_dir_entry {
        struct completion *pde_unload_completion;
        struct list_head pde_openers;   /* who did ->open, but not ->release */
        spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
+       u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
        u8 namelen;
        char name[];
-};
+} __randomize_layout;
 
 union proc_op {
        int (*proc_get_link)(struct dentry *, struct path *);
@@ -67,7 +68,7 @@ struct proc_inode {
        struct ctl_table *sysctl_entry;
        const struct proc_ns_operations *ns_ops;
        struct inode vfs_inode;
-};
+} __randomize_layout;
 
 /*
  * General functions
@@ -155,6 +156,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
                           struct pid *, struct task_struct *);
 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
                          struct pid *, struct task_struct *);
+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
+extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
+                         struct pid *, struct task_struct *);
+#endif
 
 /*
  * base.c
@@ -179,9 +184,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
  * generic.c
  */
 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
                                     struct dentry *);
 extern int proc_readdir(struct file *, struct dir_context *);
+extern int proc_readdir_restrict(struct file *, struct dir_context *);
 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
 
 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
index a352d5703b4196112c9e581905ef467f28d4f133..cb94a5cc1ed16195802b84bf78ed28b585ce38b8 100644 (file)
@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
 
 static int __init proc_interrupts_init(void)
 {
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+       proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
+#else
        proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
+#endif
        return 0;
 }
 fs_initcall(proc_interrupts_init);
index 91a4e6426321885eaa226081be2c39ad35a95f74..cb007c07b601e94b6074c3cb3cf203cee6a046f3 100644 (file)
@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
         * the addresses in the elf_phdr on our list.
         */
        start = kc_offset_to_vaddr(*fpos - elf_buflen);
-       if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
+       tsz = PAGE_SIZE - (start & ~PAGE_MASK);
+       if (tsz > buflen)
                tsz = buflen;
-               
+
        while (buflen) {
                struct kcore_list *m;
 
@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
                        kfree(elf_buf);
                } else {
                        if (kern_addr_valid(start)) {
-                               unsigned long n;
-
-                               n = copy_to_user(buffer, (char *)start, tsz);
-                               /*
-                                * We cannot distinguish between fault on source
-                                * and fault on destination. When this happens
-                                * we clear too and hope it will trigger the
-                                * EFAULT again.
-                                */
-                               if (n) { 
-                                       if (clear_user(buffer + tsz - n,
-                                                               n))
+                               char *elf_buf;
+                               mm_segment_t oldfs;
+
+                               elf_buf = kmalloc(tsz, GFP_KERNEL);
+                               if (!elf_buf)
+                                       return -ENOMEM;
+                               oldfs = get_fs();
+                               set_fs(KERNEL_DS);
+                               if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
+                                       set_fs(oldfs);
+                                       if (copy_to_user(buffer, elf_buf, tsz)) {
+                                               kfree(elf_buf);
                                                return -EFAULT;
+                                       }
                                }
+                               set_fs(oldfs);
+                               kfree(elf_buf);
                        } else {
                                if (clear_user(buffer, tsz))
                                        return -EFAULT;
@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 
 static int open_kcore(struct inode *inode, struct file *filp)
 {
+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
+       return -EPERM;
+#endif
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
        if (kcore_need_update)
index d3ebf2e618535bd2fa7daa0bfe948cb1a9bfc062..6ad42d19377e03c7113ffc42b9368e2a2c57debb 100644 (file)
@@ -194,7 +194,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                vmi.used >> 10,
                vmi.largest_chunk >> 10
 #ifdef CONFIG_MEMORY_FAILURE
-               , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
+               , atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
 #endif
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
                , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
index d4a35746cab91f8967dc83b7466fd572a77ae8d4..b421ce97aaad9341c5d4afdaf4caf25d1847460f 100644 (file)
@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
 
        if (file) {
                seq_pad(m, ' ');
-               seq_path(m, &file->f_path, "");
+               seq_path(m, &file->f_path, "\n\\");
        }
 
        seq_putc(m, '\n');
index 1bde894bc6248f4c8eba370ae062fa6115e6a492..22ac7eb61070f2a8df9c301aeffce6bf6a5dc9fa 100644 (file)
 #include <linux/nsproxy.h>
 #include <net/net_namespace.h>
 #include <linux/seq_file.h>
+#include <linux/grsecurity.h>
 
 #include "internal.h"
 
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static struct seq_operations *ipv6_seq_ops_addr;
+
+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
+{
+       ipv6_seq_ops_addr = addr;
+}
+
+void unregister_ipv6_seq_ops_addr(void)
+{
+       ipv6_seq_ops_addr = NULL;
+}
+
+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
+#endif
+
 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
 {
        return pde->parent->data;
@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
        return maybe_get_net(PDE_NET(PDE(inode)));
 }
 
+extern const struct seq_operations dev_seq_ops;
+
 int seq_open_net(struct inode *ino, struct file *f,
                 const struct seq_operations *ops, int size)
 {
@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
 
        BUG_ON(size < sizeof(*p));
 
+       /* only permit access to /proc/net/dev */
+       if (
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+           ops != ipv6_seq_ops_addr && 
+#endif
+           ops != &dev_seq_ops && gr_proc_is_restricted())
+               return -EACCES;
+
        net = get_proc_net(ino);
        if (net == NULL)
                return -ENXIO;
@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
        int err;
        struct net *net;
 
+       if (gr_proc_is_restricted())
+               return -EACCES;
+
        err = -ENXIO;
        net = get_proc_net(inode);
        if (net == NULL)
index f92d5dd578a4a5289aab9e29dcd37b09ee775c46..26398acd523b7075a5aa8c589a53f7ca5191eb54 100644 (file)
 #include <linux/namei.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/nsproxy.h>
+#ifdef CONFIG_GRKERNSEC
+#include <net/net_namespace.h>
+#endif
 #include "internal.h"
 
+extern int gr_handle_chroot_sysctl(const int op);
+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
+                               const int op);
+
 static const struct dentry_operations proc_sys_dentry_operations;
 static const struct file_operations proc_sys_file_operations;
-static const struct inode_operations proc_sys_inode_operations;
+const struct inode_operations proc_sys_inode_operations;
 static const struct file_operations proc_sys_dir_file_operations;
-static const struct inode_operations proc_sys_dir_operations;
+const struct inode_operations proc_sys_dir_operations;
 
 void proc_sys_poll_notify(struct ctl_table_poll *poll)
 {
@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
 
        err = NULL;
        d_set_d_op(dentry, &proc_sys_dentry_operations);
+
+       gr_handle_proc_create(dentry, inode);
+
        d_add(dentry, inode);
 
 out:
@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
        struct inode *inode = file_inode(filp);
        struct ctl_table_header *head = grab_header(inode);
        struct ctl_table *table = PROC_I(inode)->sysctl_entry;
+       int op = write ? MAY_WRITE : MAY_READ;
        ssize_t error;
        size_t res;
 
@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
         * and won't be until we finish.
         */
        error = -EPERM;
-       if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
+       if (sysctl_perm(head, table, op))
                goto out;
 
        /* if that can happen at all, it should be -EINVAL, not -EISDIR */
@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
        if (!table->proc_handler)
                goto out;
 
+#ifdef CONFIG_GRKERNSEC
+       error = -EPERM;
+       if (gr_handle_chroot_sysctl(op))
+               goto out;
+       dget(filp->f_path.dentry);
+       if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
+               dput(filp->f_path.dentry);
+               goto out;
+       }
+       dput(filp->f_path.dentry);
+       if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
+               goto out;
+       if (write) {
+               if (current->nsproxy->net_ns != table->extra2) {
+                       if (!capable(CAP_SYS_ADMIN))
+                               goto out;
+               } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
+                       goto out;
+       }
+#endif
+
        /* careful: calling conventions are nasty here */
        res = count;
        error = table->proc_handler(table, write, buf, &res, ppos);
@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
                                return false;
                        } else {
                                d_set_d_op(child, &proc_sys_dentry_operations);
+
+                               gr_handle_proc_create(child, inode);
+
                                d_add(child, inode);
                        }
                } else {
@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
        if ((*pos)++ < ctx->pos)
                return true;
 
+       if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
+               return 0;
+
        if (unlikely(S_ISLNK(table->mode)))
                res = proc_sys_link_fill_cache(file, ctx, head, table);
        else
@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
        if (IS_ERR(head))
                return PTR_ERR(head);
 
+       if (table && !gr_acl_handle_hidden_file(dentry, mnt))
+               return -ENOENT;
+
        generic_fillattr(inode, stat);
        if (table)
                stat->mode = (stat->mode & S_IFMT) | table->mode;
@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
        .llseek         = generic_file_llseek,
 };
 
-static const struct inode_operations proc_sys_inode_operations = {
+const struct inode_operations proc_sys_inode_operations = {
        .permission     = proc_sys_permission,
        .setattr        = proc_sys_setattr,
        .getattr        = proc_sys_getattr,
 };
 
-static const struct inode_operations proc_sys_dir_operations = {
+const struct inode_operations proc_sys_dir_operations = {
        .lookup         = proc_sys_lookup,
        .permission     = proc_sys_permission,
        .setattr        = proc_sys_setattr,
@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
 static struct ctl_dir *new_dir(struct ctl_table_set *set,
                               const char *name, int namelen)
 {
-       struct ctl_table *table;
+       ctl_table_no_const *table;
        struct ctl_dir *new;
        struct ctl_node *node;
        char *new_name;
@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
                return NULL;
 
        node = (struct ctl_node *)(new + 1);
-       table = (struct ctl_table *)(node + 1);
+       table = (ctl_table_no_const *)(node + 1);
        new_name = (char *)(table + 2);
        memcpy(new_name, name, namelen);
        new_name[namelen] = '\0';
@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
        struct ctl_table_root *link_root)
 {
-       struct ctl_table *link_table, *entry, *link;
+       ctl_table_no_const *link_table, *link;
+       struct ctl_table *entry;
        struct ctl_table_header *links;
        struct ctl_node *node;
        char *link_name;
@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
                return NULL;
 
        node = (struct ctl_node *)(links + 1);
-       link_table = (struct ctl_table *)(node + nr_entries);
+       link_table = (ctl_table_no_const *)(node + nr_entries);
        link_name = (char *)&link_table[nr_entries + 1];
 
        for (link = link_table, entry = table; entry->procname; link++, entry++) {
@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
        struct ctl_table_header ***subheader, struct ctl_table_set *set,
        struct ctl_table *table)
 {
-       struct ctl_table *ctl_table_arg = NULL;
-       struct ctl_table *entry, *files;
+       ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
+       struct ctl_table *entry;
        int nr_files = 0;
        int nr_dirs = 0;
        int err = -ENOMEM;
@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
                        nr_files++;
        }
 
-       files = table;
        /* If there are mixed files and directories we need a new table */
        if (nr_dirs && nr_files) {
-               struct ctl_table *new;
+               ctl_table_no_const *new;
                files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
                                GFP_KERNEL);
                if (!files)
@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
        /* Register everything except a directory full of subdirectories */
        if (nr_files || !nr_dirs) {
                struct ctl_table_header *header;
-               header = __register_sysctl_table(set, path, files);
+               header = __register_sysctl_table(set, path, files ? files : table);
                if (!header) {
                        kfree(ctl_table_arg);
                        goto out;
index e74ac9f1a2c09cdec0621b244b0d9abc8de44699..35e89f4b071f5fdaa16de75af81197667fba5f6e 100644 (file)
@@ -188,7 +188,15 @@ void __init proc_root_init(void)
        proc_mkdir("openprom", NULL);
 #endif
        proc_tty_init();
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+       proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+       proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
+#endif
+#else
        proc_mkdir("bus", NULL);
+#endif
        proc_sys_init();
 }
 
index 510413eb25b8bbff35853232c5b72f0881eaf78c..34d9a8c5f08828e0d4d0856380f6552e4d20fed8 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/irqnr.h>
 #include <linux/cputime.h>
 #include <linux/tick.h>
+#include <linux/grsecurity.h>
 
 #ifndef arch_irq_stat_cpu
 #define arch_irq_stat_cpu(cpu) 0
@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
        u64 sum_softirq = 0;
        unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
        struct timespec boottime;
+       int unrestricted = 1;
+
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+       if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+               && !in_group_p(grsec_proc_gid)
+#endif
+       )
+               unrestricted = 0;
+#endif
+#endif
 
        user = nice = system = idle = iowait =
                irq = softirq = steal = 0;
@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
                nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
                system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
                idle += get_idle_time(i);
-               iowait += get_iowait_time(i);
-               irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
-               softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
-               steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
-               guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
-               guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
-               sum += kstat_cpu_irqs_sum(i);
-               sum += arch_irq_stat_cpu(i);
-
-               for (j = 0; j < NR_SOFTIRQS; j++) {
-                       unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
-
-                       per_softirq_sums[j] += softirq_stat;
-                       sum_softirq += softirq_stat;
+               if (unrestricted) {
+                       iowait += get_iowait_time(i);
+                       irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
+                       softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
+                       steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
+                       guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
+                       guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
+                       sum += kstat_cpu_irqs_sum(i);
+                       sum += arch_irq_stat_cpu(i);
+                       for (j = 0; j < NR_SOFTIRQS; j++) {
+                               unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
+
+                               per_softirq_sums[j] += softirq_stat;
+                               sum_softirq += softirq_stat;
+                       }
                }
        }
-       sum += arch_irq_stat();
+       if (unrestricted)
+               sum += arch_irq_stat();
 
        seq_puts(p, "cpu ");
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
                nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
                system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
                idle = get_idle_time(i);
-               iowait = get_iowait_time(i);
-               irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
-               softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
-               steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
-               guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
-               guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
+               if (unrestricted) {
+                       iowait = get_iowait_time(i);
+                       irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
+                       softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
+                       steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
+                       guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
+                       guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
+               }
                seq_printf(p, "cpu%d", i);
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
 
        /* sum again ? it could be updated? */
        for_each_irq_nr(j)
-               seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
+               seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs_usr(j) : 0ULL);
 
        seq_printf(p,
                "\nctxt %llu\n"
@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
                "processes %lu\n"
                "procs_running %lu\n"
                "procs_blocked %lu\n",
-               nr_context_switches(),
+               unrestricted ? nr_context_switches() : 0ULL,
                (unsigned long)jif,
-               total_forks,
-               nr_running(),
-               nr_iowait());
+               unrestricted ? total_forks : 0UL,
+               unrestricted ? nr_running() : 0UL,
+               unrestricted ? nr_iowait() : 0UL);
 
        seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
 
index 88f9b8352742c727851e8e96e7b2d8d9888ad92f..314064c5c1aabde612179ac323b3839b2569551b 100644 (file)
 #include <linux/swap.h>
 #include <linux/swapops.h>
 #include <linux/mmu_notifier.h>
+#include <linux/grsecurity.h>
 
 #include <asm/elf.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
 #include "internal.h"
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
+                            (_mm->pax_flags & MF_PAX_RANDMMAP || \
+                             _mm->pax_flags & MF_PAX_SEGMEXEC))
+#endif
+
 void task_mem(struct seq_file *m, struct mm_struct *mm)
 {
        unsigned long data, text, lib, swap;
@@ -54,8 +61,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
                "VmExe:\t%8lu kB\n"
                "VmLib:\t%8lu kB\n"
                "VmPTE:\t%8lu kB\n"
-               "VmSwap:\t%8lu kB\n",
-               hiwater_vm << (PAGE_SHIFT-10),
+               "VmSwap:\t%8lu kB\n"
+
+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
+               "CsBase:\t%8lx\nCsLim:\t%8lx\n"
+#endif
+
+               ,hiwater_vm << (PAGE_SHIFT-10),
                total_vm << (PAGE_SHIFT-10),
                mm->locked_vm << (PAGE_SHIFT-10),
                mm->pinned_vm << (PAGE_SHIFT-10),
@@ -65,7 +77,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
                mm->stack_vm << (PAGE_SHIFT-10), text, lib,
                (PTRS_PER_PTE * sizeof(pte_t) *
                 atomic_long_read(&mm->nr_ptes)) >> 10,
-               swap << (PAGE_SHIFT-10));
+               swap << (PAGE_SHIFT-10)
+
+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+               , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
+               , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
+#else
+               , mm->context.user_cs_base
+               , mm->context.user_cs_limit
+#endif
+#endif
+
+       );
 }
 
 unsigned long task_vsize(struct mm_struct *mm)
@@ -282,13 +306,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
                pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
        }
 
-       /* We don't show the stack guard page in /proc/maps */
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+       start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
+       end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
+#else
        start = vma->vm_start;
-       if (stack_guard_page_start(vma, start))
-               start += PAGE_SIZE;
        end = vma->vm_end;
-       if (stack_guard_page_end(vma, end))
-               end -= PAGE_SIZE;
+#endif
 
        seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
        seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
@@ -298,7 +322,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
                        flags & VM_WRITE ? 'w' : '-',
                        flags & VM_EXEC ? 'x' : '-',
                        flags & VM_MAYSHARE ? 's' : 'p',
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+                       PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
+#else
                        pgoff,
+#endif
                        MAJOR(dev), MINOR(dev), ino);
 
        /*
@@ -307,7 +335,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
         */
        if (file) {
                seq_pad(m, ' ');
-               seq_path(m, &file->f_path, "\n");
+               seq_path(m, &file->f_path, "\n\\");
                goto done;
        }
 
@@ -338,8 +366,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
                         * Thread stack in /proc/PID/task/TID/maps or
                         * the main process stack.
                         */
-                       if (!is_pid || (vma->vm_start <= mm->start_stack &&
-                           vma->vm_end >= mm->start_stack)) {
+                       if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
+                           (vma->vm_start <= mm->start_stack &&
+                            vma->vm_end >= mm->start_stack)) {
                                name = "[stack]";
                        } else {
                                /* Thread stack in /proc/PID/maps */
@@ -359,6 +388,12 @@ done:
 
 static int show_map(struct seq_file *m, void *v, int is_pid)
 {
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+       if (current->exec_id != m->exec_id) {
+               gr_log_badprocpid("maps");
+               return 0;
+       }
+#endif
        show_map_vma(m, v, is_pid);
        m_cache_vma(m, v);
        return 0;
@@ -629,12 +664,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
                .private = &mss,
        };
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+       if (current->exec_id != m->exec_id) {
+               gr_log_badprocpid("smaps");
+               return 0;
+       }
+#endif
        memset(&mss, 0, sizeof mss);
-       mss.vma = vma;
-       /* mmap_sem is held in m_start */
-       if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-               walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
-
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+       if (!PAX_RAND_FLAGS(vma->vm_mm)) {
+#endif
+               mss.vma = vma;
+               /* mmap_sem is held in m_start */
+               if (vma->vm_mm && !is_vm_hugetlb_page(vma))
+                       walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+       }
+#endif
        show_map_vma(m, vma, is_pid);
 
        seq_printf(m,
@@ -652,7 +698,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
                   "KernelPageSize: %8lu kB\n"
                   "MMUPageSize:    %8lu kB\n"
                   "Locked:         %8lu kB\n",
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+                  PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
+#else
                   (vma->vm_end - vma->vm_start) >> 10,
+#endif
                   mss.resident >> 10,
                   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
                   mss.shared_clean  >> 10,
@@ -1486,6 +1536,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
        char buffer[64];
        int nid;
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+       if (current->exec_id != m->exec_id) {
+               gr_log_badprocpid("numa_maps");
+               return 0;
+       }
+#endif
+
        if (!mm)
                return 0;
 
@@ -1507,11 +1564,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
                mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
        }
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+       seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
+#else
        seq_printf(m, "%08lx %s", vma->vm_start, buffer);
+#endif
 
        if (file) {
                seq_puts(m, " file=");
-               seq_path(m, &file->f_path, "\n\t= ");
+               seq_path(m, &file->f_path, "\n\t\\= ");
        } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
                seq_puts(m, " heap");
        } else {
index 599ec2e201043ea8f210a7c15a13c4e7eab22440..f1413ae33f432473a19c504f2eb2b585ad5d0d21 100644 (file)
@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
        else
                bytes += kobjsize(mm);
        
-       if (current->fs && current->fs->users > 1)
+       if (current->fs && atomic_read(&current->fs->users) > 1)
                sbytes += kobjsize(current->fs);
        else
                bytes += kobjsize(current->fs);
@@ -180,7 +180,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
 
        if (file) {
                seq_pad(m, ' ');
-               seq_path(m, &file->f_path, "");
+               seq_path(m, &file->f_path, "\n\\");
        } else if (mm) {
                pid_t tid = pid_of_stack(priv, vma, is_pid);
 
index a90d6d3541992552d30d2b4a2bac062d352f576d..d08047c3d520d074c0c8dff04c88264468541554 100644 (file)
@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
                        nr_bytes = count;
 
                /* If pfn is not ram, return zeros for sparse dump files */
-               if (pfn_is_ram(pfn) == 0)
-                       memset(buf, 0, nr_bytes);
-               else {
+               if (pfn_is_ram(pfn) == 0) {
+                       if (userbuf) {
+                               if (clear_user((char __force_user *)buf, nr_bytes))
+                                       return -EFAULT;
+                       } else
+                               memset(buf, 0, nr_bytes);
+               } else {
                        tmp = copy_oldmem_page(pfn, buf, nr_bytes,
                                                offset, userbuf);
                        if (tmp < 0)
@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
 static int copy_to(void *target, void *src, size_t size, int userbuf)
 {
        if (userbuf) {
-               if (copy_to_user((char __user *) target, src, size))
+               if (copy_to_user((char __force_user *) target, src, size))
                        return -EFAULT;
        } else {
                memcpy(target, src, size);
@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
                if (*fpos < m->offset + m->size) {
                        tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
                        start = m->paddr + *fpos - m->offset;
-                       tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
+                       tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
                        if (tmp < 0)
                                return tmp;
                        buflen -= tsz;
@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
 static ssize_t read_vmcore(struct file *file, char __user *buffer,
                           size_t buflen, loff_t *fpos)
 {
-       return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
+       return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
 }
 
 /*
index d3fb2b698800298184e72db2489f491d91948ba9..43a8140a0b477ab37553b9d636e0516c951bfa36 100644 (file)
@@ -74,7 +74,7 @@ enum {
        BYTESEX_BE,
 };
 
-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
 {
        if (sbi->s_bytesex == BYTESEX_LE)
                return le64_to_cpu((__force __le64)n);
@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
                return (__force __fs64)cpu_to_be64(n);
 }
 
-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
 {
        if (sbi->s_bytesex == BYTESEX_LE)
                return le32_to_cpu((__force __le32)n);
index bb2869f5dfd89528f6986528369438a8cc565efd..d34ada8364926e946835a99495a28517cc2531b6 100644 (file)
@@ -44,7 +44,7 @@ static struct genl_family quota_genl_family = {
 void quota_send_warning(struct kqid qid, dev_t dev,
                        const char warntype)
 {
-       static atomic_t seq;
+       static atomic_unchecked_t seq;
        struct sk_buff *skb;
        void *msg_head;
        int ret;
@@ -60,7 +60,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
                  "VFS: Not enough memory to send quota warning.\n");
                return;
        }
-       msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
+       msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
                        &quota_genl_family, 0, QUOTA_NL_C_WARNING);
        if (!msg_head) {
                printk(KERN_ERR
index c0805c93b6fa18d34b9b579d5c470af2a5a66b1b..d39f2eb1b2dc9958062fb6f790e77b88de9a95bd 100644 (file)
@@ -507,7 +507,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
 
        old_fs = get_fs();
        set_fs(get_ds());
-       p = (__force const char __user *)buf;
+       p = (const char __force_user *)buf;
        if (count > MAX_RW_COUNT)
                count =  MAX_RW_COUNT;
        if (file->f_op->write)
index ced679179cac0686407c3743cff177289bfc3959..936687bf4bd3b3dabddcf745d315ad550375e4fd 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/unistd.h>
+#include <linux/namei.h>
 
 #include <asm/uaccess.h>
 
@@ -71,6 +72,7 @@ struct old_linux_dirent {
 struct readdir_callback {
        struct dir_context ctx;
        struct old_linux_dirent __user * dirent;
+       struct file * file;
        int result;
 };
 
@@ -89,6 +91,10 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
                buf->result = -EOVERFLOW;
                return -EOVERFLOW;
        }
+
+       if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
+               return 0;
+
        buf->result++;
        dirent = buf->dirent;
        if (!access_ok(VERIFY_WRITE, dirent,
@@ -120,6 +126,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
        if (!f.file)
                return -EBADF;
 
+       buf.file = f.file;
        error = iterate_dir(f.file, &buf.ctx);
        if (buf.result)
                error = buf.result;
@@ -145,6 +152,7 @@ struct getdents_callback {
        struct dir_context ctx;
        struct linux_dirent __user * current_dir;
        struct linux_dirent __user * previous;
+       struct file * file;
        int count;
        int error;
 };
@@ -167,6 +175,10 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
                buf->error = -EOVERFLOW;
                return -EOVERFLOW;
        }
+
+       if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
+               return 0;
+
        dirent = buf->previous;
        if (dirent) {
                if (__put_user(offset, &dirent->d_off))
@@ -212,6 +224,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
        if (!f.file)
                return -EBADF;
 
+       buf.file = f.file;
        error = iterate_dir(f.file, &buf.ctx);
        if (error >= 0)
                error = buf.error;
@@ -230,6 +243,7 @@ struct getdents_callback64 {
        struct dir_context ctx;
        struct linux_dirent64 __user * current_dir;
        struct linux_dirent64 __user * previous;
+       struct file *file;
        int count;
        int error;
 };
@@ -246,6 +260,10 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
        buf->error = -EINVAL;   /* only used if we fail.. */
        if (reclen > buf->count)
                return -EINVAL;
+
+       if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
+               return 0;
+
        dirent = buf->previous;
        if (dirent) {
                if (__put_user(offset, &dirent->d_off))
@@ -293,6 +311,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
        if (!f.file)
                return -EBADF;
 
+       buf.file = f.file;
        error = iterate_dir(f.file, &buf.ctx);
        if (error >= 0)
                error = buf.error;
index 9c02d96d3a424cefd997471c1e0b1fdc0407495d..6562c1082fe50d1734d40af1ba65a3ea14755ed3 100644 (file)
@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
                return;
        }
 
-       atomic_inc(&fs_generation(tb->tb_sb));
+       atomic_inc_unchecked(&fs_generation(tb->tb_sb));
        do_balance_starts(tb);
 
        /*
index aca73dd739066477a8f4eb6293122cfe84f3f055..e3c558d1b78c02c711ae9868fe409d349127103f 100644 (file)
@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
 }
 
 static struct item_operations errcatch_ops = {
-       errcatch_bytes_number,
-       errcatch_decrement_key,
-       errcatch_is_left_mergeable,
-       errcatch_print_item,
-       errcatch_check_item,
-
-       errcatch_create_vi,
-       errcatch_check_left,
-       errcatch_check_right,
-       errcatch_part_size,
-       errcatch_unit_num,
-       errcatch_print_vi
+       .bytes_number = errcatch_bytes_number,
+       .decrement_key = errcatch_decrement_key,
+       .is_left_mergeable = errcatch_is_left_mergeable,
+       .print_item = errcatch_print_item,
+       .check_item = errcatch_check_item,
+
+       .create_vi = errcatch_create_vi,
+       .check_left = errcatch_check_left,
+       .check_right = errcatch_check_right,
+       .part_size = errcatch_part_size,
+       .unit_num = errcatch_unit_num,
+       .print_vi = errcatch_print_vi
 };
 
 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
index 621b9f381fe1faed2925d1117e13ee4452129730..af527fddc44d08722dcfbcf6d696790304b647fc 100644 (file)
@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
                   "SMALL_TAILS " : "NO_TAILS ",
                   replay_only(sb) ? "REPLAY_ONLY " : "",
                   convert_reiserfs(sb) ? "CONV " : "",
-                  atomic_read(&r->s_generation_counter),
+                  atomic_read_unchecked(&r->s_generation_counter),
                   SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
                   SF(s_do_balance), SF(s_unneeded_left_neighbor),
                   SF(s_good_search_by_key_reada), SF(s_bmaps),
index bb79cddf0a1f15691cb59252448ffd4a02ecad43..fcf49ef9c17a464fd123c93c60e60bbb9d59c848 100644 (file)
@@ -580,7 +580,7 @@ struct reiserfs_sb_info {
        /* Comment? -Hans */
        wait_queue_head_t s_wait;
        /* increased by one every time the  tree gets re-balanced */
-       atomic_t s_generation_counter;
+       atomic_unchecked_t s_generation_counter;
 
        /* File system properties. Currently holds on-disk FS format */
        unsigned long s_properties;
@@ -2301,7 +2301,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
 #define REISERFS_USER_MEM              1       /* user memory mode */
 
 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
-#define get_generation(s) atomic_read (&fs_generation(s))
+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
 #define FILESYSTEM_CHANGED_TB(tb)  (get_generation((tb)->tb_sb) != (tb)->fs_gen)
 #define __fs_changed(gen,s) (gen != get_generation (s))
 #define fs_changed(gen,s)              \
index 71fbbe3e2dab310481ef97ca12885a4a2137e8cf..eff29ba9dd4a4305b13a16f5900e330f2535c987 100644 (file)
@@ -1868,6 +1868,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
        sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
        sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
        sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
+#ifdef CONFIG_REISERFS_FS_XATTR
+       /* turn on user xattrs by default */
+       sbi->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
+#endif
        /* no preallocation minimum, be smart in reiserfs_file_write instead */
        sbi->s_alloc_options.preallocmin = 0;
        /* Preallocate by 16 blocks (17-1) at once */
index 467bb1cb3ea5e0da46d7d4b305b1d7fb2c143208..cf9d65acc075e9c77da8500929e2ea6374612955 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/poll.h>
+#include <linux/security.h>
 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
 #include <linux/file.h>
 #include <linux/fdtable.h>
@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
        struct poll_list *walk = head;
        unsigned long todo = nfds;
 
+       gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
        if (nfds > rlimit(RLIMIT_NOFILE))
                return -EINVAL;
 
index dbf3a59c86bbbb081464830585f216623ab9f112..daf023f79a8f688820a1b9ee5cefa7d8ec732336 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/slab.h>
 #include <linux/cred.h>
 #include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -23,16 +25,7 @@ static void seq_set_overflow(struct seq_file *m)
 
 static void *seq_buf_alloc(unsigned long size)
 {
-       void *buf;
-
-       /*
-        * __GFP_NORETRY to avoid oom-killings with high-order allocations -
-        * it's better to fall back to vmalloc() than to kill things.
-        */
-       buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
-       if (!buf && size > PAGE_SIZE)
-               buf = vmalloc(size);
-       return buf;
+       return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
 }
 
 /**
@@ -65,6 +58,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
 #ifdef CONFIG_USER_NS
        p->user_ns = file->f_cred->user_ns;
 #endif
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+       p->exec_id = current->exec_id;
+#endif
 
        /*
         * Wrappers around seq_open(e.g. swaps_open) need to be
@@ -87,6 +83,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
 }
 EXPORT_SYMBOL(seq_open);
 
+
+int seq_open_restrict(struct file *file, const struct seq_operations *op)
+{
+       if (gr_proc_is_restricted())
+               return -EACCES;
+
+       return seq_open(file, op);
+}
+EXPORT_SYMBOL(seq_open_restrict);
+
 static int traverse(struct seq_file *m, loff_t offset)
 {
        loff_t pos = 0, index;
@@ -158,7 +164,7 @@ Eoverflow:
 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
 {
        struct seq_file *m = file->private_data;
-       size_t copied = 0;
+       ssize_t copied = 0;
        loff_t pos;
        size_t n;
        void *p;
@@ -589,7 +595,7 @@ static void single_stop(struct seq_file *p, void *v)
 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
                void *data)
 {
-       struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
+       seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
        int res = -ENOMEM;
 
        if (op) {
@@ -625,6 +631,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
 }
 EXPORT_SYMBOL(single_open_size);
 
+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
+               void *data)
+{
+       if (gr_proc_is_restricted())
+               return -EACCES;
+
+       return single_open(file, show, data);
+}
+EXPORT_SYMBOL(single_open_restrict);
+
+
 int single_release(struct inode *inode, struct file *file)
 {
        const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
index 75c6058eabf2e37b27df85fafe6e6b2b5b9379dd..770d40c4ddeed9873b6a4c71470c415f65d70523 100644 (file)
@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
        pipe_lock(pipe);
 
        for (;;) {
-               if (!pipe->readers) {
+               if (!atomic_read(&pipe->readers)) {
                        send_sig(SIGPIPE, current, 0);
                        if (!ret)
                                ret = -EPIPE;
@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
                        page_nr++;
                        ret += buf->len;
 
-                       if (pipe->files)
+                       if (atomic_read(&pipe->files))
                                do_wakeup = 1;
 
                        if (!--spd->nr_pages)
@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
                        do_wakeup = 0;
                }
 
-               pipe->waiting_writers++;
+               atomic_inc(&pipe->waiting_writers);
                pipe_wait(pipe);
-               pipe->waiting_writers--;
+               atomic_dec(&pipe->waiting_writers);
        }
 
        pipe_unlock(pipe);
@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
        old_fs = get_fs();
        set_fs(get_ds());
        /* The cast to a user pointer is valid due to the set_fs() */
-       res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
+       res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
        set_fs(old_fs);
 
        return res;
@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
        old_fs = get_fs();
        set_fs(get_ds());
        /* The cast to a user pointer is valid due to the set_fs() */
-       res = vfs_write(file, (__force const char __user *)buf, count, &pos);
+       res = vfs_write(file, (const char __force_user *)buf, count, &pos);
        set_fs(old_fs);
 
        return res;
@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
                        goto err;
 
                this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
-               vec[i].iov_base = (void __user *) page_address(page);
+               vec[i].iov_base = (void __force_user *) page_address(page);
                vec[i].iov_len = this_len;
                spd.pages[i] = page;
                spd.nr_pages++;
@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
                        ops->release(pipe, buf);
                        pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
                        pipe->nrbufs--;
-                       if (pipe->files)
+                       if (atomic_read(&pipe->files))
                                sd->need_wakeup = true;
                }
 
@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
 {
        while (!pipe->nrbufs) {
-               if (!pipe->writers)
+               if (!atomic_read(&pipe->writers))
                        return 0;
 
-               if (!pipe->waiting_writers && sd->num_spliced)
+               if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
                        return 0;
 
                if (sd->flags & SPLICE_F_NONBLOCK)
@@ -1040,7 +1040,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
                                ops->release(pipe, buf);
                                pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
                                pipe->nrbufs--;
-                               if (pipe->files)
+                               if (atomic_read(&pipe->files))
                                        sd.need_wakeup = true;
                        } else {
                                buf->offset += ret;
@@ -1200,7 +1200,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
                 * out of the pipe right after the splice_to_pipe(). So set
                 * PIPE_READERS appropriately.
                 */
-               pipe->readers = 1;
+               atomic_set(&pipe->readers, 1);
 
                current->splice_pipe = pipe;
        }
@@ -1497,6 +1497,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 
                        partial[buffers].offset = off;
                        partial[buffers].len = plen;
+                       partial[buffers].private = 0;
 
                        off = 0;
                        len -= plen;
@@ -1733,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
                        ret = -ERESTARTSYS;
                        break;
                }
-               if (!pipe->writers)
+               if (!atomic_read(&pipe->writers))
                        break;
-               if (!pipe->waiting_writers) {
+               if (!atomic_read(&pipe->waiting_writers)) {
                        if (flags & SPLICE_F_NONBLOCK) {
                                ret = -EAGAIN;
                                break;
@@ -1767,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
        pipe_lock(pipe);
 
        while (pipe->nrbufs >= pipe->buffers) {
-               if (!pipe->readers) {
+               if (!atomic_read(&pipe->readers)) {
                        send_sig(SIGPIPE, current, 0);
                        ret = -EPIPE;
                        break;
@@ -1780,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
                        ret = -ERESTARTSYS;
                        break;
                }
-               pipe->waiting_writers++;
+               atomic_inc(&pipe->waiting_writers);
                pipe_wait(pipe);
-               pipe->waiting_writers--;
+               atomic_dec(&pipe->waiting_writers);
        }
 
        pipe_unlock(pipe);
@@ -1818,14 +1819,14 @@ retry:
        pipe_double_lock(ipipe, opipe);
 
        do {
-               if (!opipe->readers) {
+               if (!atomic_read(&opipe->readers)) {
                        send_sig(SIGPIPE, current, 0);
                        if (!ret)
                                ret = -EPIPE;
                        break;
                }
 
-               if (!ipipe->nrbufs && !ipipe->writers)
+               if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
                        break;
 
                /*
@@ -1922,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
        pipe_double_lock(ipipe, opipe);
 
        do {
-               if (!opipe->readers) {
+               if (!atomic_read(&opipe->readers)) {
                        send_sig(SIGPIPE, current, 0);
                        if (!ret)
                                ret = -EPIPE;
@@ -1967,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
         * return EAGAIN if we have the potential of some data in the
         * future, otherwise just return 0
         */
-       if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
+       if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
                ret = -EAGAIN;
 
        pipe_unlock(ipipe);
index ae0c3cef9927e64fb1f21ccf1848155825fc79ef..9ee641cc1657a77917a0621e982a59cde8480c79 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
        stat->gid = inode->i_gid;
        stat->rdev = inode->i_rdev;
        stat->size = i_size_read(inode);
-       stat->atime = inode->i_atime;
-       stat->mtime = inode->i_mtime;
+       if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
+               stat->atime = inode->i_ctime;
+               stat->mtime = inode->i_ctime;
+       } else {
+               stat->atime = inode->i_atime;
+               stat->mtime = inode->i_mtime;
+       }
        stat->ctime = inode->i_ctime;
        stat->blksize = (1 << inode->i_blkbits);
        stat->blocks = inode->i_blocks;
@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
 {
        struct inode *inode = path->dentry->d_inode;
+       int retval;
 
-       if (inode->i_op->getattr)
-               return inode->i_op->getattr(path->mnt, path->dentry, stat);
+       if (inode->i_op->getattr) {
+               retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
+               if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
+                       stat->atime = stat->ctime;
+                       stat->mtime = stat->ctime;
+               }
+               return retval;
+       }
 
        generic_fillattr(inode, stat);
        return 0;
index 0b45ff42f3741123a15f58d426e201a78d20b3d0..847de5bf1cdc82bacf495e812c0e3d187c560400 100644 (file)
@@ -41,9 +41,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
 {
        struct kernfs_node *parent, *kn;
+       const char *name;
+       umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
+       const char *parent_name;
+#endif
 
        BUG_ON(!kobj);
 
+       name = kobject_name(kobj);
+
        if (kobj->parent)
                parent = kobj->parent->sd;
        else
@@ -52,11 +59,22 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
        if (!parent)
                return -ENOENT;
 
-       kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
-                                 S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
+       parent_name = parent->name;
+       mode = S_IRWXU;
+
+       if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
+           (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
+           (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
+           (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
+               mode = S_IRWXU | S_IRUGO | S_IXUGO;
+#endif
+
+       kn = kernfs_create_dir_ns(parent, name,
+                                 mode, kobj, ns);
        if (IS_ERR(kn)) {
                if (PTR_ERR(kn) == -EEXIST)
-                       sysfs_warn_dup(parent, kobject_name(kobj));
+                       sysfs_warn_dup(parent, name);
                return PTR_ERR(kn);
        }
 
index 69d488986cce4923860c6d1f4c7ab325370fc5ce..a810bd4a04d7cde6a8ce36b2b6735a868a1fab10 100644 (file)
@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
 #endif
 }
 
-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
 {
        if (sbi->s_bytesex == BYTESEX_PDP)
                return PDP_swab((__force __u32)n);
index fb08b0c514b68d5c226b986b6a946b77d14be255..65fcc7edfc2315a8b40a0e22ee2cd4f42a4acead 100644 (file)
@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
        return err;
 }
 
-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
 {
        int err;
 
index c175b4dabc14e576eea191fbb0f923f4735171e4..8f36a162727bd0826ecea8e0f629a851426b54ec 100644 (file)
@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
 
 u8 udf_tag_checksum(const struct tag *t)
 {
-       u8 *data = (u8 *)t;
+       const u8 *data = (const u8 *)t;
        u8 checksum = 0;
        int i;
        for (i = 0; i < sizeof(struct tag); ++i)
index 8d974c4fd18b1eea45ddc849803f01904f7b8abb..b82f6ecca521c171231a0a02c900a6afd804071e 100644 (file)
@@ -22,7 +22,7 @@ enum {
        BYTESEX_BE
 };
 
-static inline u64
+static inline u64 __intentional_overflow(-1)
 fs64_to_cpu(struct super_block *sbp, __fs64 n)
 {
        if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
                return (__force __fs64)cpu_to_be64(n);
 }
 
-static inline u32
+static inline u32 __intentional_overflow(-1)
 fs32_to_cpu(struct super_block *sbp, __fs32 n)
 {
        if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
index aa138d64560a6a3c2133bc70d57e367cc8c1476d..5f3a81123795dae8d83dbf115d94ccd340c78f9f 100644 (file)
@@ -1,6 +1,7 @@
 #include <linux/compiler.h>
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/security.h>
 #include <linux/linkage.h>
 #include <linux/mount.h>
 #include <linux/namei.h>
@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
                }
        }
 retry_deleg:
+
+       if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
+               error = -EACCES;
+               goto mnt_drop_write_and_out;
+       }
+
        mutex_lock(&inode->i_mutex);
        error = notify_change(path->dentry, &newattrs, &delegated_inode);
        mutex_unlock(&inode->i_mutex);
index 4ef698549e31c686ca593642436ba9b5311ad402..a6cd65679a099e12e88b953f5b5486cfbbe96a16 100644 (file)
@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
        return rc;
 }
 
+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
+ssize_t
+pax_getxattr(struct dentry *dentry, void *value, size_t size)
+{
+       struct inode *inode = dentry->d_inode;
+       ssize_t error;
+
+       error = inode_permission(inode, MAY_EXEC);
+       if (error)
+               return error;
+
+       if (inode->i_op->getxattr)
+               error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
+       else
+               error = -EOPNOTSUPP;
+
+       return error;
+}
+EXPORT_SYMBOL(pax_getxattr);
+#endif
+
 ssize_t
 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
 {
@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
  * Extended attribute SET operations
  */
 static long
-setxattr(struct dentry *d, const char __user *name, const void __user *value,
+setxattr(struct path *path, const char __user *name, const void __user *value,
         size_t size, int flags)
 {
        int error;
@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
                        posix_acl_fix_xattr_from_user(kvalue, size);
        }
 
-       error = vfs_setxattr(d, kname, kvalue, size, flags);
+       if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
+               error = -EACCES;
+               goto out;
+       }
+
+       error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
 out:
        if (vvalue)
                vfree(vvalue);
@@ -376,7 +402,7 @@ retry:
                return error;
        error = mnt_want_write(path.mnt);
        if (!error) {
-               error = setxattr(path.dentry, name, value, size, flags);
+               error = setxattr(&path, name, value, size, flags);
                mnt_drop_write(path.mnt);
        }
        path_put(&path);
@@ -412,7 +438,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
        audit_file(f.file);
        error = mnt_want_write_file(f.file);
        if (!error) {
-               error = setxattr(f.file->f_path.dentry, name, value, size, flags);
+               error = setxattr(&f.file->f_path, name, value, size, flags);
                mnt_drop_write_file(f.file);
        }
        fdput(f);
@@ -598,7 +624,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
  * Extended attribute REMOVE operations
  */
 static long
-removexattr(struct dentry *d, const char __user *name)
+removexattr(struct path *path, const char __user *name)
 {
        int error;
        char kname[XATTR_NAME_MAX + 1];
@@ -609,7 +635,10 @@ removexattr(struct dentry *d, const char __user *name)
        if (error < 0)
                return error;
 
-       return vfs_removexattr(d, kname);
+       if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
+               return -EACCES;
+
+       return vfs_removexattr(path->dentry, kname);
 }
 
 static int path_removexattr(const char __user *pathname,
@@ -623,7 +652,7 @@ retry:
                return error;
        error = mnt_want_write(path.mnt);
        if (!error) {
-               error = removexattr(path.dentry, name);
+               error = removexattr(&path, name);
                mnt_drop_write(path.mnt);
        }
        path_put(&path);
@@ -649,14 +678,16 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
 {
        struct fd f = fdget(fd);
+       struct path *path;
        int error = -EBADF;
 
        if (!f.file)
                return error;
+       path = &f.file->f_path;
        audit_file(f.file);
        error = mnt_want_write_file(f.file);
        if (!error) {
-               error = removexattr(f.file->f_path.dentry, name);
+               error = removexattr(path, name);
                mnt_drop_write_file(f.file);
        }
        fdput(f);
index 4e20fe7497b3252d02d357f671e2c5990b2ab58a..6d1a55ae0d114965a210e48946b519de6e8aa112 100644 (file)
@@ -580,7 +580,7 @@ xfs_bmap_validate_ret(
 
 #else
 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork)                do { } while (0)
-#define        xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
+#define        xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)    do { } while (0)
 #endif /* DEBUG */
 
 /*
index 098cd78fe708433fdf77306637626d6b69400582..724d3f8b54c7a01399ac67cdc0879e28b939f7b8 100644 (file)
@@ -140,7 +140,12 @@ xfs_dir2_sf_getdents(
                ino = dp->d_ops->sf_get_ino(sfp, sfep);
                filetype = dp->d_ops->sf_get_ftype(sfep);
                ctx->pos = off & 0x7fffffff;
-               if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
+               if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
+                       char name[sfep->namelen];
+                       memcpy(name, sfep->name, sfep->namelen);
+                       if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
+                               return 0;
+               } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
                            xfs_dir3_get_dtype(dp->i_mount, filetype)))
                        return 0;
                sfep = dp->d_ops->sf_nextentry(sfp, sfep);
index a1831980a68e13adec9ac54a322cb835f81bb969..6b52f52c1b9f160200d22d13a52d3446be0e3d46 100644 (file)
@@ -119,7 +119,7 @@ xfs_find_handle(
        }
 
        error = -EFAULT;
-       if (copy_to_user(hreq->ohandle, &handle, hsize) ||
+       if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
            copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
                goto out_put;
 
index c31d2c2eadc4445606d53cbd0c744b9143345375..6ec8f625d039d1066c4d5864cee2454dda42a79f 100644 (file)
@@ -234,7 +234,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
  * of the compiler which do not like us using do_div in the middle
  * of large functions.
  */
-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
 {
        __u32   mod;
 
@@ -290,7 +290,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
        return 0;
 }
 #else
-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
 {
        __u32   mod;
 
index 77ff547730af53f8da5850f05ee70866b720f4b9..181834fd886f0b0474564e4fa5c9da476eacb1cf 100644 (file)
 #define pmd_alloc(mm, pud, address) \
        ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
                NULL: pmd_offset(pud, address))
+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
 
 #define pud_alloc(mm, pgd, address)    (pgd)
+#define pud_alloc_kernel(mm, pgd, address)     pud_alloc((mm), (pgd), (address))
 #define pud_offset(pgd, start)         (pgd)
 #define pud_none(pud)                  0
 #define pud_bad(pud)                   0
index b7babf0206b8709bf751506104a06748a0dcdd65..1e4b4f19c69161ce9df0796db569fe29f8564c71 100644 (file)
 
 typedef atomic64_t atomic_long_t;
 
+#ifdef CONFIG_PAX_REFCOUNT
+typedef atomic64_unchecked_t atomic_long_unchecked_t;
+#else
+typedef atomic64_t atomic_long_unchecked_t;
+#endif
+
 #define ATOMIC_LONG_INIT(i)    ATOMIC64_INIT(i)
 
 static inline long atomic_long_read(atomic_long_t *l)
@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
        return (long)atomic64_read(v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
+{
+       atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
+
+       return (long)atomic64_read_unchecked(v);
+}
+#endif
+
 static inline void atomic_long_set(atomic_long_t *l, long i)
 {
        atomic64_t *v = (atomic64_t *)l;
@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
        atomic64_set(v, i);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
+{
+       atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
+
+       atomic64_set_unchecked(v, i);
+}
+#endif
+
 static inline void atomic_long_inc(atomic_long_t *l)
 {
        atomic64_t *v = (atomic64_t *)l;
@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
        atomic64_inc(v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
+{
+       atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
+
+       atomic64_inc_unchecked(v);
+}
+#endif
+
 static inline void atomic_long_dec(atomic_long_t *l)
 {
        atomic64_t *v = (atomic64_t *)l;
@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
        atomic64_dec(v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
+{
+       atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
+
+       atomic64_dec_unchecked(v);
+}
+#endif
+
 static inline void atomic_long_add(long i, atomic_long_t *l)
 {
        atomic64_t *v = (atomic64_t *)l;
@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
        atomic64_add(i, v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
+{
+       atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
+
+       atomic64_add_unchecked(i, v);
+}
+#endif
+
 static inline void atomic_long_sub(long i, atomic_long_t *l)
 {
        atomic64_t *v = (atomic64_t *)l;
@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
        atomic64_sub(i, v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
+{
+       atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
+
+       atomic64_sub_unchecked(i, v);
+}
+#endif
+
 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
 {
        atomic64_t *v = (atomic64_t *)l;
@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
        return atomic64_add_negative(i, v);
 }
 
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
 {
        atomic64_t *v = (atomic64_t *)l;
 
        return (long)atomic64_add_return(i, v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
+{
+       atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
+
+       return (long)atomic64_add_return_unchecked(i, v);
+}
+#endif
+
 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
 {
        atomic64_t *v = (atomic64_t *)l;
@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
        return (long)atomic64_inc_return(v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
+{
+       atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
+
+       return (long)atomic64_inc_return_unchecked(v);
+}
+#endif
+
 static inline long atomic_long_dec_return(atomic_long_t *l)
 {
        atomic64_t *v = (atomic64_t *)l;
@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
 
 typedef atomic_t atomic_long_t;
 
+#ifdef CONFIG_PAX_REFCOUNT
+typedef atomic_unchecked_t atomic_long_unchecked_t;
+#else
+typedef atomic_t atomic_long_unchecked_t;
+#endif
+
 #define ATOMIC_LONG_INIT(i)    ATOMIC_INIT(i)
 static inline long atomic_long_read(atomic_long_t *l)
 {
@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
        return (long)atomic_read(v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
+{
+       atomic_unchecked_t *v = (atomic_unchecked_t *)l;
+
+       return (long)atomic_read_unchecked(v);
+}
+#endif
+
 static inline void atomic_long_set(atomic_long_t *l, long i)
 {
        atomic_t *v = (atomic_t *)l;
@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
        atomic_set(v, i);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
+{
+       atomic_unchecked_t *v = (atomic_unchecked_t *)l;
+
+       atomic_set_unchecked(v, i);
+}
+#endif
+
 static inline void atomic_long_inc(atomic_long_t *l)
 {
        atomic_t *v = (atomic_t *)l;
@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
        atomic_inc(v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
+{
+       atomic_unchecked_t *v = (atomic_unchecked_t *)l;
+
+       atomic_inc_unchecked(v);
+}
+#endif
+
 static inline void atomic_long_dec(atomic_long_t *l)
 {
        atomic_t *v = (atomic_t *)l;
@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
        atomic_dec(v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
+{
+       atomic_unchecked_t *v = (atomic_unchecked_t *)l;
+
+       atomic_dec_unchecked(v);
+}
+#endif
+
 static inline void atomic_long_add(long i, atomic_long_t *l)
 {
        atomic_t *v = (atomic_t *)l;
@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
        atomic_add(i, v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
+{
+       atomic_unchecked_t *v = (atomic_unchecked_t *)l;
+
+       atomic_add_unchecked(i, v);
+}
+#endif
+
 static inline void atomic_long_sub(long i, atomic_long_t *l)
 {
        atomic_t *v = (atomic_t *)l;
@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
        atomic_sub(i, v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
+{
+       atomic_unchecked_t *v = (atomic_unchecked_t *)l;
+
+       atomic_sub_unchecked(i, v);
+}
+#endif
+
 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
 {
        atomic_t *v = (atomic_t *)l;
@@ -211,13 +349,23 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
        return atomic_add_negative(i, v);
 }
 
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
 {
        atomic_t *v = (atomic_t *)l;
 
        return (long)atomic_add_return(i, v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
+{
+       atomic_unchecked_t *v = (atomic_unchecked_t *)l;
+
+       return (long)atomic_add_return_unchecked(i, v);
+}
+
+#endif
+
 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
 {
        atomic_t *v = (atomic_t *)l;
@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
        return (long)atomic_inc_return(v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
+{
+       atomic_unchecked_t *v = (atomic_unchecked_t *)l;
+
+       return (long)atomic_inc_return_unchecked(v);
+}
+#endif
+
 static inline long atomic_long_dec_return(atomic_long_t *l)
 {
        atomic_t *v = (atomic_t *)l;
@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
 
 #endif  /*  BITS_PER_LONG == 64  */
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void pax_refcount_needs_these_functions(void)
+{
+       atomic_read_unchecked((atomic_unchecked_t *)NULL);
+       atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
+       atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
+       atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
+       atomic_inc_unchecked((atomic_unchecked_t *)NULL);
+       (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
+       atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
+       atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
+       atomic_dec_unchecked((atomic_unchecked_t *)NULL);
+       atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
+       (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
+#ifdef CONFIG_X86
+       atomic_clear_mask_unchecked(0, NULL);
+       atomic_set_mask_unchecked(0, NULL);
+#endif
+
+       atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
+       atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
+       atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
+       atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
+       atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
+       atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
+       atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
+       atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
+}
+#else
+#define atomic_read_unchecked(v) atomic_read(v)
+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
+#define atomic_inc_unchecked(v) atomic_inc(v)
+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
+#define atomic_dec_unchecked(v) atomic_dec(v)
+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
+
+#define atomic_long_read_unchecked(v) atomic_long_read(v)
+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
+#endif
+
 #endif  /*  _ASM_GENERIC_ATOMIC_LONG_H  */
index 30ad9c86cebb62b1c695afb195fcdfb9ad027bee..c70c170da71985c20feb6195851bf2f8d1ab6730 100644 (file)
@@ -16,6 +16,8 @@ typedef struct {
        long long counter;
 } atomic64_t;
 
+typedef atomic64_t atomic64_unchecked_t;
+
 #define ATOMIC64_INIT(i)       { (i) }
 
 extern long long atomic64_read(const atomic64_t *v);
@@ -51,4 +53,14 @@ extern int    atomic64_add_unless(atomic64_t *v, long long a, long long u);
 #define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1LL, 0LL)
 
+#define atomic64_read_unchecked(v) atomic64_read(v)
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
+
 #endif  /*  _ASM_GENERIC_ATOMIC64_H  */
index f5c40b0fadc2a50be563304727db2e7ad7fe6699..e902f9d319a2c2226ea84d6f0edb5d52c42b2f30 100644 (file)
@@ -82,7 +82,7 @@
 do {                                                                   \
        compiletime_assert_atomic_type(*p);                             \
        smp_mb();                                                       \
-       ACCESS_ONCE(*p) = (v);                                          \
+       ACCESS_ONCE_RW(*p) = (v);                                       \
 } while (0)
 
 #define smp_load_acquire(p)                                            \
index a60a7ccb6782d869b9d47503a158f86a92b84374..0fe12f27b04d11e34090185f18ea6deb0dfe4f78 100644 (file)
@@ -9,7 +9,7 @@
  *
  * Undefined if no set bit exists, so code should check against 0 first.
  */
-static __always_inline unsigned long __fls(unsigned long word)
+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
 {
        int num = BITS_PER_LONG - 1;
 
index 0576d1f42f43fc34fb5efa8e27969afc5dbdf0b4..dad6c713ecbdcd474facc5df7acc786d62f701e4 100644 (file)
@@ -9,7 +9,7 @@
  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
 
-static __always_inline int fls(int x)
+static __always_inline int __intentional_overflow(-1) fls(int x)
 {
        int r = 32;
 
index b097cf8444e3f99811ee395144b05a0a237519f3..3d40e14aadd5a2e75ea189d5b6d7321a12c81480 100644 (file)
@@ -15,7 +15,7 @@
  * at position 64.
  */
 #if BITS_PER_LONG == 32
-static __always_inline int fls64(__u64 x)
+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
 {
        __u32 h = x >> 32;
        if (h)
@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
        return fls(x);
 }
 #elif BITS_PER_LONG == 64
-static __always_inline int fls64(__u64 x)
+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
 {
        if (x == 0)
                return 0;
index 1bfcfe5c223703673633f20e7a9211b5d5ed6123..e04c5c9490d7b028a7f663d61d4485b8616eb262 100644 (file)
@@ -6,7 +6,7 @@
  * cache lines need to provide their own cache.h.
  */
 
-#define L1_CACHE_SHIFT         5
-#define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_SHIFT         5UL
+#define L1_CACHE_BYTES         (1UL << L1_CACHE_SHIFT)
 
 #endif /* __ASM_GENERIC_CACHE_H */
index 0d68a1eae98528b3c2f711774ecf01b4c2a21dbb..b74a761cae9da73812b5e6a2fe5267e030ec0ef6 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
 #define _ASM_GENERIC_EMERGENCY_RESTART_H
 
-static inline void machine_emergency_restart(void)
+static inline __noreturn void machine_emergency_restart(void)
 {
        machine_restart(NULL);
 }
index 90f99c74dd386b5412ae36ba0a9ba6ba14e63e55..00ce236266259eac18478d290281578f0a02611a 100644 (file)
@@ -2,9 +2,9 @@
 #define _ASM_GENERIC_KMAP_TYPES_H
 
 #ifdef __WITH_KM_FENCE
-# define KM_TYPE_NR 41
+# define KM_TYPE_NR 42
 #else
-# define KM_TYPE_NR 20
+# define KM_TYPE_NR 21
 #endif
 
 #endif
index 9ceb03b4f4661d3ade8c0ca9fbe98ac87f31bc89..62b0b8f1e7b113db0234caee94854de8c39db6fd 100644 (file)
@@ -23,24 +23,37 @@ typedef struct
        atomic_long_t a;
 } local_t;
 
+typedef struct {
+       atomic_long_unchecked_t a;
+} local_unchecked_t;
+
 #define LOCAL_INIT(i)  { ATOMIC_LONG_INIT(i) }
 
 #define local_read(l)  atomic_long_read(&(l)->a)
+#define local_read_unchecked(l)        atomic_long_read_unchecked(&(l)->a)
 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
+#define local_set_unchecked(l,i)       atomic_long_set_unchecked((&(l)->a),(i))
 #define local_inc(l)   atomic_long_inc(&(l)->a)
+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
 #define local_dec(l)   atomic_long_dec(&(l)->a)
+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
+#define local_add_unchecked(i,l)       atomic_long_add_unchecked((i),(&(l)->a))
 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
+#define local_sub_unchecked(i,l)       atomic_long_sub_unchecked((i),(&(l)->a))
 
 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
 
 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
index 725612b793ce1d55462de9ab2158a795ab3b4faa..9cc513a7b3b2ed5962b1b33d400943e60306bbac 100644 (file)
@@ -1,14 +1,19 @@
 #ifndef _PGTABLE_NOPMD_H
 #define _PGTABLE_NOPMD_H
 
-#ifndef __ASSEMBLY__
-
 #include <asm-generic/pgtable-nopud.h>
 
-struct mm_struct;
-
 #define __PAGETABLE_PMD_FOLDED
 
+#define PMD_SHIFT      PUD_SHIFT
+#define PTRS_PER_PMD   1
+#define PMD_SIZE       (_AC(1,UL) << PMD_SHIFT)
+#define PMD_MASK       (~(PMD_SIZE-1))
+
+#ifndef __ASSEMBLY__
+
+struct mm_struct;
+
 /*
  * Having the pmd type consist of a pud gets the size right, and allows
  * us to conceptually access the pud entry that this pmd is folded into
@@ -16,11 +21,6 @@ struct mm_struct;
  */
 typedef struct { pud_t pud; } pmd_t;
 
-#define PMD_SHIFT      PUD_SHIFT
-#define PTRS_PER_PMD   1
-#define PMD_SIZE       (1UL << PMD_SHIFT)
-#define PMD_MASK       (~(PMD_SIZE-1))
-
 /*
  * The "pud_xxx()" functions here are trivial for a folded two-level
  * setup: the pmd is never bad, and a pmd always exists (as it's folded
index 810431d8351b16c14c3d1954ddc2890866c41658..0ec4804f406a29a2da47e521c94435676685686f 100644 (file)
@@ -1,10 +1,15 @@
 #ifndef _PGTABLE_NOPUD_H
 #define _PGTABLE_NOPUD_H
 
-#ifndef __ASSEMBLY__
-
 #define __PAGETABLE_PUD_FOLDED
 
+#define PUD_SHIFT      PGDIR_SHIFT
+#define PTRS_PER_PUD   1
+#define PUD_SIZE       (_AC(1,UL) << PUD_SHIFT)
+#define PUD_MASK       (~(PUD_SIZE-1))
+
+#ifndef __ASSEMBLY__
+
 /*
  * Having the pud type consist of a pgd gets the size right, and allows
  * us to conceptually access the pgd entry that this pud is folded into
  */
 typedef struct { pgd_t pgd; } pud_t;
 
-#define PUD_SHIFT      PGDIR_SHIFT
-#define PTRS_PER_PUD   1
-#define PUD_SIZE       (1UL << PUD_SHIFT)
-#define PUD_MASK       (~(PUD_SIZE-1))
-
 /*
  * The "pgd_xxx()" functions here are trivial for a folded two-level
  * setup: the pud is never bad, and a pud always exists (as it's folded
@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd)      { }
 #define pud_ERROR(pud)                         (pgd_ERROR((pud).pgd))
 
 #define pgd_populate(mm, pgd, pud)             do { } while (0)
+#define pgd_populate_kernel(mm, pgd, pud)      do { } while (0)
 /*
  * (puds are folded into pgds so this doesn't get actually called,
  * but the define is needed for a generic inline function.)
index 177d5973b132fb70ceba10684da7a600c9ec9dac..2826237e45152d2e8a3c0248db88288fa5bbe04a 100644 (file)
@@ -839,6 +839,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
+#ifdef CONFIG_PAX_KERNEXEC
+#error KERNEXEC requires pax_open_kernel
+#else
+static inline unsigned long pax_open_kernel(void) { return 0; }
+#endif
+#endif
+
+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
+#ifdef CONFIG_PAX_KERNEXEC
+#error KERNEXEC requires pax_close_kernel
+#else
+static inline unsigned long pax_close_kernel(void) { return 0; }
+#endif
+#endif
+
 #endif /* CONFIG_MMU */
 
 #endif /* !__ASSEMBLY__ */
index 72d8803832ff64ef5e53b666abdcb7ea21d44a61..cb9749c24550e279548dbda31d31a4383d8fcd19 100644 (file)
@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
        return __clear_user(to, n);
 }
 
+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+#error UDEREF requires pax_open_userland
+#else
+static inline unsigned long pax_open_userland(void) { return 0; }
+#endif
+#endif
+
+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+#error UDEREF requires pax_close_userland
+#else
+static inline unsigned long pax_close_userland(void) { return 0; }
+#endif
+#endif
+
 #endif /* __ASM_GENERIC_UACCESS_H */
index bee5d683074da849f467c1ac23416b8687c5c47f..8d362d194ad3c983d1bc91cb2c4bad7c9d1cae0e 100644 (file)
        .rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {           \
                VMLINUX_SYMBOL(__start_rodata) = .;                     \
                *(.rodata) *(.rodata.*)                                 \
+               *(.data..read_only)                                     \
                *(__vermagic)           /* Kernel version magic */      \
                . = ALIGN(8);                                           \
                VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;         \
  * section in the linker script will go there too.  @phdr should have
  * a leading colon.
  *
- * Note that this macros defines __per_cpu_load as an absolute symbol.
+ * Note that this macros defines per_cpu_load as an absolute symbol.
  * If there is no need to put the percpu section at a predetermined
  * address, use PERCPU_SECTION.
  */
 #define PERCPU_VADDR(cacheline, vaddr, phdr)                           \
-       VMLINUX_SYMBOL(__per_cpu_load) = .;                             \
-       .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)         \
+       per_cpu_load = .;                                               \
+       .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load)           \
                                - LOAD_OFFSET) {                        \
+               VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load;      \
                PERCPU_INPUT(cacheline)                                 \
        } phdr                                                          \
-       . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
+       . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
 
 /**
  * PERCPU_SECTION - define output section for percpu area, simple version
index 623a59c1ff5a6ef45d4cebe4ae8c0b7462aaff76..1e79ab981732a07f0cec5321111e001b96dccace 100644 (file)
@@ -34,7 +34,7 @@ struct crypto_type {
        unsigned int maskclear;
        unsigned int maskset;
        unsigned int tfmsize;
-};
+} __do_const;
 
 struct crypto_instance {
        struct crypto_alg alg;
index e1b2e8b98af7cde2c7276e915cd90ea4794b317e..2697bd28ba75c2a878e24b905fd558c6749bafab 100644 (file)
@@ -59,6 +59,7 @@
 
 #include <asm/mman.h>
 #include <asm/pgalloc.h>
+#include <asm/local.h>
 #include <asm/uaccess.h>
 
 #include <uapi/drm/drm.h>
@@ -223,10 +224,12 @@ void drm_err(const char *format, ...);
  * \param cmd command.
  * \param arg argument.
  */
-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 
-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
                               unsigned long arg);
 
 #define DRM_IOCTL_NR(n)                _IOC_NR(n)
@@ -242,10 +245,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
 struct drm_ioctl_desc {
        unsigned int cmd;
        int flags;
-       drm_ioctl_t *func;
+       drm_ioctl_t func;
        unsigned int cmd_drv;
        const char *name;
-};
+} __do_const;
 
 /**
  * Creates a driver or general drm_ioctl_desc array entry for the given
@@ -629,7 +632,8 @@ struct drm_info_list {
        int (*show)(struct seq_file*, void*); /** show callback */
        u32 driver_features; /**< Required driver features for this entry */
        void *data;
-};
+} __do_const;
+typedef struct drm_info_list __no_const drm_info_list_no_const;
 
 /**
  * debugfs node structure. This structure represents a debugfs file.
@@ -713,7 +717,7 @@ struct drm_device {
 
        /** \name Usage Counters */
        /*@{ */
-       int open_count;                 /**< Outstanding files open, protected by drm_global_mutex. */
+       local_t open_count;             /**< Outstanding files open, protected by drm_global_mutex. */
        spinlock_t buf_lock;            /**< For drm_device::buf_use and a few other things. */
        int buf_use;                    /**< Buffers in use -- cannot alloc */
        atomic_t buf_alloc;             /**< Buffer allocation in progress */
index 7adbb65ea8aeaa92eb750bab5dad3a2c8fef28eb..2a1eb1f01be2dd03e73702d600fe17d24e7bac62 100644 (file)
@@ -116,7 +116,7 @@ struct drm_encoder_helper_funcs {
                                            struct drm_connector *connector);
        /* disable encoder when not in use - more explicit than dpms off */
        void (*disable)(struct drm_encoder *encoder);
-};
+} __no_const;
 
 /**
  * drm_connector_helper_funcs - helper operations for connectors
index d016dc57f0073eede1a5467b798fa35dfa779da5..3951fe054608e8a7e0cdb32cb75f4b967a52201b 100644 (file)
@@ -37,7 +37,7 @@
  */
 #define INTEL_VGA_DEVICE(id, info) {           \
        0x8086, id,                             \
-       ~0, ~0,                                 \
+       PCI_ANY_ID, PCI_ANY_ID,                 \
        0x030000, 0xff0000,                     \
        (unsigned long) info }
 
index 72dcbe81dd0706ee1e7e8c95c5f7d06ef38158da..8db58d72621234d76ae95e778ced2715f1e2ae1c 100644 (file)
@@ -48,7 +48,7 @@
 
 struct ttm_mem_shrink {
        int (*do_shrink) (struct ttm_mem_shrink *);
-};
+} __no_const;
 
 /**
  * struct ttm_mem_global - Global memory accounting structure.
index 49a828425fa2d984615b6352812681a455d7d99b..9643967eb549576156f99b537f79fedbb8e6677d 100644 (file)
@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
  */
 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
 
+struct device;
 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
 
index 4b840e822209d7d6894f81476267655498d530f2..155d235decb9254b14317034435c5c69f410abb4 100644 (file)
@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
        /* Verify the signature on a key of this subtype (optional) */
        int (*verify_signature)(const struct key *key,
                                const struct public_key_signature *sig);
-};
+} __do_const;
 
 /**
  * asymmetric_key_subtype - Get the subtype from an asymmetric key
index c1da539f5e28a965702c39d21375923e8271f67f..1dcec5522f112603dc13d27de50205a9e1ab295f 100644 (file)
@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
 #endif
 
 struct k_atm_aal_stats {
-#define __HANDLE_ITEM(i) atomic_t i
+#define __HANDLE_ITEM(i) atomic_unchecked_t i
        __AAL_STAT_ITEMS
 #undef __HANDLE_ITEM
 };
@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
        int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
        int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
        struct module *owner;
-};
+} __do_const ;
 
 struct atmphy_ops {
        int (*start)(struct atm_dev *dev);
index 5b08a8540ecfc3e6bcb5bf8b6fbef4d19df93f55..60922fb7b6a3387952614f01e9c3ead98da4a5ad 100644 (file)
@@ -12,7 +12,7 @@
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __intentional_overflow(-1) atomic_add_unless(atomic_t *v, int a, int u)
 {
        return __atomic_add_unless(v, a, u) != u;
 }
index af84234e1f6e2f3741ccb42f1a89e325f17749e8..4177a40004c1f6ef57cf8f68031083c30b2524f1 100644 (file)
@@ -225,7 +225,7 @@ static inline void audit_ptrace(struct task_struct *t)
 extern unsigned int audit_serial(void);
 extern int auditsc_get_stamp(struct audit_context *ctx,
                              struct timespec *t, unsigned int *serial);
-extern int audit_set_loginuid(kuid_t loginuid);
+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
 
 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
 {
index 576e4639ca609e8bfb8686474406073937e912e9..28fd9269a0b19b25a9a1035d8a8c50c4d6678bd1 100644 (file)
@@ -44,7 +44,7 @@ struct linux_binprm {
        unsigned interp_flags;
        unsigned interp_data;
        unsigned long loader, exec;
-};
+} __randomize_layout;
 
 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
@@ -77,8 +77,10 @@ struct linux_binfmt {
        int (*load_binary)(struct linux_binprm *);
        int (*load_shlib)(struct file *);
        int (*core_dump)(struct coredump_params *cprm);
+       void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
+       void (*handle_mmap)(struct file *);
        unsigned long min_coredump;     /* minimal dump size */
-};
+} __do_const __randomize_layout;
 
 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
 
index 202e4034fe26c41157dda26af230aba553891f1f..16e661786f5811c4cd91e9537e0d8753ada5c2c3 100644 (file)
@@ -302,7 +302,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
                return __bitmap_full(src, nbits);
 }
 
-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
+static inline int __intentional_overflow(-1) bitmap_weight(const unsigned long *src, unsigned int nbits)
 {
        if (small_const_nbits(nbits))
                return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
index 5d858e02997f5248d8a50150d870248e8146f4e6..336c1d9dc788434fa1bdfc5572f7782f8c48fdbf 100644 (file)
@@ -105,7 +105,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
  * @word: value to rotate
  * @shift: bits to roll
  */
-static inline __u32 rol32(__u32 word, unsigned int shift)
+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
 {
        return (word << shift) | (word >> (32 - shift));
 }
@@ -115,7 +115,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
  * @word: value to rotate
  * @shift: bits to roll
  */
-static inline __u32 ror32(__u32 word, unsigned int shift)
+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
 {
        return (word >> shift) | (word << (32 - shift));
 }
@@ -171,7 +171,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
        return (__s32)(value << shift) >> shift;
 }
 
-static inline unsigned fls_long(unsigned long l)
+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
 {
        if (sizeof(l) == 4)
                return fls(l);
index 92f4b4b288dd57005b30c2b76d7ce5aed070c0d4..483d537023511592986512586da8c8919385146f 100644 (file)
@@ -1613,7 +1613,7 @@ struct block_device_operations {
        /* this callback is with swap_lock and sometimes page table lock held */
        void (*swap_slot_free_notify) (struct block_device *, unsigned long);
        struct module *owner;
-};
+} __do_const;
 
 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
                                 unsigned long);
index afc1343df3c7ab3775c8fc2e822c4e619bf658c1..9735539e10da372392ce80e82ce51f4ed86f3f58 100644 (file)
@@ -25,7 +25,7 @@ struct blk_trace {
        struct dentry *dropped_file;
        struct dentry *msg_file;
        struct list_head running_list;
-       atomic_t dropped;
+       atomic_unchecked_t dropped;
 };
 
 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
index 17e7e82d2aa758f9888419a9c03aa4059e16b247..1d7da26df1f9034659abdb7a4c1a0b7675064d40 100644 (file)
 #define __read_mostly
 #endif
 
+#ifndef __read_only
+#ifdef CONFIG_PAX_KERNEXEC
+#error KERNEXEC requires __read_only
+#else
+#define __read_only __read_mostly
+#endif
+#endif
+
 #ifndef ____cacheline_aligned
 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
 #endif
index aa93e5ef594c15eb5a733c005dd7c5590e56b00e..985a1b0bab23d4a92bc0ebbb34c38551c266dc75 100644 (file)
@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
 extern bool capable(int cap);
 extern bool ns_capable(struct user_namespace *ns, int cap);
 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
+extern bool capable_nolog(int cap);
+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
 
 /* audit system wants to get cap info from files as well */
 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
 
+extern int is_privileged_binary(const struct dentry *dentry);
+
 #endif /* !_LINUX_CAPABILITY_H */
index 8609d577bb66df1a50c92fc6ef2873f30b6f3e30..86e4d795e347e1f70cb31d5bfa670b7ef60830a0 100644 (file)
@@ -87,7 +87,6 @@ struct cdrom_device_ops {
 
 /* driver specifications */
        const int capability;   /* capability flags */
-       int n_minors;           /* number of active minor devices */
        /* handle uniform packets for scsi type devices (scsi,atapi) */
        int (*generic_packet) (struct cdrom_device_info *,
                               struct packet_command *);
index 4ce9056b31a8d96f5d8b2696555eea2dbffd45a3..86caac69c6d1dda1a36dca8c2c95a2c2cf49f0a7 100644 (file)
@@ -31,7 +31,7 @@ struct cleancache_ops {
        void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
        void (*invalidate_inode)(int, struct cleancache_filekey);
        void (*invalidate_fs)(int);
-};
+} __no_const;
 
 extern struct cleancache_ops *
        cleancache_register_ops(struct cleancache_ops *ops);
index d936409520f8db609994f7ddab629a99981883dc..ce9f8422315c190249beba4a7834bb77d1be9270 100644 (file)
@@ -191,6 +191,7 @@ struct clk_ops {
        void            (*init)(struct clk_hw *hw);
        int             (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
 };
+typedef struct clk_ops __no_const clk_ops_no_const;
 
 /**
  * struct clk_init_data - holds init data that's common to all clocks and is
index 7450ca2ac1fc6a5d585084dadde7851ea0c35464..a824b81c3a85ca20353874032f2f8b759641a8f7 100644 (file)
@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
                           compat_size_t __user *len_ptr);
 
 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
                compat_ssize_t msgsz, int msgflg);
@@ -439,7 +439,7 @@ extern int compat_ptrace_request(struct task_struct *child,
 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                               compat_ulong_t addr, compat_ulong_t data);
 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
-                                 compat_long_t addr, compat_long_t data);
+                                 compat_ulong_t addr, compat_ulong_t data);
 
 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
 /*
index d1a558239b1a189768d804c13b39617677cd93b4..4424efa9755ff09a7e46fe2fbd18b98a82a78c4d 100644 (file)
 # define __compiletime_warning(message) __attribute__((warning(message)))
 # define __compiletime_error(message) __attribute__((error(message)))
 #endif /* __CHECKER__ */
+
+#define __alloc_size(...)      __attribute((alloc_size(__VA_ARGS__)))
+#define __bos(ptr, arg)                __builtin_object_size((ptr), (arg))
+#define __bos0(ptr)            __bos((ptr), 0)
+#define __bos1(ptr)            __bos((ptr), 1)
 #endif /* GCC_VERSION >= 40300 */
 
 #if GCC_VERSION >= 40500
+
+#ifdef RANDSTRUCT_PLUGIN
+#define __randomize_layout __attribute__((randomize_layout))
+#define __no_randomize_layout __attribute__((no_randomize_layout))
+#endif
+
+#ifdef CONSTIFY_PLUGIN
+#define __no_const __attribute__((no_const))
+#define __do_const __attribute__((do_const))
+#endif
+
+#ifdef SIZE_OVERFLOW_PLUGIN
+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
+#endif
+
+#ifdef LATENT_ENTROPY_PLUGIN
+#define __latent_entropy __attribute__((latent_entropy))
+#endif
+
 /*
  * Mark a position in code as unreachable.  This can be used to
  * suppress control flow warnings after asm blocks that transfer
index c8c565952548e6ca834425b51dce65c34e5255e7..d09f2adfb38c88b5cf0e053cfe7538c3caf3b837 100644 (file)
 # define __compiletime_error(message) __attribute__((error(message)))
 #endif /* __CHECKER__ */
 
+#define __alloc_size(...)      __attribute((alloc_size(__VA_ARGS__)))
+#define __bos(ptr, arg)                __builtin_object_size((ptr), (arg))
+#define __bos0(ptr)            __bos((ptr), 0)
+#define __bos1(ptr)            __bos((ptr), 1)
+
+#ifdef CONSTIFY_PLUGIN
+#error not yet
+#define __no_const __attribute__((no_const))
+#define __do_const __attribute__((do_const))
+#endif
+
+#ifdef SIZE_OVERFLOW_PLUGIN
+#error not yet
+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
+#endif
+
+#ifdef LATENT_ENTROPY_PLUGIN
+#error not yet
+#define __latent_entropy __attribute__((latent_entropy))
+#endif
+
 /*
  * Mark a position in code as unreachable.  This can be used to
  * suppress control flow warnings after asm blocks that transfer
index fa6a31441d0425a41636f76236b80f7a3e4f7786..752a6ef4127c498d6a43fc4c5fc56084a52cd34c 100644 (file)
@@ -5,11 +5,14 @@
 
 #ifdef __CHECKER__
 # define __user                __attribute__((noderef, address_space(1)))
+# define __force_user  __force __user
 # define __kernel      __attribute__((address_space(0)))
+# define __force_kernel        __force __kernel
 # define __safe                __attribute__((safe))
 # define __force       __attribute__((force))
 # define __nocast      __attribute__((nocast))
 # define __iomem       __attribute__((noderef, address_space(2)))
+# define __force_iomem __force __iomem
 # define __must_hold(x)        __attribute__((context(x,1,1)))
 # define __acquires(x) __attribute__((context(x,0,1)))
 # define __releases(x) __attribute__((context(x,1,0)))
 # define __release(x)  __context__(x,-1)
 # define __cond_lock(x,c)      ((c) ? ({ __acquire(x); 1; }) : 0)
 # define __percpu      __attribute__((noderef, address_space(3)))
+# define __force_percpu        __force __percpu
 #ifdef CONFIG_SPARSE_RCU_POINTER
 # define __rcu         __attribute__((noderef, address_space(4)))
+# define __force_rcu   __force __rcu
 #else
 # define __rcu
+# define __force_rcu
 #endif
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
 #else
-# define __user
-# define __kernel
+# ifdef CHECKER_PLUGIN
+//#  define __user
+//#  define __force_user
+//#  define __kernel
+//#  define __force_kernel
+# else
+#  ifdef STRUCTLEAK_PLUGIN
+#   define __user __attribute__((user))
+#  else
+#   define __user
+#  endif
+#  define __force_user
+#  define __kernel
+#  define __force_kernel
+# endif
 # define __safe
 # define __force
 # define __nocast
 # define __iomem
+# define __force_iomem
 # define __chk_user_ptr(x) (void)0
 # define __chk_io_ptr(x) (void)0
 # define __builtin_warning(x, y...) (1)
@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 # define __release(x) (void)0
 # define __cond_lock(x,c) (c)
 # define __percpu
+# define __force_percpu
 # define __rcu
+# define __force_rcu
 #endif
 
 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
@@ -201,32 +223,32 @@ static __always_inline void data_access_exceeds_word_size(void)
 static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
 {
        switch (size) {
-       case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
-       case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
-       case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
+       case 1: *(__u8 *)res = *(const volatile __u8 *)p; break;
+       case 2: *(__u16 *)res = *(const volatile __u16 *)p; break;
+       case 4: *(__u32 *)res = *(const volatile __u32 *)p; break;
 #ifdef CONFIG_64BIT
-       case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
+       case 8: *(__u64 *)res = *(const volatile __u64 *)p; break;
 #endif
        default:
                barrier();
-               __builtin_memcpy((void *)res, (const void *)p, size);
+               __builtin_memcpy(res, (const void *)p, size);
                data_access_exceeds_word_size();
                barrier();
        }
 }
 
-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+static __always_inline void __write_once_size(volatile void *p, const void *res, int size)
 {
        switch (size) {
-       case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
-       case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
-       case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
+       case 1: *(volatile __u8 *)p = *(const __u8 *)res; break;
+       case 2: *(volatile __u16 *)p = *(const __u16 *)res; break;
+       case 4: *(volatile __u32 *)p = *(const __u32 *)res; break;
 #ifdef CONFIG_64BIT
-       case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
+       case 8: *(volatile __u64 *)p = *(const __u64 *)res; break;
 #endif
        default:
                barrier();
-               __builtin_memcpy((void *)p, (const void *)res, size);
+               __builtin_memcpy((void *)p, res, size);
                data_access_exceeds_word_size();
                barrier();
        }
@@ -360,6 +382,34 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 # define __attribute_const__   /* unimplemented */
 #endif
 
+#ifndef __randomize_layout
+# define __randomize_layout
+#endif
+
+#ifndef __no_randomize_layout
+# define __no_randomize_layout
+#endif
+
+#ifndef __no_const
+# define __no_const
+#endif
+
+#ifndef __do_const
+# define __do_const
+#endif
+
+#ifndef __size_overflow
+# define __size_overflow(...)
+#endif
+
+#ifndef __intentional_overflow
+# define __intentional_overflow(...)
+#endif
+
+#ifndef __latent_entropy
+# define __latent_entropy
+#endif
+
 /*
  * Tell gcc if a function is cold. The compiler will assume any path
  * directly leading to the call is unlikely.
@@ -369,6 +419,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 #define __cold
 #endif
 
+#ifndef __alloc_size
+#define __alloc_size(...)
+#endif
+
+#ifndef __bos
+#define __bos(ptr, arg)
+#endif
+
+#ifndef __bos0
+#define __bos0(ptr)
+#endif
+
+#ifndef __bos1
+#define __bos1(ptr)
+#endif
+
 /* Simple shorthand for a section definition */
 #ifndef __section
 # define __section(S) __attribute__ ((__section__(#S)))
@@ -462,8 +528,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
  */
 #define __ACCESS_ONCE(x) ({ \
         __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
-       (volatile typeof(x) *)&(x); })
+       (volatile const typeof(x) *)&(x); })
 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
 
 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
 #ifdef CONFIG_KPROBES
index 5d5aaae3af433ff62b6e03f107c7dfe88a06b522..0ea9b844e9b86463cca2cee4ae1e02124859ae47 100644 (file)
@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
 
 extern void wait_for_completion(struct completion *);
 extern void wait_for_completion_io(struct completion *);
-extern int wait_for_completion_interruptible(struct completion *x);
-extern int wait_for_completion_killable(struct completion *x);
+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
 extern unsigned long wait_for_completion_timeout(struct completion *x,
-                                                  unsigned long timeout);
+                                                  unsigned long timeout) __intentional_overflow(-1);
 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
-                                                   unsigned long timeout);
+                                                   unsigned long timeout) __intentional_overflow(-1);
 extern long wait_for_completion_interruptible_timeout(
-       struct completion *x, unsigned long timeout);
+       struct completion *x, unsigned long timeout) __intentional_overflow(-1);
 extern long wait_for_completion_killable_timeout(
-       struct completion *x, unsigned long timeout);
+       struct completion *x, unsigned long timeout) __intentional_overflow(-1);
 extern bool try_wait_for_completion(struct completion *x);
 extern bool completion_done(struct completion *x);
 
index 34025df6182962952e094f9c98e7a38b3c25bab4..d94bbbc1e4fb019d838663e2b0c9856073a4cd0a 100644 (file)
@@ -125,7 +125,7 @@ struct configfs_attribute {
        const char              *ca_name;
        struct module           *ca_owner;
        umode_t                 ca_mode;
-};
+} __do_const;
 
 /*
  * Users often need to create attribute structures for their configurable
index 4d078cebafd2dd5f059c65bd75a683a81b80e15f..c970f4deddceedcc1aaf1eff1aecd5725c4f89e9 100644 (file)
@@ -206,6 +206,7 @@ struct global_attr {
        ssize_t (*store)(struct kobject *a, struct attribute *b,
                         const char *c, size_t count);
 };
+typedef struct global_attr __no_const global_attr_no_const;
 
 #define define_one_global_ro(_name)            \
 static struct global_attr _name =              \
@@ -277,7 +278,7 @@ struct cpufreq_driver {
        bool            boost_supported;
        bool            boost_enabled;
        int             (*set_boost)(int state);
-};
+} __do_const;
 
 /* flags */
 #define CPUFREQ_STICKY         (1 << 0)        /* driver isn't removed even if
index ab70f3bc44ad7a2c1ddf2454dac51f7a80e95f41..3ef7771f93c207dda05ad006aeb4bc71aa257e7e 100644 (file)
@@ -50,7 +50,8 @@ struct cpuidle_state {
                        int index);
 
        int (*enter_dead) (struct cpuidle_device *dev, int index);
-};
+} __do_const;
+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
 
 /* Idle State Flags */
 #define CPUIDLE_FLAG_COUPLED   (0x02) /* state applies to multiple cpus */
@@ -206,7 +207,7 @@ struct cpuidle_governor {
        void (*reflect)         (struct cpuidle_device *dev, int index);
 
        struct module           *owner;
-};
+} __do_const;
 
 #ifdef CONFIG_CPU_IDLE
 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
index b950e9d6008b1e611f0092117160d92c88ed76d4..63810aaa341e9140b8958a20020457320472827a 100644 (file)
@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
 }
 
 /* Valid inputs for n are -1 and 0. */
-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
 {
        return n+1;
 }
 
-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
 {
        return n+1;
 }
 
-static inline unsigned int cpumask_next_and(int n,
+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
                                            const struct cpumask *srcp,
                                            const struct cpumask *andp)
 {
@@ -174,7 +174,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
  *
  * Returns >= nr_cpu_ids if no further cpus set.
  */
-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
 {
        /* -1 is a legal arg here. */
        if (n != -1)
@@ -189,7 +189,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
  *
  * Returns >= nr_cpu_ids if no further cpus unset.
  */
-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
 {
        /* -1 is a legal arg here. */
        if (n != -1)
@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
        return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
 }
 
-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
 
@@ -464,7 +464,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
  * cpumask_weight - Count of bits in *srcp
  * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
  */
-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
+static inline unsigned int __intentional_overflow(-1) cpumask_weight(const struct cpumask *srcp)
 {
        return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
 }
index 2fb2ca2127ed0f031fb9639d41f56b7d3fdee88b..d6a3340e1ce86b0efc2f789dddbd6f97f4994ff3 100644 (file)
@@ -35,7 +35,7 @@ struct group_info {
        int             nblocks;
        kgid_t          small_block[NGROUPS_SMALL];
        kgid_t          *blocks[0];
-};
+} __randomize_layout;
 
 /**
  * get_group_info - Get a reference to a group info structure
@@ -137,7 +137,7 @@ struct cred {
        struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
        struct group_info *group_info;  /* supplementary groups for euid/fsgid */
        struct rcu_head rcu;            /* RCU deletion hook */
-};
+} __randomize_layout;
 
 extern void __put_cred(struct cred *);
 extern void exit_creds(struct task_struct *);
@@ -195,6 +195,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
 static inline void validate_process_creds(void)
 {
 }
+static inline void validate_task_creds(struct task_struct *task)
+{
+}
 #endif
 
 /**
@@ -332,6 +335,7 @@ static inline void put_cred(const struct cred *_cred)
 
 #define task_uid(task)         (task_cred_xxx((task), uid))
 #define task_euid(task)                (task_cred_xxx((task), euid))
+#define task_securebits(task)  (task_cred_xxx((task), securebits))
 
 #define current_cred_xxx(xxx)                  \
 ({                                             \
index 9c8776d0ada87bcf9fa70bc401ce69e336b93d74..8c526c20622607a3b91b289d228dc6010f3452e5 100644 (file)
@@ -626,7 +626,7 @@ struct cipher_tfm {
                          const u8 *key, unsigned int keylen);
        void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
        void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
-};
+} __no_const;
 
 struct hash_tfm {
        int (*init)(struct hash_desc *desc);
@@ -647,13 +647,13 @@ struct compress_tfm {
        int (*cot_decompress)(struct crypto_tfm *tfm,
                              const u8 *src, unsigned int slen,
                              u8 *dst, unsigned int *dlen);
-};
+} __no_const;
 
 struct rng_tfm {
        int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
                              unsigned int dlen);
        int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
-};
+} __no_const;
 
 #define crt_ablkcipher crt_u.ablkcipher
 #define crt_aead       crt_u.aead
index 653589e3e30e8b03dd1844011bae7ca19e41136b..4ef254a8fcd4332f7071564acedba98d24353fcb 100644 (file)
@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
  * Fast implementation of tolower() for internal usage. Do not use in your
  * code.
  */
-static inline char _tolower(const char c)
+static inline unsigned char _tolower(const unsigned char c)
 {
        return c | 0x20;
 }
index 5a813988e6d4736cab172b8dfe6000db1b4eb1c6..6bbee3049577f2a9ceae06a60e1be63287b3d9e9 100644 (file)
@@ -123,6 +123,9 @@ struct dentry {
        unsigned long d_time;           /* used by d_revalidate */
        void *d_fsdata;                 /* fs-specific data */
 
+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
+       atomic_t chroot_refcnt;         /* tracks use of directory in chroot */
+#endif
        struct list_head d_lru;         /* LRU list */
        struct list_head d_child;       /* child of parent list */
        struct list_head d_subdirs;     /* our children */
@@ -133,7 +136,7 @@ struct dentry {
                struct hlist_node d_alias;      /* inode alias list */
                struct rcu_head d_rcu;
        } d_u;
-};
+} __randomize_layout;
 
 /*
  * dentry->d_lock spinlock nesting subclasses:
index 7925bf0ee83627e8b58fb7bc895c04f27e858772..d5143d2fb61ceafb150fa6c3d4a660c79154f69e 100644 (file)
@@ -77,7 +77,7 @@ static void free(void *where)
  * warnings when not needed (indeed large_malloc / large_free are not
  * needed by inflate */
 
-#define malloc(a) kmalloc(a, GFP_KERNEL)
+#define malloc(a) kmalloc((a), GFP_KERNEL)
 #define free(a) kfree(a)
 
 #define large_malloc(a) vmalloc(a)
index ce447f0f1bad49e24351cec2c1ae0e2d2620ad4a..83c66bd5a31fc6a9228e85450af4f496bbf9efb2 100644 (file)
@@ -114,7 +114,7 @@ struct devfreq_governor {
        int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
        int (*event_handler)(struct devfreq *devfreq,
                                unsigned int event, void *data);
-};
+} __do_const;
 
 /**
  * struct devfreq - Device devfreq structure
index fb506738f7b74134aa84699fd960ebff251f044b..ec0b35ba9ad746715846d65cb5953f3310ce2d7b 100644 (file)
@@ -311,7 +311,7 @@ struct subsys_interface {
        struct list_head node;
        int (*add_dev)(struct device *dev, struct subsys_interface *sif);
        int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
-};
+} __do_const;
 
 int subsys_interface_register(struct subsys_interface *sif);
 void subsys_interface_unregister(struct subsys_interface *sif);
@@ -507,7 +507,7 @@ struct device_type {
        void (*release)(struct device *dev);
 
        const struct dev_pm_ops *pm;
-};
+} __do_const;
 
 /* interface for exporting device attributes */
 struct device_attribute {
@@ -517,11 +517,12 @@ struct device_attribute {
        ssize_t (*store)(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count);
 };
+typedef struct device_attribute __no_const device_attribute_no_const;
 
 struct dev_ext_attribute {
        struct device_attribute attr;
        void *var;
-};
+} __do_const;
 
 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
                          char *buf);
index c3007cb4bfa66fda55b2883f4e08f3fc341176ab..43efc8c3eea0a2bf772e5d4df386029a27000586 100644 (file)
@@ -60,7 +60,7 @@ struct dma_map_ops {
        u64 (*get_required_mask)(struct device *dev);
 #endif
        int is_phys;
-};
+} __do_const;
 
 #define DMA_BIT_MASK(n)        (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
 
index 40cd75e21ea2145fc535a255bfbd292f2a5e8a07..38572a93a969fac785c5bc49bb0032b6f0c10479 100644 (file)
@@ -1137,9 +1137,9 @@ struct dma_pinned_list {
 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
 
-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
        struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
        struct dma_pinned_list *pinned_list, struct page *page,
        unsigned int offset, size_t len);
 
index 0238d612750e9f14b7b5d2cb15e6560061adcc0e..34a758f3322f45f04660eb4192577f3f16d5823f 100644 (file)
@@ -1054,6 +1054,7 @@ struct efivar_operations {
        efi_set_variable_nonblocking_t *set_variable_nonblocking;
        efi_query_variable_store_t *query_variable_store;
 };
+typedef struct efivar_operations __no_const efivar_operations_no_const;
 
 struct efivars {
        /*
index 20fa8d8ae31335744c4566eb8b6305997b44db8b..3d0dd189a2bd48e547b34104d21ffdec3c8093a7 100644 (file)
@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
 #define elf_note       elf32_note
 #define elf_addr_t     Elf32_Off
 #define Elf_Half       Elf32_Half
+#define elf_dyn                Elf32_Dyn
 
 #else
 
@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
 #define elf_note       elf64_note
 #define elf_addr_t     Elf64_Off
 #define Elf_Half       Elf64_Half
+#define elf_dyn                Elf64_Dyn
 
 #endif
 
index a729120644d59d639ff00f4ad5f095e6b024df28..6ede2c9debf1c9597dc0668e10cc5ebbcc1673a6 100644 (file)
 
 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
 
-static inline void * __must_check ERR_PTR(long error)
+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
 {
        return (void *) error;
 }
 
-static inline long __must_check PTR_ERR(__force const void *ptr)
+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
 {
        return (long) ptr;
 }
index 36f49c405dfb334bf03db9d6f886e6b6fa2d3667..a2a1f4c0715001b6a1afb8402c3922fda8ee27d4 100644 (file)
@@ -135,7 +135,7 @@ struct extcon_dev {
        /* /sys/class/extcon/.../mutually_exclusive/... */
        struct attribute_group attr_g_muex;
        struct attribute **attrs_muex;
-       struct device_attribute *d_attrs_muex;
+       device_attribute_no_const *d_attrs_muex;
 };
 
 /**
index 09bb7a18d28775c676965e94ef28186793839e4a..d98870a5f1738a44b7318e758896884f962774b9 100644 (file)
@@ -305,7 +305,7 @@ struct fb_ops {
        /* called at KDB enter and leave time to prepare the console */
        int (*fb_debug_enter)(struct fb_info *info);
        int (*fb_debug_leave)(struct fb_info *info);
-};
+} __do_const;
 
 #ifdef CONFIG_FB_TILEBLITTING
 #define FB_TILE_CURSOR_NONE        0
index 230f87bdf5ad02008ff622e65bc761e41d4b22e0..1fd0485bb8f95ebe857f0696fb32e790364fec83 100644 (file)
@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
 void put_files_struct(struct files_struct *fs);
 void reset_files_struct(struct files_struct *);
 int unshare_files(struct files_struct **);
-struct files_struct *dup_fd(struct files_struct *, int *);
+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
 void do_close_on_exec(struct files_struct *);
 int iterate_fd(struct files_struct *, unsigned,
                int (*)(const void *, struct file *, unsigned),
index 8293262401de39db28832ddca2d6e5bc02fd5923..2b3b8bdc5c1918e85e271d614a8096c85d6271d1 100644 (file)
@@ -11,7 +11,7 @@ struct frontswap_ops {
        int (*load)(unsigned, pgoff_t, struct page *);
        void (*invalidate_page)(unsigned, pgoff_t);
        void (*invalidate_area)(unsigned);
-};
+} __no_const;
 
 extern bool frontswap_enabled;
 extern struct frontswap_ops *
index 42efe13077b6c1b8dd139c6cea7a241e5d6b320d..72d42eed008c307a6a47e4a87d949aa76afc2471 100644 (file)
@@ -413,7 +413,7 @@ struct address_space {
        spinlock_t              private_lock;   /* for use by the address_space */
        struct list_head        private_list;   /* ditto */
        void                    *private_data;  /* ditto */
-} __attribute__((aligned(sizeof(long))));
+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
        /*
         * On most architectures that alignment is already the case; but
         * must be enforced here for CRIS, to let the least significant bit
@@ -456,7 +456,7 @@ struct block_device {
        int                     bd_fsfreeze_count;
        /* Mutex for freeze */
        struct mutex            bd_fsfreeze_mutex;
-};
+} __randomize_layout;
 
 /*
  * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
@@ -642,7 +642,7 @@ struct inode {
 #endif
 
        void                    *i_private; /* fs or device private pointer */
-};
+} __randomize_layout;
 
 static inline int inode_unhashed(struct inode *inode)
 {
@@ -837,7 +837,7 @@ struct file {
        struct list_head        f_tfile_llink;
 #endif /* #ifdef CONFIG_EPOLL */
        struct address_space    *f_mapping;
-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
+} __attribute__((aligned(4))) __randomize_layout;      /* lest something weird decides that 2 is OK */
 
 struct file_handle {
        __u32 handle_bytes;
@@ -962,7 +962,7 @@ struct file_lock {
                        int state;              /* state of grant or error if -ve */
                } afs;
        } fl_u;
-};
+} __randomize_layout;
 
 /* The following constant reflects the upper bound of the file/locking space */
 #ifndef OFFSET_MAX
@@ -1305,7 +1305,7 @@ struct super_block {
         * Indicates how deep in a filesystem stack this SB is
         */
        int s_stack_depth;
-};
+} __randomize_layout;
 
 extern struct timespec current_fs_time(struct super_block *sb);
 
@@ -1536,7 +1536,8 @@ struct file_operations {
        long (*fallocate)(struct file *file, int mode, loff_t offset,
                          loff_t len);
        void (*show_fdinfo)(struct seq_file *m, struct file *f);
-};
+} __do_const __randomize_layout;
+typedef struct file_operations __no_const file_operations_no_const;
 
 struct inode_operations {
        struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
@@ -2854,4 +2855,14 @@ static inline bool dir_relax(struct inode *inode)
        return !IS_DEADDIR(inode);
 }
 
+static inline bool is_sidechannel_device(const struct inode *inode)
+{
+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
+       umode_t mode = inode->i_mode;
+       return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
+#else
+       return false;
+#endif
+}
+
 #endif /* _LINUX_FS_H */
index 0efc3e62843ae74592128dc7e34bd11fbaca4a3c..fd236105cae6027c1d5347004cbfd9d35f098490 100644 (file)
@@ -6,13 +6,13 @@
 #include <linux/seqlock.h>
 
 struct fs_struct {
-       int users;
+       atomic_t users;
        spinlock_t lock;
        seqcount_t seq;
        int umask;
        int in_exec;
        struct path root, pwd;
-};
+} __randomize_layout;
 
 extern struct kmem_cache *fs_cachep;
 
index 771484993ca7c662e6dc07c115e421050459256a..a4a5c7a68d25b55c6cc87e4ab8daec9b636da6c2 100644 (file)
@@ -113,7 +113,7 @@ struct fscache_operation {
        fscache_operation_release_t release;
 };
 
-extern atomic_t fscache_op_debug_id;
+extern atomic_unchecked_t fscache_op_debug_id;
 extern void fscache_op_work_func(struct work_struct *work);
 
 extern void fscache_enqueue_operation(struct fscache_operation *);
@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
        INIT_WORK(&op->work, fscache_op_work_func);
        atomic_set(&op->usage, 1);
        op->state = FSCACHE_OP_ST_INITIALISED;
-       op->debug_id = atomic_inc_return(&fscache_op_debug_id);
+       op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
        op->processor = processor;
        op->release = release;
        INIT_LIST_HEAD(&op->pend_link);
index 115bb81912ccc8759a54d206487cc8936e2a90c7..e7b812b950c8dc01608cae78d9439f2c42deb1cd 100644 (file)
@@ -152,7 +152,7 @@ struct fscache_cookie_def {
         * - this is mandatory for any object that may have data
         */
        void (*now_uncached)(void *cookie_netfs_data);
-};
+} __do_const;
 
 /*
  * fscache cached network filesystem type
index 7ee1774edee51c7f872a8a9cc5c49fe0e8d83714..72505b83e5997f22cd6c5b1726b938cc93f658b7 100644 (file)
@@ -197,6 +197,9 @@ static inline void fsnotify_access(struct file *file)
        struct inode *inode = file_inode(file);
        __u32 mask = FS_ACCESS;
 
+       if (is_sidechannel_device(inode))
+               return;
+
        if (S_ISDIR(inode->i_mode))
                mask |= FS_ISDIR;
 
@@ -215,6 +218,9 @@ static inline void fsnotify_modify(struct file *file)
        struct inode *inode = file_inode(file);
        __u32 mask = FS_MODIFY;
 
+       if (is_sidechannel_device(inode))
+               return;
+
        if (S_ISDIR(inode->i_mode))
                mask |= FS_ISDIR;
 
@@ -317,7 +323,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
  */
 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
 {
-       return kstrdup(name, GFP_KERNEL);
+       return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
 }
 
 /*
index ec274e0f4ed28d1834bb7b3115bd1010f92396a8..e6781599706bb5f62638be2c0bb6e1be9b8d69d8 100644 (file)
@@ -194,7 +194,7 @@ struct gendisk {
        struct kobject *slave_dir;
 
        struct timer_rand_state *random;
-       atomic_t sync_io;               /* RAID */
+       atomic_unchecked_t sync_io;     /* RAID */
        struct disk_events *ev;
 #ifdef  CONFIG_BLK_DEV_INTEGRITY
        struct blk_integrity *integrity;
@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
 
 /* drivers/char/random.c */
-extern void add_disk_randomness(struct gendisk *disk);
+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
 extern void rand_initialize_disk(struct gendisk *disk);
 
 static inline sector_t get_start_sect(struct block_device *bdev)
index 667c31101b8b91f0b1d17a99a08edd9cf8d29d63..abac2a74a62c680d7049e461b704643c891485d3 100644 (file)
@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
 },
 
 #define ZZZ_genl_ops           CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
+static struct genl_ops ZZZ_genl_ops[] = {
 #include GENL_MAGIC_INCLUDE_FILE
 };
 
index b840e3b2770dd726ef3b4d4e4b07f2f41061f8e7..aeaeef9625fc4cadff668f9080bf127d6aeebe3c 100644 (file)
@@ -34,6 +34,13 @@ struct vm_area_struct;
 #define ___GFP_NO_KSWAPD       0x400000u
 #define ___GFP_OTHER_NODE      0x800000u
 #define ___GFP_WRITE           0x1000000u
+
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+#define ___GFP_USERCOPY                0x2000000u
+#else
+#define ___GFP_USERCOPY                0
+#endif
+
 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
 
 /*
@@ -90,6 +97,7 @@ struct vm_area_struct;
 #define __GFP_NO_KSWAPD        ((__force gfp_t)___GFP_NO_KSWAPD)
 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
 #define __GFP_WRITE    ((__force gfp_t)___GFP_WRITE)   /* Allocator intends to dirty page */
+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
 
 /*
  * This may seem redundant, but it's a way of annotating false positives vs.
@@ -97,7 +105,7 @@ struct vm_area_struct;
  */
 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
 
-#define __GFP_BITS_SHIFT 25    /* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 26    /* Room for N __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /* This equals 0, but use constants in case they ever change */
@@ -152,6 +160,8 @@ struct vm_area_struct;
 /* 4GB DMA on some platforms */
 #define GFP_DMA32      __GFP_DMA32
 
+#define GFP_USERCOPY   __GFP_USERCOPY
+
 /* Convert GFP flags to their corresponding migrate type */
 static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
 {
index 9286a46b7d69b539f027bcc890b3be976d20f228..373f27f7eaf80272985d039a744e88d087b46b57 100644 (file)
@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
        kunmap_atomic(kaddr);
 }
 
+static inline void sanitize_highpage(struct page *page)
+{
+       void *kaddr;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       kaddr = kmap_atomic(page);
+       clear_page(kaddr);
+       kunmap_atomic(kaddr);
+       local_irq_restore(flags);
+}
+
 static inline void zero_user_segments(struct page *page,
        unsigned start1, unsigned end1,
        unsigned start2, unsigned end2)
index 1c7b89ae6bdcb3977899c21b152271189980118a..7dda4003a1a6ab9d913c9805b8dac2607234efb9 100644 (file)
@@ -25,7 +25,8 @@
 struct sensor_device_attribute{
        struct device_attribute dev_attr;
        int index;
-};
+} __do_const;
+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
 #define to_sensor_dev_attr(_dev_attr) \
        container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
 
@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
        struct device_attribute dev_attr;
        u8 index;
        u8 nr;
-};
+} __do_const;
+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
 #define to_sensor_dev_attr_2(_dev_attr) \
        container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
 
index 7c7695940dddeae9d3d22129ce4a14eaf70e1a5e..153e5975a4a4a78dadc3d98b803801ca667f817f 100644 (file)
@@ -413,6 +413,7 @@ struct i2c_algorithm {
        int (*unreg_slave)(struct i2c_client *client);
 #endif
 };
+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
 
 /**
  * struct i2c_bus_recovery_info - I2C bus recovery information
index d23c3c20b2014e665ba4754b8168018e4b7862cc..eb63c81e118c7069f84e532cd53729ad7853be54 100644 (file)
@@ -565,7 +565,7 @@ struct i2o_controller {
        struct i2o_device *exec;        /* Executive */
 #if BITS_PER_LONG == 64
        spinlock_t context_list_lock;   /* lock for context_list */
-       atomic_t context_list_counter;  /* needed for unique contexts */
+       atomic_unchecked_t context_list_counter;        /* needed for unique contexts */
        struct list_head context_list;  /* list of context id's
                                           and pointers */
 #endif
index aff7ad8a4ea3cdea45daca2049a7b83dc8151cc3..3942bbd9af29889875ce50a4a267fde7f60e97f2 100644 (file)
@@ -76,7 +76,7 @@ struct pppox_proto {
        int             (*ioctl)(struct socket *sock, unsigned int cmd,
                                 unsigned long arg);
        struct module   *owner;
-};
+} __do_const;
 
 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
 extern void unregister_pppox_proto(int proto_num);
index 2df8e8dd10a483d55d723b6ac4875de807ab2044..3e1280d0ebc03e9f8a01a00ffa7bc2face477f93 100644 (file)
  * section.
  */
 
+#define add_init_latent_entropy __latent_entropy
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+#define add_meminit_latent_entropy
+#else
+#define add_meminit_latent_entropy __latent_entropy
+#endif
+
 /* These are for everybody (although not all archs will actually
    discard it in modules) */
-#define __init         __section(.init.text) __cold notrace
+#define __init         __section(.init.text) __cold notrace add_init_latent_entropy
 #define __initdata     __section(.init.data)
 #define __initconst    __constsection(.init.rodata)
 #define __exitdata     __section(.exit.data)
 #define __cpuexitconst
 
 /* Used for MEMORY_HOTPLUG */
-#define __meminit        __section(.meminit.text) __cold notrace
+#define __meminit        __section(.meminit.text) __cold notrace add_meminit_latent_entropy
 #define __meminitdata    __section(.meminit.data)
 #define __meminitconst   __constsection(.meminit.rodata)
 #define __memexit        __section(.memexit.text) __exitused __cold notrace
index 3037fc085e8e16817f37a57621fde22659f64012..c6527ce9d3d42b258a2b5b00eaa1ed94ea3f4363 100644 (file)
@@ -158,6 +158,12 @@ extern struct task_group root_task_group;
 
 #define INIT_TASK_COMM "swapper"
 
+#ifdef CONFIG_X86
+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
+#else
+#define INIT_TASK_THREAD_INFO
+#endif
+
 #ifdef CONFIG_RT_MUTEXES
 # define INIT_RT_MUTEXES(tsk)                                          \
        .pi_waiters = RB_ROOT,                                          \
@@ -214,6 +220,7 @@ extern struct task_group root_task_group;
        RCU_POINTER_INITIALIZER(cred, &init_cred),                      \
        .comm           = INIT_TASK_COMM,                               \
        .thread         = INIT_THREAD,                                  \
+       INIT_TASK_THREAD_INFO                                           \
        .fs             = &init_fs,                                     \
        .files          = &init_files,                                  \
        .signal         = &init_signals,                                \
index d9b05b5bf8c7954f63ace548e0a6348f5fa0e327..e5f5b7bace07fd50436b9e384e436572467d8114 100644 (file)
@@ -413,8 +413,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
 
 struct softirq_action
 {
-       void    (*action)(struct softirq_action *);
-};
+       void    (*action)(void);
+} __no_const;
 
 asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
@@ -428,7 +428,7 @@ static inline void do_softirq_own_stack(void)
 }
 #endif
 
-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
+extern void open_softirq(int nr, void (*action)(void));
 extern void softirq_init(void);
 extern void __raise_softirq_irqoff(unsigned int nr);
 
index 38daa453f2e532f0c36d7ba81b10860ccc28b478..4de4317b8fd30e6e851431d16fb35bbcbd6b6c5c 100644 (file)
@@ -147,7 +147,7 @@ struct iommu_ops {
 
        unsigned long pgsize_bitmap;
        void *priv;
-};
+} __do_const;
 
 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE          1 /* Device added */
 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE          2 /* Pre Device removed */
index 2c5250222278069dabb31cf4dfbb8ce41fea95ca..345b106934656459a889506c67164dbbc0a45748 100644 (file)
@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
 int adjust_resource(struct resource *res, resource_size_t start,
                    resource_size_t size);
 resource_size_t resource_alignment(struct resource *res);
-static inline resource_size_t resource_size(const struct resource *res)
+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
 {
        return res->end - res->start + 1;
 }
index 1eee6bcfcf76a43856088b6314c2a52c0b2f10e1..9cf4912233f89b56b951b9e7b202f3c22f2852a4 100644 (file)
@@ -60,7 +60,7 @@ struct ipc_namespace {
        struct user_namespace *user_ns;
 
        struct ns_common ns;
-};
+} __randomize_layout;
 
 extern struct ipc_namespace init_ipc_ns;
 extern atomic_t nr_ipc_ns;
index d09ec7a1243e767dd95df2a4cc1d54775da0eb25..f373eb596f2b64f9aea1c04223706e01c8315eff 100644 (file)
@@ -364,7 +364,8 @@ struct irq_chip {
        void            (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
 
        unsigned long   flags;
-};
+} __do_const;
+typedef struct irq_chip __no_const irq_chip_no_const;
 
 /*
  * irq_chip specific flags
index 71d706d5f16922a5b95edfd6e6f93971139e7d81..817cdecf4d7a92ad48db1368d63cb296b11267ec 100644 (file)
@@ -95,7 +95,7 @@
 
 struct device_node;
 
-extern struct irq_chip gic_arch_extn;
+extern irq_chip_no_const gic_arch_extn;
 
 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
                    u32 offset, struct device_node *);
index faf433af425e41e2da532939af63ec258f8fd619..7dcb1863621d17577cd6f4d9cd5baea9c3e3c843 100644 (file)
@@ -61,7 +61,7 @@ struct irq_desc {
        unsigned int            irq_count;      /* For detecting broken IRQs */
        unsigned long           last_unhandled; /* Aging timer for unhandled count */
        unsigned int            irqs_unhandled;
-       atomic_t                threads_handled;
+       atomic_unchecked_t      threads_handled;
        int                     threads_handled_last;
        raw_spinlock_t          lock;
        struct cpumask          *percpu_enabled;
index c367cbdf73ab1a5b83f1af48c848be21b466167d..c9b79e6df2e1990b022a7c475a475cbb4df580ba 100644 (file)
@@ -280,20 +280,20 @@ extern unsigned long preset_lpj;
 /*
  * Convert various time units to each other:
  */
-extern unsigned int jiffies_to_msecs(const unsigned long j);
-extern unsigned int jiffies_to_usecs(const unsigned long j);
+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
 
-static inline u64 jiffies_to_nsecs(const unsigned long j)
+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
 {
        return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
 }
 
-extern unsigned long msecs_to_jiffies(const unsigned int m);
-extern unsigned long usecs_to_jiffies(const unsigned int u);
+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
 extern unsigned long timespec_to_jiffies(const struct timespec *value);
 extern void jiffies_to_timespec(const unsigned long jiffies,
-                               struct timespec *value);
-extern unsigned long timeval_to_jiffies(const struct timeval *value);
+                               struct timespec *value) __intentional_overflow(-1);
+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
 extern void jiffies_to_timeval(const unsigned long jiffies,
                               struct timeval *value);
 
index 6883e197acb9e939156c4934d9cc7150b1b107f5..e854fcb3d2a32920537f02034b7d56d0460ecaa4 100644 (file)
@@ -15,7 +15,8 @@
 
 struct module;
 
-#ifdef CONFIG_KALLSYMS
+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
 /* Lookup the address for a symbol. Returns 0 if not found. */
 unsigned long kallsyms_lookup_name(const char *name);
 
@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
 /* Stupid that this does nothing, but I didn't create this mess. */
 #define __print_symbol(fmt, addr)
 #endif /*CONFIG_KALLSYMS*/
+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
+       arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
+extern unsigned long kallsyms_lookup_name(const char *name);
+extern void __print_symbol(const char *fmt, unsigned long address);
+extern int sprint_backtrace(char *buffer, unsigned long address);
+extern int sprint_symbol(char *buffer, unsigned long address);
+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
+const char *kallsyms_lookup(unsigned long addr,
+                           unsigned long *symbolsize,
+                           unsigned long *offset,
+                           char **modname, char *namebuf);
+extern int kallsyms_lookup_size_offset(unsigned long addr,
+                                 unsigned long *symbolsize,
+                                 unsigned long *offset);
+#endif
 
 /* This macro allows us to keep printk typechecking */
 static __printf(1, 2)
index 64ce58bee6f5a74356f612a48730c35455ae6662..6bcdbfab46ebee610a4d72f03d339b1358a31875 100644 (file)
@@ -378,7 +378,7 @@ static inline int __must_check kstrtos32_from_user(const char __user *s, size_t
 /* Obsolete, do not use.  Use kstrto<foo> instead */
 
 extern unsigned long simple_strtoul(const char *,char **,unsigned int);
-extern long simple_strtol(const char *,char **,unsigned int);
+extern long simple_strtol(const char *,char **,unsigned int) __intentional_overflow(-1);
 extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
 extern long long simple_strtoll(const char *,char **,unsigned int);
 
index ff9f1d3942356ffd1f16ab777cb8c7ed201b35e8..6712be5342e3a88514168e76acc05f69bdab1819 100644 (file)
@@ -152,7 +152,7 @@ struct key_type {
        /* internal fields */
        struct list_head        link;           /* link in types list */
        struct lock_class_key   lock_class;     /* key->sem lock class */
-};
+} __do_const;
 
 extern struct key_type key_type_keyring;
 
index e465bb15912d98cd1136f985a110bf0347667bd1..19f605fd212d52c37f6d4f0799618ae088d9c0a8 100644 (file)
@@ -52,7 +52,7 @@ extern int kgdb_connected;
 extern int kgdb_io_module_registered;
 
 extern atomic_t                        kgdb_setting_breakpoint;
-extern atomic_t                        kgdb_cpu_doing_single_step;
+extern atomic_unchecked_t      kgdb_cpu_doing_single_step;
 
 extern struct task_struct      *kgdb_usethread;
 extern struct task_struct      *kgdb_contthread;
@@ -254,7 +254,7 @@ struct kgdb_arch {
        void    (*correct_hw_break)(void);
 
        void    (*enable_nmi)(bool on);
-};
+} __do_const;
 
 /**
  * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
@@ -279,7 +279,7 @@ struct kgdb_io {
        void                    (*pre_exception) (void);
        void                    (*post_exception) (void);
        int                     is_console;
-};
+} __do_const;
 
 extern struct kgdb_arch                arch_kgdb_ops;
 
index e705467ddb478d1d0a56f53269f5bebf19822b32..a92471d2f033077faf22c5de7c17cc52f950c9cb 100644 (file)
@@ -27,7 +27,7 @@
 
 extern void kmemleak_init(void) __ref;
 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
-                          gfp_t gfp) __ref;
+                          gfp_t gfp) __ref __size_overflow(2);
 extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
 extern void kmemleak_free(const void *ptr) __ref;
 extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
@@ -62,7 +62,7 @@ static inline void kmemleak_erase(void **ptr)
 static inline void kmemleak_init(void)
 {
 }
-static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
+static inline void __size_overflow(2) kmemleak_alloc(const void *ptr, size_t size, int min_count,
                                  gfp_t gfp)
 {
 }
index 0555cc66a15b27dfa7fd70c47e1a519ad49afef1..40116ceb9c73574e4f5facac3fb6343f83e97194 100644 (file)
@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
  * usually useless though. */
 extern __printf(2, 3)
 int __request_module(bool wait, const char *name, ...);
+extern __printf(3, 4)
+int ___request_module(bool wait, char *param_name, const char *name, ...);
 #define request_module(mod...) __request_module(true, mod)
 #define request_module_nowait(mod...) __request_module(false, mod)
 #define try_then_request_module(x, mod...) \
@@ -57,6 +59,9 @@ struct subprocess_info {
        struct work_struct work;
        struct completion *complete;
        char *path;
+#ifdef CONFIG_GRKERNSEC
+       char *origpath;
+#endif
        char **argv;
        char **envp;
        int wait;
index 2d61b909f414f6f61e37ca9c53372d85392242fd..a1d0a138e8adcd09ebdb07166dfb554a9232c2ee 100644 (file)
@@ -118,7 +118,7 @@ struct kobj_type {
        struct attribute **default_attrs;
        const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
        const void *(*namespace)(struct kobject *kobj);
-};
+} __do_const;
 
 struct kobj_uevent_env {
        char *argv[3];
@@ -142,6 +142,7 @@ struct kobj_attribute {
        ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
                         const char *buf, size_t count);
 };
+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
 
 extern const struct sysfs_ops kobj_sysfs_ops;
 
@@ -169,7 +170,7 @@ struct kset {
        spinlock_t list_lock;
        struct kobject kobj;
        const struct kset_uevent_ops *uevent_ops;
-};
+} __randomize_layout;
 
 extern void kset_init(struct kset *kset);
 extern int __must_check kset_register(struct kset *kset);
index df32d2508290aae03f539d0434030bf8a355bbd9..fb52e27d59744474e971f1a53334537240fbf338 100644 (file)
@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
        const void *(*netlink_ns)(struct sock *sk);
        const void *(*initial_ns)(void);
        void (*drop_ns)(void *);
-};
+} __do_const;
 
 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
 int kobj_ns_type_registered(enum kobj_ns_type type);
index 484604d184be7380807868d9a12edc80e9064db3..0f6c5b68aa96569b5ca70d4f0712a0e6f77f3316 100644 (file)
@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
 static inline int kref_sub(struct kref *kref, unsigned int count,
             void (*release)(struct kref *kref))
 {
-       WARN_ON(release == NULL);
+       BUG_ON(release == NULL);
 
        if (atomic_sub_and_test((int) count, &kref->refcount)) {
                release(kref);
index 26f106022c8869dc5609f06f10579fb4ca5c4596..bafc04aa6c46f8e7e49fa244cc3d3c76a21faf4e 100644 (file)
@@ -470,7 +470,7 @@ static inline void kvm_irqfd_exit(void)
 {
 }
 #endif
-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
                  struct module *module);
 void kvm_exit(void);
 
@@ -639,7 +639,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 
-int kvm_arch_init(void *opaque);
+int kvm_arch_init(const void *opaque);
 void kvm_arch_exit(void);
 
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
index 91f705de2c0be743dac06dac565817516058fc56..24be831a66990bda2e22ee928143ea78c741d1c8 100644 (file)
@@ -979,7 +979,7 @@ struct ata_port_operations {
         * fields must be pointers.
         */
        const struct ata_port_operations        *inherits;
-};
+} __do_const;
 
 struct ata_port_info {
        unsigned long           flags;
index a6a42dd024661324dbeed5b9cfaa028744bae154..6c5ebcec6f32c7df864998a2ac783500e69690ae 100644 (file)
@@ -36,6 +36,7 @@
 #endif
 
 #define __page_aligned_data    __section(.data..page_aligned) __aligned(PAGE_SIZE)
+#define __page_aligned_rodata  __read_only __aligned(PAGE_SIZE)
 #define __page_aligned_bss     __section(.bss..page_aligned) __aligned(PAGE_SIZE)
 
 /*
index feb773c76ee04cee9052eb7ef4ad75ecdb13b386..98f30753733663d49741f128d9b1bbd269fc5399 100644 (file)
@@ -113,6 +113,19 @@ extern void __list_del_entry(struct list_head *entry);
 extern void list_del(struct list_head *entry);
 #endif
 
+extern void __pax_list_add(struct list_head *new,
+                             struct list_head *prev,
+                             struct list_head *next);
+static inline void pax_list_add(struct list_head *new, struct list_head *head)
+{
+       __pax_list_add(new, head, head->next);
+}
+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
+{
+       __pax_list_add(new, head->prev, head);
+}
+extern void pax_list_del(struct list_head *entry);
+
 /**
  * list_replace - replace old entry by new one
  * @old : the element to be replaced
@@ -146,6 +159,8 @@ static inline void list_del_init(struct list_head *entry)
        INIT_LIST_HEAD(entry);
 }
 
+extern void pax_list_del_init(struct list_head *entry);
+
 /**
  * list_move - delete from one list and add as another's head
  * @list: the entry to move
index 4bfde0e99ed5169d7220204c39070faa387f57ab..d6e2e09b2cc6379a3893938abaca2519bc05dd35 100644 (file)
@@ -47,4 +47,36 @@ static inline int __lockref_is_dead(const struct lockref *l)
        return ((int)l->count < 0);
 }
 
+static inline unsigned int __lockref_read(struct lockref *lockref)
+{
+       return lockref->count;
+}
+
+static inline void __lockref_set(struct lockref *lockref, unsigned int count)
+{
+       lockref->count = count;
+}
+
+static inline void __lockref_inc(struct lockref *lockref)
+{
+
+#ifdef CONFIG_PAX_REFCOUNT
+       atomic_inc((atomic_t *)&lockref->count);
+#else
+       lockref->count++;
+#endif
+
+}
+
+static inline void __lockref_dec(struct lockref *lockref)
+{
+
+#ifdef CONFIG_PAX_REFCOUNT
+       atomic_dec((atomic_t *)&lockref->count);
+#else
+       lockref->count--;
+#endif
+
+}
+
 #endif /* __LINUX_LOCKREF_H */
index c45c089bfdaca9a91f32832102ff32291444884f..298841c85028e9cb4571fae4d8bb464a7ec7fc61 100644 (file)
@@ -15,7 +15,7 @@
  * This is commonly provided by 32bit archs to provide an optimized 64bit
  * divide.
  */
-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 {
        *remainder = dividend % divisor;
        return dividend / divisor;
@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
 /**
  * div64_u64 - unsigned 64bit divide with 64bit divisor
  */
-static inline u64 div64_u64(u64 dividend, u64 divisor)
+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
 {
        return dividend / divisor;
 }
@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
 #define div64_ul(x, y)   div_u64((x), (y))
 
 #ifndef div_u64_rem
-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 {
        *remainder = do_div(dividend, divisor);
        return dividend;
@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
 #endif
 
 #ifndef div64_u64
-extern u64 div64_u64(u64 dividend, u64 divisor);
+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
 #endif
 
 #ifndef div64_s64
@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
  * divide.
  */
 #ifndef div_u64
-static inline u64 div_u64(u64 dividend, u32 divisor)
+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
 {
        u32 remainder;
        return div_u64_rem(dividend, divisor, &remainder);
index 3d385c81c153497e9970b4f6227aeded05fd6f22..deacb6a0eed4df1d3f9141e2a0dfab0dd9325d86 100644 (file)
@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
 }
 
 #define vma_policy(vma) ((vma)->vm_policy)
+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
+{
+       vma->vm_policy = pol;
+}
 
 static inline void mpol_get(struct mempolicy *pol)
 {
@@ -229,6 +233,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
 }
 
 #define vma_policy(vma) NULL
+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
+{
+}
 
 static inline int
 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
index dd5ea3016fc4e854ded6b1e7c2e096224d83317f..cf81cd13b78adf62774a9bc49e4e0577cc4ac00d 100644 (file)
@@ -135,6 +135,11 @@ extern unsigned int kobjsize(const void *objp);
 
 #define VM_DONTCOPY    0x00020000      /* Do not copy this vma on fork */
 #define VM_DONTEXPAND  0x00040000      /* Cannot expand with mremap() */
+
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
+#define VM_PAGEEXEC    0x00080000      /* vma->vm_page_prot needs special handling */
+#endif
+
 #define VM_ACCOUNT     0x00100000      /* Is a VM accounted object */
 #define VM_NORESERVE   0x00200000      /* should the VM suppress accounting */
 #define VM_HUGETLB     0x00400000      /* Huge TLB Page VM */
@@ -256,8 +261,8 @@ struct vm_operations_struct {
        /* called by access_process_vm when get_user_pages() fails, typically
         * for use by special VMAs that can switch between memory and hardware
         */
-       int (*access)(struct vm_area_struct *vma, unsigned long addr,
-                     void *buf, int len, int write);
+       ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
+                     void *buf, size_t len, int write);
 
        /* Called by the /proc/PID/maps code to ask the vma whether it
         * has a special name.  Returning non-NULL will also cause this
@@ -291,6 +296,7 @@ struct vm_operations_struct {
        int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
                           unsigned long size, pgoff_t pgoff);
 };
+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
 
 struct mmu_gather;
 struct inode;
@@ -1183,8 +1189,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
        unsigned long *pfn);
 int follow_phys(struct vm_area_struct *vma, unsigned long address,
                unsigned int flags, unsigned long *prot, resource_size_t *phys);
-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
-                       void *buf, int len, int write);
+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+                       void *buf, size_t len, int write);
 
 static inline void unmap_shared_mapping_range(struct address_space *mapping,
                loff_t const holebegin, loff_t const holelen)
@@ -1224,9 +1230,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
 }
 #endif
 
-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-               void *buf, int len, int write);
+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
+               void *buf, size_t len, int write);
 
 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                      unsigned long start, unsigned long nr_pages,
@@ -1258,34 +1264,6 @@ int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
 
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
-{
-       return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
-static inline int stack_guard_page_start(struct vm_area_struct *vma,
-                                            unsigned long addr)
-{
-       return (vma->vm_flags & VM_GROWSDOWN) &&
-               (vma->vm_start == addr) &&
-               !vma_growsdown(vma->vm_prev, addr);
-}
-
-/* Is the vma a continuation of the stack vma below it? */
-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
-{
-       return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
-}
-
-static inline int stack_guard_page_end(struct vm_area_struct *vma,
-                                          unsigned long addr)
-{
-       return (vma->vm_flags & VM_GROWSUP) &&
-               (vma->vm_end == addr) &&
-               !vma_growsup(vma->vm_next, addr);
-}
-
 extern struct task_struct *task_of_stack(struct task_struct *task,
                                struct vm_area_struct *vma, bool in_group);
 
@@ -1403,8 +1381,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
 {
        return 0;
 }
+
+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
+                                               unsigned long address)
+{
+       return 0;
+}
 #else
 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
 #endif
 
 #ifdef __PAGETABLE_PMD_FOLDED
@@ -1413,8 +1398,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
 {
        return 0;
 }
+
+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
+                                               unsigned long address)
+{
+       return 0;
+}
 #else
 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
 #endif
 
 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -1432,11 +1424,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
                NULL: pud_offset(pgd, address);
 }
 
+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+       return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
+               NULL: pud_offset(pgd, address);
+}
+
 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 {
        return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
                NULL: pmd_offset(pud, address);
 }
+
+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
+{
+       return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
+               NULL: pmd_offset(pud, address);
+}
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
 #if USE_SPLIT_PTE_PTLOCKS
@@ -1819,12 +1823,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
        bool *need_rmap_locks);
 extern void exit_mmap(struct mm_struct *);
 
+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
+extern void gr_learn_resource(const struct task_struct *task, const int res,
+                             const unsigned long wanted, const int gt);
+#else
+static inline void gr_learn_resource(const struct task_struct *task, const int res,
+                                    const unsigned long wanted, const int gt)
+{
+}
+#endif
+
 static inline int check_data_rlimit(unsigned long rlim,
                                    unsigned long new,
                                    unsigned long start,
                                    unsigned long end_data,
                                    unsigned long start_data)
 {
+       gr_learn_resource(current, RLIMIT_DATA, (new - start) + (end_data - start_data), 1);
        if (rlim < RLIM_INFINITY) {
                if (((new - start) + (end_data - start_data)) > rlim)
                        return -ENOSPC;
@@ -1849,7 +1864,7 @@ extern int install_special_mapping(struct mm_struct *mm,
                                   unsigned long addr, unsigned long len,
                                   unsigned long flags, struct page **pages);
 
-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
 
 extern unsigned long mmap_region(struct file *file, unsigned long addr,
        unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
@@ -1857,6 +1872,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot, unsigned long flags,
        unsigned long pgoff, unsigned long *populate);
 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
 
 #ifdef CONFIG_MMU
 extern int __mm_populate(unsigned long addr, unsigned long len,
@@ -1885,10 +1901,11 @@ struct vm_unmapped_area_info {
        unsigned long high_limit;
        unsigned long align_mask;
        unsigned long align_offset;
+       unsigned long threadstack_offset;
 };
 
-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
 
 /*
  * Search for an unmapped address range.
@@ -1900,7 +1917,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
  * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
  */
 static inline unsigned long
-vm_unmapped_area(struct vm_unmapped_area_info *info)
+vm_unmapped_area(const struct vm_unmapped_area_info *info)
 {
        if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
                return unmapped_area(info);
@@ -1962,6 +1979,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
                                             struct vm_area_struct **pprev);
 
+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
+
 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
    NULL if none.  Assume start_addr < end_addr. */
 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
@@ -1991,10 +2012,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
 }
 
 #ifdef CONFIG_MMU
-pgprot_t vm_get_page_prot(unsigned long vm_flags);
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
 void vma_set_page_prot(struct vm_area_struct *vma);
 #else
-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
 {
        return __pgprot(0);
 }
@@ -2056,6 +2077,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
 static inline void vm_stat_account(struct mm_struct *mm,
                        unsigned long flags, struct file *file, long pages)
 {
+
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
+#endif
+
        mm->total_vm += pages;
 }
 #endif /* CONFIG_PROC_FS */
@@ -2159,7 +2185,7 @@ extern int unpoison_memory(unsigned long pfn);
 extern int sysctl_memory_failure_early_kill;
 extern int sysctl_memory_failure_recovery;
 extern void shake_page(struct page *p, int access);
-extern atomic_long_t num_poisoned_pages;
+extern atomic_long_unchecked_t num_poisoned_pages;
 extern int soft_offline_page(struct page *page, int flags);
 
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
@@ -2210,5 +2236,11 @@ void __init setup_nr_node_ids(void);
 static inline void setup_nr_node_ids(void) {}
 #endif
 
+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
+#else
+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
index 6d34aa266a8ce9bd8d2ad53b4fef7a204d5d01b5..d73d848dd960cc7a4fe6888fce6ea6e3699c603e 100644 (file)
@@ -309,7 +309,9 @@ struct vm_area_struct {
 #ifdef CONFIG_NUMA
        struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
 #endif
-};
+
+       struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
+} __randomize_layout;
 
 struct core_thread {
        struct task_struct *task;
@@ -459,7 +461,25 @@ struct mm_struct {
        /* address of the bounds directory */
        void __user *bd_addr;
 #endif
-};
+
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+       unsigned long pax_flags;
+#endif
+
+#ifdef CONFIG_PAX_DLRESOLVE
+       unsigned long call_dl_resolve;
+#endif
+
+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
+       unsigned long call_syscall;
+#endif
+
+#ifdef CONFIG_PAX_ASLR
+       unsigned long delta_mmap;               /* randomized offset */
+       unsigned long delta_stack;              /* randomized offset */
+#endif
+
+} __randomize_layout;
 
 static inline void mm_init_cpumask(struct mm_struct *mm)
 {
index c5d52780d6a02fe27ec67cb892b447c43a6beefb..f0b68c85c4130a1e07b8e568467d954bfc829206 100644 (file)
@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
 /* Called from ioremap.c */
 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
                                                        void __iomem *addr);
-extern void mmiotrace_iounmap(volatile void __iomem *addr);
+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
 
 /* For anyone to insert markers. Remember trailing newline. */
 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
 {
 }
 
-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
 {
 }
 
index 2f0856d14b212d80786e09da61fcf6fcf21b09d8..5a4bc1ee662a832533222adac9cb7725c17d2530 100644 (file)
@@ -527,7 +527,7 @@ struct zone {
 
        ZONE_PADDING(_pad3_)
        /* Zone statistics */
-       atomic_long_t           vm_stat[NR_VM_ZONE_STAT_ITEMS];
+       atomic_long_unchecked_t         vm_stat[NR_VM_ZONE_STAT_ITEMS];
 } ____cacheline_internodealigned_in_smp;
 
 enum zone_flags {
index 745def862580726a26135621763ec0ce79b28cb3..08a820b282fd121d8b201dd77dae705e2099220a 100644 (file)
@@ -139,7 +139,7 @@ struct usb_device_id {
 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL       0x0200
 #define USB_DEVICE_ID_MATCH_INT_NUMBER         0x0400
 
-#define HID_ANY_ID                             (~0)
+#define HID_ANY_ID                             (~0U)
 #define HID_BUS_ANY                            0xffff
 #define HID_GROUP_ANY                          0x0000
 
@@ -475,7 +475,7 @@ struct dmi_system_id {
        const char *ident;
        struct dmi_strmatch matches[4];
        void *driver_data;
-};
+} __do_const;
 /*
  * struct dmi_device_id appears during expansion of
  * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
index b653d7c0a05a0abbaf5e1b4759a3f79b398c8b25..22a238f02ad91de3f860fa474e9b9cb983088fdc 100644 (file)
 #include <linux/moduleparam.h>
 #include <linux/jump_label.h>
 #include <linux/export.h>
+#include <linux/fs.h>
 
 #include <linux/percpu.h>
 #include <asm/module.h>
+#include <asm/pgtable.h>
 
 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
 #define MODULE_SIG_STRING "~Module signature appended~\n"
@@ -42,7 +44,7 @@ struct module_kobject {
        struct kobject *drivers_dir;
        struct module_param_attrs *mp;
        struct completion *kobj_completion;
-};
+} __randomize_layout;
 
 struct module_attribute {
        struct attribute attr;
@@ -54,12 +56,13 @@ struct module_attribute {
        int (*test)(struct module *);
        void (*free)(struct module *);
 };
+typedef struct module_attribute __no_const module_attribute_no_const;
 
 struct module_version_attribute {
        struct module_attribute mattr;
        const char *module_name;
        const char *version;
-} __attribute__ ((__aligned__(sizeof(void *))));
+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
 
 extern ssize_t __modver_version_show(struct module_attribute *,
                                     struct module_kobject *, char *);
@@ -221,7 +224,7 @@ struct module {
 
        /* Sysfs stuff. */
        struct module_kobject mkobj;
-       struct module_attribute *modinfo_attrs;
+       module_attribute_no_const *modinfo_attrs;
        const char *version;
        const char *srcversion;
        struct kobject *holders_dir;
@@ -270,19 +273,16 @@ struct module {
        int (*init)(void);
 
        /* If this is non-NULL, vfree after init() returns */
-       void *module_init;
+       void *module_init_rx, *module_init_rw;
 
        /* Here is the actual code + data, vfree'd on unload. */
-       void *module_core;
+       void *module_core_rx, *module_core_rw;
 
        /* Here are the sizes of the init and core sections */
-       unsigned int init_size, core_size;
+       unsigned int init_size_rw, core_size_rw;
 
        /* The size of the executable code in each section.  */
-       unsigned int init_text_size, core_text_size;
-
-       /* Size of RO sections of the module (text+rodata) */
-       unsigned int init_ro_size, core_ro_size;
+       unsigned int init_size_rx, core_size_rx;
 
        /* Arch-specific module values */
        struct mod_arch_specific arch;
@@ -338,6 +338,10 @@ struct module {
 #ifdef CONFIG_EVENT_TRACING
        struct ftrace_event_call **trace_events;
        unsigned int num_trace_events;
+       struct file_operations trace_id;
+       struct file_operations trace_enable;
+       struct file_operations trace_format;
+       struct file_operations trace_filter;
 #endif
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
        unsigned int num_ftrace_callsites;
@@ -361,7 +365,7 @@ struct module {
        ctor_fn_t *ctors;
        unsigned int num_ctors;
 #endif
-};
+} __randomize_layout;
 #ifndef MODULE_ARCH_INIT
 #define MODULE_ARCH_INIT {}
 #endif
@@ -382,18 +386,48 @@ bool is_module_address(unsigned long addr);
 bool is_module_percpu_address(unsigned long addr);
 bool is_module_text_address(unsigned long addr);
 
+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
+{
+
+#ifdef CONFIG_PAX_KERNEXEC
+       if (ktla_ktva(addr) >= (unsigned long)start &&
+           ktla_ktva(addr) < (unsigned long)start + size)
+               return 1;
+#endif
+
+       return ((void *)addr >= start && (void *)addr < start + size);
+}
+
+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
+{
+       return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
+}
+
+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
+{
+       return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
+}
+
+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
+{
+       return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
+}
+
+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
+{
+       return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
+}
+
 static inline bool within_module_core(unsigned long addr,
                                      const struct module *mod)
 {
-       return (unsigned long)mod->module_core <= addr &&
-              addr < (unsigned long)mod->module_core + mod->core_size;
+       return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
 }
 
 static inline bool within_module_init(unsigned long addr,
                                      const struct module *mod)
 {
-       return (unsigned long)mod->module_init <= addr &&
-              addr < (unsigned long)mod->module_init + mod->init_size;
+       return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
 }
 
 static inline bool within_module(unsigned long addr, const struct module *mod)
index f7556261fe3c54adb52b28789b7cb7b19b280b13..641f822dbfe4f5b9e0ba3881f75d19c601727619 100644 (file)
@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
    sections.  Returns NULL on failure. */
 void *module_alloc(unsigned long size);
 
+#ifdef CONFIG_PAX_KERNEXEC
+void *module_alloc_exec(unsigned long size);
+#else
+#define module_alloc_exec(x) module_alloc(x)
+#endif
+
 /* Free memory returned from module_alloc. */
 void module_memfree(void *module_region);
 
+#ifdef CONFIG_PAX_KERNEXEC
+void module_memfree_exec(void *module_region);
+#else
+#define module_memfree_exec(x) module_memfree((x))
+#endif
+
 /*
  * Apply the given relocation to the (simplified) ELF.  Return -error
  * or 0.
@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
                                 unsigned int relsec,
                                 struct module *me)
 {
+#ifdef CONFIG_MODULES
        printk(KERN_ERR "module %s: REL relocation unsupported\n",
               module_name(me));
+#endif
        return -ENOEXEC;
 }
 #endif
@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
                                     unsigned int relsec,
                                     struct module *me)
 {
+#ifdef CONFIG_MODULES
        printk(KERN_ERR "module %s: REL relocation unsupported\n",
               module_name(me));
+#endif
        return -ENOEXEC;
 }
 #endif
index 1c9effa25e2632497384e973a28954f7e33aedbc..1160bddd4d66c0214b6c0e8c6c6074eebddd714a 100644 (file)
@@ -323,7 +323,7 @@ static inline void __kernel_param_unlock(void)
  * @len is usually just sizeof(string).
  */
 #define module_param_string(name, string, len, perm)                   \
-       static const struct kparam_string __param_string_##name         \
+       static const struct kparam_string __param_string_##name __used  \
                = { len, string };                                      \
        __module_param_call(MODULE_PARAM_PREFIX, name,                  \
                            &param_ops_string,                          \
@@ -467,7 +467,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
  */
 #define module_param_array_named(name, array, type, nump, perm)                \
        param_check_##type(name, &(array)[0]);                          \
-       static const struct kparam_array __param_arr_##name             \
+       static const struct kparam_array __param_arr_##name __used      \
        = { .max = ARRAY_SIZE(array), .num = nump,                      \
            .ops = &param_ops_##type,                                   \
            .elemsize = sizeof(array[0]), .elem = array };              \
index c2c561dc011440ee83f395aba0a226ab467204a0..a5f2a8c940da591733596e88924f88e3c12149c8 100644 (file)
@@ -66,7 +66,7 @@ struct vfsmount {
        struct dentry *mnt_root;        /* root of the mounted tree */
        struct super_block *mnt_sb;     /* pointer to superblock */
        int mnt_flags;
-};
+} __randomize_layout;
 
 struct file; /* forward dec */
 struct path;
index c8990779f0c33b99e552ca9406621cde03f49443..b9a2010f2f427fa4587b705106d6aee36bd08b1f 100644 (file)
@@ -71,8 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
 extern void unlock_rename(struct dentry *, struct dentry *);
 
 extern void nd_jump_link(struct nameidata *nd, struct path *path);
-extern void nd_set_link(struct nameidata *nd, char *path);
-extern char *nd_get_link(struct nameidata *nd);
+extern void nd_set_link(struct nameidata *nd, const char *path);
+extern const char *nd_get_link(const struct nameidata *nd);
 
 static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
 {
index 17d83393afcc4337d50f00cfa837030d9429c6f8..81656c035668d81512cf29ca701a4cadd2aacbf7 100644 (file)
@@ -192,7 +192,7 @@ struct net_proto_family {
        int             (*create)(struct net *net, struct socket *sock,
                                  int protocol, int kern);
        struct module   *owner;
-};
+} __do_const;
 
 struct iovec;
 struct kvec;
index 52fd8e8694cfade5e844d52a70b191c3183f60a2..19430a1baf33ec07f427f157684166c0e5521b76 100644 (file)
@@ -1191,6 +1191,7 @@ struct net_device_ops {
                                                              u8 state);
 #endif
 };
+typedef struct net_device_ops __no_const net_device_ops_no_const;
 
 /**
  * enum net_device_priv_flags - &struct net_device priv_flags
@@ -1537,10 +1538,10 @@ struct net_device {
 
        struct net_device_stats stats;
 
-       atomic_long_t           rx_dropped;
-       atomic_long_t           tx_dropped;
+       atomic_long_unchecked_t rx_dropped;
+       atomic_long_unchecked_t tx_dropped;
 
-       atomic_t                carrier_changes;
+       atomic_unchecked_t      carrier_changes;
 
 #ifdef CONFIG_WIRELESS_EXT
        const struct iw_handler_def *   wireless_handlers;
index 2517ece988209a611b324a0bb8ade2b566eeb645..0bbfcfb1676efc24c984004f362870cd46a453e4 100644 (file)
@@ -85,7 +85,7 @@ struct nf_sockopt_ops {
 #endif
        /* Use the module struct to lock set/get code in place */
        struct module *owner;
-};
+} __do_const;
 
 /* Function to register/unregister hook points. */
 int nf_register_hook(struct nf_hook_ops *reg);
index e955d47306259c5867d80831bd78d99490849b0b..04a5338208335b0b6261d6a7d6bc9b653e0f53ed 100644 (file)
@@ -19,7 +19,7 @@ struct nfnl_callback {
                          const struct nlattr * const cda[]);
        const struct nla_policy *policy;        /* netlink attribute policy */
        const u_int16_t attr_count;             /* number of nlattr's */
-};
+} __do_const;
 
 struct nfnetlink_subsystem {
        const char *name;
index 520681b6820817b6f08cb304503bf75b8f8f564f..2b7fabb9944b534280f065fa85601265c606f02f 100644 (file)
@@ -31,7 +31,7 @@ struct nls_table {
        const unsigned char *charset2upper;
        struct module *owner;
        struct nls_table *next;
-};
+} __do_const;
 
 /* this value hold the maximum octet of charset */
 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
@@ -46,7 +46,7 @@ enum utf16_endian {
 /* nls_base.c */
 extern int __register_nls(struct nls_table *, struct module *);
 extern int unregister_nls(struct nls_table *);
-extern struct nls_table *load_nls(char *);
+extern struct nls_table *load_nls(const char *);
 extern void unload_nls(struct nls_table *);
 extern struct nls_table *load_nls_default(void);
 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
index d14a4c362465712857d1ea8fe39b974a5c05c4ce..a0787867659bc4a83b595a1031ef5a745e450b28 100644 (file)
@@ -54,7 +54,8 @@ struct notifier_block {
        notifier_fn_t notifier_call;
        struct notifier_block __rcu *next;
        int priority;
-};
+} __do_const;
+typedef struct notifier_block __no_const notifier_block_no_const;
 
 struct atomic_notifier_head {
        spinlock_t lock;
index b2a0f15f11feaf196fb4212791b14f08289c2ad7..4d7da327ec8b2bd3da16d5e6fcdb30c37d46d296 100644 (file)
@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
 int oprofilefs_create_ro_ulong(struct dentry * root,
        char const * name, ulong * val);
  
-/** Create a file for read-only access to an atomic_t. */
+/** Create a file for read-only access to an atomic_unchecked_t. */
 int oprofilefs_create_ro_atomic(struct dentry * root,
-       char const * name, atomic_t * val);
+       char const * name, atomic_unchecked_t * val);
  
 /** create a directory */
 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
index 438694650471cc66b62cb7890e19fde5799314f7..f50c615470c1e1932b4670b5099b022375e6fcf2 100644 (file)
@@ -129,7 +129,7 @@ struct parallel_data {
        struct padata_serial_queue      __percpu *squeue;
        atomic_t                        reorder_objects;
        atomic_t                        refcnt;
-       atomic_t                        seq_nr;
+       atomic_unchecked_t              seq_nr;
        struct padata_cpumask           cpumask;
        spinlock_t                      lock ____cacheline_aligned;
        unsigned int                    processed;
index d1372186f4315c3de45b3949017026dca2d7f109..be0c176b20f72ce89c290c1a666bf18ef7b6e86c 100644 (file)
@@ -1,13 +1,15 @@
 #ifndef _LINUX_PATH_H
 #define _LINUX_PATH_H
 
+#include <linux/compiler.h>
+
 struct dentry;
 struct vfsmount;
 
 struct path {
        struct vfsmount *mnt;
        struct dentry *dentry;
-};
+} __randomize_layout;
 
 extern void path_get(const struct path *);
 extern void path_put(const struct path *);
index 8c7895061121e8295d30c51bd67304ebb7260cd7..0d74ed91f2794d2d83a828725b0764547ea637a9 100644 (file)
@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
        int (*get_latch_status)         (struct hotplug_slot *slot, u8 *value);
        int (*get_adapter_status)       (struct hotplug_slot *slot, u8 *value);
        int (*reset_slot)               (struct hotplug_slot *slot, int probe);
-};
+} __do_const;
+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
 
 /**
  * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
index caebf2a758dc0e573d7e5dc76b916dc7cb639ea3..4c3ae9d648be141444082296612b3a5a303cfc64 100644 (file)
@@ -34,7 +34,7 @@
  * preallocate for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or
  * larger than PERCPU_DYNAMIC_EARLY_SIZE.
  */
-#define PERCPU_DYNAMIC_EARLY_SLOTS     128
+#define PERCPU_DYNAMIC_EARLY_SLOTS     256
 #define PERCPU_DYNAMIC_EARLY_SIZE      (12 << 10)
 
 /*
index 664de5a4ec4672d9bbd9d1059a2c7005591329b3..b3e1bf4d2e675473505f4feb7607b85b87c81993 100644 (file)
@@ -336,8 +336,8 @@ struct perf_event {
 
        enum perf_event_active_state    state;
        unsigned int                    attach_state;
-       local64_t                       count;
-       atomic64_t                      child_count;
+       local64_t                       count; /* PaX: fix it one day */
+       atomic64_unchecked_t            child_count;
 
        /*
         * These are the total time in nanoseconds that the event
@@ -388,8 +388,8 @@ struct perf_event {
         * These accumulate total time (in nanoseconds) that children
         * events have been enabled and running, respectively.
         */
-       atomic64_t                      child_total_time_enabled;
-       atomic64_t                      child_total_time_running;
+       atomic64_unchecked_t            child_total_time_enabled;
+       atomic64_unchecked_t            child_total_time_running;
 
        /*
         * Protect attach/detach and child_list:
@@ -733,7 +733,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
                entry->ip[entry->nr++] = ip;
 }
 
-extern int sysctl_perf_event_paranoid;
+extern int sysctl_perf_event_legitimately_concerned;
 extern int sysctl_perf_event_mlock;
 extern int sysctl_perf_event_sample_rate;
 extern int sysctl_perf_cpu_time_max_percent;
@@ -748,19 +748,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
                loff_t *ppos);
 
 
+static inline bool perf_paranoid_any(void)
+{
+       return sysctl_perf_event_legitimately_concerned > 2;
+}
+
 static inline bool perf_paranoid_tracepoint_raw(void)
 {
-       return sysctl_perf_event_paranoid > -1;
+       return sysctl_perf_event_legitimately_concerned > -1;
 }
 
 static inline bool perf_paranoid_cpu(void)
 {
-       return sysctl_perf_event_paranoid > 0;
+       return sysctl_perf_event_legitimately_concerned > 0;
 }
 
 static inline bool perf_paranoid_kernel(void)
 {
-       return sysctl_perf_event_paranoid > 1;
+       return sysctl_perf_event_legitimately_concerned > 1;
 }
 
 extern void perf_event_init(void);
@@ -891,7 +896,7 @@ struct perf_pmu_events_attr {
        struct device_attribute attr;
        u64 id;
        const char *event_str;
-};
+} __do_const;
 
 #define PMU_EVENT_ATTR(_name, _var, _id, _show)                                \
 static struct perf_pmu_events_attr _var = {                            \
index b9cf6c51b18106ebe2d6d715d6a1804e76ef900b..54624728edfdf2c49897f104442a847be4f9875e 100644 (file)
@@ -45,7 +45,7 @@ struct pid_namespace {
        int hide_pid;
        int reboot;     /* group exit code if this pidns was rebooted */
        struct ns_common ns;
-};
+} __randomize_layout;
 
 extern struct pid_namespace init_pid_ns;
 
index eb8b8ac6df3c844e2bd84903e0a50ff07f1575fe..62649e1d3264a2e79b4c4728663feac3e16658d7 100644 (file)
@@ -47,10 +47,10 @@ struct pipe_inode_info {
        struct mutex mutex;
        wait_queue_head_t wait;
        unsigned int nrbufs, curbuf, buffers;
-       unsigned int readers;
-       unsigned int writers;
-       unsigned int files;
-       unsigned int waiting_writers;
+       atomic_t readers;
+       atomic_t writers;
+       atomic_t files;
+       atomic_t waiting_writers;
        unsigned int r_counter;
        unsigned int w_counter;
        struct page *tmp_page;
index 8b5976364619a5ad113a4a4c8bc51155a526ae38..8a05939397676a90ab21023f74d9cc8515518155 100644 (file)
@@ -608,6 +608,7 @@ struct dev_pm_domain {
        struct dev_pm_ops       ops;
        void (*detach)(struct device *dev, bool power_off);
 };
+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
 
 /*
  * The PM_EVENT_ messages are also used by drivers implementing the legacy
index a9edab2c787a53e809150034128bc46448a010ad..8bada56d76be63798b657c584baf8d69b5d0f8e2 100644 (file)
@@ -39,11 +39,11 @@ struct gpd_dev_ops {
        int (*save_state)(struct device *dev);
        int (*restore_state)(struct device *dev);
        bool (*active_wakeup)(struct device *dev);
-};
+} __no_const;
 
 struct gpd_cpuidle_data {
        unsigned int saved_exit_latency;
-       struct cpuidle_state *idle_state;
+       cpuidle_state_no_const *idle_state;
 };
 
 struct generic_pm_domain {
index 30e84d48bfeaa8e40075f1d47bba6e0d49b310c6..22278b4e1057069dd0661783922579e66cece031 100644 (file)
@@ -115,7 +115,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
 
 static inline void pm_runtime_mark_last_busy(struct device *dev)
 {
-       ACCESS_ONCE(dev->power.last_busy) = jiffies;
+       ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
 }
 
 static inline bool pm_runtime_is_irq_safe(struct device *dev)
index 195aafc6cd07c691e37e7d0247538dbd63ff5360..49a7bc2a65fe2317df9895465f525bcbc79d7dd0 100644 (file)
@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
 struct pnp_fixup {
        char id[7];
        void (*quirk_function) (struct pnp_dev * dev);  /* fixup function */
-};
+} __do_const;
 
 /* config parameters */
 #define PNP_CONFIG_NORMAL      0x0001
index 2110a81c5e2afaab47ec5cb107cf17503d731317..13a11bb91397039f2f3bdaa014bd35354812c5d1 100644 (file)
@@ -19,8 +19,8 @@
  * under normal circumstances, used to verify that nobody uses
  * non-initialized list entries.
  */
-#define LIST_POISON1  ((void *) 0x00100100 + POISON_POINTER_DELTA)
-#define LIST_POISON2  ((void *) 0x00200200 + POISON_POINTER_DELTA)
+#define LIST_POISON1  ((void *) (long)0xFFFFFF01)
+#define LIST_POISON2  ((void *) (long)0xFFFFFF02)
 
 /********** include/linux/timer.h **********/
 /*
index d8b187c3925ddae170810cd7bf070c2a4b2ffbff..9a9257ac2e5de08bbf568a39dbe4883f5f158f28 100644 (file)
@@ -238,7 +238,7 @@ struct omap_sr_class_data {
        int (*notify)(struct omap_sr *sr, u32 status);
        u8 notify_flags;
        u8 class_type;
-};
+} __do_const;
 
 /**
  * struct omap_sr_nvalue_table - Smartreflex n-target value info
index 4ea1d377e1ad9d567ab22bd277652724742798d1..80f4b335ee91c601250cca11d3e9595627139e94 100644 (file)
@@ -84,7 +84,7 @@ struct compressor {
        struct module *owner;
        /* Extra skb space needed by the compressor algorithm */
        unsigned int comp_extra;
-};
+} __do_const;
 
 /*
  * The return value from decompress routine is the length of the
index de83b4eb164287db363328f87c0f8af216497a91..c4b997dbc8613d4181cc0c636355928be1235dd6 100644 (file)
@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
 #endif
 
+#define raw_preempt_count_add(val)     __preempt_count_add(val)
+#define raw_preempt_count_sub(val)     __preempt_count_sub(val)
+
 #define __preempt_count_inc() __preempt_count_add(1)
 #define __preempt_count_dec() __preempt_count_sub(1)
 
 #define preempt_count_inc() preempt_count_add(1)
+#define raw_preempt_count_inc() raw_preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
 
 #ifdef CONFIG_PREEMPT_COUNT
 
@@ -41,6 +46,12 @@ do { \
        barrier(); \
 } while (0)
 
+#define raw_preempt_disable() \
+do { \
+       raw_preempt_count_inc(); \
+       barrier(); \
+} while (0)
+
 #define sched_preempt_enable_no_resched() \
 do { \
        barrier(); \
@@ -49,6 +60,12 @@ do { \
 
 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
 
+#define raw_preempt_enable_no_resched() \
+do { \
+       barrier(); \
+       raw_preempt_count_dec(); \
+} while (0)
+
 #ifdef CONFIG_PREEMPT
 #define preempt_enable() \
 do { \
@@ -113,8 +130,10 @@ do { \
  * region.
  */
 #define preempt_disable()                      barrier()
+#define raw_preempt_disable()                  barrier()
 #define sched_preempt_enable_no_resched()      barrier()
 #define preempt_enable_no_resched()            barrier()
+#define raw_preempt_enable_no_resched()                barrier()
 #define preempt_enable()                       barrier()
 #define preempt_check_resched()                        do { } while (0)
 
@@ -128,11 +147,13 @@ do { \
 /*
  * Modules have no business playing preemption tricks.
  */
+#ifndef CONFIG_PAX_KERNEXEC
 #undef sched_preempt_enable_no_resched
 #undef preempt_enable_no_resched
 #undef preempt_enable_no_resched_notrace
 #undef preempt_check_resched
 #endif
+#endif
 
 #define preempt_set_need_resched() \
 do { \
index 4d5bf5726578c58b739a79a5f093e5f7c4a009a3..d94eccf1bdb0fb05d9911220ea4baa4fc59a2e5b 100644 (file)
@@ -121,6 +121,7 @@ void early_printk(const char *s, ...) { }
 #endif
 
 typedef int(*printk_func_t)(const char *fmt, va_list args);
+extern int kptr_restrict;
 
 #ifdef CONFIG_PRINTK
 asmlinkage __printf(5, 0)
@@ -156,7 +157,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
 
 extern int printk_delay_msec;
 extern int dmesg_restrict;
-extern int kptr_restrict;
 
 extern void wake_up_klogd(void);
 
index b97bf2ef996ef3e9e0cf906280741f20365d1455..f14c92d4caf0fad6fd4a2a142b3ed9723115a8c3 100644 (file)
@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
 extern struct proc_dir_entry *proc_symlink(const char *,
                struct proc_dir_entry *, const char *);
 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
                                              struct proc_dir_entry *, void *);
+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
+                                             struct proc_dir_entry *, void *);
 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
                                              struct proc_dir_entry *);
  
@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
        return proc_create_data(name, mode, parent, proc_fops, NULL);
 }
 
+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
+       struct proc_dir_entry *parent, const struct file_operations *proc_fops)
+{
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+       return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+       return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
+#else
+       return proc_create_data(name, mode, parent, proc_fops, NULL);
+#endif
+}
+
+
 extern void proc_set_size(struct proc_dir_entry *, loff_t);
 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
 extern void *PDE_DATA(const struct inode *);
@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
                struct proc_dir_entry *parent,const char *dest) { return NULL;}
 static inline struct proc_dir_entry *proc_mkdir(const char *name,
        struct proc_dir_entry *parent) {return NULL;}
+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
+       struct proc_dir_entry *parent) { return NULL; }
 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
        umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
+static inline  struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
+       umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
        umode_t mode, struct proc_dir_entry *parent) { return NULL; }
 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
@@ -79,7 +99,7 @@ struct net;
 static inline struct proc_dir_entry *proc_net_mkdir(
        struct net *net, const char *name, struct proc_dir_entry *parent)
 {
-       return proc_mkdir_data(name, 0, parent, net);
+       return proc_mkdir_data_restrict(name, 0, parent, net);
 }
 
 #endif /* _LINUX_PROC_FS_H */
index 42dfc615dbf8381a2057cb9956cec957e9ca5b1b..8113a99a7b15a3fd2bacbfee4e6dd71ad9cb168e 100644 (file)
@@ -16,7 +16,7 @@ struct proc_ns_operations {
        struct ns_common *(*get)(struct task_struct *task);
        void (*put)(struct ns_common *ns);
        int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
-};
+} __do_const __randomize_layout;
 
 extern const struct proc_ns_operations netns_operations;
 extern const struct proc_ns_operations utsns_operations;
index b86df497aba3a7e8f93f3f1ac0c6de6746f1545a..80029977cff48689efa10fee9425e9d7a200b010 100644 (file)
@@ -75,7 +75,7 @@ struct kqid {                 /* Type in which we store the quota identifier */
 
 extern bool qid_eq(struct kqid left, struct kqid right);
 extern bool qid_lt(struct kqid left, struct kqid right);
-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
 extern bool qid_valid(struct kqid qid);
 
index b05856e16b75be8b7dea5015a5a6ff6afcb021eb..0a9f14e48ef817236e450717f3b7b11d8b8c815f 100644 (file)
@@ -9,9 +9,19 @@
 #include <uapi/linux/random.h>
 
 extern void add_device_randomness(const void *, unsigned int);
+
+static inline void add_latent_entropy(void)
+{
+
+#ifdef LATENT_ENTROPY_PLUGIN
+       add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
+#endif
+
+}
+
 extern void add_input_randomness(unsigned int type, unsigned int code,
-                                unsigned int value);
-extern void add_interrupt_randomness(int irq, int irq_flags);
+                                unsigned int value) __latent_entropy;
+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
 
 extern void get_random_bytes(void *buf, int nbytes);
 extern void get_random_bytes_arch(void *buf, int nbytes);
@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
 extern const struct file_operations random_fops, urandom_fops;
 #endif
 
-unsigned int get_random_int(void);
+unsigned int __intentional_overflow(-1) get_random_int(void);
 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
 
-u32 prandom_u32(void);
+u32 prandom_u32(void) __intentional_overflow(-1);
 void prandom_bytes(void *buf, size_t nbytes);
 void prandom_seed(u32 seed);
 void prandom_reseed_late(void);
@@ -37,6 +47,11 @@ struct rnd_state {
 u32 prandom_u32_state(struct rnd_state *state);
 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
 
+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
+{
+       return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
+}
+
 /**
  * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
  * @ep_ro: right open interval endpoint
@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
  *
  * Returns: pseudo-random number in interval [0, ep_ro)
  */
-static inline u32 prandom_u32_max(u32 ep_ro)
+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
 {
        return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
 }
index 378c5ee75f78f5e1a521d87afeaf13fe21999af5..aa84a47c138625b4d4b402e523c23f97f59bb866 100644 (file)
@@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new)     \
        old->rbaugmented = rbcompute(old);                              \
 }                                                                      \
 rbstatic const struct rb_augment_callbacks rbname = {                  \
-       rbname ## _propagate, rbname ## _copy, rbname ## _rotate        \
+       .propagate = rbname ## _propagate,                              \
+       .copy = rbname ## _copy,                                        \
+       .rotate = rbname ## _rotate                                     \
 };
 
 
index 529bc946f450359158503332e6d563fa46ce6f47..82ce7783fd292e06bf684bab5717e335b3e32117 100644 (file)
@@ -29,8 +29,8 @@
  */
 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
 {
-       ACCESS_ONCE(list->next) = list;
-       ACCESS_ONCE(list->prev) = list;
+       ACCESS_ONCE_RW(list->next) = list;
+       ACCESS_ONCE_RW(list->prev) = list;
 }
 
 /*
@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
                    struct list_head *prev, struct list_head *next);
 #endif
 
+void __pax_list_add_rcu(struct list_head *new,
+                       struct list_head *prev, struct list_head *next);
+
 /**
  * list_add_rcu - add a new entry to rcu-protected list
  * @new: new entry to be added
@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
        __list_add_rcu(new, head, head->next);
 }
 
+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
+{
+       __pax_list_add_rcu(new, head, head->next);
+}
+
 /**
  * list_add_tail_rcu - add a new entry to rcu-protected list
  * @new: new entry to be added
@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
        __list_add_rcu(new, head->prev, head);
 }
 
+static inline void pax_list_add_tail_rcu(struct list_head *new,
+                                       struct list_head *head)
+{
+       __pax_list_add_rcu(new, head->prev, head);
+}
+
 /**
  * list_del_rcu - deletes entry from list without re-initialization
  * @entry: the element to delete from the list.
@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
        entry->prev = LIST_POISON2;
 }
 
+extern void pax_list_del_rcu(struct list_head *entry);
+
 /**
  * hlist_del_init_rcu - deletes entry from hash list with re-initialization
  * @n: the element to delete from the hash list.
index ed4f5939a452cb424671f87dc9911fe318cb1fc3..8a51501c9aadc3af2093a405323409187331598b 100644 (file)
@@ -332,7 +332,7 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
 #define rcu_note_voluntary_context_switch(t) \
        do { \
                if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
-                       ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
+                       ACCESS_ONCE_RW((t)->rcu_tasks_holdout) = false; \
        } while (0)
 #else /* #ifdef CONFIG_TASKS_RCU */
 #define TASKS_RCU(x) do { } while (0)
index 67fc8fcdc4b0fdcd080ddef1c907b1c2aed3d445..a90f7d800246324dde0f869bc02d5143ad7ca137 100644 (file)
@@ -47,9 +47,9 @@ extern void do_kernel_restart(char *cmd);
  */
 
 extern void migrate_to_reboot_cpu(void);
-extern void machine_restart(char *cmd);
-extern void machine_halt(void);
-extern void machine_power_off(void);
+extern void machine_restart(char *cmd) __noreturn;
+extern void machine_halt(void) __noreturn;
+extern void machine_power_off(void) __noreturn;
 
 extern void machine_shutdown(void);
 struct pt_regs;
@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
  */
 
 extern void kernel_restart_prepare(char *cmd);
-extern void kernel_restart(char *cmd);
-extern void kernel_halt(void);
-extern void kernel_power_off(void);
+extern void kernel_restart(char *cmd) __noreturn;
+extern void kernel_halt(void) __noreturn;
+extern void kernel_power_off(void) __noreturn;
 
 extern int C_A_D; /* for sysctl */
 void ctrl_alt_del(void);
@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
  * Emergency restart, callable from an interrupt handler.
  */
 
-extern void emergency_restart(void);
+extern void emergency_restart(void) __noreturn;
 #include <asm/emergency-restart.h>
 
 #endif /* _LINUX_REBOOT_H */
index 8e0c9febf495f5c9dd9f397a91f1d4307dd4aaf9..ac4d22149ac2a6c19b347af3f963b2a62ecadf5e 100644 (file)
@@ -161,7 +161,8 @@ struct user_regset {
        unsigned int                    align;
        unsigned int                    bias;
        unsigned int                    core_note_type;
-};
+} __do_const;
+typedef struct user_regset __no_const user_regset_no_const;
 
 /**
  * struct user_regset_view - available regsets
index d7c8359693c6e19cdf57c52316f2e223581f14b2..818daf53e472fd37142cf9df33d18661fd201ef8 100644 (file)
@@ -157,7 +157,7 @@ struct rchan_callbacks
         * The callback should return 0 if successful, negative if not.
         */
        int (*remove_buf_file)(struct dentry *dentry);
-};
+} __no_const;
 
 /*
  * CONFIG_RELAY kernel API, kernel/relay.c
index 6bda06f21930bcce8ff57ad7eaa5f3afad8bf45d..bf39a9b22841cb03c136de193803f7e26d7b3dd0 100644 (file)
@@ -358,7 +358,7 @@ struct rio_ops {
        int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
                        u64 rstart, u32 size, u32 flags);
        void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
-};
+} __no_const;
 
 #define RIO_RESOURCE_MEM       0x00000100
 #define RIO_RESOURCE_DOORBELL  0x00000200
index d9d7e7e56352a8855def8f86af24fdb569ee480c..86f47acc4efd8ab573224659c6d28c624882309a 100644 (file)
@@ -154,8 +154,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
 void anon_vma_init(void);      /* create anon_vma_cachep */
 int  anon_vma_prepare(struct vm_area_struct *);
 void unlink_anon_vmas(struct vm_area_struct *);
-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
 
 static inline void anon_vma_merge(struct vm_area_struct *vma,
                                  struct vm_area_struct *next)
index ed8f9e70df9bcf72358ce9baf534c082699daa86..999bc962432bb5be4de3d7bc173060a288a81445 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _LINUX_SCATTERLIST_H
 #define _LINUX_SCATTERLIST_H
 
+#include <linux/sched.h>
 #include <linux/string.h>
 #include <linux/bug.h>
 #include <linux/mm.h>
@@ -113,6 +114,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
 {
 #ifdef CONFIG_DEBUG_SG
        BUG_ON(!virt_addr_valid(buf));
+#endif
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
+       if (object_starts_on_stack(buf)) {
+               void *adjbuf = buf - current->stack + current->lowmem_stack;
+               sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
+       } else
 #endif
        sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
 }
index 8db31ef98d2f4b5ad9f68f8296fbdbb475f96c46..0af1f81a0726cbfba006c6c753ef83f10869e00e 100644 (file)
@@ -133,6 +133,7 @@ struct fs_struct;
 struct perf_event_context;
 struct blk_plug;
 struct filename;
+struct linux_binprm;
 
 #define VMACACHE_BITS 2
 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
@@ -415,7 +416,7 @@ extern char __sched_text_start[], __sched_text_end[];
 extern int in_sched_functions(unsigned long addr);
 
 #define        MAX_SCHEDULE_TIMEOUT    LONG_MAX
-extern signed long schedule_timeout(signed long timeout);
+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
 extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
@@ -426,6 +427,19 @@ struct nsproxy;
 struct user_namespace;
 
 #ifdef CONFIG_MMU
+
+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
+#else
+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
+{
+       return 0;
+}
+#endif
+
+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
+
 extern void arch_pick_mmap_layout(struct mm_struct *mm);
 extern unsigned long
 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
@@ -724,6 +738,17 @@ struct signal_struct {
 #ifdef CONFIG_TASKSTATS
        struct taskstats *stats;
 #endif
+
+#ifdef CONFIG_GRKERNSEC
+       u32 curr_ip;
+       u32 saved_ip;
+       u32 gr_saddr;
+       u32 gr_daddr;
+       u16 gr_sport;
+       u16 gr_dport;
+       u8 used_accept:1;
+#endif
+
 #ifdef CONFIG_AUDIT
        unsigned audit_tty;
        unsigned audit_tty_log_passwd;
@@ -750,7 +775,7 @@ struct signal_struct {
        struct mutex cred_guard_mutex;  /* guard against foreign influences on
                                         * credential calculations
                                         * (notably. ptrace) */
-};
+} __randomize_layout;
 
 /*
  * Bits in flags field of signal_struct.
@@ -803,6 +828,14 @@ struct user_struct {
        struct key *session_keyring;    /* UID's default session keyring */
 #endif
 
+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
+       unsigned char kernel_banned;
+#endif
+#ifdef CONFIG_GRKERNSEC_BRUTE
+       unsigned char suid_banned;
+       unsigned long suid_ban_expires;
+#endif
+
        /* Hash table maintenance information */
        struct hlist_node uidhash_node;
        kuid_t uid;
@@ -810,7 +843,7 @@ struct user_struct {
 #ifdef CONFIG_PERF_EVENTS
        atomic_long_t locked_vm;
 #endif
-};
+} __randomize_layout;
 
 extern int uids_sysfs_init(void);
 
@@ -1274,6 +1307,9 @@ enum perf_event_task_context {
 struct task_struct {
        volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
        void *stack;
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
+       void *lowmem_stack;
+#endif
        atomic_t usage;
        unsigned int flags;     /* per process flags, defined below */
        unsigned int ptrace;
@@ -1405,8 +1441,8 @@ struct task_struct {
        struct list_head thread_node;
 
        struct completion *vfork_done;          /* for vfork() */
-       int __user *set_child_tid;              /* CLONE_CHILD_SETTID */
-       int __user *clear_child_tid;            /* CLONE_CHILD_CLEARTID */
+       pid_t __user *set_child_tid;            /* CLONE_CHILD_SETTID */
+       pid_t __user *clear_child_tid;          /* CLONE_CHILD_CLEARTID */
 
        cputime_t utime, stime, utimescaled, stimescaled;
        cputime_t gtime;
@@ -1431,11 +1467,6 @@ struct task_struct {
        struct task_cputime cputime_expires;
        struct list_head cpu_timers[3];
 
-/* process credentials */
-       const struct cred __rcu *real_cred; /* objective and real subjective task
-                                        * credentials (COW) */
-       const struct cred __rcu *cred;  /* effective (overridable) subjective task
-                                        * credentials (COW) */
        char comm[TASK_COMM_LEN]; /* executable name excluding path
                                     - access with [gs]et_task_comm (which lock
                                       it with task_lock())
@@ -1453,6 +1484,10 @@ struct task_struct {
 #endif
 /* CPU-specific state of this task */
        struct thread_struct thread;
+/* thread_info moved to task_struct */
+#ifdef CONFIG_X86
+       struct thread_info tinfo;
+#endif
 /* filesystem information */
        struct fs_struct *fs;
 /* open file information */
@@ -1527,6 +1562,10 @@ struct task_struct {
        gfp_t lockdep_reclaim_gfp;
 #endif
 
+/* process credentials */
+       const struct cred __rcu *real_cred; /* objective and real subjective task
+                                        * credentials (COW) */
+
 /* journalling filesystem info */
        void *journal_info;
 
@@ -1565,6 +1604,10 @@ struct task_struct {
        /* cg_list protected by css_set_lock and tsk->alloc_lock */
        struct list_head cg_list;
 #endif
+
+       const struct cred __rcu *cred;  /* effective (overridable) subjective task
+                                        * credentials (COW) */
+
 #ifdef CONFIG_FUTEX
        struct robust_list_head __user *robust_list;
 #ifdef CONFIG_COMPAT
@@ -1673,7 +1716,7 @@ struct task_struct {
         * Number of functions that haven't been traced
         * because of depth overrun.
         */
-       atomic_t trace_overrun;
+       atomic_unchecked_t trace_overrun;
        /* Pause for the tracing */
        atomic_t tracing_graph_pause;
 #endif
@@ -1701,7 +1744,78 @@ struct task_struct {
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
        unsigned long   task_state_change;
 #endif
-};
+
+#ifdef CONFIG_GRKERNSEC
+       /* grsecurity */
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+       u64 exec_id;
+#endif
+#ifdef CONFIG_GRKERNSEC_SETXID
+       const struct cred *delayed_cred;
+#endif
+       struct dentry *gr_chroot_dentry;
+       struct acl_subject_label *acl;
+       struct acl_subject_label *tmpacl;
+       struct acl_role_label *role;
+       struct file *exec_file;
+       unsigned long brute_expires;
+       u16 acl_role_id;
+       u8 inherited;
+       /* is this the task that authenticated to the special role */
+       u8 acl_sp_role;
+       u8 is_writable;
+       u8 brute;
+       u8 gr_is_chrooted;
+#endif
+
+} __randomize_layout;
+
+#define MF_PAX_PAGEEXEC                0x01000000      /* Paging based non-executable pages */
+#define MF_PAX_EMUTRAMP                0x02000000      /* Emulate trampolines */
+#define MF_PAX_MPROTECT                0x04000000      /* Restrict mprotect() */
+#define MF_PAX_RANDMMAP                0x08000000      /* Randomize mmap() base */
+/*#define MF_PAX_RANDEXEC              0x10000000*/    /* Randomize ET_EXEC base */
+#define MF_PAX_SEGMEXEC                0x20000000      /* Segmentation based non-executable pages */
+
+#ifdef CONFIG_PAX_SOFTMODE
+extern int pax_softmode;
+#endif
+
+extern int pax_check_flags(unsigned long *);
+#define PAX_PARSE_FLAGS_FALLBACK       (~0UL)
+
+/* if tsk != current then task_lock must be held on it */
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+static inline unsigned long pax_get_flags(struct task_struct *tsk)
+{
+       if (likely(tsk->mm))
+               return tsk->mm->pax_flags;
+       else
+               return 0UL;
+}
+
+/* if tsk != current then task_lock must be held on it */
+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
+{
+       if (likely(tsk->mm)) {
+               tsk->mm->pax_flags = flags;
+               return 0;
+       }
+       return -EINVAL;
+}
+#endif
+
+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
+extern void pax_set_initial_flags(struct linux_binprm *bprm);
+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
+#endif
+
+struct path;
+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
+extern void pax_report_refcount_overflow(struct pt_regs *regs);
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
@@ -1783,7 +1897,7 @@ struct pid_namespace;
 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
                        struct pid_namespace *ns);
 
-static inline pid_t task_pid_nr(struct task_struct *tsk)
+static inline pid_t task_pid_nr(const struct task_struct *tsk)
 {
        return tsk->pid;
 }
@@ -2150,6 +2264,25 @@ extern u64 sched_clock_cpu(int cpu);
 
 extern void sched_clock_init(void);
 
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
+static inline void populate_stack(void)
+{
+       struct task_struct *curtask = current;
+       int c;
+       int *ptr = curtask->stack;
+       int *end = curtask->stack + THREAD_SIZE;
+
+       while (ptr < end) {
+               c = *(volatile int *)ptr;
+               ptr += PAGE_SIZE/sizeof(int);
+       }
+}
+#else
+static inline void populate_stack(void)
+{
+}
+#endif
+
 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static inline void sched_clock_tick(void)
 {
@@ -2283,7 +2416,9 @@ void yield(void);
 extern struct exec_domain      default_exec_domain;
 
 union thread_union {
+#ifndef CONFIG_X86
        struct thread_info thread_info;
+#endif
        unsigned long stack[THREAD_SIZE/sizeof(long)];
 };
 
@@ -2316,6 +2451,7 @@ extern struct pid_namespace init_pid_ns;
  */
 
 extern struct task_struct *find_task_by_vpid(pid_t nr);
+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
                struct pid_namespace *ns);
 
@@ -2480,7 +2616,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
 extern void exit_itimers(struct signal_struct *);
 extern void flush_itimer_signals(void);
 
-extern void do_group_exit(int);
+extern __noreturn void do_group_exit(int);
 
 extern int do_execve(struct filename *,
                     const char __user * const __user *,
@@ -2701,9 +2837,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
 #define task_stack_end_corrupted(task) \
                (*(end_of_stack(task)) != STACK_END_MAGIC)
 
-static inline int object_is_on_stack(void *obj)
+static inline int object_starts_on_stack(const void *obj)
 {
-       void *stack = task_stack_page(current);
+       const void *stack = task_stack_page(current);
 
        return (obj >= stack) && (obj < (stack + THREAD_SIZE));
 }
index 596a0e007c62d97e57d040ee45fa3df784403880..bea77ec008fac292cc3ba42bdb28cb84d31fa625 100644 (file)
@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
 #define DEFAULT_MAX_MAP_COUNT  (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
 
 extern int sysctl_max_map_count;
+extern unsigned long sysctl_heap_stack_gap;
 
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
index ba96471c11bae88b6673faec4d4a268cf550db1b..74fb3f65c89fc0e753cb2677ea549c7e13a06a14 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/string.h>
+#include <linux/grsecurity.h>
 
 struct linux_binprm;
 struct cred;
@@ -116,8 +117,6 @@ struct seq_file;
 
 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
 
-void reset_security_ops(void);
-
 #ifdef CONFIG_MMU
 extern unsigned long mmap_min_addr;
 extern unsigned long dac_mmap_min_addr;
@@ -1729,7 +1728,7 @@ struct security_operations {
                                 struct audit_context *actx);
        void (*audit_rule_free) (void *lsmrule);
 #endif /* CONFIG_AUDIT */
-};
+} __randomize_layout;
 
 /* prototypes */
 extern int security_init(void);
index dc368b8ce215ccc0f6a2b44a16dd2aaa603dbcd1..e8952092deebf8ffbb7456a6b0bdad5feada13cb 100644 (file)
@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
 }
 
 extern void down(struct semaphore *sem);
-extern int __must_check down_interruptible(struct semaphore *sem);
+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
 extern int __must_check down_killable(struct semaphore *sem);
 extern int __must_check down_trylock(struct semaphore *sem);
 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
index cf6a9daaaf6d670eda5ae59495649099c455dd5c..bd86b1f73aa7c802d3a4e1385b8a91c641735c40 100644 (file)
@@ -27,6 +27,9 @@ struct seq_file {
        struct mutex lock;
        const struct seq_operations *op;
        int poll_event;
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+       u64 exec_id;
+#endif
 #ifdef CONFIG_USER_NS
        struct user_namespace *user_ns;
 #endif
@@ -39,6 +42,7 @@ struct seq_operations {
        void * (*next) (struct seq_file *m, void *v, loff_t *pos);
        int (*show) (struct seq_file *m, void *v);
 };
+typedef struct seq_operations __no_const seq_operations_no_const;
 
 #define SEQ_SKIP 1
 
@@ -111,6 +115,7 @@ void seq_pad(struct seq_file *m, char c);
 
 char *mangle_path(char *s, const char *p, const char *esc);
 int seq_open(struct file *, const struct seq_operations *);
+int seq_open_restrict(struct file *, const struct seq_operations *);
 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
 loff_t seq_lseek(struct file *, loff_t, int);
 int seq_release(struct inode *, struct file *);
@@ -153,6 +158,7 @@ static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
 }
 
 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
 int single_release(struct inode *, struct file *);
 void *__seq_open_private(struct file *, const struct seq_operations *, int);
index 6fb801686ad6cea28b6fb4b1538dad63bd61117c..ab4465e1e981e7df2e23bad7aa65437826c0e751 100644 (file)
@@ -22,6 +22,10 @@ struct shmid_kernel /* private to the kernel */
        /* The task created the shm object.  NULL if the task is dead. */
        struct task_struct      *shm_creator;
        struct list_head        shm_clist;      /* list by creator */
+#ifdef CONFIG_GRKERNSEC
+       u64                     shm_createtime;
+       pid_t                   shm_lapid;
+#endif
 };
 
 /* shm_mode upper byte flags */
index ab1e0392b5ac1ce89c80dc2bf0e4e4ccc7a736bd..ad4229e56125b53f3ee823de49af61ca410d12f6 100644 (file)
@@ -289,7 +289,7 @@ static inline void allow_signal(int sig)
         * know it'll be handled, so that they don't get converted to
         * SIGKILL or just silently dropped.
         */
-       kernel_sigaction(sig, (__force __sighandler_t)2);
+       kernel_sigaction(sig, (__force_user __sighandler_t)2);
 }
 
 static inline void disallow_signal(int sig)
index 85ab7d72b54c2f269812015b19544674bc6dcd72..eb1585ab8927d1aaa6b1bb1ece0195b573e8ca9c 100644 (file)
@@ -763,7 +763,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
                            int node);
 struct sk_buff *build_skb(void *data, unsigned int frag_size);
-static inline struct sk_buff *alloc_skb(unsigned int size,
+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
                                        gfp_t priority)
 {
        return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
@@ -1952,7 +1952,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
        return skb->inner_transport_header - skb->inner_network_header;
 }
 
-static inline int skb_network_offset(const struct sk_buff *skb)
+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
 {
        return skb_network_header(skb) - skb->data;
 }
@@ -2012,7 +2012,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
  * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
  */
 #ifndef NET_SKB_PAD
-#define NET_SKB_PAD    max(32, L1_CACHE_BYTES)
+#define NET_SKB_PAD    max(_AC(32,UL), L1_CACHE_BYTES)
 #endif
 
 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
@@ -2655,9 +2655,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
                                  int *err);
 unsigned int datagram_poll(struct file *file, struct socket *sock,
                           struct poll_table_struct *wait);
-int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
+int __intentional_overflow(0) skb_copy_datagram_iter(const struct sk_buff *from, int offset,
                           struct iov_iter *to, int size);
-static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
+static inline int __intentional_overflow(2,4) skb_copy_datagram_msg(const struct sk_buff *from, int offset,
                                        struct msghdr *msg, int size)
 {
        return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
@@ -3131,6 +3131,9 @@ static inline void nf_reset(struct sk_buff *skb)
        nf_bridge_put(skb->nf_bridge);
        skb->nf_bridge = NULL;
 #endif
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
+       skb->nf_trace = 0;
+#endif
 }
 
 static inline void nf_reset_trace(struct sk_buff *skb)
index 9a139b637069730e85fe6903253baf16e211e7b0..aab37b4882111d095123461e9c7e9c02cd9709bf 100644 (file)
 #include <linux/gfp.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
-
+#include <linux/err.h>
 
 /*
  * Flags to pass to kmem_cache_create().
  * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
  */
 #define SLAB_DEBUG_FREE                0x00000100UL    /* DEBUG: Perform (expensive) checks on free */
+
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+#define SLAB_USERCOPY          0x00000200UL    /* PaX: Allow copying objs to/from userland */
+#else
+#define SLAB_USERCOPY          0x00000000UL
+#endif
+
 #define SLAB_RED_ZONE          0x00000400UL    /* DEBUG: Red zone objs in a cache */
 #define SLAB_POISON            0x00000800UL    /* DEBUG: Poison objects */
+
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+#define SLAB_NO_SANITIZE       0x00001000UL    /* PaX: Do not sanitize objs on free */
+#else
+#define SLAB_NO_SANITIZE       0x00000000UL
+#endif
+
 #define SLAB_HWCACHE_ALIGN     0x00002000UL    /* Align objs on cache lines */
 #define SLAB_CACHE_DMA         0x00004000UL    /* Use GFP_DMA memory */
 #define SLAB_STORE_USER                0x00010000UL    /* DEBUG: Store the last owner for bug hunting */
  * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
  * Both make kfree a no-op.
  */
-#define ZERO_SIZE_PTR ((void *)16)
+#define ZERO_SIZE_PTR                          \
+({                                             \
+       BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
+       (void *)(-MAX_ERRNO-1L);                \
+})
 
-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
-                               (unsigned long)ZERO_SIZE_PTR)
+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
 
 #include <linux/kmemleak.h>
 
@@ -144,6 +161,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
 void kfree(const void *);
 void kzfree(const void *);
 size_t ksize(const void *);
+const char *check_heap_object(const void *ptr, unsigned long n);
+bool is_usercopy_object(const void *ptr);
 
 /*
  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
@@ -236,6 +255,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
 #endif
 
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
+#endif
+
 /*
  * Figure out which kmalloc slab an allocation of a certain size
  * belongs to.
@@ -244,7 +267,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
  * 2 = 120 .. 192 bytes
  * n = 2^(n-1) .. 2^n -1
  */
-static __always_inline int kmalloc_index(size_t size)
+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
 {
        if (!size)
                return 0;
@@ -287,14 +310,14 @@ static __always_inline int kmalloc_index(size_t size)
 }
 #endif /* !CONFIG_SLOB */
 
-void *__kmalloc(size_t size, gfp_t flags);
+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
 
 #ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1) __size_overflow(1);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 #else
-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
 {
        return __kmalloc(size, flags);
 }
index b869d1662ba346b0ba246b814a9dc2b2f153188e..1453c7337178f3c00f3f15711f7f05ca8353c8b6 100644 (file)
@@ -40,7 +40,7 @@ struct kmem_cache {
 /* 4) cache creation/removal */
        const char *name;
        struct list_head list;
-       int refcount;
+       atomic_t refcount;
        int object_size;
        int align;
 
@@ -56,10 +56,14 @@ struct kmem_cache {
        unsigned long node_allocs;
        unsigned long node_frees;
        unsigned long node_overflow;
-       atomic_t allochit;
-       atomic_t allocmiss;
-       atomic_t freehit;
-       atomic_t freemiss;
+       atomic_unchecked_t allochit;
+       atomic_unchecked_t allocmiss;
+       atomic_unchecked_t freehit;
+       atomic_unchecked_t freemiss;
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+       atomic_unchecked_t sanitized;
+       atomic_unchecked_t not_sanitized;
+#endif
 
        /*
         * If debugging is enabled, then the allocator can add additional
index d82abd40a3c061745e94385b1c43e498004a60e8..408c3a0c11f631fae86963041b725eb154d67d2d 100644 (file)
@@ -74,7 +74,7 @@ struct kmem_cache {
        struct kmem_cache_order_objects max;
        struct kmem_cache_order_objects min;
        gfp_t allocflags;       /* gfp flags to use on each alloc */
-       int refcount;           /* Refcount for slab cache destroy */
+       atomic_t refcount;      /* Refcount for slab cache destroy */
        void (*ctor)(void *);
        int inuse;              /* Offset to metadata */
        int align;              /* Alignment */
index 93dff5fff524b720e9af7ff84098bb7760e0cd6d..933c5618add9f16db8d7e37f80d3a2c2becc21a2 100644 (file)
@@ -176,7 +176,9 @@ static inline void wake_up_all_idle_cpus(void) {  }
 #endif
 
 #define get_cpu()              ({ preempt_disable(); smp_processor_id(); })
+#define raw_get_cpu()          ({ raw_preempt_disable(); raw_smp_processor_id(); })
 #define put_cpu()              preempt_enable()
+#define raw_put_cpu_no_resched()       raw_preempt_enable_no_resched()
 
 /*
  * Callback to arch code if there's nosmp or maxcpus=0 on the
index 46cca4c06848346ca84753ac182526a4514ff277..332353644db2d8d6daef9133e1841f84933bb30a 100644 (file)
@@ -11,7 +11,7 @@ struct sock;
 struct sock_diag_handler {
        __u8 family;
        int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
-};
+} __do_const;
 
 int sock_diag_register(const struct sock_diag_handler *h);
 void sock_diag_unregister(const struct sock_diag_handler *h);
index 680f9a31df25653043a41603c4134ff284469b0a..f13aeb04379a382d3b37e7fd629ec97df7e8bd3d 100644 (file)
@@ -7,7 +7,7 @@
 #include <uapi/linux/sonet.h>
 
 struct k_sonet_stats {
-#define __HANDLE_ITEM(i) atomic_t i
+#define __HANDLE_ITEM(i) atomic_unchecked_t i
        __SONET_ITEMS
 #undef __HANDLE_ITEM
 };
index 07d8e53bedfc4fbe998a231c91442104308e5bf4..dc934c9d694a7adfeb93d31169d2765b9a8da3e5 100644 (file)
@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
 {
        switch (sap->sa_family) {
        case AF_INET:
-               return ntohs(((struct sockaddr_in *)sap)->sin_port);
+               return ntohs(((const struct sockaddr_in *)sap)->sin_port);
        case AF_INET6:
-               return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
+               return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
        }
        return 0;
 }
@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
                                    const struct sockaddr *src)
 {
-       const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
+       const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
        struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
 
        dsin->sin_family = ssin->sin_family;
@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
        if (sa->sa_family != AF_INET6)
                return 0;
 
-       return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
+       return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
 }
 
 #endif /* _LINUX_SUNRPC_ADDR_H */
index 598ba80ec30c974f02477a216a21077213a25aa5..d90cba6bf89301cab397462b36d1e03918746b72 100644 (file)
@@ -100,7 +100,7 @@ struct rpc_procinfo {
        unsigned int            p_timer;        /* Which RTT timer to use */
        u32                     p_statidx;      /* Which procedure to account */
        const char *            p_name;         /* name of procedure */
-};
+} __do_const;
 
 #ifdef __KERNEL__
 
index 6f22cfeef5e3c55347d4e52b21c66333f12401ab..9fd090990a7d082c33cc4872114d7120d798b023 100644 (file)
@@ -420,7 +420,7 @@ struct svc_procedure {
        unsigned int            pc_count;       /* call count */
        unsigned int            pc_cachetype;   /* cache info (NFS) */
        unsigned int            pc_xdrressize;  /* maximum size of XDR reply */
-};
+} __do_const;
 
 /*
  * Function prototypes.
index 975da754c778d35921eee53cddb2913cf5ef698e..318c0838f709c7ace7e3ab5751948eabba3ccc5c 100644 (file)
@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
 extern unsigned int svcrdma_max_requests;
 extern unsigned int svcrdma_max_req_size;
 
-extern atomic_t rdma_stat_recv;
-extern atomic_t rdma_stat_read;
-extern atomic_t rdma_stat_write;
-extern atomic_t rdma_stat_sq_starve;
-extern atomic_t rdma_stat_rq_starve;
-extern atomic_t rdma_stat_rq_poll;
-extern atomic_t rdma_stat_rq_prod;
-extern atomic_t rdma_stat_sq_poll;
-extern atomic_t rdma_stat_sq_prod;
+extern atomic_unchecked_t rdma_stat_recv;
+extern atomic_unchecked_t rdma_stat_read;
+extern atomic_unchecked_t rdma_stat_write;
+extern atomic_unchecked_t rdma_stat_sq_starve;
+extern atomic_unchecked_t rdma_stat_rq_starve;
+extern atomic_unchecked_t rdma_stat_rq_poll;
+extern atomic_unchecked_t rdma_stat_rq_prod;
+extern atomic_unchecked_t rdma_stat_sq_poll;
+extern atomic_unchecked_t rdma_stat_sq_prod;
 
 #define RPCRDMA_VERSION 1
 
index 8d71d6577459c9f92d7f21e3ce7c8e76825ab714..f79586e25d8e2e38113e595e8030509b05e4d8cc 100644 (file)
@@ -120,7 +120,7 @@ struct auth_ops {
        int     (*release)(struct svc_rqst *rq);
        void    (*domain_release)(struct auth_domain *);
        int     (*set_client)(struct svc_rqst *rq);
-};
+} __do_const;
 
 #define        SVC_GARBAGE     1
 #define        SVC_SYSERR      2
index e7a018eaf3a255db2198049f543ea3111004852a..49f8b17edf86d8beddac96378838a9cebf557436 100644 (file)
@@ -60,7 +60,8 @@ extern void
 
 extern void
 swiotlb_free_coherent(struct device *hwdev, size_t size,
-                     void *vaddr, dma_addr_t dma_handle);
+                     void *vaddr, dma_addr_t dma_handle,
+                     struct dma_attrs *attrs);
 
 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
index 85893d744901b0629edb48e811434193a13892d6..49235810da0bf25a0f928baaa5af65f760bb818a 100644 (file)
@@ -99,10 +99,16 @@ union bpf_attr;
 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
 
 #define __SC_DECL(t, a)        t a
+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
 #define __TYPE_IS_UL(t)        (__same_type((t)0, 0UL))
 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
+#define __SC_LONG(t, a)        __typeof(                               \
+       __builtin_choose_expr(                                  \
+               sizeof(t) > sizeof(int),                        \
+               (t) 0,                                          \
+               __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L)  \
+       )) a
 #define __SC_CAST(t, a)        (t) a
 #define __SC_ARGS(t, a)        a
 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
@@ -384,11 +390,11 @@ asmlinkage long sys_sync(void);
 asmlinkage long sys_fsync(unsigned int fd);
 asmlinkage long sys_fdatasync(unsigned int fd);
 asmlinkage long sys_bdflush(int func, long data);
-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
-                               char __user *type, unsigned long flags,
+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
+                               const char __user *type, unsigned long flags,
                                void __user *data);
-asmlinkage long sys_umount(char __user *name, int flags);
-asmlinkage long sys_oldumount(char __user *name);
+asmlinkage long sys_umount(const char __user *name, int flags);
+asmlinkage long sys_oldumount(const char __user *name);
 asmlinkage long sys_truncate(const char __user *path, long length);
 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
 asmlinkage long sys_stat(const char __user *filename,
@@ -600,7 +606,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
-                               struct sockaddr __user *, int);
+                               struct sockaddr __user *, int) __intentional_overflow(0);
 asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
                             unsigned int vlen, unsigned flags);
index 27b3b0bc41a94247188b154affa529a72c0b001b..e093dd979715560615d4e69755e26b2d1e69ee2f 100644 (file)
@@ -16,7 +16,7 @@ struct syscore_ops {
        int (*suspend)(void);
        void (*resume)(void);
        void (*shutdown)(void);
-};
+} __do_const;
 
 extern void register_syscore_ops(struct syscore_ops *ops);
 extern void unregister_syscore_ops(struct syscore_ops *ops);
index b7361f831226d97bab5ff8b4c86d9252e972d877..341a15a1da3e9b6888977c50f53e8933bfc263f7 100644 (file)
@@ -39,6 +39,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
 
 extern int proc_dostring(struct ctl_table *, int,
                         void __user *, size_t *, loff_t *);
+extern int proc_dostring_modpriv(struct ctl_table *, int,
+                        void __user *, size_t *, loff_t *);
 extern int proc_dointvec(struct ctl_table *, int,
                         void __user *, size_t *, loff_t *);
 extern int proc_dointvec_minmax(struct ctl_table *, int,
@@ -113,7 +115,8 @@ struct ctl_table
        struct ctl_table_poll *poll;
        void *extra1;
        void *extra2;
-};
+} __do_const __randomize_layout;
+typedef struct ctl_table __no_const ctl_table_no_const;
 
 struct ctl_node {
        struct rb_node node;
index ddad16148bd69cd2b6d7c4a682864b6d29f7a1dc..a3efd26649d9919e5c2b92a06c31132edc5a05e7 100644 (file)
@@ -34,7 +34,8 @@ struct attribute {
        struct lock_class_key   *key;
        struct lock_class_key   skey;
 #endif
-};
+} __do_const;
+typedef struct attribute __no_const attribute_no_const;
 
 /**
  *     sysfs_attr_init - initialize a dynamically allocated sysfs attribute
@@ -63,7 +64,8 @@ struct attribute_group {
                                              struct attribute *, int);
        struct attribute        **attrs;
        struct bin_attribute    **bin_attrs;
-};
+} __do_const;
+typedef struct attribute_group __no_const attribute_group_no_const;
 
 /**
  * Use these macros to make defining attributes easier. See include/linux/device.h
@@ -137,7 +139,8 @@ struct bin_attribute {
                         char *, loff_t, size_t);
        int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
                    struct vm_area_struct *vma);
-};
+} __do_const;
+typedef struct bin_attribute __no_const bin_attribute_no_const;
 
 /**
  *     sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
index 387fa7d05c982b758942f83395e328949324fc1a..3fcde6b8473bb37a374a72828200a719be623211 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/errno.h>
 #include <linux/types.h>
+#include <linux/compiler.h>
 
 /* Possible values of bitmask for enabling sysrq functions */
 /* 0x0001 is reserved for enable everything */
@@ -33,7 +34,7 @@ struct sysrq_key_op {
        char *help_msg;
        char *action_msg;
        int enable_mask;
-};
+} __do_const;
 
 #ifdef CONFIG_MAGIC_SYSRQ
 
index ff307b548ed3c91a0f1cd05e486789305cefb43f..f1a4468fd579f7e323395df341fba962cc4a9e61 100644 (file)
@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
 #error "no set_restore_sigmask() provided and default one won't work"
 #endif
 
+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
+
+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
+{
+       __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_THREAD_INFO_H */
index 7d66ae508e5cc08c0ba8f91b09e55224a565f04d..03271496f975189b63e9a94dfcbe9491caf2cfbf 100644 (file)
@@ -202,7 +202,7 @@ struct tty_port {
        const struct tty_port_operations *ops;  /* Port operations */
        spinlock_t              lock;           /* Lock protecting tty field */
        int                     blocked_open;   /* Waiting to open */
-       int                     count;          /* Usage count */
+       atomic_t                count;          /* Usage count */
        wait_queue_head_t       open_wait;      /* Open waiters */
        wait_queue_head_t       close_wait;     /* Close waiters */
        wait_queue_head_t       delta_msr_wait; /* Modem status change */
@@ -290,7 +290,7 @@ struct tty_struct {
        /* If the tty has a pending do_SAK, queue it here - akpm */
        struct work_struct SAK_work;
        struct tty_port *port;
-};
+} __randomize_layout;
 
 /* Each of a tty's open files has private_data pointing to tty_file_private */
 struct tty_file_private {
@@ -549,7 +549,7 @@ extern int tty_port_open(struct tty_port *port,
                                struct tty_struct *tty, struct file *filp);
 static inline int tty_port_users(struct tty_port *port)
 {
-       return port->count + port->blocked_open;
+       return atomic_read(&port->count) + port->blocked_open;
 }
 
 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
index 92e337c18839b0df2e9c213fd0604983dfcdaefd..f46757b912913047a664f5ec9264c608eb85c800 100644 (file)
@@ -291,7 +291,7 @@ struct tty_operations {
        void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
 #endif
        const struct file_operations *proc_fops;
-};
+} __do_const __randomize_layout;
 
 struct tty_driver {
        int     magic;          /* magic number for this structure */
@@ -325,7 +325,7 @@ struct tty_driver {
 
        const struct tty_operations *ops;
        struct list_head tty_drivers;
-};
+} __randomize_layout;
 
 extern struct list_head tty_drivers;
 
index 00c9d688d7b7189cc4e15c266f29296cb3204145..bc0188b5fae705a2b07e8f56149992308a9e4534 100644 (file)
@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
 
        struct  module *owner;
 
-       int refcount;
+       atomic_t refcount;
 };
 
 struct tty_ldisc {
index a0bb7048687f565ea959ae21fde70d2cec8282ce..f511c773c2f4948d6cfbc885e21ab69c7e091bc3 100644 (file)
@@ -177,10 +177,26 @@ typedef struct {
        int counter;
 } atomic_t;
 
+#ifdef CONFIG_PAX_REFCOUNT
+typedef struct {
+       int counter;
+} atomic_unchecked_t;
+#else
+typedef atomic_t atomic_unchecked_t;
+#endif
+
 #ifdef CONFIG_64BIT
 typedef struct {
        long counter;
 } atomic64_t;
+
+#ifdef CONFIG_PAX_REFCOUNT
+typedef struct {
+       long counter;
+} atomic64_unchecked_t;
+#else
+typedef atomic64_t atomic64_unchecked_t;
+#endif
 #endif
 
 struct list_head {
index ecd3319dac33140a21a7c8fd89a2c95c18ce9e42..8a36ded2a07650e3a83747c7354a76f418a1c823 100644 (file)
@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
                long ret;                               \
                mm_segment_t old_fs = get_fs();         \
                                                        \
-               set_fs(KERNEL_DS);                      \
                pagefault_disable();                    \
-               ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval));            \
-               pagefault_enable();                     \
+               set_fs(KERNEL_DS);                      \
+               ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval));              \
                set_fs(old_fs);                         \
+               pagefault_enable();                     \
                ret;                                    \
        })
 
index 2d1f9b627f91971f2d34be6d3c375a6362c1e751..d7a9fcec7ad7c428864dedaddd4cceb68055cb3d 100644 (file)
@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
 
 #endif /* CONFIG_USER_NS */
 
+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
+
 #endif /* _LINUX_UIDGID_H */
index 32c0e83d62397355e1b6bb804c1d0381f56daca1..671eb3518ab3741d6c95c1fb5be83b4225e8e98a 100644 (file)
@@ -67,7 +67,7 @@ struct uio_device {
         struct module           *owner;
         struct device           *dev;
         int                     minor;
-        atomic_t                event;
+        atomic_unchecked_t      event;
         struct fasync_struct    *async_queue;
         wait_queue_head_t       wait;
         struct uio_info         *info;
index 99c1b4d20b0ff22012d4b59b53b3e9f9be6b4236..562e6f3a748e676fd80729cc9062edcaf9f61331 100644 (file)
@@ -4,34 +4,34 @@
 #include <linux/kernel.h>
 #include <asm/byteorder.h>
 
-static inline u16 get_unaligned_le16(const void *p)
+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
 {
-       return le16_to_cpup((__le16 *)p);
+       return le16_to_cpup((const __le16 *)p);
 }
 
-static inline u32 get_unaligned_le32(const void *p)
+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
 {
-       return le32_to_cpup((__le32 *)p);
+       return le32_to_cpup((const __le32 *)p);
 }
 
-static inline u64 get_unaligned_le64(const void *p)
+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
 {
-       return le64_to_cpup((__le64 *)p);
+       return le64_to_cpup((const __le64 *)p);
 }
 
-static inline u16 get_unaligned_be16(const void *p)
+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
 {
-       return be16_to_cpup((__be16 *)p);
+       return be16_to_cpup((const __be16 *)p);
 }
 
-static inline u32 get_unaligned_be32(const void *p)
+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
 {
-       return be32_to_cpup((__be32 *)p);
+       return be32_to_cpup((const __be32 *)p);
 }
 
-static inline u64 get_unaligned_be64(const void *p)
+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
 {
-       return be64_to_cpup((__be64 *)p);
+       return be64_to_cpup((const __be64 *)p);
 }
 
 static inline void put_unaligned_le16(u16 val, void *p)
index 058a7698d7e36945ee0ed1e9607e6cf027127ba2..c17a1c2c88530c27eabbec17d2dcfe57f0362ed4 100644 (file)
@@ -566,7 +566,7 @@ struct usb_device {
        int maxchild;
 
        u32 quirks;
-       atomic_t urbnum;
+       atomic_unchecked_t urbnum;
 
        unsigned long active_duration;
 
@@ -1650,7 +1650,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
 
 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
        __u8 request, __u8 requesttype, __u16 value, __u16 index,
-       void *data, __u16 size, int timeout);
+       void *data, __u16 size, int timeout) __intentional_overflow(-1);
 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
        void *data, int len, int *actual_length, int timeout);
 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
index 9fd9e481ea982e7ac08d4d0e5243e2239a7b37a1..e2c5f356049888fe1388c5567d9cdcf24e5a1164 100644 (file)
@@ -39,7 +39,7 @@ enum {
  */
 struct renesas_usbhs_driver_callback {
        int (*notify_hotplug)(struct platform_device *pdev);
-};
+} __no_const;
 
 /*
  * callback functions for platform
index 8297e5b341d863aa627a3b44a70d025bc8bc7c30..0dfae27ff48bc1da851572210099ddbd83557d23 100644 (file)
@@ -39,7 +39,7 @@ struct user_namespace {
        struct key              *persistent_keyring_register;
        struct rw_semaphore     persistent_keyring_register_sem;
 #endif
-};
+} __randomize_layout;
 
 extern struct user_namespace init_user_ns;
 
index 5093f58ae192c379b13b9e5e74605a0e6c571d25..c103e58bcc9dfd2f910ac8d34ffc1cce89256eab 100644 (file)
@@ -25,7 +25,7 @@ struct uts_namespace {
        struct new_utsname name;
        struct user_namespace *user_ns;
        struct ns_common ns;
-};
+} __randomize_layout;
 extern struct uts_namespace init_uts_ns;
 
 #ifdef CONFIG_UTS_NS
index 6f8fbcf10dfb8ac6dd3ead787c5e4cf27e142c42..4efc177ca5109db133f531a090cb5e2903d3af57 100644 (file)
 #define MODULE_ARCH_VERMAGIC ""
 #endif
 
+#ifdef CONFIG_PAX_REFCOUNT
+#define MODULE_PAX_REFCOUNT "REFCOUNT "
+#else
+#define MODULE_PAX_REFCOUNT ""
+#endif
+
+#ifdef CONSTIFY_PLUGIN
+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
+#else
+#define MODULE_CONSTIFY_PLUGIN ""
+#endif
+
+#ifdef STACKLEAK_PLUGIN
+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
+#else
+#define MODULE_STACKLEAK_PLUGIN ""
+#endif
+
+#ifdef RANDSTRUCT_PLUGIN
+#include <generated/randomize_layout_hash.h>
+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
+#else
+#define MODULE_RANDSTRUCT_PLUGIN
+#endif
+
+#ifdef CONFIG_GRKERNSEC
+#define MODULE_GRSEC "GRSEC "
+#else
+#define MODULE_GRSEC ""
+#endif
+
 #define VERMAGIC_STRING                                                \
        UTS_RELEASE " "                                                 \
        MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT                     \
        MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS       \
-       MODULE_ARCH_VERMAGIC
+       MODULE_ARCH_VERMAGIC                                            \
+       MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
+       MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
 
index b483abd344934f9cb9827d112883bfdf8e861d2b..af305ade59329243335fa6ed8c210a17a7498421 100644 (file)
@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
 
 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
 
-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
 void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
 #else
 
 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
 
 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
 
-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
 static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
 
 #endif
 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
index b87696fdf06ab14d14ff01f5cfd7b2650f1e6c96..1d11de76781b69fe02d1a7ede935b93fe4bd5682 100644 (file)
@@ -16,6 +16,11 @@ struct vm_area_struct;               /* vma defining user mapping in mm_types.h */
 #define VM_USERMAP             0x00000008      /* suitable for remap_vmalloc_range */
 #define VM_VPAGES              0x00000010      /* buffer for pages was vmalloc'ed */
 #define VM_UNINITIALIZED       0x00000020      /* vm_struct is not fully initialized */
+
+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
+#define VM_KERNEXEC            0x00000040      /* allocate from executable kernel memory range */
+#endif
+
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
@@ -82,6 +87,10 @@ extern void *vmap(struct page **pages, unsigned int count,
                        unsigned long flags, pgprot_t prot);
 extern void vunmap(const void *addr);
 
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
+extern void unmap_process_stacks(struct task_struct *task);
+#endif
+
 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
                                       unsigned long uaddr, void *kaddr,
                                       unsigned long size);
@@ -142,7 +151,7 @@ extern void free_vm_area(struct vm_struct *area);
 
 /* for /dev/kmem */
 extern long vread(char *buf, char *addr, unsigned long count);
-extern long vwrite(char *buf, char *addr, unsigned long count);
+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
 
 /*
  *     Internals.  Dont't use..
index 82e7db7f7100f9e141de4a90196bffa24814ecc8..f8ce3d0b0780b214a844c6ef61a309f7ee24f9c1 100644 (file)
@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
 /*
  * Zone based page accounting with per cpu differentials.
  */
-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
 
 static inline void zone_page_state_add(long x, struct zone *zone,
                                 enum zone_stat_item item)
 {
-       atomic_long_add(x, &zone->vm_stat[item]);
-       atomic_long_add(x, &vm_stat[item]);
+       atomic_long_add_unchecked(x, &zone->vm_stat[item]);
+       atomic_long_add_unchecked(x, &vm_stat[item]);
 }
 
-static inline unsigned long global_page_state(enum zone_stat_item item)
+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
 {
-       long x = atomic_long_read(&vm_stat[item]);
+       long x = atomic_long_read_unchecked(&vm_stat[item]);
 #ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
        return x;
 }
 
-static inline unsigned long zone_page_state(struct zone *zone,
+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
                                        enum zone_stat_item item)
 {
-       long x = atomic_long_read(&zone->vm_stat[item]);
+       long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
 #ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
                                        enum zone_stat_item item)
 {
-       long x = atomic_long_read(&zone->vm_stat[item]);
+       long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
 
 #ifdef CONFIG_SMP
        int cpu;
@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
 
 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-       atomic_long_inc(&zone->vm_stat[item]);
-       atomic_long_inc(&vm_stat[item]);
+       atomic_long_inc_unchecked(&zone->vm_stat[item]);
+       atomic_long_inc_unchecked(&vm_stat[item]);
 }
 
 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-       atomic_long_dec(&zone->vm_stat[item]);
-       atomic_long_dec(&vm_stat[item]);
+       atomic_long_dec_unchecked(&zone->vm_stat[item]);
+       atomic_long_dec_unchecked(&vm_stat[item]);
 }
 
 static inline void __inc_zone_page_state(struct page *page,
index 91b0a68d38dc2a4941c1b36a8cac270af0136422..0e9adf68f66b3fef26810abfbd7edda44e419239 100644 (file)
@@ -28,7 +28,7 @@ struct xattr_handler {
                   size_t size, int handler_flags);
        int (*set)(struct dentry *dentry, const char *name, const void *buffer,
                   size_t size, int flags, int handler_flags);
-};
+} __do_const;
 
 struct xattr {
        const char *name;
@@ -37,6 +37,9 @@ struct xattr {
 };
 
 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
+ssize_t pax_getxattr(struct dentry *, void *, size_t);
+#endif
 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
index 92dbbd3f6c757178d66b0b0e3d9f05789a05e02e..13ab0b38936f6a861f7ca5b858834deb2e27fab4 100644 (file)
@@ -31,6 +31,7 @@
 #define _ZLIB_H
 
 #include <linux/zconf.h>
+#include <linux/compiler.h>
 
 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
 
                         /* basic functions */
 
-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
 /*
    Returns the number of bytes that needs to be allocated for a per-
    stream workspace with the specified parameters.  A pointer to this
index eb76cfd4718930fff0dab85a0c8f032e65e24b2d..9fd0e7c4c5040d4d3c17e84fe0a1e07d8b962841 100644 (file)
@@ -75,7 +75,7 @@ struct v4l2_file_operations {
        int (*mmap) (struct file *, struct vm_area_struct *);
        int (*open) (struct file *);
        int (*release) (struct file *);
-};
+} __do_const;
 
 /*
  * Newer version of video_device, handled by videodev2.c
index ffb69da3ce9ef85fc0271b89b9fc7a374c608580..040393eb6d16b31999155db49d3c67770958311b 100644 (file)
@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
    this function returns 0. If the name ends with a digit (e.g. cx18),
    then the name will be set to cx18-0 since cx180 looks really odd. */
 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
-                                               atomic_t *instance);
+                                               atomic_unchecked_t *instance);
 
 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
    Since the parent disappears this ensures that v4l2_dev doesn't have an
index 2a25dec3021166d5aba52ad155e8ca01e0b1570e..bf6dd8a22c72e5b0e7d60a92f542e2ec2ae09527 100644 (file)
@@ -62,7 +62,7 @@ struct p9_trans_module {
        int (*cancelled)(struct p9_client *, struct p9_req_t *req);
        int (*zc_request)(struct p9_client *, struct p9_req_t *,
                          char *, char *, int , int, int, int);
-};
+} __do_const;
 
 void v9fs_register_trans(struct p9_trans_module *m);
 void v9fs_unregister_trans(struct p9_trans_module *m);
index a175ba4a7adbc615c350d8b8daaf7b082049f1ec..196eb82424329712e3b03aabe08e33418139cd4b 100644 (file)
@@ -36,7 +36,7 @@ struct unix_skb_parms {
        u32                     secid;          /* Security ID          */
 #endif
        u32                     consumed;
-};
+} __randomize_layout;
 
 #define UNIXCB(skb)    (*(struct unix_skb_parms *)&((skb)->cb))
 #define UNIXSID(skb)   (&UNIXCB((skb)).secid)
index d1bb342d083f14efc11ba4992e8c7509a67deac1..e12f7d2b327fe556b3c9a85b2aaea5c7534bf1c5 100644 (file)
@@ -608,7 +608,7 @@ struct l2cap_ops {
        struct sk_buff          *(*alloc_skb) (struct l2cap_chan *chan,
                                               unsigned long hdr_len,
                                               unsigned long len, int nb);
-};
+} __do_const;
 
 struct l2cap_conn {
        struct hci_conn         *hcon;
index 983a94b86b954c90548df20fe6f574efb6359c70..7aa9b1651818f807ea0513a44ea810627f1f7ef1 100644 (file)
@@ -647,7 +647,7 @@ extern struct rtnl_link_ops bond_link_ops;
 
 static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
 {
-       atomic_long_inc(&dev->tx_dropped);
+       atomic_long_inc_unchecked(&dev->tx_dropped);
        dev_kfree_skb_any(skb);
 }
 
index f2ae33d23baf44692ef694ad24c978cbdbe3f245..c457cf08bc76f5b0548b57625972ee506650e1bc 100644 (file)
@@ -52,7 +52,7 @@ struct cfctrl_rsp {
        void (*radioset_rsp)(void);
        void (*reject_rsp)(struct cflayer *layer, u8 linkid,
                                struct cflayer *client_layer);
-};
+} __no_const;
 
 /* Link Setup Parameters for CAIF-Links. */
 struct cfctrl_link_param {
@@ -101,8 +101,8 @@ struct cfctrl_request_info {
 struct cfctrl {
        struct cfsrvl serv;
        struct cfctrl_rsp res;
-       atomic_t req_seq_no;
-       atomic_t rsp_seq_no;
+       atomic_unchecked_t req_seq_no;
+       atomic_unchecked_t rsp_seq_no;
        struct list_head list;
        /* Protects from simultaneous access to first_req list */
        spinlock_t info_list_lock;
index 8109a159d1b3ba5ced3aa6d2bc10b9f01d274520..504466d3adf4fc21e0e73b2af00c04695da119bd 100644 (file)
@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
 
 void flow_cache_flush(struct net *net);
 void flow_cache_flush_deferred(struct net *net);
-extern atomic_t flow_cache_genid;
+extern atomic_unchecked_t flow_cache_genid;
 
 #endif
index 6c92415311cacb3ee39ff768edc3d2a4461e922f..3a352d8473402b3b68fd06332aa8ea4113fa9d7e 100644 (file)
@@ -130,7 +130,7 @@ struct genl_ops {
        u8                      cmd;
        u8                      internal_flags;
        u8                      flags;
-};
+} __do_const;
 
 int __genl_register_family(struct genl_family *family);
 
index 734d9b5f577a8036594251ced6e1af265fbc1c6a..48a9a4b34cf4840c49ab121948dfe1a1f33d4fe1 100644 (file)
@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
                cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
 
        if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
-               atomic_long_inc(&dev->rx_dropped);
+               atomic_long_inc_unchecked(&dev->rx_dropped);
                kfree_skb(skb);
                return;
        }
index 848e85cb5c6128ecfe101e386657ce355f507b5c..051c7de34814285a3827cd8ecff2fef261a91fc9 100644 (file)
@@ -63,7 +63,7 @@ struct inet_connection_sock_af_ops {
        int         (*bind_conflict)(const struct sock *sk,
                                     const struct inet_bind_bucket *tb, bool relax);
        void        (*mtu_reduced)(struct sock *sk);
-};
+} __do_const;
 
 /** inet_connection_sock - INET connection oriented sock
  *
index 80479abddf73cc181899e1115a572727f578633d..0c3f647bfdd57beec30f97f1bc8064c0279a71d8 100644 (file)
@@ -47,7 +47,7 @@ struct inet_peer {
         */
        union {
                struct {
-                       atomic_t                        rid;            /* Frag reception counter */
+                       atomic_unchecked_t              rid;            /* Frag reception counter */
                };
                struct rcu_head         rcu;
                struct inet_peer        *gc_next;
index 09cf5aebb28368fbb93c974f14a78c3fa9a6f408..ab62fcf2cd7c865f10475721d603257f0b083493 100644 (file)
@@ -317,7 +317,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
        }
 }
 
-u32 ip_idents_reserve(u32 hash, int segs);
+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
 void __ip_select_ident(struct iphdr *iph, int segs);
 
 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
index 09a819ee21519b36a3a841c3383267203444526b..3ab9e145053113b30a865b96e6cca41cb5f43497 100644 (file)
@@ -170,7 +170,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
 
 #define FIB_RES_SADDR(net, res)                                \
        ((FIB_RES_NH(res).nh_saddr_genid ==             \
-         atomic_read(&(net)->ipv4.dev_addr_genid)) ?   \
+         atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
         FIB_RES_NH(res).nh_saddr :                     \
         fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
 #define FIB_RES_GW(res)                        (FIB_RES_NH(res).nh_gw)
index 615b20b585452111a25085890d8fa875657dbe76..fd4cbd8b04dd2c91061f9ae6595691940a815dd7 100644 (file)
@@ -534,7 +534,7 @@ struct ip_vs_conn {
        struct ip_vs_conn       *control;       /* Master control connection */
        atomic_t                n_control;      /* Number of controlled ones */
        struct ip_vs_dest       *dest;          /* real server */
-       atomic_t                in_pkts;        /* incoming packet counter */
+       atomic_unchecked_t      in_pkts;        /* incoming packet counter */
 
        /* Packet transmitter for different forwarding methods.  If it
         * mangles the packet, it must return NF_DROP or better NF_STOLEN,
@@ -682,7 +682,7 @@ struct ip_vs_dest {
        __be16                  port;           /* port number of the server */
        union nf_inet_addr      addr;           /* IP address of the server */
        volatile unsigned int   flags;          /* dest status flags */
-       atomic_t                conn_flags;     /* flags to copy to conn */
+       atomic_unchecked_t      conn_flags;     /* flags to copy to conn */
        atomic_t                weight;         /* server weight */
 
        atomic_t                refcnt;         /* reference counter */
@@ -928,11 +928,11 @@ struct netns_ipvs {
        /* ip_vs_lblc */
        int                     sysctl_lblc_expiration;
        struct ctl_table_header *lblc_ctl_header;
-       struct ctl_table        *lblc_ctl_table;
+       ctl_table_no_const      *lblc_ctl_table;
        /* ip_vs_lblcr */
        int                     sysctl_lblcr_expiration;
        struct ctl_table_header *lblcr_ctl_header;
-       struct ctl_table        *lblcr_ctl_table;
+       ctl_table_no_const      *lblcr_ctl_table;
        /* ip_vs_est */
        struct list_head        est_list;       /* estimator list */
        spinlock_t              est_lock;
index 8d4f588974bce7943659779b22386230a780892a..2e37ad21520ab73f6ba62f8e94e0cf4207fdf07f 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/termios.h>
 #include <linux/timer.h>
 #include <linux/tty.h>         /* struct tty_struct */
+#include <asm/local.h>
 
 #include <net/irda/irias_object.h>
 #include <net/irda/ircomm_core.h>
index 714cc9a54a4c38575f15581cb379a20f6615b3e2..ea05f3e9a0ea61e322327a031832579302f36f0c 100644 (file)
@@ -149,7 +149,7 @@ struct iucv_skb_cb {
 struct iucv_sock_list {
        struct hlist_head head;
        rwlock_t          lock;
-       atomic_t          autobind_name;
+       atomic_unchecked_t autobind_name;
 };
 
 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
index f3be818e73c1fbe1034d63bf6f2d132848060908..bf461964837bfe1fcce59af8273ffefa7233c0b9 100644 (file)
@@ -87,7 +87,7 @@
 #define LLC_CONN_AC_STOP_SENDACK_TMR                   70
 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING   71
 
-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
 
 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
index 3948cf111dd0d6d4948ed3b7c40740b82bfe41bc..83b28c48c523b00617d1b68b6a0ad0b0474252fd 100644 (file)
@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
        return (struct llc_conn_state_ev *)skb->cb;
 }
 
-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
 
 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
index 48f3f891b2f96157b47a8e4bee054f9558c2aef3..0e92c50d32fb228fae5a6d776f21c9bab9b14946 100644 (file)
@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
        u8                 next_state;
        const llc_conn_ev_qfyr_t *ev_qualifiers;
        const llc_conn_action_t  *ev_actions;
-};
+} __do_const;
 
 struct llc_conn_state {
        u8                          current_state;
index a61b98c108ee2c4df241a7d2318249427b686951..aade1eb047e111953159e2d54a4636ed00874963 100644 (file)
@@ -23,7 +23,7 @@
 #define SAP_ACT_TEST_IND       9
 
 /* All action functions must look like this */
-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
 
 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
index c4359e203013c097837f06ca826f3b887c21bfed..76dbc4a281f9fa4d50c074725eaf9d4a3fd688ed 100644 (file)
@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
        llc_sap_ev_t      ev;
        u8                next_state;
        const llc_sap_action_t *ev_actions;
-};
+} __do_const;
 
 struct llc_sap_state {
        u8                         curr_state;
index 29c7be8808d52b21e0c41949ce62d816d8b58372..746bd7323d9a70ade93a598b6ba734b57ce2cd26 100644 (file)
@@ -4869,7 +4869,7 @@ struct rate_control_ops {
        void (*remove_sta_debugfs)(void *priv, void *priv_sta);
 
        u32 (*get_expected_throughput)(void *priv_sta);
-};
+} __do_const;
 
 static inline int rate_supported(struct ieee80211_sta *sta,
                                 enum ieee80211_band band,
index 76f708486aaec76031a24ee5ff1d02f126185304..8f36e394361d5fcf298bd0fe6ccb34ffe28df31d 100644 (file)
@@ -163,7 +163,7 @@ struct neigh_ops {
        void                    (*error_report)(struct neighbour *, struct sk_buff *);
        int                     (*output)(struct neighbour *, struct sk_buff *);
        int                     (*connected_output)(struct neighbour *, struct sk_buff *);
-};
+} __do_const;
 
 struct pneigh_entry {
        struct pneigh_entry     *next;
@@ -217,7 +217,7 @@ struct neigh_table {
        struct neigh_statistics __percpu *stats;
        struct neigh_hash_table __rcu *nht;
        struct pneigh_entry     **phash_buckets;
-};
+} __randomize_layout;
 
 enum {
        NEIGH_ARP_TABLE = 0,
index 2e8756b8c77543391b1c3381fa442c2a2a1c582e..0bd00839ea5ed4ba6171b12427e7459db6f81989 100644 (file)
@@ -130,8 +130,8 @@ struct net {
        struct netns_ipvs       *ipvs;
 #endif
        struct sock             *diag_nlsk;
-       atomic_t                fnhe_genid;
-};
+       atomic_unchecked_t      fnhe_genid;
+} __randomize_layout;
 
 #include <linux/seq_file_net.h>
 
@@ -287,7 +287,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
 #define __net_init     __init
 #define __net_exit     __exit_refok
 #define __net_initdata __initdata
+#ifdef CONSTIFY_PLUGIN
 #define __net_initconst        __initconst
+#else
+#define __net_initconst        __initdata
+#endif
 #endif
 
 struct pernet_operations {
@@ -297,7 +301,7 @@ struct pernet_operations {
        void (*exit_batch)(struct list_head *net_exit_list);
        int *id;
        size_t size;
-};
+} __do_const;
 
 /*
  * Use these carefully.  If you implement a network device and it
@@ -345,12 +349,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
 
 static inline int rt_genid_ipv4(struct net *net)
 {
-       return atomic_read(&net->ipv4.rt_genid);
+       return atomic_read_unchecked(&net->ipv4.rt_genid);
 }
 
 static inline void rt_genid_bump_ipv4(struct net *net)
 {
-       atomic_inc(&net->ipv4.rt_genid);
+       atomic_inc_unchecked(&net->ipv4.rt_genid);
 }
 
 extern void (*__fib6_flush_trees)(struct net *net);
@@ -377,12 +381,12 @@ static inline void rt_genid_bump_all(struct net *net)
 
 static inline int fnhe_genid(struct net *net)
 {
-       return atomic_read(&net->fnhe_genid);
+       return atomic_read_unchecked(&net->fnhe_genid);
 }
 
 static inline void fnhe_genid_bump(struct net *net)
 {
-       atomic_inc(&net->fnhe_genid);
+       atomic_inc_unchecked(&net->fnhe_genid);
 }
 
 #endif /* __NET_NET_NAMESPACE_H */
index 64158353ecb2750a3165dc07e915755ccb801522..ab96d87c98f00553a048fe66b5f4ed2c373458f9 100644 (file)
@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
 {
        if (mark)
-               skb_trim(skb, (unsigned char *) mark - skb->data);
+               skb_trim(skb, (const unsigned char *) mark - skb->data);
 }
 
 /**
index 29d6a94db54d6136b6b380d5817341d2bbe83bce..235d3d841cc7966cd33083f69f640f958c2aaf60 100644 (file)
@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
 struct nf_proto_net {
 #ifdef CONFIG_SYSCTL
        struct ctl_table_header *ctl_table_header;
-       struct ctl_table        *ctl_table;
+       ctl_table_no_const      *ctl_table;
 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
        struct ctl_table_header *ctl_compat_header;
-       struct ctl_table        *ctl_compat_table;
+       ctl_table_no_const      *ctl_compat_table;
 #endif
 #endif
        unsigned int            users;
@@ -60,7 +60,7 @@ struct nf_ip_net {
        struct nf_icmp_net      icmpv6;
 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
        struct ctl_table_header *ctl_table_header;
-       struct ctl_table        *ctl_table;
+       ctl_table_no_const      *ctl_table;
 #endif
 };
 
index 0ffef1a38efcc2f75e78dab09aa5e111f5b8b72f..2ce1ceba76714a403a9eefb83340a80b393702fc 100644 (file)
@@ -84,7 +84,7 @@ struct netns_ipv4 {
 
        struct ping_group_range ping_group_range;
 
-       atomic_t dev_addr_genid;
+       atomic_unchecked_t dev_addr_genid;
 
 #ifdef CONFIG_SYSCTL
        unsigned long *sysctl_local_reserved_ports;
@@ -98,6 +98,6 @@ struct netns_ipv4 {
        struct fib_rules_ops    *mr_rules_ops;
 #endif
 #endif
-       atomic_t        rt_genid;
+       atomic_unchecked_t      rt_genid;
 };
 #endif
index 69ae41f2098c159548a9e6ee6b432eabfa369785..4f948680c63ce690a2e8eb43cd1dd071d9ec7cff 100644 (file)
@@ -75,8 +75,8 @@ struct netns_ipv6 {
        struct fib_rules_ops    *mr6_rules_ops;
 #endif
 #endif
-       atomic_t                dev_addr_genid;
-       atomic_t                fib6_sernum;
+       atomic_unchecked_t      dev_addr_genid;
+       atomic_unchecked_t      fib6_sernum;
 };
 
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
index 730d82ad6ee521beddc3af344d8e99a339f74dd5..045f2c4449ea452813c74e09a87da0a9ff940569 100644 (file)
@@ -78,7 +78,7 @@ struct netns_xfrm {
 
        /* flow cache part */
        struct flow_cache       flow_cache_global;
-       atomic_t                flow_cache_genid;
+       atomic_unchecked_t      flow_cache_genid;
        struct list_head        flow_cache_gc_list;
        spinlock_t              flow_cache_gc_lock;
        struct work_struct      flow_cache_gc_work;
index f074060bc5de763a3488054db67a67e7a0a9ab00..830fba0920e8a87222be7d1b3fc27c78a21686ab 100644 (file)
@@ -54,7 +54,7 @@ struct ping_iter_state {
 
 extern struct proto ping_prot;
 #if IS_ENABLED(CONFIG_IPV6)
-extern struct pingv6_ops pingv6_ops;
+extern struct pingv6_ops *pingv6_ops;
 #endif
 
 struct pingfakehdr {
index d6fcc1fcdb5b0928a0bd89279e11819a9cc3169d..ca277058c3ecf1b3efe3e96679b9243b398d89ec 100644 (file)
@@ -49,7 +49,7 @@ struct net_protocol {
                                 * socket lookup?
                                 */
                                icmp_strict_tag_validation:1;
-};
+} __do_const;
 
 #if IS_ENABLED(CONFIG_IPV6)
 struct inet6_protocol {
@@ -62,7 +62,7 @@ struct inet6_protocol {
                               u8 type, u8 code, int offset,
                               __be32 info);
        unsigned int    flags;  /* INET6_PROTO_xxx */
-};
+} __do_const;
 
 #define INET6_PROTO_NOPOLICY   0x1
 #define INET6_PROTO_FINAL      0x2
index e21b9f9653c011fe11634e2e5e745d4e5d7a9bf9..0191ef0445389a58f343cbb8a41cfc5dd67856d3 100644 (file)
@@ -93,7 +93,7 @@ struct rtnl_link_ops {
        int                     (*fill_slave_info)(struct sk_buff *skb,
                                                   const struct net_device *dev,
                                                   const struct net_device *slave_dev);
-};
+} __do_const;
 
 int __rtnl_link_register(struct rtnl_link_ops *ops);
 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
index 4a5b9a306c69b4139c8811138204410bb791e79b..ca27d7364e084a4bb3c38ec93a6fec3b947c0369 100644 (file)
@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
                                        unsigned int offset)
 {
        struct sctphdr *sh = sctp_hdr(skb);
-        __le32 ret, old = sh->checksum;
-       const struct skb_checksum_ops ops = {
+       __le32 ret, old = sh->checksum;
+       static const struct skb_checksum_ops ops = {
                .update  = sctp_csum_update,
                .combine = sctp_csum_combine,
        };
index 487ef34bbd63ff1cfe511c7ee8b1501593a14de3..d457f98e548256cc7f87597854e9b29207a6b9a6 100644 (file)
@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
 typedef struct {
        sctp_state_fn_t *fn;
        const char *name;
-} sctp_sm_table_entry_t;
+} __do_const sctp_sm_table_entry_t;
 
 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
  * currently in use.
@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
 
 /* Extern declarations for major data structures.  */
-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
 
 
 /* Get the size of a DATA chunk payload. */
index 2bb2fcf5b11f0387c81b860ad2d3a6607da19a7d..d17c2918ce9b944c1b398949e3c3b1348170b39e 100644 (file)
@@ -509,7 +509,7 @@ struct sctp_pf {
        void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
        void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
        struct sctp_af *af;
-};
+} __do_const;
 
 
 /* Structure to track chunk fragments that have been acked, but peer
index 2210fec65669c870384fe87f1d536eca34ab3586..2249ad014115e6eaeb6a5833eda074c8df6669e3 100644 (file)
@@ -362,7 +362,7 @@ struct sock {
        unsigned int            sk_napi_id;
        unsigned int            sk_ll_usec;
 #endif
-       atomic_t                sk_drops;
+       atomic_unchecked_t      sk_drops;
        int                     sk_rcvbuf;
 
        struct sk_filter __rcu  *sk_filter;
@@ -1061,7 +1061,7 @@ struct proto {
        void                    (*destroy_cgroup)(struct mem_cgroup *memcg);
        struct cg_proto         *(*proto_cgroup)(struct mem_cgroup *memcg);
 #endif
-};
+} __randomize_layout;
 
 /*
  * Bits in struct cg_proto.flags
@@ -1239,7 +1239,7 @@ static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
        page_counter_uncharge(&prot->memory_allocated, amt);
 }
 
-static inline long
+static inline long __intentional_overflow(-1)
 sk_memory_allocated(const struct sock *sk)
 {
        struct proto *prot = sk->sk_prot;
@@ -1385,7 +1385,7 @@ struct sock_iocb {
        struct scm_cookie       *scm;
        struct msghdr           *msg, async_msg;
        struct kiocb            *kiocb;
-};
+} __randomize_layout;
 
 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
 {
@@ -1826,7 +1826,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
 }
 
 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
-                                          char __user *from, char *to,
+                                          char __user *from, unsigned char *to,
                                           int copy, int offset)
 {
        if (skb->ip_summed == CHECKSUM_NONE) {
@@ -2075,7 +2075,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
        }
 }
 
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
 
 /**
  * sk_page_frag - return an appropriate page_frag
index 9d9111ef43ae305ad60e11c4b848b2b5f0a7eec3..349c84738eb446c5086a5cb7d892f7c42391c87e 100644 (file)
@@ -516,7 +516,7 @@ void tcp_retransmit_timer(struct sock *sk);
 void tcp_xmit_retransmit_queue(struct sock *);
 void tcp_simple_retransmit(struct sock *);
 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
 
 void tcp_send_probe0(struct sock *);
 void tcp_send_partial(struct sock *);
@@ -689,8 +689,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
  * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
  */
 struct tcp_skb_cb {
-       __u32           seq;            /* Starting sequence number     */
-       __u32           end_seq;        /* SEQ + FIN + SYN + datalen    */
+       __u32           seq __intentional_overflow(0);  /* Starting sequence number     */
+       __u32           end_seq __intentional_overflow(0);      /* SEQ + FIN + SYN + datalen    */
        union {
                /* Note : tcp_tw_isn is used in input path only
                 *        (isn chosen by tcp_timewait_state_process())
@@ -715,7 +715,7 @@ struct tcp_skb_cb {
 
        __u8            ip_dsfield;     /* IPv4 tos or IPv6 dsfield     */
        /* 1 byte hole */
-       __u32           ack_seq;        /* Sequence number ACK'd        */
+       __u32           ack_seq __intentional_overflow(0);      /* Sequence number ACK'd        */
        union {
                struct inet_skb_parm    h4;
 #if IS_ENABLED(CONFIG_IPV6)
index dc4865e90fe489c4859659b3a937e7c1f8a7a4bb..152ee4c0317d53c104107171f5e742f951c32cac 100644 (file)
@@ -285,7 +285,6 @@ struct xfrm_dst;
 struct xfrm_policy_afinfo {
        unsigned short          family;
        struct dst_ops          *dst_ops;
-       void                    (*garbage_collect)(struct net *net);
        struct dst_entry        *(*dst_lookup)(struct net *net, int tos,
                                               const xfrm_address_t *saddr,
                                               const xfrm_address_t *daddr);
@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
                                            struct net_device *dev,
                                            const struct flowi *fl);
        struct dst_entry        *(*blackhole_route)(struct net *net, struct dst_entry *orig);
-};
+} __do_const;
 
 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
        int                     (*transport_finish)(struct sk_buff *skb,
                                                    int async);
        void                    (*local_error)(struct sk_buff *skb, u32 mtu);
-};
+} __do_const;
 
 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
@@ -437,7 +436,7 @@ struct xfrm_mode {
        struct module *owner;
        unsigned int encap;
        int flags;
-};
+} __do_const;
 
 /* Flags for xfrm_mode. */
 enum {
@@ -534,7 +533,7 @@ struct xfrm_policy {
        struct timer_list       timer;
 
        struct flow_cache_object flo;
-       atomic_t                genid;
+       atomic_unchecked_t      genid;
        u32                     priority;
        u32                     index;
        struct xfrm_mark        mark;
@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
 }
 
 void xfrm_garbage_collect(struct net *net);
+void xfrm_garbage_collect_deferred(struct net *net);
 
 #else
 
@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
 static inline void xfrm_garbage_collect(struct net *net)
 {
 }
+static inline void xfrm_garbage_collect_deferred(struct net *net)
+{
+}
 #endif
 
 static __inline__
index 1017e0bdf8baa75beb5ce0a13f852ddd7c683c4d..227aa4dcd28ce1151923ad356357b83fc8ea401f 100644 (file)
@@ -122,7 +122,7 @@ struct iw_cm_verbs {
                                         int backlog);
 
        int             (*destroy_listen)(struct iw_cm_id *cm_id);
-};
+} __no_const;
 
 /**
  * iw_create_cm_id - Create an IW CM identifier.
index 93d14daf0994725822461a8dbe36e97d52f56e51..734b3d83502dcd1cc7d185288138ffec2b586d20 100644 (file)
@@ -771,6 +771,7 @@ struct libfc_function_template {
         */
        void (*disc_stop_final) (struct fc_lport *);
 };
+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
 
 /**
  * struct fc_disc - Discovery context
@@ -875,7 +876,7 @@ struct fc_lport {
        struct fc_vport                *vport;
 
        /* Operational Information */
-       struct libfc_function_template tt;
+       libfc_function_template_no_const tt;
        u8                             link_up;
        u8                             qfull;
        enum fc_lport_state            state;
index 3a4edd1f7dbb15db835dd1b787dd30a0a29eae49..feb2e3eb61655d87b6e6385589a94fc24f1f7c34 100644 (file)
@@ -185,9 +185,9 @@ struct scsi_device {
        unsigned int max_device_blocked; /* what device_blocked counts down from  */
 #define SCSI_DEFAULT_DEVICE_BLOCKED    3
 
-       atomic_t iorequest_cnt;
-       atomic_t iodone_cnt;
-       atomic_t ioerr_cnt;
+       atomic_unchecked_t iorequest_cnt;
+       atomic_unchecked_t iodone_cnt;
+       atomic_unchecked_t ioerr_cnt;
 
        struct device           sdev_gendev,
                                sdev_dev;
index 007a0bc01b74a0a8779d759db6c091fa05f42404..7188db8655760c9741124e1ce1a7acda1e769207 100644 (file)
@@ -756,7 +756,8 @@ struct fc_function_template {
        unsigned long   show_host_system_hostname:1;
 
        unsigned long   disable_target_scan:1;
-};
+} __do_const;
+typedef struct fc_function_template __no_const fc_function_template_no_const;
 
 
 /**
index 396e8f73670a59a2eea321867d36a6b3d0a22b2d..b037e8946329d25fe4b55f65858ee45f9db036e8 100644 (file)
@@ -129,7 +129,7 @@ struct snd_compr_ops {
                        struct snd_compr_caps *caps);
        int (*get_codec_caps) (struct snd_compr_stream *stream,
                        struct snd_compr_codec_caps *codec);
-};
+} __no_const;
 
 /**
  * struct snd_compr: Compressed device
index ac8b333acb4dd721596db8ddf9bf7fcbc7ab6c1d..59c36922bef3450b0fc2f34cc3d1241ac4fb1fd6 100644 (file)
@@ -853,7 +853,7 @@ struct snd_soc_codec_driver {
                             enum snd_soc_dapm_type, int);
 
        bool ignore_pmdown_time;  /* Doesn't benefit from pmdown delay */
-};
+} __do_const;
 
 /* SoC platform interface */
 struct snd_soc_platform_driver {
@@ -880,7 +880,7 @@ struct snd_soc_platform_driver {
        const struct snd_compr_ops *compr_ops;
 
        int (*bespoke_trigger)(struct snd_pcm_substream *, int);
-};
+} __do_const;
 
 struct snd_soc_dai_link_component {
        const char *name;
index 672150b6aaf52bc24c640d16f1f7c841e0be655d..9d4bec40c791fa0446708c966d13075c871d7a67 100644 (file)
@@ -767,7 +767,7 @@ struct se_device {
        atomic_long_t           write_bytes;
        /* Active commands on this virtual SE device */
        atomic_t                simple_cmds;
-       atomic_t                dev_ordered_id;
+       atomic_unchecked_t      dev_ordered_id;
        atomic_t                dev_ordered_sync;
        atomic_t                dev_qf_count;
        int                     export_count;
index 3608bebd3d9c5e58a0349c6240a8670bd3d8bbfb..df39d8a56694da176c845c4f3c1a2916bf6a4702 100644 (file)
@@ -36,7 +36,7 @@ struct softirq_action;
  */
 TRACE_EVENT(irq_handler_entry,
 
-       TP_PROTO(int irq, struct irqaction *action),
+       TP_PROTO(int irq, const struct irqaction *action),
 
        TP_ARGS(irq, action),
 
@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
  */
 TRACE_EVENT(irq_handler_exit,
 
-       TP_PROTO(int irq, struct irqaction *action, int ret),
+       TP_PROTO(int irq, const struct irqaction *action, int ret),
 
        TP_ARGS(irq, action, ret),
 
index 7caf44c7fa51bcdec2b7db2efb7439beede648dc..23c6f274a4862c06076829edc2efa10379f7ad87 100644 (file)
@@ -39,6 +39,14 @@ enum machine_type {
   M_MIPS2 = 152                /* MIPS R6000/R4000 binary */
 };
 
+/* Constants for the N_FLAGS field */
+#define F_PAX_PAGEEXEC 1       /* Paging based non-executable pages */
+#define F_PAX_EMUTRAMP 2       /* Emulate trampolines */
+#define F_PAX_MPROTECT 4       /* Restrict mprotect() */
+#define F_PAX_RANDMMAP 8       /* Randomize mmap() base */
+/*#define F_PAX_RANDEXEC       16*/    /* Randomize ET_EXEC base */
+#define F_PAX_SEGMEXEC 32      /* Segmentation based non-executable pages */
+
 #if !defined (N_MAGIC)
 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
 #endif
index 22b6ad31c706dae59544286faea5808d7b303342..aeba37ee62e873ed7ee1ec42023c49e87d6836f1 100644 (file)
@@ -5,6 +5,7 @@
  * Bcache on disk data structures
  */
 
+#include <linux/compiler.h>
 #include <asm/types.h>
 
 #define BITMASK(name, type, field, offset, size)               \
@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v)                       \
 /* Btree keys - all units are in sectors */
 
 struct bkey {
-       __u64   high;
-       __u64   low;
+       __u64   high __intentional_overflow(-1);
+       __u64   low __intentional_overflow(-1);
        __u64   ptr[];
 };
 
index d876736a0017de05cf0ed1a8a0bb05c355f4f0ca..ccce5c0dd9f7ff78ad4dc3f34f4545148da01661 100644 (file)
 
 static inline __le64 __cpu_to_le64p(const __u64 *p)
 {
-       return (__force __le64)*p;
+       return (__force const __le64)*p;
 }
-static inline __u64 __le64_to_cpup(const __le64 *p)
+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
 {
-       return (__force __u64)*p;
+       return (__force const __u64)*p;
 }
 static inline __le32 __cpu_to_le32p(const __u32 *p)
 {
-       return (__force __le32)*p;
+       return (__force const __le32)*p;
 }
 static inline __u32 __le32_to_cpup(const __le32 *p)
 {
-       return (__force __u32)*p;
+       return (__force const __u32)*p;
 }
 static inline __le16 __cpu_to_le16p(const __u16 *p)
 {
-       return (__force __le16)*p;
+       return (__force const __le16)*p;
 }
 static inline __u16 __le16_to_cpup(const __le16 *p)
 {
-       return (__force __u16)*p;
+       return (__force const __u16)*p;
 }
 static inline __be64 __cpu_to_be64p(const __u64 *p)
 {
-       return (__force __be64)__swab64p(p);
+       return (__force const __be64)__swab64p(p);
 }
 static inline __u64 __be64_to_cpup(const __be64 *p)
 {
-       return __swab64p((__u64 *)p);
+       return __swab64p((const __u64 *)p);
 }
 static inline __be32 __cpu_to_be32p(const __u32 *p)
 {
-       return (__force __be32)__swab32p(p);
+       return (__force const __be32)__swab32p(p);
 }
-static inline __u32 __be32_to_cpup(const __be32 *p)
+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
 {
-       return __swab32p((__u32 *)p);
+       return __swab32p((const __u32 *)p);
 }
 static inline __be16 __cpu_to_be16p(const __u16 *p)
 {
-       return (__force __be16)__swab16p(p);
+       return (__force const __be16)__swab16p(p);
 }
 static inline __u16 __be16_to_cpup(const __be16 *p)
 {
-       return __swab16p((__u16 *)p);
+       return __swab16p((const __u16 *)p);
 }
 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
 #define __le64_to_cpus(x) do { (void)(x); } while (0)
index 71e1d0ed92f73f40c1393e1d38d2588df97f5696..6cc9caff3da9c42417ec677fa5c2db44e6fdc4cd 100644 (file)
@@ -37,6 +37,17 @@ typedef __s64        Elf64_Sxword;
 #define PT_GNU_EH_FRAME                0x6474e550
 
 #define PT_GNU_STACK   (PT_LOOS + 0x474e551)
+#define PT_GNU_RELRO   (PT_LOOS + 0x474e552)
+
+#define PT_PAX_FLAGS   (PT_LOOS + 0x5041580)
+
+/* Constants for the e_flags field */
+#define EF_PAX_PAGEEXEC                1       /* Paging based non-executable pages */
+#define EF_PAX_EMUTRAMP                2       /* Emulate trampolines */
+#define EF_PAX_MPROTECT                4       /* Restrict mprotect() */
+#define EF_PAX_RANDMMAP                8       /* Randomize mmap() base */
+/*#define EF_PAX_RANDEXEC              16*/    /* Randomize ET_EXEC base */
+#define EF_PAX_SEGMEXEC                32      /* Segmentation based non-executable pages */
 
 /*
  * Extended Numbering
@@ -94,6 +105,8 @@ typedef __s64        Elf64_Sxword;
 #define DT_DEBUG       21
 #define DT_TEXTREL     22
 #define DT_JMPREL      23
+#define DT_FLAGS       30
+  #define DF_TEXTREL  0x00000004
 #define DT_ENCODING    32
 #define OLD_DT_LOOS    0x60000000
 #define DT_LOOS                0x6000000d
@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
 #define PF_W           0x2
 #define PF_X           0x1
 
+#define PF_PAGEEXEC    (1U << 4)       /* Enable  PAGEEXEC */
+#define PF_NOPAGEEXEC  (1U << 5)       /* Disable PAGEEXEC */
+#define PF_SEGMEXEC    (1U << 6)       /* Enable  SEGMEXEC */
+#define PF_NOSEGMEXEC  (1U << 7)       /* Disable SEGMEXEC */
+#define PF_MPROTECT    (1U << 8)       /* Enable  MPROTECT */
+#define PF_NOMPROTECT  (1U << 9)       /* Disable MPROTECT */
+/*#define PF_RANDEXEC  (1U << 10)*/    /* Enable  RANDEXEC */
+/*#define PF_NORANDEXEC        (1U << 11)*/    /* Disable RANDEXEC */
+#define PF_EMUTRAMP    (1U << 12)      /* Enable  EMUTRAMP */
+#define PF_NOEMUTRAMP  (1U << 13)      /* Disable EMUTRAMP */
+#define PF_RANDMMAP    (1U << 14)      /* Enable  RANDMMAP */
+#define PF_NORANDMMAP  (1U << 15)      /* Disable RANDMMAP */
+
 typedef struct elf32_phdr{
   Elf32_Word   p_type;
   Elf32_Off    p_offset;
@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
 #define        EI_OSABI        7
 #define        EI_PAD          8
 
+#define        EI_PAX          14
+
 #define        ELFMAG0         0x7f            /* EI_MAG */
 #define        ELFMAG1         'E'
 #define        ELFMAG2         'L'
index aa169c4339d20de36472c84f6ba3a4732b2276e6..6a2771dbb4aea4a9b99ddcb901fedcc6bb84b159 100644 (file)
@@ -30,6 +30,7 @@ enum {
 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC  | \
                            ADDR_NO_RANDOMIZE  | \
                            ADDR_COMPAT_LAYOUT | \
+                           ADDR_LIMIT_3GB     | \
                            MMAP_PAGE_ZERO)
 
 /*
index 7530e7447620ef8e4a1fee697c016e84da52cf0f..e71482860ce88a99277776ba8b165fb2e3a076af 100644 (file)
@@ -43,7 +43,8 @@ struct screen_info {
        __u16 pages;            /* 0x32 */
        __u16 vesa_attributes;  /* 0x34 */
        __u32 capabilities;     /* 0x36 */
-       __u8  _reserved[6];     /* 0x3a */
+       __u16 vesapm_size;      /* 0x3a */
+       __u8  _reserved[4];     /* 0x3c */
 } __attribute__((packed));
 
 #define VIDEO_TYPE_MDA         0x10    /* Monochrome Text Display      */
index 0e011eb91b5d7523c1774c1e786dab9fe0809720..82681b1541c301f8f00eafa7b934f745b75cb6d1 100644 (file)
@@ -43,7 +43,7 @@
  * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
  */
 
-static inline __attribute_const__ __u16 __fswab16(__u16 val)
+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
 {
 #ifdef __HAVE_BUILTIN_BSWAP16__
        return __builtin_bswap16(val);
@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
 #endif
 }
 
-static inline __attribute_const__ __u32 __fswab32(__u32 val)
+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
 {
 #ifdef __HAVE_BUILTIN_BSWAP32__
        return __builtin_bswap32(val);
@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
 #endif
 }
 
-static inline __attribute_const__ __u64 __fswab64(__u64 val)
+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
 {
 #ifdef __HAVE_BUILTIN_BSWAP64__
        return __builtin_bswap64(val);
index 1590c49cae572f66a7172c9985b2e4d5e493a8db..5eab4629a524afbd72390341a29bdc7292159d16 100644 (file)
@@ -73,5 +73,9 @@
 #define XATTR_POSIX_ACL_DEFAULT  "posix_acl_default"
 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
 
+/* User namespace */
+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
+#define XATTR_PAX_FLAGS_SUFFIX "flags"
+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
 
 #endif /* _UAPI_LINUX_XATTR_H */
index f9466fa54ba4bd626fe618aef5b95f3b30dfde9b..f4e2b818f2dbd789a2a0784b3ba15dd6182932bc 100644 (file)
@@ -53,10 +53,10 @@ struct dlfb_data {
        u32 pseudo_palette[256];
        int blank_mode; /*one of FB_BLANK_ */
        /* blit-only rendering path metrics, exposed through sysfs */
-       atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
-       atomic_t bytes_identical; /* saved effort with backbuffer comparison */
-       atomic_t bytes_sent; /* to usb, after compression including overhead */
-       atomic_t cpu_kcycles_used; /* transpired during pixel processing */
+       atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
+       atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
+       atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
+       atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
 };
 
 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
index 30f53625415ce5bbd74628d86eb2634e7e3269c0..8ed8ac9828443e90e0bcd73667c9a188313e59f0 100644 (file)
@@ -122,6 +122,7 @@ struct uvesafb_par {
        u8 ypan;                        /* 0 - nothing, 1 - ypan, 2 - ywrap */
        u8 pmi_setpal;                  /* PMI for palette changes */
        u16 *pmi_base;                  /* protected mode interface location */
+       u8 *pmi_code;                   /* protected mode code location */
        void *pmi_start;
        void *pmi_pal;
        u8 *vbe_state_orig;             /*
index 9afb971497f4c9972b0dcf14071edf9ef81320be..27d6fcacc33e84404ea0fce2115711b3a8f74d76 100644 (file)
@@ -1129,6 +1129,7 @@ endif # CGROUPS
 
 config CHECKPOINT_RESTORE
        bool "Checkpoint/restore support" if EXPERT
+       depends on !GRKERNSEC
        default n
        help
          Enables additional kernel features in a sake of checkpoint/restore.
@@ -1654,7 +1655,7 @@ config SLUB_DEBUG
 
 config COMPAT_BRK
        bool "Disable heap randomization"
-       default y
+       default n
        help
          Randomizing heap placement makes heap exploits harder, but it
          also breaks ancient binaries (including anything libc5 based).
@@ -1985,7 +1986,7 @@ config INIT_ALL_POSSIBLE
 config STOP_MACHINE
        bool
        default y
-       depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
+       depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
        help
          Need stop_machine() primitive.
 
index 7bc47ee31c369d442676edba32233fbb2ddbb462..6da2dc794f861c4ffa9c36f617429ea0ec28002f 100644 (file)
@@ -2,6 +2,9 @@
 # Makefile for the linux kernel.
 #
 
+ccflags-y := $(GCC_PLUGINS_CFLAGS)
+asflags-y := $(GCC_PLUGINS_AFLAGS)
+
 obj-y                          := main.o version.o mounts.o
 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
 obj-y                          += noinitramfs.o
index eb410083e8e075f9ca1829d0db1bf3cb70d17139..f5dbbf9393806c01f36df9cf703aca4423f371df 100644 (file)
@@ -360,11 +360,11 @@ static void __init get_fs_names(char *page)
 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
 {
        struct super_block *s;
-       int err = sys_mount(name, "/root", fs, flags, data);
+       int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
        if (err)
                return err;
 
-       sys_chdir("/root");
+       sys_chdir((const char __force_user *)"/root");
        s = current->fs->pwd.dentry->d_sb;
        ROOT_DEV = s->s_dev;
        printk(KERN_INFO
@@ -487,18 +487,18 @@ void __init change_floppy(char *fmt, ...)
        va_start(args, fmt);
        vsprintf(buf, fmt, args);
        va_end(args);
-       fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
+       fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
        if (fd >= 0) {
                sys_ioctl(fd, FDEJECT, 0);
                sys_close(fd);
        }
        printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
-       fd = sys_open("/dev/console", O_RDWR, 0);
+       fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
        if (fd >= 0) {
                sys_ioctl(fd, TCGETS, (long)&termios);
                termios.c_lflag &= ~ICANON;
                sys_ioctl(fd, TCSETSF, (long)&termios);
-               sys_read(fd, &c, 1);
+               sys_read(fd, (char __user *)&c, 1);
                termios.c_lflag |= ICANON;
                sys_ioctl(fd, TCSETSF, (long)&termios);
                sys_close(fd);
@@ -592,8 +592,8 @@ void __init prepare_namespace(void)
        mount_root();
 out:
        devtmpfs_mount("dev");
-       sys_mount(".", "/", NULL, MS_MOVE, NULL);
-       sys_chroot(".");
+       sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
+       sys_chroot((const char __force_user *)".");
 }
 
 static bool is_tmpfs;
index f5b978a9bb92892a5e876ae3ce1338ad8a896e04..69dbfe8412d9b8be0028e42bcb0c8a3f78724ec0 100644 (file)
@@ -15,15 +15,15 @@ extern int root_mountflags;
 
 static inline int create_dev(char *name, dev_t dev)
 {
-       sys_unlink(name);
-       return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
+       sys_unlink((char __force_user *)name);
+       return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
 }
 
 #if BITS_PER_LONG == 32
 static inline u32 bstat(char *name)
 {
        struct stat64 stat;
-       if (sys_stat64(name, &stat) != 0)
+       if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
                return 0;
        if (!S_ISBLK(stat.st_mode))
                return 0;
@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
 static inline u32 bstat(char *name)
 {
        struct stat stat;
-       if (sys_newstat(name, &stat) != 0)
+       if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
                return 0;
        if (!S_ISBLK(stat.st_mode))
                return 0;
index 3e0878e8a80d65548ffac907a3285f9deea8d5a3..8a9d7a0a29eb5eee801481df6209d6a1a995a190 100644 (file)
@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
 {
        sys_unshare(CLONE_FS | CLONE_FILES);
        /* stdin/stdout/stderr for /linuxrc */
-       sys_open("/dev/console", O_RDWR, 0);
+       sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
        sys_dup(0);
        sys_dup(0);
        /* move initrd over / and chdir/chroot in initrd root */
-       sys_chdir("/root");
-       sys_mount(".", "/", NULL, MS_MOVE, NULL);
-       sys_chroot(".");
+       sys_chdir((const char __force_user *)"/root");
+       sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
+       sys_chroot((const char __force_user *)".");
        sys_setsid();
        return 0;
 }
@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
        create_dev("/dev/root.old", Root_RAM0);
        /* mount initrd on rootfs' /root */
        mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
-       sys_mkdir("/old", 0700);
-       sys_chdir("/old");
+       sys_mkdir((const char __force_user *)"/old", 0700);
+       sys_chdir((const char __force_user *)"/old");
 
        /* try loading default modules from initrd */
        load_default_modules();
@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
        current->flags &= ~PF_FREEZER_SKIP;
 
        /* move initrd to rootfs' /old */
-       sys_mount("..", ".", NULL, MS_MOVE, NULL);
+       sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
        /* switch root and cwd back to / of rootfs */
-       sys_chroot("..");
+       sys_chroot((const char __force_user *)"..");
 
        if (new_decode_dev(real_root_dev) == Root_RAM0) {
-               sys_chdir("/old");
+               sys_chdir((const char __force_user *)"/old");
                return;
        }
 
-       sys_chdir("/");
+       sys_chdir((const char __force_user *)"/");
        ROOT_DEV = new_decode_dev(real_root_dev);
        mount_root();
 
        printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
-       error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
+       error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
        if (!error)
                printk("okay\n");
        else {
-               int fd = sys_open("/dev/root.old", O_RDWR, 0);
+               int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
                if (error == -ENOENT)
                        printk("/initrd does not exist. Ignored.\n");
                else
                        printk("failed\n");
                printk(KERN_NOTICE "Unmounting old root\n");
-               sys_umount("/old", MNT_DETACH);
+               sys_umount((char __force_user *)"/old", MNT_DETACH);
                printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
                if (fd < 0) {
                        error = fd;
@@ -127,11 +127,11 @@ int __init initrd_load(void)
                 * mounted in the normal path.
                 */
                if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
-                       sys_unlink("/initrd.image");
+                       sys_unlink((const char __force_user *)"/initrd.image");
                        handle_initrd();
                        return 1;
                }
        }
-       sys_unlink("/initrd.image");
+       sys_unlink((const char __force_user *)"/initrd.image");
        return 0;
 }
index 8cb6db54285ba64f81af9ba2b388a7c216ceec61..d729f501b90e586b545a2b8710f156552a1a60e9 100644 (file)
@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
                        partitioned ? "_d" : "", minor,
                        md_setup_args[ent].device_names);
 
-               fd = sys_open(name, 0, 0);
+               fd = sys_open((char __force_user *)name, 0, 0);
                if (fd < 0) {
                        printk(KERN_ERR "md: open failed - cannot start "
                                        "array %s\n", name);
@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
                         * array without it
                         */
                        sys_close(fd);
-                       fd = sys_open(name, 0, 0);
+                       fd = sys_open((char __force_user *)name, 0, 0);
                        sys_ioctl(fd, BLKRRPART, 0);
                }
                sys_close(fd);
@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
 
        wait_for_device_probe();
 
-       fd = sys_open("/dev/md0", 0, 0);
+       fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
        if (fd >= 0) {
                sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
                sys_close(fd);
index ba0a7f362d9e3ffbf00a45c7baa5c2f0a2186b8f..2bcf1d51f31bb536cedd5433684438170b9f2400 100644 (file)
@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
  * Initial thread structure. Alignment of this is handled by a special
  * linker map entry.
  */
+#ifdef CONFIG_X86
+union thread_union init_thread_union __init_task_data;
+#else
 union thread_union init_thread_union __init_task_data =
        { INIT_THREAD_INFO(init_task) };
+#endif
index ad1bd7787bbb0c3298e2f9790b0edd5322227639..dca2c1b1a738b82cf70afe017483b875f7c992c8 100644 (file)
@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
 
        /* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
        while (count) {
-               ssize_t rv = sys_write(fd, p, count);
+               ssize_t rv = sys_write(fd, (char __force_user *)p, count);
 
                if (rv < 0) {
                        if (rv == -EINTR || rv == -EAGAIN)
@@ -107,7 +107,7 @@ static void __init free_hash(void)
        }
 }
 
-static long __init do_utime(char *filename, time_t mtime)
+static long __init do_utime(char __force_user *filename, time_t mtime)
 {
        struct timespec t[2];
 
@@ -142,7 +142,7 @@ static void __init dir_utime(void)
        struct dir_entry *de, *tmp;
        list_for_each_entry_safe(de, tmp, &dir_list, list) {
                list_del(&de->list);
-               do_utime(de->name, de->mtime);
+               do_utime((char __force_user *)de->name, de->mtime);
                kfree(de->name);
                kfree(de);
        }
@@ -304,7 +304,7 @@ static int __init maybe_link(void)
        if (nlink >= 2) {
                char *old = find_link(major, minor, ino, mode, collected);
                if (old)
-                       return (sys_link(old, collected) < 0) ? -1 : 1;
+                       return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
        }
        return 0;
 }
@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t fmode)
 {
        struct stat st;
 
-       if (!sys_newlstat(path, &st) && (st.st_mode ^ fmode) & S_IFMT) {
+       if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode ^ fmode) & S_IFMT) {
                if (S_ISDIR(st.st_mode))
-                       sys_rmdir(path);
+                       sys_rmdir((char __force_user *)path);
                else
-                       sys_unlink(path);
+                       sys_unlink((char __force_user *)path);
        }
 }
 
@@ -338,7 +338,7 @@ static int __init do_name(void)
                        int openflags = O_WRONLY|O_CREAT;
                        if (ml != 1)
                                openflags |= O_TRUNC;
-                       wfd = sys_open(collected, openflags, mode);
+                       wfd = sys_open((char __force_user *)collected, openflags, mode);
 
                        if (wfd >= 0) {
                                sys_fchown(wfd, uid, gid);
@@ -350,17 +350,17 @@ static int __init do_name(void)
                        }
                }
        } else if (S_ISDIR(mode)) {
-               sys_mkdir(collected, mode);
-               sys_chown(collected, uid, gid);
-               sys_chmod(collected, mode);
+               sys_mkdir((char __force_user *)collected, mode);
+               sys_chown((char __force_user *)collected, uid, gid);
+               sys_chmod((char __force_user *)collected, mode);
                dir_add(collected, mtime);
        } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
                   S_ISFIFO(mode) || S_ISSOCK(mode)) {
                if (maybe_link() == 0) {
-                       sys_mknod(collected, mode, rdev);
-                       sys_chown(collected, uid, gid);
-                       sys_chmod(collected, mode);
-                       do_utime(collected, mtime);
+                       sys_mknod((char __force_user *)collected, mode, rdev);
+                       sys_chown((char __force_user *)collected, uid, gid);
+                       sys_chmod((char __force_user *)collected, mode);
+                       do_utime((char __force_user *)collected, mtime);
                }
        }
        return 0;
@@ -372,7 +372,7 @@ static int __init do_copy(void)
                if (xwrite(wfd, victim, body_len) != body_len)
                        error("write error");
                sys_close(wfd);
-               do_utime(vcollected, mtime);
+               do_utime((char __force_user *)vcollected, mtime);
                kfree(vcollected);
                eat(body_len);
                state = SkipIt;
@@ -390,9 +390,9 @@ static int __init do_symlink(void)
 {
        collected[N_ALIGN(name_len) + body_len] = '\0';
        clean_path(collected, 0);
-       sys_symlink(collected + N_ALIGN(name_len), collected);
-       sys_lchown(collected, uid, gid);
-       do_utime(collected, mtime);
+       sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
+       sys_lchown((char __force_user *)collected, uid, gid);
+       do_utime((char __force_user *)collected, mtime);
        state = SkipIt;
        next_state = Reset;
        return 0;
index 61b993767db53e8401dc1223fa6e028f7edafd74..85893612dca9789e0c30112f7ff2ea7012f01dec 100644 (file)
@@ -100,6 +100,8 @@ extern void radix_tree_init(void);
 static inline void mark_rodata_ro(void) { }
 #endif
 
+extern void grsecurity_init(void);
+
 /*
  * Debug helper: via this flag we know that we are in 'early bootup code'
  * where only the boot processor is running with IRQ disabled.  This means
@@ -161,6 +163,75 @@ static int __init set_reset_devices(char *str)
 
 __setup("reset_devices", set_reset_devices);
 
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
+static int __init setup_grsec_proc_gid(char *str)
+{
+       grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
+       return 1;
+}
+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
+#endif
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+unsigned long pax_user_shadow_base __read_only;
+EXPORT_SYMBOL(pax_user_shadow_base);
+extern char pax_enter_kernel_user[];
+extern char pax_exit_kernel_user[];
+#endif
+
+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
+static int __init setup_pax_nouderef(char *str)
+{
+#ifdef CONFIG_X86_32
+       unsigned int cpu;
+       struct desc_struct *gdt;
+
+       for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+               gdt = get_cpu_gdt_table(cpu);
+               gdt[GDT_ENTRY_KERNEL_DS].type = 3;
+               gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
+               gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
+               gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
+       }
+       loadsegment(ds, __KERNEL_DS);
+       loadsegment(es, __KERNEL_DS);
+       loadsegment(ss, __KERNEL_DS);
+#else
+       memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
+       memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
+       clone_pgd_mask = ~(pgdval_t)0UL;
+       pax_user_shadow_base = 0UL;
+       setup_clear_cpu_cap(X86_FEATURE_PCID);
+       setup_clear_cpu_cap(X86_FEATURE_INVPCID);
+#endif
+
+       return 0;
+}
+early_param("pax_nouderef", setup_pax_nouderef);
+
+#ifdef CONFIG_X86_64
+static int __init setup_pax_weakuderef(char *str)
+{
+       if (clone_pgd_mask != ~(pgdval_t)0UL)
+               pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
+       return 1;
+}
+__setup("pax_weakuderef", setup_pax_weakuderef);
+#endif
+#endif
+
+#ifdef CONFIG_PAX_SOFTMODE
+int pax_softmode;
+
+static int __init setup_pax_softmode(char *str)
+{
+       get_option(&str, &pax_softmode);
+       return 1;
+}
+__setup("pax_softmode=", setup_pax_softmode);
+#endif
+
 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
 static const char *panic_later, *panic_param;
@@ -735,7 +806,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
        struct blacklist_entry *entry;
        char *fn_name;
 
-       fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
+       fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
        if (!fn_name)
                return false;
 
@@ -787,7 +858,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
 {
        int count = preempt_count();
        int ret;
-       char msgbuf[64];
+       const char *msg1 = "", *msg2 = "";
 
        if (initcall_blacklisted(fn))
                return -EPERM;
@@ -797,18 +868,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
        else
                ret = fn();
 
-       msgbuf[0] = 0;
-
        if (preempt_count() != count) {
-               sprintf(msgbuf, "preemption imbalance ");
+               msg1 = " preemption imbalance";
                preempt_count_set(count);
        }
        if (irqs_disabled()) {
-               strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
+               msg2 = " disabled interrupts";
                local_irq_enable();
        }
-       WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
+       WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
 
+       add_latent_entropy();
        return ret;
 }
 
@@ -914,8 +984,8 @@ static int run_init_process(const char *init_filename)
 {
        argv_init[0] = init_filename;
        return do_execve(getname_kernel(init_filename),
-               (const char __user *const __user *)argv_init,
-               (const char __user *const __user *)envp_init);
+               (const char __user *const __force_user *)argv_init,
+               (const char __user *const __force_user *)envp_init);
 }
 
 static int try_to_run_init_process(const char *init_filename)
@@ -932,6 +1002,10 @@ static int try_to_run_init_process(const char *init_filename)
        return ret;
 }
 
+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
+extern int gr_init_ran;
+#endif
+
 static noinline void __init kernel_init_freeable(void);
 
 static int __ref kernel_init(void *unused)
@@ -956,6 +1030,11 @@ static int __ref kernel_init(void *unused)
                       ramdisk_execute_command, ret);
        }
 
+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
+       /* if no initrd was used, be extra sure we enforce chroot restrictions */
+       gr_init_ran = 1;
+#endif
+
        /*
         * We try each of these until one succeeds.
         *
@@ -1016,7 +1095,7 @@ static noinline void __init kernel_init_freeable(void)
        do_basic_setup();
 
        /* Open the /dev/console on the rootfs, this should never fail */
-       if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
+       if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
                pr_err("Warning: unable to open an initial console.\n");
 
        (void) sys_dup(0);
@@ -1029,11 +1108,13 @@ static noinline void __init kernel_init_freeable(void)
        if (!ramdisk_execute_command)
                ramdisk_execute_command = "/init";
 
-       if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
+       if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
                ramdisk_execute_command = NULL;
                prepare_namespace();
        }
 
+       grsecurity_init();
+
        /*
         * Ok, we have completed the initial bootup, and
         * we're essentially up and running. Get rid of the
index 9b3c85f8a53825330b99015e34af9d2117393f06..1c4d897847eb122e84e0bd91d561ffa09fb26d04 100644 (file)
@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
                               COMPAT_SHMLBA);
                if (err < 0)
                        return err;
-               return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
+               return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
        }
        case SHMDT:
                return sys_shmdt(compat_ptr(ptr));
index 8ad93c29f511033dc4a6df1b2a26398ea78c9104..efd80f81e198f9e77f937379c831d3ca6f61307e 100644 (file)
@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
 static int proc_ipc_dointvec(struct ctl_table *table, int write,
        void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       struct ctl_table ipc_table;
+       ctl_table_no_const ipc_table;
 
        memcpy(&ipc_table, table, sizeof(ipc_table));
        ipc_table.data = get_ipc(table);
@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
        void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       struct ctl_table ipc_table;
+       ctl_table_no_const ipc_table;
 
        memcpy(&ipc_table, table, sizeof(ipc_table));
        ipc_table.data = get_ipc(table);
@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
        void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       struct ctl_table ipc_table;
+       ctl_table_no_const ipc_table;
        memcpy(&ipc_table, table, sizeof(ipc_table));
        ipc_table.data = get_ipc(table);
 
@@ -76,7 +76,7 @@ static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
 static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
        void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       struct ctl_table ipc_table;
+       ctl_table_no_const ipc_table;
        int dummy = 0;
 
        memcpy(&ipc_table, table, sizeof(ipc_table));
index 68d4e953762cc8d51b4f7ab4a4c8caa199b3a29a..1477dedfe9d7519c9c801b3343954a054b89d05c 100644 (file)
@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
 static int proc_mq_dointvec(struct ctl_table *table, int write,
                            void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       struct ctl_table mq_table;
+       ctl_table_no_const mq_table;
        memcpy(&mq_table, table, sizeof(mq_table));
        mq_table.data = get_mq(table);
 
@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
        void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       struct ctl_table mq_table;
+       ctl_table_no_const mq_table;
        memcpy(&mq_table, table, sizeof(mq_table));
        mq_table.data = get_mq(table);
 
index 7635a1cf99f3da986b0cfa57774c7ad38ef28fe3..7432cb6078673c3ca6ffb720eddb6de6b2882fa3 100644 (file)
@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
                mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
                                          info->attr.mq_msgsize);
 
+               gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
                spin_lock(&mq_lock);
                if (u->mq_bytes + mq_bytes < u->mq_bytes ||
                    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
index 19633b4a2350edf689af1a1cd2d8fcc1b0d7b6c2..d4549044c9f8ca084eff0351398277863478639a 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
 #endif
 
+#ifdef CONFIG_GRKERNSEC
+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
+                          const u64 shm_createtime, const kuid_t cuid,
+                          const int shmid);
+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
+                          const u64 shm_createtime);
+#endif
+
 void shm_init_ns(struct ipc_namespace *ns)
 {
        ns->shm_ctlmax = SHMMAX;
@@ -560,6 +568,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
        shp->shm_lprid = 0;
        shp->shm_atim = shp->shm_dtim = 0;
        shp->shm_ctim = get_seconds();
+#ifdef CONFIG_GRKERNSEC
+       shp->shm_createtime = ktime_get_ns();
+#endif
        shp->shm_segsz = size;
        shp->shm_nattch = 0;
        shp->shm_file = file;
@@ -1096,6 +1107,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
                f_mode = FMODE_READ | FMODE_WRITE;
        }
        if (shmflg & SHM_EXEC) {
+
+#ifdef CONFIG_PAX_MPROTECT
+               if (current->mm->pax_flags & MF_PAX_MPROTECT)
+                       goto out;
+#endif
+
                prot |= PROT_EXEC;
                acc_mode |= S_IXUGO;
        }
@@ -1120,6 +1137,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
        if (err)
                goto out_unlock;
 
+#ifdef CONFIG_GRKERNSEC
+       if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
+                            shp->shm_perm.cuid, shmid) ||
+           !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
+               err = -EACCES;
+               goto out_unlock;
+       }
+#endif
+
        ipc_lock_object(&shp->shm_perm);
 
        /* check if shm_destroy() is tearing down shp */
@@ -1132,6 +1158,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
        path = shp->shm_file->f_path;
        path_get(&path);
        shp->shm_nattch++;
+#ifdef CONFIG_GRKERNSEC
+       shp->shm_lapid = current->pid;
+#endif
        size = i_size_read(path.dentry->d_inode);
        ipc_unlock_object(&shp->shm_perm);
        rcu_read_unlock();
index 106bed0378ab3575d6aadac8196b32971b5a2b19..f8514292dd526223fa5a0dc8581eb47af2a79e6d 100644 (file)
@@ -71,6 +71,8 @@ struct ipc_proc_iface {
        int (*show)(struct seq_file *, void *);
 };
 
+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
+
 /**
  * ipc_init - initialise ipc subsystem
  *
@@ -497,6 +499,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
                granted_mode >>= 6;
        else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
                granted_mode >>= 3;
+
+       if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
+               return -1;
+
        /* is there some bit set in requested_mode but not in granted_mode? */
        if ((requested_mode & ~granted_mode & 0007) &&
            !ns_capable(ns->user_ns, CAP_IPC_OWNER))
index 72ab759a0b43a6400750cefa71650ed64e7a8222..757debac435bc1d2eee47e74f9dd94e0ccd55bde 100644 (file)
@@ -122,7 +122,7 @@ u32         audit_sig_sid = 0;
    3) suppressed due to audit_rate_limit
    4) suppressed due to audit_backlog_limit
 */
-static atomic_t    audit_lost = ATOMIC_INIT(0);
+static atomic_unchecked_t    audit_lost = ATOMIC_INIT(0);
 
 /* The netlink socket. */
 static struct sock *audit_sock;
@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
        unsigned long           now;
        int                     print;
 
-       atomic_inc(&audit_lost);
+       atomic_inc_unchecked(&audit_lost);
 
        print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
 
@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
        if (print) {
                if (printk_ratelimit())
                        pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
-                               atomic_read(&audit_lost),
+                               atomic_read_unchecked(&audit_lost),
                                audit_rate_limit,
                                audit_backlog_limit);
                audit_panic(message);
@@ -831,7 +831,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                s.pid                   = audit_pid;
                s.rate_limit            = audit_rate_limit;
                s.backlog_limit         = audit_backlog_limit;
-               s.lost                  = atomic_read(&audit_lost);
+               s.lost                  = atomic_read_unchecked(&audit_lost);
                s.backlog               = skb_queue_len(&audit_skb_queue);
                s.feature_bitmap        = AUDIT_FEATURE_BITMAP_ALL;
                s.backlog_wait_time     = audit_backlog_wait_time;
index 072566dd0caf7739fc42b7d59c6791c29dc89343..1190489505c7aa2274d251b6774ffcc4bbd06ba5 100644 (file)
@@ -2056,7 +2056,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
 }
 
 /* global counter which is incremented every time something logs in */
-static atomic_t session_id = ATOMIC_INIT(0);
+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
 
 static int audit_set_loginuid_perm(kuid_t loginuid)
 {
@@ -2123,7 +2123,7 @@ int audit_set_loginuid(kuid_t loginuid)
 
        /* are we setting or clearing? */
        if (uid_valid(loginuid))
-               sessionid = (unsigned int)atomic_inc_return(&session_id);
+               sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
 
        task->sessionid = sessionid;
        task->loginuid = loginuid;
index a64e7a207d2b5cd123b65f7143c6d659c0aed726..2e69448b2a63b6ce7cb9cf81076e447d68627eb2 100644 (file)
@@ -143,14 +143,17 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
         * random section of illegal instructions.
         */
        size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
-       hdr = module_alloc(size);
+       hdr = module_alloc_exec(size);
        if (hdr == NULL)
                return NULL;
 
        /* Fill space with illegal/arch-dep instructions. */
        bpf_fill_ill_insns(hdr, size);
 
+       pax_open_kernel();
        hdr->pages = size / PAGE_SIZE;
+       pax_close_kernel();
+
        hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
                     PAGE_SIZE - sizeof(*hdr));
        start = (prandom_u32() % hole) & ~(alignment - 1);
@@ -163,7 +166,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 
 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 {
-       module_memfree(hdr);
+       module_memfree_exec(hdr);
 }
 #endif /* CONFIG_BPF_JIT */
 
index 536edc2be3072e91ab132555fc4f9bc3ce656604..d28c85d7357a7fd42eb411fc10f775230940a180 100644 (file)
@@ -548,11 +548,15 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
        int err;
 
        /* the syscall is limited to root temporarily. This restriction will be
-        * lifted when security audit is clean. Note that eBPF+tracing must have
-        * this restriction, since it may pass kernel data to user space
+        * lifted by upstream when a half-assed security audit is clean. Note
+        * that eBPF+tracing must have this restriction, since it may pass
+        * kernel data to user space
         */
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
+#ifdef CONFIG_GRKERNSEC
+       return -EPERM;
+#endif
 
        if (!access_ok(VERIFY_READ, uattr, 1))
                return -EFAULT;
index 989f5bfc57dcfde3046b2756ddd2aac59da1ec73..d317ca0a87daaaacf0cb2ed394eccffe7bc1c1e8 100644 (file)
@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
                 * before modification is attempted and the application
                 * fails.
                 */
+               if (tocopy > ARRAY_SIZE(kdata))
+                       return -EFAULT;
+
                if (copy_to_user(dataptr, kdata, tocopy
                                 * sizeof(struct __user_cap_data_struct))) {
                        return -EFAULT;
@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
        int ret;
 
        rcu_read_lock();
-       ret = security_capable(__task_cred(t), ns, cap);
+       ret = security_capable(__task_cred(t), ns, cap) == 0 &&
+               gr_task_is_capable(t, __task_cred(t), cap);
        rcu_read_unlock();
 
-       return (ret == 0);
+       return ret;
 }
 
 /**
@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
        int ret;
 
        rcu_read_lock();
-       ret = security_capable_noaudit(__task_cred(t), ns, cap);
+       ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
        rcu_read_unlock();
 
-       return (ret == 0);
+       return ret;
 }
 
 /**
@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
                BUG();
        }
 
-       if (security_capable(current_cred(), ns, cap) == 0) {
+       if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
                current->flags |= PF_SUPERPRIV;
                return true;
        }
@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
 }
 EXPORT_SYMBOL(ns_capable);
 
+bool ns_capable_nolog(struct user_namespace *ns, int cap)
+{
+       if (unlikely(!cap_valid(cap))) {
+               printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
+               BUG();
+       }
+
+       if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
+               current->flags |= PF_SUPERPRIV;
+               return true;
+       }
+       return false;
+}
+EXPORT_SYMBOL(ns_capable_nolog);
+
 /**
  * file_ns_capable - Determine if the file's opener had a capability in effect
  * @file:  The file we want to check
@@ -427,6 +446,12 @@ bool capable(int cap)
 }
 EXPORT_SYMBOL(capable);
 
+bool capable_nolog(int cap)
+{
+       return ns_capable_nolog(&init_user_ns, cap);
+}
+EXPORT_SYMBOL(capable_nolog);
+
 /**
  * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
  * @inode: The inode in question
@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
                kgid_has_mapping(ns, inode->i_gid);
 }
 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
+
+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
+{
+       struct user_namespace *ns = current_user_ns();
+
+       return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
+               kgid_has_mapping(ns, inode->i_gid);
+}
+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
index 04cfe8ace52088a4c5ed092c389d08fd99117b19..adadcc0bff9853eb356ced5c01eadf1644eda731 100644 (file)
@@ -5343,6 +5343,9 @@ static void cgroup_release_agent(struct work_struct *work)
        if (!pathbuf || !agentbuf)
                goto out;
 
+       if (agentbuf[0] == '\0')
+               goto out;
+
        path = cgroup_path(cgrp, pathbuf, PATH_MAX);
        if (!path)
                goto out;
@@ -5528,7 +5531,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
                struct task_struct *task;
                int count = 0;
 
-               seq_printf(seq, "css_set %p\n", cset);
+               seq_printf(seq, "css_set %pK\n", cset);
 
                list_for_each_entry(task, &cset->tasks, cg_list) {
                        if (count++ > MAX_TASKS_SHOWN_PER_CSS)
index ebb3c369d03d594067bda50bdffedd32159026e3..1df606e6c2c083463259cb02ce79132e2b5e4abc 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/linkage.h>
 #include <linux/compat.h>
+#include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/time.h>
 #include <linux/signal.h>
@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
        mm_segment_t oldfs;
        long ret;
 
-       restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
+       restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        ret = hrtimer_nanosleep_restart(restart);
@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        ret = hrtimer_nanosleep(&tu,
-                               rmtp ? (struct timespec __user *)&rmt : NULL,
+                               rmtp ? (struct timespec __force_user *)&rmt : NULL,
                                HRTIMER_MODE_REL, CLOCK_MONOTONIC);
        set_fs(oldfs);
 
@@ -379,7 +380,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
        mm_segment_t old_fs = get_fs();
 
        set_fs(KERNEL_DS);
-       ret = sys_sigpending((old_sigset_t __user *) &s);
+       ret = sys_sigpending((old_sigset_t __force_user *) &s);
        set_fs(old_fs);
        if (ret == 0)
                ret = put_user(s, set);
@@ -469,7 +470,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
        mm_segment_t old_fs = get_fs();
 
        set_fs(KERNEL_DS);
-       ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
+       ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
        set_fs(old_fs);
 
        if (!ret) {
@@ -551,8 +552,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
                set_fs (KERNEL_DS);
                ret = sys_wait4(pid,
                                (stat_addr ?
-                                (unsigned int __user *) &status : NULL),
-                               options, (struct rusage __user *) &r);
+                                (unsigned int __force_user *) &status : NULL),
+                               options, (struct rusage __force_user *) &r);
                set_fs (old_fs);
 
                if (ret > 0) {
@@ -578,8 +579,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
        memset(&info, 0, sizeof(info));
 
        set_fs(KERNEL_DS);
-       ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
-                        uru ? (struct rusage __user *)&ru : NULL);
+       ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
+                        uru ? (struct rusage __force_user *)&ru : NULL);
        set_fs(old_fs);
 
        if ((ret < 0) || (info.si_signo == 0))
@@ -713,8 +714,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_timer_settime(timer_id, flags,
-                               (struct itimerspec __user *) &newts,
-                               (struct itimerspec __user *) &oldts);
+                               (struct itimerspec __force_user *) &newts,
+                               (struct itimerspec __force_user *) &oldts);
        set_fs(oldfs);
        if (!err && old && put_compat_itimerspec(old, &oldts))
                return -EFAULT;
@@ -731,7 +732,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_timer_gettime(timer_id,
-                               (struct itimerspec __user *) &ts);
+                               (struct itimerspec __force_user *) &ts);
        set_fs(oldfs);
        if (!err && put_compat_itimerspec(setting, &ts))
                return -EFAULT;
@@ -750,7 +751,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_clock_settime(which_clock,
-                               (struct timespec __user *) &ts);
+                               (struct timespec __force_user *) &ts);
        set_fs(oldfs);
        return err;
 }
@@ -765,7 +766,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_clock_gettime(which_clock,
-                               (struct timespec __user *) &ts);
+                               (struct timespec __force_user *) &ts);
        set_fs(oldfs);
        if (!err && compat_put_timespec(&ts, tp))
                return -EFAULT;
@@ -785,7 +786,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
 
        oldfs = get_fs();
        set_fs(KERNEL_DS);
-       ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
+       ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
        set_fs(oldfs);
 
        err = compat_put_timex(utp, &txc);
@@ -805,7 +806,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_clock_getres(which_clock,
-                              (struct timespec __user *) &ts);
+                              (struct timespec __force_user *) &ts);
        set_fs(oldfs);
        if (!err && tp && compat_put_timespec(&ts, tp))
                return -EFAULT;
@@ -819,7 +820,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
        struct timespec tu;
        struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
 
-       restart->nanosleep.rmtp = (struct timespec __user *) &tu;
+       restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = clock_nanosleep_restart(restart);
@@ -851,8 +852,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        err = sys_clock_nanosleep(which_clock, flags,
-                                 (struct timespec __user *) &in,
-                                 (struct timespec __user *) &out);
+                                 (struct timespec __force_user *) &in,
+                                 (struct timespec __force_user *) &out);
        set_fs(oldfs);
 
        if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
@@ -1146,7 +1147,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
        mm_segment_t old_fs = get_fs();
 
        set_fs(KERNEL_DS);
-       ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
+       ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
        set_fs(old_fs);
        if (compat_put_timespec(&t, interval))
                return -EFAULT;
index c18b1f1ae515a73cf0fe404d413ddcc4bc8f847d..b9a0132215df12e259621459305b40f8a51a8108 100644 (file)
@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
        struct proc_dir_entry *entry;
 
        /* create the current config file */
+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
+       entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
+                           &ikconfig_file_ops);
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+       entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
+                           &ikconfig_file_ops);
+#endif
+#else
        entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
                            &ikconfig_file_ops);
+#endif
+
        if (!entry)
                return -ENOMEM;
 
index e0573a43c7df62b1b37df319bd74ff377603ccf5..26c0fd3bf45ddc56af52c07f7c5a7794ac21d15d 100644 (file)
@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
        validate_creds(cred);
        alter_cred_subscribers(cred, -1);
        put_cred(cred);
+
+#ifdef CONFIG_GRKERNSEC_SETXID
+       cred = (struct cred *) tsk->delayed_cred;
+       if (cred != NULL) {
+               tsk->delayed_cred = NULL;
+               validate_creds(cred);
+               alter_cred_subscribers(cred, -1);
+               put_cred(cred);
+       }
+#endif
 }
 
 /**
@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
  * Always returns 0 thus allowing this function to be tail-called at the end
  * of, say, sys_setgid().
  */
-int commit_creds(struct cred *new)
+static int __commit_creds(struct cred *new)
 {
        struct task_struct *task = current;
        const struct cred *old = task->real_cred;
@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
 
        get_cred(new); /* we will require a ref for the subj creds too */
 
+       gr_set_role_label(task, new->uid, new->gid);
+
        /* dumpability changes */
        if (!uid_eq(old->euid, new->euid) ||
            !gid_eq(old->egid, new->egid) ||
@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
        put_cred(old);
        return 0;
 }
+#ifdef CONFIG_GRKERNSEC_SETXID
+extern int set_user(struct cred *new);
+
+void gr_delayed_cred_worker(void)
+{
+       const struct cred *new = current->delayed_cred;
+       struct cred *ncred;
+
+       current->delayed_cred = NULL;
+
+       if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
+               // from doing get_cred on it when queueing this
+               put_cred(new);
+               return;
+       } else if (new == NULL)
+               return;
+
+       ncred = prepare_creds();
+       if (!ncred)
+               goto die;
+       // uids
+       ncred->uid = new->uid;
+       ncred->euid = new->euid;
+       ncred->suid = new->suid;
+       ncred->fsuid = new->fsuid;
+       // gids
+       ncred->gid = new->gid;
+       ncred->egid = new->egid;
+       ncred->sgid = new->sgid;
+       ncred->fsgid = new->fsgid;
+       // groups
+       set_groups(ncred, new->group_info);
+       // caps
+       ncred->securebits = new->securebits;
+       ncred->cap_inheritable = new->cap_inheritable;
+       ncred->cap_permitted = new->cap_permitted;
+       ncred->cap_effective = new->cap_effective;
+       ncred->cap_bset = new->cap_bset;
+
+       if (set_user(ncred)) {
+               abort_creds(ncred);
+               goto die;
+       }
+
+       // from doing get_cred on it when queueing this
+       put_cred(new);
+
+       __commit_creds(ncred);
+       return;
+die:
+       // from doing get_cred on it when queueing this
+       put_cred(new);
+       do_group_exit(SIGKILL);
+}
+#endif
+
+int commit_creds(struct cred *new)
+{
+#ifdef CONFIG_GRKERNSEC_SETXID
+       int ret;
+       int schedule_it = 0;
+       struct task_struct *t;
+       unsigned oldsecurebits = current_cred()->securebits;
+
+       /* we won't get called with tasklist_lock held for writing
+          and interrupts disabled as the cred struct in that case is
+          init_cred
+       */
+       if (grsec_enable_setxid && !current_is_single_threaded() &&
+           uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
+           !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
+               schedule_it = 1;
+       }
+       ret = __commit_creds(new);
+       if (schedule_it) {
+               rcu_read_lock();
+               read_lock(&tasklist_lock);
+               for (t = next_thread(current); t != current;
+                    t = next_thread(t)) {
+                       /* we'll check if the thread has uid 0 in
+                        * the delayed worker routine
+                        */
+                       if (task_securebits(t) == oldsecurebits &&
+                           t->delayed_cred == NULL) {
+                               t->delayed_cred = get_cred(new);
+                               set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
+                               set_tsk_need_resched(t);
+                       }
+               }
+               read_unlock(&tasklist_lock);
+               rcu_read_unlock();
+       }
+
+       return ret;
+#else
+       return __commit_creds(new);
+#endif
+}
+
 EXPORT_SYMBOL(commit_creds);
 
 /**
index ac5c0f9c7a20e81fce8d9296d44219498fdcbb66..4b1c6c29d3cde2f45f07896c781ac81acd4c1351 100644 (file)
@@ -127,7 +127,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
  */
 static atomic_t                        masters_in_kgdb;
 static atomic_t                        slaves_in_kgdb;
-static atomic_t                        kgdb_break_tasklet_var;
+static atomic_unchecked_t      kgdb_break_tasklet_var;
 atomic_t                       kgdb_setting_breakpoint;
 
 struct task_struct             *kgdb_usethread;
@@ -137,7 +137,7 @@ int                         kgdb_single_step;
 static pid_t                   kgdb_sstep_pid;
 
 /* to keep track of the CPU which is doing the single stepping*/
-atomic_t                       kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
+atomic_unchecked_t             kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
 
 /*
  * If you are debugging a problem where roundup (the collection of
@@ -552,7 +552,7 @@ return_normal:
         * kernel will only try for the value of sstep_tries before
         * giving up and continuing on.
         */
-       if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
+       if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
            (kgdb_info[cpu].task &&
             kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
                atomic_set(&kgdb_active, -1);
@@ -654,8 +654,8 @@ cpu_master_loop:
        }
 
 kgdb_restore:
-       if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
-               int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
+       if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
+               int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
                if (kgdb_info[sstep_cpu].task)
                        kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
                else
@@ -932,18 +932,18 @@ static void kgdb_unregister_callbacks(void)
 static void kgdb_tasklet_bpt(unsigned long ing)
 {
        kgdb_breakpoint();
-       atomic_set(&kgdb_break_tasklet_var, 0);
+       atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
 }
 
 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
 
 void kgdb_schedule_breakpoint(void)
 {
-       if (atomic_read(&kgdb_break_tasklet_var) ||
+       if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
                atomic_read(&kgdb_active) != -1 ||
                atomic_read(&kgdb_setting_breakpoint))
                return;
-       atomic_inc(&kgdb_break_tasklet_var);
+       atomic_inc_unchecked(&kgdb_break_tasklet_var);
        tasklet_schedule(&kgdb_tasklet_breakpoint);
 }
 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
index 60f6bb817f701939b7d372204604499c431feed6..104bb07660576ad81cfc156a16d0cbbbab42b173 100644 (file)
@@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv)
                        continue;
 
                kdb_printf("%-20s%8u  0x%p ", mod->name,
-                          mod->core_size, (void *)mod);
+                          mod->core_size_rx + mod->core_size_rw, (void *)mod);
 #ifdef CONFIG_MODULE_UNLOAD
                kdb_printf("%4d ", module_refcount(mod));
 #endif
@@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv)
                        kdb_printf(" (Loading)");
                else
                        kdb_printf(" (Live)");
-               kdb_printf(" 0x%p", mod->module_core);
+               kdb_printf(" 0x%p 0x%p", mod->module_core_rx,  mod->module_core_rw);
 
 #ifdef CONFIG_MODULE_UNLOAD
                {
index 19efcf13375a2960e6d8e6994a8aa660d6221253..7c05c93dfcfc8b563a7932807dfccba64d710e77 100644 (file)
@@ -170,8 +170,15 @@ static struct srcu_struct pmus_srcu;
  *   0 - disallow raw tracepoint access for unpriv
  *   1 - disallow cpu events for unpriv
  *   2 - disallow kernel profiling for unpriv
+ *   3 - disallow all unpriv perf event use
  */
-int sysctl_perf_event_paranoid __read_mostly = 1;
+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
+#else
+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
+#endif
 
 /* Minimum for 512 kiB + 1 user control page */
 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
@@ -197,7 +204,7 @@ void update_perf_cpu_limits(void)
 
        tmp *= sysctl_perf_cpu_time_max_percent;
        do_div(tmp, 100);
-       ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
+       ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
 }
 
 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
@@ -303,7 +310,7 @@ void perf_sample_event_took(u64 sample_len_ns)
        }
 }
 
-static atomic64_t perf_event_id;
+static atomic64_unchecked_t perf_event_id;
 
 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
                              enum event_type_t event_type);
@@ -3102,7 +3109,7 @@ static void __perf_event_read(void *info)
 
 static inline u64 perf_event_count(struct perf_event *event)
 {
-       return local64_read(&event->count) + atomic64_read(&event->child_count);
+       return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
 }
 
 static u64 perf_event_read(struct perf_event *event)
@@ -3528,9 +3535,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
        mutex_lock(&event->child_mutex);
        total += perf_event_read(event);
        *enabled += event->total_time_enabled +
-                       atomic64_read(&event->child_total_time_enabled);
+                       atomic64_read_unchecked(&event->child_total_time_enabled);
        *running += event->total_time_running +
-                       atomic64_read(&event->child_total_time_running);
+                       atomic64_read_unchecked(&event->child_total_time_running);
 
        list_for_each_entry(child, &event->child_list, child_list) {
                total += perf_event_read(child);
@@ -3994,10 +4001,10 @@ void perf_event_update_userpage(struct perf_event *event)
                userpg->offset -= local64_read(&event->hw.prev_count);
 
        userpg->time_enabled = enabled +
-                       atomic64_read(&event->child_total_time_enabled);
+                       atomic64_read_unchecked(&event->child_total_time_enabled);
 
        userpg->time_running = running +
-                       atomic64_read(&event->child_total_time_running);
+                       atomic64_read_unchecked(&event->child_total_time_running);
 
        arch_perf_update_userpage(userpg, now);
 
@@ -4568,7 +4575,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
 
                /* Data. */
                sp = perf_user_stack_pointer(regs);
-               rem = __output_copy_user(handle, (void *) sp, dump_size);
+               rem = __output_copy_user(handle, (void __user *) sp, dump_size);
                dyn_size = dump_size - rem;
 
                perf_output_skip(handle, rem);
@@ -4659,11 +4666,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
        values[n++] = perf_event_count(event);
        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
                values[n++] = enabled +
-                       atomic64_read(&event->child_total_time_enabled);
+                       atomic64_read_unchecked(&event->child_total_time_enabled);
        }
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
                values[n++] = running +
-                       atomic64_read(&event->child_total_time_running);
+                       atomic64_read_unchecked(&event->child_total_time_running);
        }
        if (read_format & PERF_FORMAT_ID)
                values[n++] = primary_event_id(event);
@@ -6994,7 +7001,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        event->parent           = parent_event;
 
        event->ns               = get_pid_ns(task_active_pid_ns(current));
-       event->id               = atomic64_inc_return(&perf_event_id);
+       event->id               = atomic64_inc_return_unchecked(&perf_event_id);
 
        event->state            = PERF_EVENT_STATE_INACTIVE;
 
@@ -7275,6 +7282,11 @@ SYSCALL_DEFINE5(perf_event_open,
        if (flags & ~PERF_FLAG_ALL)
                return -EINVAL;
 
+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
+       if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
+               return -EACCES;
+#endif
+
        err = perf_copy_attr(attr_uptr, &attr);
        if (err)
                return err;
@@ -7642,10 +7654,10 @@ static void sync_child_event(struct perf_event *child_event,
        /*
         * Add back the child's count to the parent's count:
         */
-       atomic64_add(child_val, &parent_event->child_count);
-       atomic64_add(child_event->total_time_enabled,
+       atomic64_add_unchecked(child_val, &parent_event->child_count);
+       atomic64_add_unchecked(child_event->total_time_enabled,
                     &parent_event->child_total_time_enabled);
-       atomic64_add(child_event->total_time_running,
+       atomic64_add_unchecked(child_event->total_time_running,
                     &parent_event->child_total_time_running);
 
        /*
index 569b218782ad6f52053a21495c893935dcde5b10..19940d93c46364c1b275caf5f77774c8435db37b 100644 (file)
@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
        return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
 }
 
-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)                     \
+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user)               \
 static inline unsigned long                                            \
 func_name(struct perf_output_handle *handle,                           \
-         const void *buf, unsigned long len)                           \
+         const void user *buf, unsigned long len)                      \
 {                                                                      \
        unsigned long size, written;                                    \
                                                                        \
@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
        return 0;
 }
 
-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
 
 static inline unsigned long
 memcpy_skip(void *dst, const void *src, unsigned long n)
@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
        return 0;
 }
 
-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
 
 #ifndef arch_perf_out_copy_user
 #define arch_perf_out_copy_user arch_perf_out_copy_user
@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
 }
 #endif
 
-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
 
 /* Callchain handling */
 extern struct perf_callchain_entry *
index cb346f26a22de1d618edc19f2c10300a5850c4c9..e4dc317803fd87120d1058cddbb9109a2f7c1559 100644 (file)
@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
 {
        struct page *page;
        uprobe_opcode_t opcode;
-       int result;
+       long result;
 
        pagefault_disable();
        result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
index 6806c55475eec17be40b1d6c53cf9fe007376279..a5fb128a108b53a01d92b9ace855221b95b6ad40 100644 (file)
@@ -171,6 +171,10 @@ void release_task(struct task_struct *p)
        struct task_struct *leader;
        int zap_leader;
 repeat:
+#ifdef CONFIG_NET
+       gr_del_task_from_ip_table(p);
+#endif
+
        /* don't need to get the RCU readlock here - the process is dead and
         * can't be modifying its own credentials. But shut RCU-lockdep up */
        rcu_read_lock();
@@ -655,6 +659,8 @@ void do_exit(long code)
        int group_dead;
        TASKS_RCU(int tasks_rcu_i);
 
+       set_fs(USER_DS);
+
        profile_task_exit(tsk);
 
        WARN_ON(blk_needs_flush_plug(tsk));
@@ -671,7 +677,6 @@ void do_exit(long code)
         * mm_release()->clear_child_tid() from writing to a user-controlled
         * kernel address.
         */
-       set_fs(USER_DS);
 
        ptrace_event(PTRACE_EVENT_EXIT, code);
 
@@ -729,6 +734,9 @@ void do_exit(long code)
        tsk->exit_code = code;
        taskstats_exit(tsk, group_dead);
 
+       gr_acl_handle_psacct(tsk, code);
+       gr_acl_handle_exit();
+
        exit_mm(tsk);
 
        if (group_dead)
@@ -848,7 +856,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
  * Take down every thread in the group.  This is called by fatal signals
  * as well as by sys_exit_group (below).
  */
-void
+__noreturn void
 do_group_exit(int exit_code)
 {
        struct signal_struct *sig = current->signal;
index 4dc2ddade9f1f288aaa08d8a665f1fded2ba88cc..651add0e904e8cb84e7aeae4c195383aff56ab92 100644 (file)
@@ -177,12 +177,54 @@ static void free_thread_info(struct thread_info *ti)
 void thread_info_cache_init(void)
 {
        thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
-                                             THREAD_SIZE, 0, NULL);
+                                             THREAD_SIZE, SLAB_USERCOPY, NULL);
        BUG_ON(thread_info_cache == NULL);
 }
 # endif
 #endif
 
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
+                                                 int node, void **lowmem_stack)
+{
+       struct page *pages[THREAD_SIZE / PAGE_SIZE];
+       void *ret = NULL;
+       unsigned int i;
+
+       *lowmem_stack = alloc_thread_info_node(tsk, node);
+       if (*lowmem_stack == NULL)
+               goto out;
+
+       for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
+               pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
+       
+       /* use VM_IOREMAP to gain THREAD_SIZE alignment */
+       ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
+       if (ret == NULL) {
+               free_thread_info(*lowmem_stack);
+               *lowmem_stack = NULL;
+       }
+
+out:
+       return ret;
+}
+
+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
+{
+       unmap_process_stacks(tsk);
+}
+#else
+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
+                                                 int node, void **lowmem_stack)
+{
+       return alloc_thread_info_node(tsk, node);
+}
+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
+{
+       free_thread_info(ti);
+}
+#endif
+
 /* SLAB cache for signal_struct structures (tsk->signal) */
 static struct kmem_cache *signal_cachep;
 
@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
 /* SLAB cache for mm_struct structures (tsk->mm) */
 static struct kmem_cache *mm_cachep;
 
-static void account_kernel_stack(struct thread_info *ti, int account)
+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
 {
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
+       struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
+#else
        struct zone *zone = page_zone(virt_to_page(ti));
+#endif
 
        mod_zone_page_state(zone, NR_KERNEL_STACK, account);
 }
 
 void free_task(struct task_struct *tsk)
 {
-       account_kernel_stack(tsk->stack, -1);
+       account_kernel_stack(tsk, tsk->stack, -1);
        arch_release_thread_info(tsk->stack);
-       free_thread_info(tsk->stack);
+       gr_free_thread_info(tsk, tsk->stack);
        rt_mutex_debug_task_free(tsk);
        ftrace_graph_exit_task(tsk);
        put_seccomp_filter(tsk);
@@ -306,6 +352,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 {
        struct task_struct *tsk;
        struct thread_info *ti;
+       void *lowmem_stack;
        int node = tsk_fork_get_node(orig);
        int err;
 
@@ -313,7 +360,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
        if (!tsk)
                return NULL;
 
-       ti = alloc_thread_info_node(tsk, node);
+       ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
        if (!ti)
                goto free_tsk;
 
@@ -322,6 +369,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
                goto free_ti;
 
        tsk->stack = ti;
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
+       tsk->lowmem_stack = lowmem_stack;
+#endif
 #ifdef CONFIG_SECCOMP
        /*
         * We must handle setting up seccomp filters once we're under
@@ -338,7 +388,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
        set_task_stack_end_magic(tsk);
 
 #ifdef CONFIG_CC_STACKPROTECTOR
-       tsk->stack_canary = get_random_int();
+       tsk->stack_canary = pax_get_random_long();
 #endif
 
        /*
@@ -352,24 +402,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
        tsk->splice_pipe = NULL;
        tsk->task_frag.page = NULL;
 
-       account_kernel_stack(ti, 1);
+       account_kernel_stack(tsk, ti, 1);
 
        return tsk;
 
 free_ti:
-       free_thread_info(ti);
+       gr_free_thread_info(tsk, ti);
 free_tsk:
        free_task_struct(tsk);
        return NULL;
 }
 
 #ifdef CONFIG_MMU
-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
+{
+       struct vm_area_struct *tmp;
+       unsigned long charge;
+       struct file *file;
+       int retval;
+
+       charge = 0;
+       if (mpnt->vm_flags & VM_ACCOUNT) {
+               unsigned long len = vma_pages(mpnt);
+
+               if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
+                       goto fail_nomem;
+               charge = len;
+       }
+       tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+       if (!tmp)
+               goto fail_nomem;
+       *tmp = *mpnt;
+       tmp->vm_mm = mm;
+       INIT_LIST_HEAD(&tmp->anon_vma_chain);
+       retval = vma_dup_policy(mpnt, tmp);
+       if (retval)
+               goto fail_nomem_policy;
+       if (anon_vma_fork(tmp, mpnt))
+               goto fail_nomem_anon_vma_fork;
+       tmp->vm_flags &= ~VM_LOCKED;
+       tmp->vm_next = tmp->vm_prev = NULL;
+       tmp->vm_mirror = NULL;
+       file = tmp->vm_file;
+       if (file) {
+               struct inode *inode = file_inode(file);
+               struct address_space *mapping = file->f_mapping;
+
+               get_file(file);
+               if (tmp->vm_flags & VM_DENYWRITE)
+                       atomic_dec(&inode->i_writecount);
+               i_mmap_lock_write(mapping);
+               if (tmp->vm_flags & VM_SHARED)
+                       atomic_inc(&mapping->i_mmap_writable);
+               flush_dcache_mmap_lock(mapping);
+               /* insert tmp into the share list, just after mpnt */
+               if (unlikely(tmp->vm_flags & VM_NONLINEAR))
+                       vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
+               else
+                       vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
+               flush_dcache_mmap_unlock(mapping);
+               i_mmap_unlock_write(mapping);
+       }
+
+       /*
+        * Clear hugetlb-related page reserves for children. This only
+        * affects MAP_PRIVATE mappings. Faults generated by the child
+        * are not guaranteed to succeed, even if read-only
+        */
+       if (is_vm_hugetlb_page(tmp))
+               reset_vma_resv_huge_pages(tmp);
+
+       return tmp;
+
+fail_nomem_anon_vma_fork:
+       mpol_put(vma_policy(tmp));
+fail_nomem_policy:
+       kmem_cache_free(vm_area_cachep, tmp);
+fail_nomem:
+       vm_unacct_memory(charge);
+       return NULL;
+}
+
+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 {
        struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
        struct rb_node **rb_link, *rb_parent;
        int retval;
-       unsigned long charge;
 
        uprobe_start_dup_mmap();
        down_write(&oldmm->mmap_sem);
@@ -397,55 +515,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 
        prev = NULL;
        for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
-               struct file *file;
-
                if (mpnt->vm_flags & VM_DONTCOPY) {
                        vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
                                                        -vma_pages(mpnt));
                        continue;
                }
-               charge = 0;
-               if (mpnt->vm_flags & VM_ACCOUNT) {
-                       unsigned long len = vma_pages(mpnt);
-
-                       if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
-                               goto fail_nomem;
-                       charge = len;
-               }
-               tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
-               if (!tmp)
-                       goto fail_nomem;
-               *tmp = *mpnt;
-               INIT_LIST_HEAD(&tmp->anon_vma_chain);
-               retval = vma_dup_policy(mpnt, tmp);
-               if (retval)
-                       goto fail_nomem_policy;
-               tmp->vm_mm = mm;
-               if (anon_vma_fork(tmp, mpnt))
-                       goto fail_nomem_anon_vma_fork;
-               tmp->vm_flags &= ~VM_LOCKED;
-               tmp->vm_next = tmp->vm_prev = NULL;
-               file = tmp->vm_file;
-               if (file) {
-                       struct inode *inode = file_inode(file);
-                       struct address_space *mapping = file->f_mapping;
-
-                       get_file(file);
-                       if (tmp->vm_flags & VM_DENYWRITE)
-                               atomic_dec(&inode->i_writecount);
-                       i_mmap_lock_write(mapping);
-                       if (tmp->vm_flags & VM_SHARED)
-                               atomic_inc(&mapping->i_mmap_writable);
-                       flush_dcache_mmap_lock(mapping);
-                       /* insert tmp into the share list, just after mpnt */
-                       if (unlikely(tmp->vm_flags & VM_NONLINEAR))
-                               vma_nonlinear_insert(tmp,
-                                               &mapping->i_mmap_nonlinear);
-                       else
-                               vma_interval_tree_insert_after(tmp, mpnt,
-                                                       &mapping->i_mmap);
-                       flush_dcache_mmap_unlock(mapping);
-                       i_mmap_unlock_write(mapping);
+               tmp = dup_vma(mm, oldmm, mpnt);
+               if (!tmp) {
+                       retval = -ENOMEM;
+                       goto out;
                }
 
                /*
@@ -477,6 +555,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                if (retval)
                        goto out;
        }
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
+               struct vm_area_struct *mpnt_m;
+
+               for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
+                       BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
+
+                       if (!mpnt->vm_mirror)
+                               continue;
+
+                       if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
+                               BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
+                               mpnt->vm_mirror = mpnt_m;
+                       } else {
+                               BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
+                               mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
+                               mpnt_m->vm_mirror->vm_mirror = mpnt_m;
+                               mpnt->vm_mirror->vm_mirror = mpnt;
+                       }
+               }
+               BUG_ON(mpnt_m);
+       }
+#endif
+
        /* a new mm has just been created */
        arch_dup_mmap(oldmm, mm);
        retval = 0;
@@ -486,14 +589,6 @@ out:
        up_write(&oldmm->mmap_sem);
        uprobe_end_dup_mmap();
        return retval;
-fail_nomem_anon_vma_fork:
-       mpol_put(vma_policy(tmp));
-fail_nomem_policy:
-       kmem_cache_free(vm_area_cachep, tmp);
-fail_nomem:
-       retval = -ENOMEM;
-       vm_unacct_memory(charge);
-       goto out;
 }
 
 static inline int mm_alloc_pgd(struct mm_struct *mm)
@@ -734,8 +829,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
                return ERR_PTR(err);
 
        mm = get_task_mm(task);
-       if (mm && mm != current->mm &&
-                       !ptrace_may_access(task, mode)) {
+       if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
+                 (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
                mmput(mm);
                mm = ERR_PTR(-EACCES);
        }
@@ -938,13 +1033,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
                        spin_unlock(&fs->lock);
                        return -EAGAIN;
                }
-               fs->users++;
+               atomic_inc(&fs->users);
                spin_unlock(&fs->lock);
                return 0;
        }
        tsk->fs = copy_fs_struct(fs);
        if (!tsk->fs)
                return -ENOMEM;
+       /* Carry through gr_chroot_dentry and is_chrooted instead
+          of recomputing it here.  Already copied when the task struct
+          is duplicated.  This allows pivot_root to not be treated as
+          a chroot
+       */
+       //gr_set_chroot_entries(tsk, &tsk->fs->root);
+
        return 0;
 }
 
@@ -1182,7 +1284,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
  * parts of the process environment (as per the clone
  * flags). The actual kick-off is left to the caller.
  */
-static struct task_struct *copy_process(unsigned long clone_flags,
+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
                                        unsigned long stack_start,
                                        unsigned long stack_size,
                                        int __user *child_tidptr,
@@ -1253,6 +1355,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
 #endif
        retval = -EAGAIN;
+
+       gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
+
        if (atomic_read(&p->real_cred->user->processes) >=
                        task_rlimit(p, RLIMIT_NPROC)) {
                if (p->real_cred->user != INIT_USER &&
@@ -1502,6 +1607,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                goto bad_fork_free_pid;
        }
 
+       /* synchronizes with gr_set_acls()
+          we need to call this past the point of no return for fork()
+       */
+       gr_copy_label(p);
+
        if (likely(p->pid)) {
                ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
 
@@ -1592,6 +1702,8 @@ bad_fork_cleanup_count:
 bad_fork_free:
        free_task(p);
 fork_out:
+       gr_log_forkfail(retval);
+
        return ERR_PTR(retval);
 }
 
@@ -1653,6 +1765,7 @@ long do_fork(unsigned long clone_flags,
 
        p = copy_process(clone_flags, stack_start, stack_size,
                         child_tidptr, NULL, trace);
+       add_latent_entropy();
        /*
         * Do this prior waking up the new thread - the thread pointer
         * might get invalid after that point, if the thread exits quickly.
@@ -1669,6 +1782,8 @@ long do_fork(unsigned long clone_flags,
                if (clone_flags & CLONE_PARENT_SETTID)
                        put_user(nr, parent_tidptr);
 
+               gr_handle_brute_check();
+
                if (clone_flags & CLONE_VFORK) {
                        p->vfork_done = &vfork;
                        init_completion(&vfork);
@@ -1787,7 +1902,7 @@ void __init proc_caches_init(void)
        mm_cachep = kmem_cache_create("mm_struct",
                        sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
-       vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
+       vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
        mmap_init();
        nsproxy_cache_init();
 }
@@ -1827,7 +1942,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
                return 0;
 
        /* don't need lock here; in the worst case we'll do useless copy */
-       if (fs->users == 1)
+       if (atomic_read(&fs->users) == 1)
                return 0;
 
        *new_fsp = copy_fs_struct(fs);
@@ -1939,7 +2054,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
                        fs = current->fs;
                        spin_lock(&fs->lock);
                        current->fs = new_fs;
-                       if (--fs->users)
+                       gr_set_chroot_entries(current, &current->fs->root);
+                       if (atomic_dec_return(&fs->users))
                                new_fs = NULL;
                        else
                                new_fs = fs;
index 63678b573d6135201700db85ede47d5111082a9c..512f9afc4cc657e1ca8e60f292b3ecdeb0ff2943 100644 (file)
@@ -201,7 +201,7 @@ struct futex_pi_state {
        atomic_t refcount;
 
        union futex_key key;
-};
+} __randomize_layout;
 
 /**
  * struct futex_q - The hashed futex queue entry, one per waiting task
@@ -235,7 +235,7 @@ struct futex_q {
        struct rt_mutex_waiter *rt_waiter;
        union futex_key *requeue_pi_key;
        u32 bitset;
-};
+} __randomize_layout;
 
 static const struct futex_q futex_q_init = {
        /* list gets initialized in queue_me()*/
@@ -402,6 +402,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
        struct page *page, *page_head;
        int err, ro = 0;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
+               return -EFAULT;
+#endif
+
        /*
         * The futex address must be "naturally" aligned.
         */
@@ -601,7 +606,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
 
 static int get_futex_value_locked(u32 *dest, u32 __user *from)
 {
-       int ret;
+       unsigned long ret;
 
        pagefault_disable();
        ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
@@ -3006,6 +3011,7 @@ static void __init futex_detect_cmpxchg(void)
 {
 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
        u32 curval;
+       mm_segment_t oldfs;
 
        /*
         * This will fail and we want it. Some arch implementations do
@@ -3017,8 +3023,11 @@ static void __init futex_detect_cmpxchg(void)
         * implementation, the non-functional ones will return
         * -ENOSYS.
         */
+       oldfs = get_fs();
+       set_fs(USER_DS);
        if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
                futex_cmpxchg_enabled = 1;
+       set_fs(oldfs);
 #endif
 }
 
index 55c8c9349cfe6db49b9443c7b4aefb378c6f0249..9ba7ad6528438838d90a84f0f688a5595a34307b 100644 (file)
@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
        return 0;
 }
 
-static void __user *futex_uaddr(struct robust_list __user *entry,
+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
                                compat_long_t futex_offset)
 {
        compat_uptr_t base = ptr_to_compat(entry);
index b358a802fd184391ae4926bc02b2983511bc8077..fc2524004ff335637bed54b93e7de05aaf093659 100644 (file)
@@ -114,11 +114,6 @@ void gcov_enable_events(void)
 }
 
 #ifdef CONFIG_MODULES
-static inline int within(void *addr, void *start, unsigned long size)
-{
-       return ((addr >= start) && (addr < start + size));
-}
-
 /* Update list and generate events when modules are unloaded. */
 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
                                void *data)
@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
 
        /* Remove entries located in module from linked list. */
        while ((info = gcov_info_next(info))) {
-               if (within(info, mod->module_core, mod->core_size)) {
+               if (within_module_core_rw((unsigned long)info, mod)) {
                        gcov_info_unlink(prev, info);
                        if (gcov_events_enabled)
                                gcov_event(GCOV_REMOVE, info);
index 80692373abd6e809b973c4be5fcad24ef52edfd7..fe712d0b561bacd53642592f4f4fd8f5ce3f3d5d 100644 (file)
@@ -871,7 +871,7 @@ static int irq_thread(void *data)
 
                action_ret = handler_fn(desc, action);
                if (action_ret == IRQ_HANDLED)
-                       atomic_inc(&desc->threads_handled);
+                       atomic_inc_unchecked(&desc->threads_handled);
 
                wake_threads_waitq(desc);
        }
index e2514b0e439e942e19245558e0fad10918f84371..de3dfe06ca4168e9bc637296de8f64d55fca29b0 100644 (file)
@@ -337,7 +337,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
                         * count. We just care about the count being
                         * different than the one we saw before.
                         */
-                       handled = atomic_read(&desc->threads_handled);
+                       handled = atomic_read_unchecked(&desc->threads_handled);
                        handled |= SPURIOUS_DEFERRED;
                        if (handled != desc->threads_handled_last) {
                                action_ret = IRQ_HANDLED;
index 9019f15deab201127065e4d6987677fdcdd63676..9a3c42edd8f8d935496c40bb29139fbbf259e579 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/err.h>
 #include <linux/static_key.h>
 #include <linux/jump_label_ratelimit.h>
+#include <linux/mm.h>
 
 #ifdef HAVE_JUMP_LABEL
 
@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
 
        size = (((unsigned long)stop - (unsigned long)start)
                                        / sizeof(struct jump_entry));
+       pax_open_kernel();
        sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
+       pax_close_kernel();
 }
 
 static void jump_label_update(struct static_key *key, int enable);
@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
 
+       pax_open_kernel();
        for (iter = iter_start; iter < iter_stop; iter++) {
                if (within_module_init(iter->code, mod))
                        iter->code = 0;
        }
+       pax_close_kernel();
 }
 
 static int
index 5c5987f1081964109f96cd6cc52b285b948f0a9e..bc502b0e6663239da28a266a5bdc5b3e20350408 100644 (file)
@@ -11,6 +11,9 @@
  *      Changed the compression method from stem compression to "table lookup"
  *      compression (see scripts/kallsyms.c for a more complete description)
  */
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+#define __INCLUDED_BY_HIDESYM 1
+#endif
 #include <linux/kallsyms.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
 
 static inline int is_kernel_inittext(unsigned long addr)
 {
+       if (system_state != SYSTEM_BOOTING)
+               return 0;
+
        if (addr >= (unsigned long)_sinittext
            && addr <= (unsigned long)_einittext)
                return 1;
        return 0;
 }
 
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+#ifdef CONFIG_MODULES
+static inline int is_module_text(unsigned long addr)
+{
+       if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
+               return 1;
+
+       addr = ktla_ktva(addr);
+       return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
+}
+#else
+static inline int is_module_text(unsigned long addr)
+{
+       return 0;
+}
+#endif
+#endif
+
 static inline int is_kernel_text(unsigned long addr)
 {
        if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
 
 static inline int is_kernel(unsigned long addr)
 {
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+       if (is_kernel_text(addr) || is_kernel_inittext(addr))
+               return 1;
+
+       if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
+#else
        if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
+#endif
+
                return 1;
        return in_gate_area_no_mm(addr);
 }
 
 static int is_ksym_addr(unsigned long addr)
 {
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+       if (is_module_text(addr))
+               return 0;
+#endif
+
        if (all_var)
                return is_kernel(addr);
 
@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
 
 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
 {
-       iter->name[0] = '\0';
        iter->nameoff = get_symbol_offset(new_pos);
        iter->pos = new_pos;
 }
@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
 {
        struct kallsym_iter *iter = m->private;
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+       if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
+               return 0;
+#endif
+
        /* Some debugging symbols have no name.  Ignore them. */
        if (!iter->name[0])
                return 0;
@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
                 */
                type = iter->exported ? toupper(iter->type) :
                                        tolower(iter->type);
+
                seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
                           type, iter->name, iter->module_name);
        } else
index 0aa69ea1d8fdcfa68046aa75b03c4373783a02fa..a7fcafb37e64b5e707030d7f841ce72067b3b50c 100644 (file)
@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
        struct task_struct *task1, *task2;
        int ret;
 
+#ifdef CONFIG_GRKERNSEC
+       return -ENOSYS;
+#endif
+
        rcu_read_lock();
 
        /*
index 9a8a01abbaed16a235c6e98b12f43996e6900cc2..3c35dd64f4b6eff9b4a26d25a1b21daf00b74c5a 100644 (file)
@@ -1349,7 +1349,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
                       compat_ulong_t, flags)
 {
        struct compat_kexec_segment in;
-       struct kexec_segment out, __user *ksegments;
+       struct kexec_segment out;
+       struct kexec_segment __user *ksegments;
        unsigned long i, result;
 
        /* Don't allow clients that don't understand the native
index 2777f40a9c7be84c60da316960a012e982d67c35..6cf5e70df2c9356e6e24b537bd394dd251f3ae5f 100644 (file)
@@ -68,7 +68,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
        kfree(info->argv);
 }
 
-static int call_modprobe(char *module_name, int wait)
+static int call_modprobe(char *module_name, char *module_param, int wait)
 {
        struct subprocess_info *info;
        static char *envp[] = {
@@ -78,7 +78,7 @@ static int call_modprobe(char *module_name, int wait)
                NULL
        };
 
-       char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
+       char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
        if (!argv)
                goto out;
 
@@ -90,7 +90,8 @@ static int call_modprobe(char *module_name, int wait)
        argv[1] = "-q";
        argv[2] = "--";
        argv[3] = module_name;  /* check free_modprobe_argv() */
-       argv[4] = NULL;
+       argv[4] = module_param;
+       argv[5] = NULL;
 
        info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
                                         NULL, free_modprobe_argv, NULL);
@@ -122,9 +123,8 @@ out:
  * If module auto-loading support is disabled then this function
  * becomes a no-operation.
  */
-int __request_module(bool wait, const char *fmt, ...)
+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
 {
-       va_list args;
        char module_name[MODULE_NAME_LEN];
        unsigned int max_modprobes;
        int ret;
@@ -143,9 +143,7 @@ int __request_module(bool wait, const char *fmt, ...)
        if (!modprobe_path[0])
                return 0;
 
-       va_start(args, fmt);
-       ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
-       va_end(args);
+       ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
        if (ret >= MODULE_NAME_LEN)
                return -ENAMETOOLONG;
 
@@ -153,6 +151,20 @@ int __request_module(bool wait, const char *fmt, ...)
        if (ret)
                return ret;
 
+#ifdef CONFIG_GRKERNSEC_MODHARDEN
+       if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
+               /* hack to workaround consolekit/udisks stupidity */
+               read_lock(&tasklist_lock);
+               if (!strcmp(current->comm, "mount") &&
+                   current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
+                       read_unlock(&tasklist_lock);
+                       printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
+                       return -EPERM;
+               }
+               read_unlock(&tasklist_lock);
+       }
+#endif
+
        /* If modprobe needs a service that is in a module, we get a recursive
         * loop.  Limit the number of running kmod threads to max_threads/2 or
         * MAX_KMOD_CONCURRENT, whichever is the smaller.  A cleaner method
@@ -181,16 +193,61 @@ int __request_module(bool wait, const char *fmt, ...)
 
        trace_module_request(module_name, wait, _RET_IP_);
 
-       ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
+       ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
 
        atomic_dec(&kmod_concurrent);
        return ret;
 }
+
+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
+{
+       va_list args;
+       int ret;
+
+       va_start(args, fmt);
+       ret = ____request_module(wait, module_param, fmt, args);
+       va_end(args);
+
+       return ret;
+}
+
+int __request_module(bool wait, const char *fmt, ...)
+{
+       va_list args;
+       int ret;
+
+#ifdef CONFIG_GRKERNSEC_MODHARDEN
+       if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
+               char module_param[MODULE_NAME_LEN];
+
+               memset(module_param, 0, sizeof(module_param));
+
+               snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
+
+               va_start(args, fmt);
+               ret = ____request_module(wait, module_param, fmt, args);
+               va_end(args);
+
+               return ret;
+       }
+#endif
+
+       va_start(args, fmt);
+       ret = ____request_module(wait, NULL, fmt, args);
+       va_end(args);
+
+       return ret;
+}
+
 EXPORT_SYMBOL(__request_module);
 #endif /* CONFIG_MODULES */
 
 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
 {
+#ifdef CONFIG_GRKERNSEC
+       kfree(info->path);
+       info->path = info->origpath;
+#endif
        if (info->cleanup)
                (*info->cleanup)(info);
        kfree(info);
@@ -232,6 +289,20 @@ static int ____call_usermodehelper(void *data)
         */
        set_user_nice(current, 0);
 
+#ifdef CONFIG_GRKERNSEC
+       /* this is race-free as far as userland is concerned as we copied
+          out the path to be used prior to this point and are now operating
+          on that copy
+       */
+       if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
+            strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
+            strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
+               printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
+               retval = -EPERM;
+               goto out;
+       }
+#endif
+
        retval = -ENOMEM;
        new = prepare_kernel_cred(current);
        if (!new)
@@ -254,8 +325,8 @@ static int ____call_usermodehelper(void *data)
        commit_creds(new);
 
        retval = do_execve(getname_kernel(sub_info->path),
-                          (const char __user *const __user *)sub_info->argv,
-                          (const char __user *const __user *)sub_info->envp);
+                          (const char __user *const __force_user *)sub_info->argv,
+                          (const char __user *const __force_user *)sub_info->envp);
 out:
        sub_info->retval = retval;
        /* wait_for_helper() will call umh_complete if UHM_WAIT_PROC. */
@@ -288,7 +359,7 @@ static int wait_for_helper(void *data)
                 *
                 * Thus the __user pointer cast is valid here.
                 */
-               sys_wait4(pid, (int __user *)&ret, 0, NULL);
+               sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
 
                /*
                 * If ret is 0, either ____call_usermodehelper failed and the
@@ -510,7 +581,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
                goto out;
 
        INIT_WORK(&sub_info->work, __call_usermodehelper);
+#ifdef CONFIG_GRKERNSEC
+       sub_info->origpath = path;
+       sub_info->path = kstrdup(path, gfp_mask);
+#else
        sub_info->path = path;
+#endif
        sub_info->argv = argv;
        sub_info->envp = envp;
 
@@ -612,7 +688,7 @@ EXPORT_SYMBOL(call_usermodehelper);
 static int proc_cap_handler(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       struct ctl_table t;
+       ctl_table_no_const t;
        unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
        kernel_cap_t new_cap;
        int err, i;
index ee619929cf9091059406e8f82df32c50d609c0d9..62142b177a273c3796de061205ce41a871fa5105 100644 (file)
@@ -31,6 +31,9 @@
  *             <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
  *             <prasanna@in.ibm.com> added function-return probes.
  */
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+#define __INCLUDED_BY_HIDESYM 1
+#endif
 #include <linux/kprobes.h>
 #include <linux/hash.h>
 #include <linux/init.h>
@@ -122,12 +125,12 @@ enum kprobe_slot_state {
 
 static void *alloc_insn_page(void)
 {
-       return module_alloc(PAGE_SIZE);
+       return module_alloc_exec(PAGE_SIZE);
 }
 
 static void free_insn_page(void *page)
 {
-       module_memfree(page);
+       module_memfree_exec(page);
 }
 
 struct kprobe_insn_cache kprobe_insn_slots = {
@@ -2191,11 +2194,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
                kprobe_type = "k";
 
        if (sym)
-               seq_printf(pi, "%p  %s  %s+0x%x  %s ",
+               seq_printf(pi, "%pK  %s  %s+0x%x  %s ",
                        p->addr, kprobe_type, sym, offset,
                        (modname ? modname : " "));
        else
-               seq_printf(pi, "%p  %s  %p ",
+               seq_printf(pi, "%pK  %s  %pK ",
                        p->addr, kprobe_type, p->addr);
 
        if (!pp)
index 6683ccef9fffb2de28b6a4d6d01393b080811d6a..daf8999f595a9dab0d093dd57875e9c89524189c 100644 (file)
@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
 {
        if (count+1 > UEVENT_HELPER_PATH_LEN)
                return -ENOENT;
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
        memcpy(uevent_helper, buf, count);
        uevent_helper[count] = '\0';
        if (count && uevent_helper[count-1] == '\n')
@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
        return count;
 }
 
-static struct bin_attribute notes_attr = {
+static bin_attribute_no_const notes_attr __read_only = {
        .attr = {
                .name = "notes",
                .mode = S_IRUGO,
index 88d0d4420ad2e3e47e71129d361b153c96a49e18..e9ce0eeb475a76f7da0346e5acb3fcdf3d7bb169 100644 (file)
@@ -599,6 +599,10 @@ static int static_obj(void *obj)
                      end   = (unsigned long) &_end,
                      addr  = (unsigned long) obj;
 
+#ifdef CONFIG_PAX_KERNEXEC
+       start = ktla_ktva(start);
+#endif
+
        /*
         * static variable?
         */
@@ -740,6 +744,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
        if (!static_obj(lock->key)) {
                debug_locks_off();
                printk("INFO: trying to register non-static key.\n");
+               printk("lock:%pS key:%pS.\n", lock, lock->key);
                printk("the code is fine but needs lockdep annotation.\n");
                printk("turning off the locking correctness validator.\n");
                dump_stack();
@@ -3081,7 +3086,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                if (!class)
                        return 0;
        }
-       atomic_inc((atomic_t *)&class->ops);
+       atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
        if (very_verbose(class)) {
                printk("\nacquire class [%p] %s", class->key, class->name);
                if (class->name_version > 1)
index ef43ac4bafb59b83ab979a680d49d6077749f955..2720dfa1214d04788f49bac0e09d784e70a6b6a2 100644 (file)
@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
                return 0;
        }
 
-       seq_printf(m, "%p", class->key);
+       seq_printf(m, "%pK", class->key);
 #ifdef CONFIG_DEBUG_LOCKDEP
        seq_printf(m, " OPS:%8ld", class->ops);
 #endif
@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
 
        list_for_each_entry(entry, &class->locks_after, entry) {
                if (entry->distance == 1) {
-                       seq_printf(m, " -> [%p] ", entry->class->key);
+                       seq_printf(m, " -> [%pK] ", entry->class->key);
                        print_name(m, entry->class);
                        seq_puts(m, "\n");
                }
@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
                if (!class->key)
                        continue;
 
-               seq_printf(m, "[%p] ", class->key);
+               seq_printf(m, "[%pK] ", class->key);
                print_name(m, class);
                seq_puts(m, "\n");
        }
@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
                if (!i)
                        seq_line(m, '-', 40-namelen, namelen);
 
-               snprintf(ip, sizeof(ip), "[<%p>]",
+               snprintf(ip, sizeof(ip), "[<%pK>]",
                                (void *)class->contention_point[i]);
                seq_printf(m, "%40s %14lu %29s %pS\n",
                           name, stats->contention_point[i],
@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
                if (!i)
                        seq_line(m, '-', 40-namelen, namelen);
 
-               snprintf(ip, sizeof(ip), "[<%p>]",
+               snprintf(ip, sizeof(ip), "[<%pK>]",
                                (void *)class->contending_point[i]);
                seq_printf(m, "%40s %14lu %29s %pS\n",
                           name, stats->contending_point[i],
index 9887a905a7626278d8ccf6da8176c447e2aad988..0cd2b1d4d87b15a115d324530faaa5c2777fd3b3 100644 (file)
@@ -100,7 +100,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 
        prev = decode_cpu(old);
        node->prev = prev;
-       ACCESS_ONCE(prev->next) = node;
+       ACCESS_ONCE_RW(prev->next) = node;
 
        /*
         * Normally @prev is untouchable after the above store; because at that
@@ -172,8 +172,8 @@ unqueue:
         * it will wait in Step-A.
         */
 
-       ACCESS_ONCE(next->prev) = prev;
-       ACCESS_ONCE(prev->next) = next;
+       ACCESS_ONCE_RW(next->prev) = prev;
+       ACCESS_ONCE_RW(prev->next) = next;
 
        return false;
 }
@@ -195,13 +195,13 @@ void osq_unlock(struct optimistic_spin_queue *lock)
        node = this_cpu_ptr(&osq_node);
        next = xchg(&node->next, NULL);
        if (next) {
-               ACCESS_ONCE(next->locked) = 1;
+               ACCESS_ONCE_RW(next->locked) = 1;
                return;
        }
 
        next = osq_wait_next(lock, node, NULL);
        if (next)
-               ACCESS_ONCE(next->locked) = 1;
+               ACCESS_ONCE_RW(next->locked) = 1;
 }
 
 #endif
index 4d60986fcbee74a4fde3906e0d87fc113c5e8172..5d351c1f5a22be26d23fdbddf55dbaa22f5710dd 100644 (file)
@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
                 */
                return;
        }
-       ACCESS_ONCE(prev->next) = node;
+       ACCESS_ONCE_RW(prev->next) = node;
 
        /* Wait until the lock holder passes the lock down. */
        arch_mcs_spin_lock_contended(&node->locked);
index 3ef3736002d895854794a4d940adcd96288640f2..9c951fade415be1549b568d86275e0327bbdba58 100644 (file)
@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
 }
 
 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-                           struct thread_info *ti)
+                           struct task_struct *task)
 {
        SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
 
        /* Mark the current thread as blocked on the lock: */
-       ti->task->blocked_on = waiter;
+       task->blocked_on = waiter;
 }
 
 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-                        struct thread_info *ti)
+                        struct task_struct *task)
 {
        DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
-       DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
-       DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
-       ti->task->blocked_on = NULL;
+       DEBUG_LOCKS_WARN_ON(waiter->task != task);
+       DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
+       task->blocked_on = NULL;
 
        list_del_init(&waiter->list);
        waiter->task = NULL;
index 0799fd3e4cfacbfdac923c6dedba278dbc36e0f9..d06ae3bb46c5f9f928cf7123464f49e284ab3ecf 100644 (file)
@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
 extern void debug_mutex_add_waiter(struct mutex *lock,
                                   struct mutex_waiter *waiter,
-                                  struct thread_info *ti);
+                                  struct task_struct *task);
 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-                               struct thread_info *ti);
+                               struct task_struct *task);
 extern void debug_mutex_unlock(struct mutex *lock);
 extern void debug_mutex_init(struct mutex *lock, const char *name,
                             struct lock_class_key *key);
index 454195194d4a133f2d600d0d3659b2ee5306b7ad..39fe90a3bdafabce985a0db10cf459606b650a1b 100644 (file)
@@ -524,7 +524,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                goto skip_wait;
 
        debug_mutex_lock_common(lock, &waiter);
-       debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
+       debug_mutex_add_waiter(lock, &waiter, task);
 
        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
@@ -569,7 +569,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                schedule_preempt_disabled();
                spin_lock_mutex(&lock->wait_lock, flags);
        }
-       mutex_remove_waiter(lock, &waiter, current_thread_info());
+       mutex_remove_waiter(lock, &waiter, task);
        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);
@@ -606,7 +606,7 @@ skip_wait:
        return 0;
 
 err:
-       mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+       mutex_remove_waiter(lock, &waiter, task);
        spin_unlock_mutex(&lock->wait_lock, flags);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, 1, ip);
index 1d96dd0d93c1aa24eaff65f13b9df2cc22945ae2..994ff190d9cb5708902607628e316ffd8e42a48e 100644 (file)
@@ -22,7 +22,7 @@
 #define MAX_RT_TEST_MUTEXES    8
 
 static spinlock_t rttest_lock;
-static atomic_t rttest_event;
+static atomic_unchecked_t rttest_event;
 
 struct test_thread_data {
        int                     opcode;
@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
 
        case RTTEST_LOCKCONT:
                td->mutexes[td->opdata] = 1;
-               td->event = atomic_add_return(1, &rttest_event);
+               td->event = atomic_add_return_unchecked(1, &rttest_event);
                return 0;
 
        case RTTEST_RESET:
@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
                return 0;
 
        case RTTEST_RESETEVENT:
-               atomic_set(&rttest_event, 0);
+               atomic_set_unchecked(&rttest_event, 0);
                return 0;
 
        default:
@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
                        return ret;
 
                td->mutexes[id] = 1;
-               td->event = atomic_add_return(1, &rttest_event);
+               td->event = atomic_add_return_unchecked(1, &rttest_event);
                rt_mutex_lock(&mutexes[id]);
-               td->event = atomic_add_return(1, &rttest_event);
+               td->event = atomic_add_return_unchecked(1, &rttest_event);
                td->mutexes[id] = 4;
                return 0;
 
@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
                        return ret;
 
                td->mutexes[id] = 1;
-               td->event = atomic_add_return(1, &rttest_event);
+               td->event = atomic_add_return_unchecked(1, &rttest_event);
                ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
-               td->event = atomic_add_return(1, &rttest_event);
+               td->event = atomic_add_return_unchecked(1, &rttest_event);
                td->mutexes[id] = ret ? 0 : 4;
                return ret ? -EINTR : 0;
 
@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
                if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
                        return ret;
 
-               td->event = atomic_add_return(1, &rttest_event);
+               td->event = atomic_add_return_unchecked(1, &rttest_event);
                rt_mutex_unlock(&mutexes[id]);
-               td->event = atomic_add_return(1, &rttest_event);
+               td->event = atomic_add_return_unchecked(1, &rttest_event);
                td->mutexes[id] = 0;
                return 0;
 
@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
                        break;
 
                td->mutexes[dat] = 2;
-               td->event = atomic_add_return(1, &rttest_event);
+               td->event = atomic_add_return_unchecked(1, &rttest_event);
                break;
 
        default:
@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
                        return;
 
                td->mutexes[dat] = 3;
-               td->event = atomic_add_return(1, &rttest_event);
+               td->event = atomic_add_return_unchecked(1, &rttest_event);
                break;
 
        case RTTEST_LOCKNOWAIT:
@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
                        return;
 
                td->mutexes[dat] = 1;
-               td->event = atomic_add_return(1, &rttest_event);
+               td->event = atomic_add_return_unchecked(1, &rttest_event);
                return;
 
        default:
index d856e96a3cce440f4c9bb0bc5e7fbf2eee4b1afe..b82225c646402e871397ec46ba1f5e5d64da1b86 100644 (file)
@@ -59,6 +59,7 @@
 #include <linux/jump_label.h>
 #include <linux/pfn.h>
 #include <linux/bsearch.h>
+#include <linux/grsecurity.h>
 #include <uapi/linux/module.h>
 #include "module-internal.h"
 
@@ -155,7 +156,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
 
 /* Bounds of module allocation, for speeding __module_address.
  * Protected by module_mutex. */
-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
 
 int register_module_notifier(struct notifier_block *nb)
 {
@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
                return true;
 
        list_for_each_entry_rcu(mod, &modules, list) {
-               struct symsearch arr[] = {
+               struct symsearch modarr[] = {
                        { mod->syms, mod->syms + mod->num_syms, mod->crcs,
                          NOT_GPL_ONLY, false },
                        { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
 
-               if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
+               if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
                        return true;
        }
        return false;
@@ -487,7 +489,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
        if (!pcpusec->sh_size)
                return 0;
 
-       if (align > PAGE_SIZE) {
+       if (align-1 >= PAGE_SIZE) {
                pr_warn("%s: per-cpu alignment %li > %li\n",
                        mod->name, align, PAGE_SIZE);
                align = PAGE_SIZE;
@@ -1053,7 +1055,7 @@ struct module_attribute module_uevent =
 static ssize_t show_coresize(struct module_attribute *mattr,
                             struct module_kobject *mk, char *buffer)
 {
-       return sprintf(buffer, "%u\n", mk->mod->core_size);
+       return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
 }
 
 static struct module_attribute modinfo_coresize =
@@ -1062,7 +1064,7 @@ static struct module_attribute modinfo_coresize =
 static ssize_t show_initsize(struct module_attribute *mattr,
                             struct module_kobject *mk, char *buffer)
 {
-       return sprintf(buffer, "%u\n", mk->mod->init_size);
+       return sprintf(buffer, "%u\n", mk->mod->init_size_rx +  mk->mod->init_size_rw);
 }
 
 static struct module_attribute modinfo_initsize =
@@ -1154,12 +1156,29 @@ static int check_version(Elf_Shdr *sechdrs,
                goto bad_version;
        }
 
+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
+       /*
+        * avoid potentially printing jibberish on attempted load
+        * of a module randomized with a different seed
+        */
+       pr_warn("no symbol version for %s\n", symname);
+#else
        pr_warn("%s: no symbol version for %s\n", mod->name, symname);
+#endif
        return 0;
 
 bad_version:
+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
+       /*
+        * avoid potentially printing jibberish on attempted load
+        * of a module randomized with a different seed
+        */
+       pr_warn("attempted module disagrees about version of symbol %s\n",
+              symname);
+#else
        pr_warn("%s: disagrees about version of symbol %s\n",
               mod->name, symname);
+#endif
        return 0;
 }
 
@@ -1275,7 +1294,7 @@ resolve_symbol_wait(struct module *mod,
  */
 #ifdef CONFIG_SYSFS
 
-#ifdef CONFIG_KALLSYMS
+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
 static inline bool sect_empty(const Elf_Shdr *sect)
 {
        return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
@@ -1413,7 +1432,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
 {
        unsigned int notes, loaded, i;
        struct module_notes_attrs *notes_attrs;
-       struct bin_attribute *nattr;
+       bin_attribute_no_const *nattr;
 
        /* failed to create section attributes, so can't create notes */
        if (!mod->sect_attrs)
@@ -1525,7 +1544,7 @@ static void del_usage_links(struct module *mod)
 static int module_add_modinfo_attrs(struct module *mod)
 {
        struct module_attribute *attr;
-       struct module_attribute *temp_attr;
+       module_attribute_no_const *temp_attr;
        int error = 0;
        int i;
 
@@ -1735,21 +1754,21 @@ static void set_section_ro_nx(void *base,
 
 static void unset_module_core_ro_nx(struct module *mod)
 {
-       set_page_attributes(mod->module_core + mod->core_text_size,
-               mod->module_core + mod->core_size,
+       set_page_attributes(mod->module_core_rw,
+               mod->module_core_rw + mod->core_size_rw,
                set_memory_x);
-       set_page_attributes(mod->module_core,
-               mod->module_core + mod->core_ro_size,
+       set_page_attributes(mod->module_core_rx,
+               mod->module_core_rx + mod->core_size_rx,
                set_memory_rw);
 }
 
 static void unset_module_init_ro_nx(struct module *mod)
 {
-       set_page_attributes(mod->module_init + mod->init_text_size,
-               mod->module_init + mod->init_size,
+       set_page_attributes(mod->module_init_rw,
+               mod->module_init_rw + mod->init_size_rw,
                set_memory_x);
-       set_page_attributes(mod->module_init,
-               mod->module_init + mod->init_ro_size,
+       set_page_attributes(mod->module_init_rx,
+               mod->module_init_rx + mod->init_size_rx,
                set_memory_rw);
 }
 
@@ -1762,14 +1781,14 @@ void set_all_modules_text_rw(void)
        list_for_each_entry_rcu(mod, &modules, list) {
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
-               if ((mod->module_core) && (mod->core_text_size)) {
-                       set_page_attributes(mod->module_core,
-                                               mod->module_core + mod->core_text_size,
+               if ((mod->module_core_rx) && (mod->core_size_rx)) {
+                       set_page_attributes(mod->module_core_rx,
+                                               mod->module_core_rx + mod->core_size_rx,
                                                set_memory_rw);
                }
-               if ((mod->module_init) && (mod->init_text_size)) {
-                       set_page_attributes(mod->module_init,
-                                               mod->module_init + mod->init_text_size,
+               if ((mod->module_init_rx) && (mod->init_size_rx)) {
+                       set_page_attributes(mod->module_init_rx,
+                                               mod->module_init_rx + mod->init_size_rx,
                                                set_memory_rw);
                }
        }
@@ -1785,14 +1804,14 @@ void set_all_modules_text_ro(void)
        list_for_each_entry_rcu(mod, &modules, list) {
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
-               if ((mod->module_core) && (mod->core_text_size)) {
-                       set_page_attributes(mod->module_core,
-                                               mod->module_core + mod->core_text_size,
+               if ((mod->module_core_rx) && (mod->core_size_rx)) {
+                       set_page_attributes(mod->module_core_rx,
+                                               mod->module_core_rx + mod->core_size_rx,
                                                set_memory_ro);
                }
-               if ((mod->module_init) && (mod->init_text_size)) {
-                       set_page_attributes(mod->module_init,
-                                               mod->module_init + mod->init_text_size,
+               if ((mod->module_init_rx) && (mod->init_size_rx)) {
+                       set_page_attributes(mod->module_init_rx,
+                                               mod->module_init_rx + mod->init_size_rx,
                                                set_memory_ro);
                }
        }
@@ -1801,7 +1820,15 @@ void set_all_modules_text_ro(void)
 #else
 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
 static void unset_module_core_ro_nx(struct module *mod) { }
-static void unset_module_init_ro_nx(struct module *mod) { }
+static void unset_module_init_ro_nx(struct module *mod)
+{
+
+#ifdef CONFIG_PAX_KERNEXEC
+       set_memory_nx((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
+       set_memory_rw((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
+#endif
+
+}
 #endif
 
 void __weak module_memfree(void *module_region)
@@ -1855,16 +1882,19 @@ static void free_module(struct module *mod)
        /* This may be NULL, but that's OK */
        unset_module_init_ro_nx(mod);
        module_arch_freeing_init(mod);
-       module_memfree(mod->module_init);
+       module_memfree(mod->module_init_rw);
+       module_memfree_exec(mod->module_init_rx);
        kfree(mod->args);
        percpu_modfree(mod);
 
        /* Free lock-classes: */
-       lockdep_free_key_range(mod->module_core, mod->core_size);
+       lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
+       lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
 
        /* Finally, free the core (containing the module structure) */
        unset_module_core_ro_nx(mod);
-       module_memfree(mod->module_core);
+       module_memfree_exec(mod->module_core_rx);
+       module_memfree(mod->module_core_rw);
 
 #ifdef CONFIG_MPU
        update_protections(current->mm);
@@ -1933,9 +1963,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
        int ret = 0;
        const struct kernel_symbol *ksym;
 
+#ifdef CONFIG_GRKERNSEC_MODHARDEN
+       int is_fs_load = 0;
+       int register_filesystem_found = 0;
+       char *p;
+
+       p = strstr(mod->args, "grsec_modharden_fs");
+       if (p) {
+               char *endptr = p + sizeof("grsec_modharden_fs") - 1;
+               /* copy \0 as well */
+               memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
+               is_fs_load = 1;
+       }
+#endif
+
        for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
                const char *name = info->strtab + sym[i].st_name;
 
+#ifdef CONFIG_GRKERNSEC_MODHARDEN
+               /* it's a real shame this will never get ripped and copied
+                  upstream! ;(
+               */
+               if (is_fs_load && !strcmp(name, "register_filesystem"))
+                       register_filesystem_found = 1;
+#endif
+
                switch (sym[i].st_shndx) {
                case SHN_COMMON:
                        /* Ignore common symbols */
@@ -1960,7 +2012,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
                        ksym = resolve_symbol_wait(mod, info, name);
                        /* Ok if resolved.  */
                        if (ksym && !IS_ERR(ksym)) {
+                               pax_open_kernel();
                                sym[i].st_value = ksym->value;
+                               pax_close_kernel();
                                break;
                        }
 
@@ -1979,11 +2033,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
                                secbase = (unsigned long)mod_percpu(mod);
                        else
                                secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
+                       pax_open_kernel();
                        sym[i].st_value += secbase;
+                       pax_close_kernel();
                        break;
                }
        }
 
+#ifdef CONFIG_GRKERNSEC_MODHARDEN
+       if (is_fs_load && !register_filesystem_found) {
+               printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
+               ret = -EPERM;
+       }
+#endif
+
        return ret;
 }
 
@@ -2067,22 +2130,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
                            || s->sh_entsize != ~0UL
                            || strstarts(sname, ".init"))
                                continue;
-                       s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
+                       if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
+                               s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
+                       else
+                               s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
                        pr_debug("\t%s\n", sname);
                }
-               switch (m) {
-               case 0: /* executable */
-                       mod->core_size = debug_align(mod->core_size);
-                       mod->core_text_size = mod->core_size;
-                       break;
-               case 1: /* RO: text and ro-data */
-                       mod->core_size = debug_align(mod->core_size);
-                       mod->core_ro_size = mod->core_size;
-                       break;
-               case 3: /* whole core */
-                       mod->core_size = debug_align(mod->core_size);
-                       break;
-               }
        }
 
        pr_debug("Init section allocation order:\n");
@@ -2096,23 +2149,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
                            || s->sh_entsize != ~0UL
                            || !strstarts(sname, ".init"))
                                continue;
-                       s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
-                                        | INIT_OFFSET_MASK);
+                       if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
+                               s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
+                       else
+                               s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
+                       s->sh_entsize |= INIT_OFFSET_MASK;
                        pr_debug("\t%s\n", sname);
                }
-               switch (m) {
-               case 0: /* executable */
-                       mod->init_size = debug_align(mod->init_size);
-                       mod->init_text_size = mod->init_size;
-                       break;
-               case 1: /* RO: text and ro-data */
-                       mod->init_size = debug_align(mod->init_size);
-                       mod->init_ro_size = mod->init_size;
-                       break;
-               case 3: /* whole init */
-                       mod->init_size = debug_align(mod->init_size);
-                       break;
-               }
        }
 }
 
@@ -2285,7 +2328,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
 
        /* Put symbol section at end of init part of module. */
        symsect->sh_flags |= SHF_ALLOC;
-       symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
+       symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
                                         info->index.sym) | INIT_OFFSET_MASK;
        pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
 
@@ -2302,13 +2345,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
        }
 
        /* Append room for core symbols at end of core part. */
-       info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
-       info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
-       mod->core_size += strtab_size;
+       info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
+       info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
+       mod->core_size_rx += strtab_size;
 
        /* Put string table section at end of init part of module. */
        strsect->sh_flags |= SHF_ALLOC;
-       strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
+       strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
                                         info->index.str) | INIT_OFFSET_MASK;
        pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
 }
@@ -2326,12 +2369,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
        /* Make sure we get permanent strtab: don't use info->strtab. */
        mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
 
+       pax_open_kernel();
+
        /* Set types up while we still have access to sections. */
        for (i = 0; i < mod->num_symtab; i++)
                mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
 
-       mod->core_symtab = dst = mod->module_core + info->symoffs;
-       mod->core_strtab = s = mod->module_core + info->stroffs;
+       mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
+       mod->core_strtab = s = mod->module_core_rx + info->stroffs;
        src = mod->symtab;
        for (ndst = i = 0; i < mod->num_symtab; i++) {
                if (i == 0 ||
@@ -2343,6 +2388,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
                }
        }
        mod->core_num_syms = ndst;
+
+       pax_close_kernel();
 }
 #else
 static inline void layout_symtab(struct module *mod, struct load_info *info)
@@ -2376,17 +2423,33 @@ void * __weak module_alloc(unsigned long size)
        return vmalloc_exec(size);
 }
 
-static void *module_alloc_update_bounds(unsigned long size)
+static void *module_alloc_update_bounds_rw(unsigned long size)
 {
        void *ret = module_alloc(size);
 
        if (ret) {
                mutex_lock(&module_mutex);
                /* Update module bounds. */
-               if ((unsigned long)ret < module_addr_min)
-                       module_addr_min = (unsigned long)ret;
-               if ((unsigned long)ret + size > module_addr_max)
-                       module_addr_max = (unsigned long)ret + size;
+               if ((unsigned long)ret < module_addr_min_rw)
+                       module_addr_min_rw = (unsigned long)ret;
+               if ((unsigned long)ret + size > module_addr_max_rw)
+                       module_addr_max_rw = (unsigned long)ret + size;
+               mutex_unlock(&module_mutex);
+       }
+       return ret;
+}
+
+static void *module_alloc_update_bounds_rx(unsigned long size)
+{
+       void *ret = module_alloc_exec(size);
+
+       if (ret) {
+               mutex_lock(&module_mutex);
+               /* Update module bounds. */
+               if ((unsigned long)ret < module_addr_min_rx)
+                       module_addr_min_rx = (unsigned long)ret;
+               if ((unsigned long)ret + size > module_addr_max_rx)
+                       module_addr_max_rx = (unsigned long)ret + size;
                mutex_unlock(&module_mutex);
        }
        return ret;
@@ -2640,7 +2703,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
        mod = (void *)info->sechdrs[info->index.mod].sh_addr;
 
        if (info->index.sym == 0) {
+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
+               /*
+                * avoid potentially printing gibberish on attempted load
+                * of a module randomized with a different seed
+                */
+               pr_warn("module has no symbols (stripped?)\n");
+#else
                pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
+#endif
                return ERR_PTR(-ENOEXEC);
        }
 
@@ -2656,8 +2727,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
 {
        const char *modmagic = get_modinfo(info, "vermagic");
+       const char *license = get_modinfo(info, "license");
        int err;
 
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+       if (!license || !license_is_gpl_compatible(license))
+               return -ENOEXEC;
+#endif
+
        if (flags & MODULE_INIT_IGNORE_VERMAGIC)
                modmagic = NULL;
 
@@ -2682,7 +2759,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
        }
 
        /* Set up license info based on the info section */
-       set_license(mod, get_modinfo(info, "license"));
+       set_license(mod, license);
 
        return 0;
 }
@@ -2776,7 +2853,7 @@ static int move_module(struct module *mod, struct load_info *info)
        void *ptr;
 
        /* Do the allocs. */
-       ptr = module_alloc_update_bounds(mod->core_size);
+       ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
        /*
         * The pointer to this block is stored in the module structure
         * which is inside the block. Just mark it as not being a
@@ -2786,11 +2863,11 @@ static int move_module(struct module *mod, struct load_info *info)
        if (!ptr)
                return -ENOMEM;
 
-       memset(ptr, 0, mod->core_size);
-       mod->module_core = ptr;
+       memset(ptr, 0, mod->core_size_rw);
+       mod->module_core_rw = ptr;
 
-       if (mod->init_size) {
-               ptr = module_alloc_update_bounds(mod->init_size);
+       if (mod->init_size_rw) {
+               ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
                /*
                 * The pointer to this block is stored in the module structure
                 * which is inside the block. This block doesn't need to be
@@ -2799,13 +2876,45 @@ static int move_module(struct module *mod, struct load_info *info)
                 */
                kmemleak_ignore(ptr);
                if (!ptr) {
-                       module_memfree(mod->module_core);
+                       module_memfree(mod->module_core_rw);
+                       return -ENOMEM;
+               }
+               memset(ptr, 0, mod->init_size_rw);
+               mod->module_init_rw = ptr;
+       } else
+               mod->module_init_rw = NULL;
+
+       ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
+       kmemleak_not_leak(ptr);
+       if (!ptr) {
+               if (mod->module_init_rw)
+                       module_memfree(mod->module_init_rw);
+               module_memfree(mod->module_core_rw);
+               return -ENOMEM;
+       }
+
+       pax_open_kernel();
+       memset(ptr, 0, mod->core_size_rx);
+       pax_close_kernel();
+       mod->module_core_rx = ptr;
+
+       if (mod->init_size_rx) {
+               ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
+               kmemleak_ignore(ptr);
+               if (!ptr && mod->init_size_rx) {
+                       module_memfree_exec(mod->module_core_rx);
+                       if (mod->module_init_rw)
+                               module_memfree(mod->module_init_rw);
+                       module_memfree(mod->module_core_rw);
                        return -ENOMEM;
                }
-               memset(ptr, 0, mod->init_size);
-               mod->module_init = ptr;
+
+               pax_open_kernel();
+               memset(ptr, 0, mod->init_size_rx);
+               pax_close_kernel();
+               mod->module_init_rx = ptr;
        } else
-               mod->module_init = NULL;
+               mod->module_init_rx = NULL;
 
        /* Transfer each section which specifies SHF_ALLOC */
        pr_debug("final section addresses:\n");
@@ -2816,16 +2925,45 @@ static int move_module(struct module *mod, struct load_info *info)
                if (!(shdr->sh_flags & SHF_ALLOC))
                        continue;
 
-               if (shdr->sh_entsize & INIT_OFFSET_MASK)
-                       dest = mod->module_init
-                               + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
-               else
-                       dest = mod->module_core + shdr->sh_entsize;
+               if (shdr->sh_entsize & INIT_OFFSET_MASK) {
+                       if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
+                               dest = mod->module_init_rw
+                                       + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
+                       else
+                               dest = mod->module_init_rx
+                                       + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
+               } else {
+                       if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
+                               dest = mod->module_core_rw + shdr->sh_entsize;
+                       else
+                               dest = mod->module_core_rx + shdr->sh_entsize;
+               }
+
+               if (shdr->sh_type != SHT_NOBITS) {
+
+#ifdef CONFIG_PAX_KERNEXEC
+#ifdef CONFIG_X86_64
+                       if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
+                               set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
+#endif
+                       if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
+                               pax_open_kernel();
+                               memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
+                               pax_close_kernel();
+                       } else
+#endif
 
-               if (shdr->sh_type != SHT_NOBITS)
                        memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
+               }
                /* Update sh_addr to point to copy in image. */
-               shdr->sh_addr = (unsigned long)dest;
+
+#ifdef CONFIG_PAX_KERNEXEC
+               if (shdr->sh_flags & SHF_EXECINSTR)
+                       shdr->sh_addr = ktva_ktla((unsigned long)dest);
+               else
+#endif
+
+                       shdr->sh_addr = (unsigned long)dest;
                pr_debug("\t0x%lx %s\n",
                         (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
        }
@@ -2882,12 +3020,12 @@ static void flush_module_icache(const struct module *mod)
         * Do it before processing of module parameters, so the module
         * can provide parameter accessor functions of its own.
         */
-       if (mod->module_init)
-               flush_icache_range((unsigned long)mod->module_init,
-                                  (unsigned long)mod->module_init
-                                  + mod->init_size);
-       flush_icache_range((unsigned long)mod->module_core,
-                          (unsigned long)mod->module_core + mod->core_size);
+       if (mod->module_init_rx)
+               flush_icache_range((unsigned long)mod->module_init_rx,
+                                  (unsigned long)mod->module_init_rx
+                                  + mod->init_size_rx);
+       flush_icache_range((unsigned long)mod->module_core_rx,
+                          (unsigned long)mod->module_core_rx + mod->core_size_rx);
 
        set_fs(old_fs);
 }
@@ -2945,8 +3083,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
 {
        percpu_modfree(mod);
        module_arch_freeing_init(mod);
-       module_memfree(mod->module_init);
-       module_memfree(mod->module_core);
+       module_memfree_exec(mod->module_init_rx);
+       module_memfree_exec(mod->module_core_rx);
+       module_memfree(mod->module_init_rw);
+       module_memfree(mod->module_core_rw);
 }
 
 int __weak module_finalize(const Elf_Ehdr *hdr,
@@ -2959,7 +3099,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
 static int post_relocation(struct module *mod, const struct load_info *info)
 {
        /* Sort exception table now relocations are done. */
+       pax_open_kernel();
        sort_extable(mod->extable, mod->extable + mod->num_exentries);
+       pax_close_kernel();
 
        /* Copy relocated percpu area over. */
        percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
@@ -3001,13 +3143,15 @@ static void do_mod_ctors(struct module *mod)
 /* For freeing module_init on success, in case kallsyms traversing */
 struct mod_initfree {
        struct rcu_head rcu;
-       void *module_init;
+       void *module_init_rw;
+       void *module_init_rx;
 };
 
 static void do_free_init(struct rcu_head *head)
 {
        struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
-       module_memfree(m->module_init);
+       module_memfree(m->module_init_rw);
+       module_memfree_exec(m->module_init_rx);
        kfree(m);
 }
 
@@ -3022,7 +3166,8 @@ static int do_init_module(struct module *mod)
                ret = -ENOMEM;
                goto fail;
        }
-       freeinit->module_init = mod->module_init;
+       freeinit->module_init_rw = mod->module_init_rw;
+       freeinit->module_init_rx = mod->module_init_rx;
 
        /*
         * We want to find out whether @mod uses async during init.  Clear
@@ -3081,10 +3226,10 @@ static int do_init_module(struct module *mod)
 #endif
        unset_module_init_ro_nx(mod);
        module_arch_freeing_init(mod);
-       mod->module_init = NULL;
-       mod->init_size = 0;
-       mod->init_ro_size = 0;
-       mod->init_text_size = 0;
+       mod->module_init_rw = NULL;
+       mod->module_init_rx = NULL;
+       mod->init_size_rw = 0;
+       mod->init_size_rx = 0;
        /*
         * We want to free module_init, but be aware that kallsyms may be
         * walking this with preempt disabled.  In all the failure paths,
@@ -3198,16 +3343,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
        module_bug_finalize(info->hdr, info->sechdrs, mod);
 
        /* Set RO and NX regions for core */
-       set_section_ro_nx(mod->module_core,
-                               mod->core_text_size,
-                               mod->core_ro_size,
-                               mod->core_size);
+       set_section_ro_nx(mod->module_core_rx,
+                               mod->core_size_rx,
+                               mod->core_size_rx,
+                               mod->core_size_rx);
 
        /* Set RO and NX regions for init */
-       set_section_ro_nx(mod->module_init,
-                               mod->init_text_size,
-                               mod->init_ro_size,
-                               mod->init_size);
+       set_section_ro_nx(mod->module_init_rx,
+                               mod->init_size_rx,
+                               mod->init_size_rx,
+                               mod->init_size_rx);
 
        /* Mark state as coming so strong_try_module_get() ignores us,
         * but kallsyms etc. can see us. */
@@ -3291,9 +3436,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
        if (err)
                goto free_unload;
 
+       /* Now copy in args */
+       mod->args = strndup_user(uargs, ~0UL >> 1);
+       if (IS_ERR(mod->args)) {
+               err = PTR_ERR(mod->args);
+               goto free_unload;
+       }
+
        /* Set up MODINFO_ATTR fields */
        setup_modinfo(mod, info);
 
+#ifdef CONFIG_GRKERNSEC_MODHARDEN
+       {
+               char *p, *p2;
+
+               if (strstr(mod->args, "grsec_modharden_netdev")) {
+                       printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
+                       err = -EPERM;
+                       goto free_modinfo;
+               } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
+                       p += sizeof("grsec_modharden_normal") - 1;
+                       p2 = strstr(p, "_");
+                       if (p2) {
+                               *p2 = '\0';
+                               printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
+                               *p2 = '_';
+                       }
+                       err = -EPERM;
+                       goto free_modinfo;
+               }
+       }
+#endif
+
        /* Fix up syms, so that st_value is a pointer to location. */
        err = simplify_symbols(mod, info);
        if (err < 0)
@@ -3309,13 +3483,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
 
        flush_module_icache(mod);
 
-       /* Now copy in args */
-       mod->args = strndup_user(uargs, ~0UL >> 1);
-       if (IS_ERR(mod->args)) {
-               err = PTR_ERR(mod->args);
-               goto free_arch_cleanup;
-       }
-
        dynamic_debug_setup(info->debug, info->num_debug);
 
        /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
@@ -3363,11 +3530,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
  ddebug_cleanup:
        dynamic_debug_remove(info->debug);
        synchronize_sched();
-       kfree(mod->args);
- free_arch_cleanup:
        module_arch_cleanup(mod);
  free_modinfo:
        free_modinfo(mod);
+       kfree(mod->args);
  free_unload:
        module_unload_free(mod);
  unlink_mod:
@@ -3454,10 +3620,16 @@ static const char *get_ksymbol(struct module *mod,
        unsigned long nextval;
 
        /* At worse, next value is at end of module */
-       if (within_module_init(addr, mod))
-               nextval = (unsigned long)mod->module_init+mod->init_text_size;
+       if (within_module_init_rx(addr, mod))
+               nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
+       else if (within_module_init_rw(addr, mod))
+               nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
+       else if (within_module_core_rx(addr, mod))
+               nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
+       else if (within_module_core_rw(addr, mod))
+               nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
        else
-               nextval = (unsigned long)mod->module_core+mod->core_text_size;
+               return NULL;
 
        /* Scan for closest preceding symbol, and next symbol. (ELF
           starts real symbols at 1). */
@@ -3705,7 +3877,7 @@ static int m_show(struct seq_file *m, void *p)
                return 0;
 
        seq_printf(m, "%s %u",
-                  mod->name, mod->init_size + mod->core_size);
+                  mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
        print_unload_info(m, mod);
 
        /* Informative for users. */
@@ -3714,7 +3886,7 @@ static int m_show(struct seq_file *m, void *p)
                   mod->state == MODULE_STATE_COMING ? "Loading" :
                   "Live");
        /* Used by oprofile and other similar tools. */
-       seq_printf(m, " 0x%pK", mod->module_core);
+       seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
 
        /* Taints info */
        if (mod->taints)
@@ -3750,7 +3922,17 @@ static const struct file_operations proc_modules_operations = {
 
 static int __init proc_modules_init(void)
 {
+#ifndef CONFIG_GRKERNSEC_HIDESYM
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+       proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+       proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
+#else
        proc_create("modules", 0, NULL, &proc_modules_operations);
+#endif
+#else
+       proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
+#endif
        return 0;
 }
 module_init(proc_modules_init);
@@ -3811,7 +3993,8 @@ struct module *__module_address(unsigned long addr)
 {
        struct module *mod;
 
-       if (addr < module_addr_min || addr > module_addr_max)
+       if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
+           (addr < module_addr_min_rw || addr > module_addr_max_rw))
                return NULL;
 
        list_for_each_entry_rcu(mod, &modules, list) {
@@ -3852,11 +4035,20 @@ bool is_module_text_address(unsigned long addr)
  */
 struct module *__module_text_address(unsigned long addr)
 {
-       struct module *mod = __module_address(addr);
+       struct module *mod;
+
+#ifdef CONFIG_X86_32
+       addr = ktla_ktva(addr);
+#endif
+
+       if (addr < module_addr_min_rx || addr > module_addr_max_rx)
+               return NULL;
+
+       mod = __module_address(addr);
+
        if (mod) {
                /* Make sure it's within the text section. */
-               if (!within(addr, mod->module_init, mod->init_text_size)
-                   && !within(addr, mod->module_core, mod->core_text_size))
+               if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
                        mod = NULL;
        }
        return mod;
index 4803da6eab62f182354707c10f48be35a8b54fb5..1c5eea68ca0505a77ac3178c294fc0d2cf0d510a 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/rcupdate.h>
 #include <linux/vmalloc.h>
 #include <linux/reboot.h>
+#include <linux/mm.h>
 
 /*
  *     Notifier list for kernel code which wants to be called
@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
        while ((*nl) != NULL) {
                if (n->priority > (*nl)->priority)
                        break;
-               nl = &((*nl)->next);
+               nl = (struct notifier_block **)&((*nl)->next);
        }
-       n->next = *nl;
+       pax_open_kernel();
+       *(const void **)&n->next = *nl;
        rcu_assign_pointer(*nl, n);
+       pax_close_kernel();
        return 0;
 }
 
@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
                        return 0;
                if (n->priority > (*nl)->priority)
                        break;
-               nl = &((*nl)->next);
+               nl = (struct notifier_block **)&((*nl)->next);
        }
-       n->next = *nl;
+       pax_open_kernel();
+       *(const void **)&n->next = *nl;
        rcu_assign_pointer(*nl, n);
+       pax_close_kernel();
        return 0;
 }
 
@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
 {
        while ((*nl) != NULL) {
                if ((*nl) == n) {
+                       pax_open_kernel();
                        rcu_assign_pointer(*nl, n->next);
+                       pax_close_kernel();
                        return 0;
                }
-               nl = &((*nl)->next);
+               nl = (struct notifier_block **)&((*nl)->next);
        }
        return -ENOENT;
 }
index 161402f0b517c9f483f166585bd61d61bac2e3f3..598814c252338a9e358eb82068c9ba9a9012b355 100644 (file)
@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
         * seq_nr mod. number of cpus in use.
         */
 
-       seq_nr = atomic_inc_return(&pd->seq_nr);
+       seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
        cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
 
        return padata_index_to_cpu(pd, cpu_index);
@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
        padata_init_pqueues(pd);
        padata_init_squeues(pd);
        setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
-       atomic_set(&pd->seq_nr, -1);
+       atomic_set_unchecked(&pd->seq_nr, -1);
        atomic_set(&pd->reorder_objects, 0);
        atomic_set(&pd->refcnt, 0);
        pd->pinst = pinst;
index 4d8d6f906decede78600ef0767770dd713f8b225..97b9b9cf1a58189e83007c3a731db8b3d554c664 100644 (file)
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(panic_blink);
 /*
  * Stop ourself in panic -- architecture code may override this
  */
-void __weak panic_smp_self_stop(void)
+void __weak __noreturn panic_smp_self_stop(void)
 {
        while (1)
                cpu_relax();
@@ -423,7 +423,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
        disable_trace_on_warning();
 
        pr_warn("------------[ cut here ]------------\n");
-       pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
+       pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
                raw_smp_processor_id(), current->pid, file, line, caller);
 
        if (args)
@@ -488,7 +488,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
  */
 __visible void __stack_chk_fail(void)
 {
-       panic("stack-protector: Kernel stack is corrupted in: %p\n",
+       dump_stack();
+       panic("stack-protector: Kernel stack is corrupted in: %pA\n",
                __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__stack_chk_fail);
index cd36a5e0d173062dfbdd557fa7ad5234350af2ed..11f185d3b72c4658df4a09ce537e1eebbc59caaa 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/rculist.h>
 #include <linux/bootmem.h>
 #include <linux/hash.h>
+#include <linux/security.h>
 #include <linux/pid_namespace.h>
 #include <linux/init_task.h>
 #include <linux/syscalls.h>
@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
 
 int pid_max = PID_MAX_DEFAULT;
 
-#define RESERVED_PIDS          300
+#define RESERVED_PIDS          500
 
 int pid_max_min = RESERVED_PIDS + 1;
 int pid_max_max = PID_MAX_LIMIT;
@@ -450,10 +451,18 @@ EXPORT_SYMBOL(pid_task);
  */
 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
 {
+       struct task_struct *task;
+
        rcu_lockdep_assert(rcu_read_lock_held(),
                           "find_task_by_pid_ns() needs rcu_read_lock()"
                           " protection");
-       return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
+
+       task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
+
+       if (gr_pid_is_chrooted(task))
+               return NULL;
+
+       return task;
 }
 
 struct task_struct *find_task_by_vpid(pid_t vnr)
@@ -461,6 +470,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
        return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
 }
 
+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
+{
+       rcu_lockdep_assert(rcu_read_lock_held(),
+                          "find_task_by_pid_ns() needs rcu_read_lock()"
+                          " protection");
+       return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
+}
+
 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
 {
        struct pid *pid;
index a65ba137fd15b42ba840ca1e46243ef44fa6839c..f600dbb3945af0cc268351c57115b1295df448bb 100644 (file)
@@ -274,7 +274,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        struct pid_namespace *pid_ns = task_active_pid_ns(current);
-       struct ctl_table tmp = *table;
+       ctl_table_no_const tmp = *table;
 
        if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
                return -EPERM;
index 48b28d387c7f77b2e3d36bc751b3e221c7634d67..c63ccaf0db7f41411a59c28c1d4ebc57c6ef8993 100644 (file)
@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
 config HIBERNATION
        bool "Hibernation (aka 'suspend to disk')"
        depends on SWAP && ARCH_HIBERNATION_POSSIBLE
+       depends on !GRKERNSEC_KMEM
+       depends on !PAX_MEMORY_SANITIZE
        select HIBERNATE_CALLBACKS
        select LZO_COMPRESS
        select LZO_DECOMPRESS
index 5a6ec8678b9a0916922882589b9d32163aaefb3f..3a8c884acad5304841a627c5a196f3c81d2271ee 100644 (file)
@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
        unsigned int elapsed_msecs;
        bool wakeup = false;
        int sleep_usecs = USEC_PER_MSEC;
+       bool timedout = false;
 
        do_gettimeofday(&start);
 
@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
 
        while (true) {
                todo = 0;
+               if (time_after(jiffies, end_time))
+                       timedout = true;
                read_lock(&tasklist_lock);
                for_each_process_thread(g, p) {
                        if (p == current || !freeze_task(p))
                                continue;
 
-                       if (!freezer_should_skip(p))
+                       if (!freezer_should_skip(p)) {
                                todo++;
+                               if (timedout) {
+                                       printk(KERN_ERR "Task refusing to freeze:\n");
+                                       sched_show_task(p);
+                               }
+                       }
                }
                read_unlock(&tasklist_lock);
 
@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
                        todo += wq_busy;
                }
 
-               if (!todo || time_after(jiffies, end_time))
+               if (!todo || timedout)
                        break;
 
                if (pm_wakeup_pending()) {
index cbd69d842341175e4c6db7c68a4b6e3689f5c928..2ca4a8b5fe57960c584d74e7dabf76b22bc71613 100644 (file)
@@ -3,7 +3,7 @@
 
 struct console_cmdline
 {
-       char    name[8];                        /* Name of the driver       */
+       char    name[16];                       /* Name of the driver       */
        int     index;                          /* Minor dev. to use        */
        char    *options;                       /* Options for the driver   */
 #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
index fae29e3ffbf027bfee7fbc6346b53bf76f8fce11..7df1786be9652b75d29a2795889cb14a80bd255a 100644 (file)
@@ -486,6 +486,11 @@ int check_syslog_permissions(int type, bool from_file)
        if (from_file && type != SYSLOG_ACTION_OPEN)
                return 0;
 
+#ifdef CONFIG_GRKERNSEC_DMESG
+       if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
+               return -EPERM;
+#endif
+
        if (syslog_action_restricted(type)) {
                if (capable(CAP_SYSLOG))
                        return 0;
@@ -2464,6 +2469,7 @@ void register_console(struct console *newcon)
        for (i = 0, c = console_cmdline;
             i < MAX_CMDLINECONSOLES && c->name[0];
             i++, c++) {
+               BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
                if (strcmp(c->name, newcon->name) != 0)
                        continue;
                if (newcon->index >= 0 &&
index 54bf5ba264205e11a4cbe6d46ee6330193c0fa8f..df6e0a216b1004b965138f8b768746c587c9207b 100644 (file)
@@ -37,7 +37,7 @@ struct profile_hit {
 #define NR_PROFILE_HIT         (PAGE_SIZE/sizeof(struct profile_hit))
 #define NR_PROFILE_GRP         (NR_PROFILE_HIT/PROFILE_GRPSZ)
 
-static atomic_t *prof_buffer;
+static atomic_unchecked_t *prof_buffer;
 static unsigned long prof_len, prof_shift;
 
 int prof_on __read_mostly;
@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
                                        hits[i].pc = 0;
                                continue;
                        }
-                       atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
+                       atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
                        hits[i].hits = hits[i].pc = 0;
                }
        }
@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
         * Add the current hit(s) and flush the write-queue out
         * to the global buffer:
         */
-       atomic_add(nr_hits, &prof_buffer[pc]);
+       atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
        for (i = 0; i < NR_PROFILE_HIT; ++i) {
-               atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
+               atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
                hits[i].pc = hits[i].hits = 0;
        }
 out:
@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
 {
        unsigned long pc;
        pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
-       atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
+       atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
 }
 #endif /* !CONFIG_SMP */
 
@@ -490,7 +490,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
                        return -EFAULT;
                buf++; p++; count--; read++;
        }
-       pnt = (char *)prof_buffer + p - sizeof(atomic_t);
+       pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
        if (copy_to_user(buf, (void *)pnt, count))
                return -EFAULT;
        read += count;
@@ -521,7 +521,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
        }
 #endif
        profile_discard_flip_buffers();
-       memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
+       memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
        return count;
 }
 
index 1eb9d90c3af926b3217a44b8355a10733a73bd7e..d40d21e75d4f7fa2f7ecbe64c8beaff5bfc62c9a 100644 (file)
@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
        if (seize)
                flags |= PT_SEIZED;
        rcu_read_lock();
-       if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
+       if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
                flags |= PT_PTRACE_CAP;
        rcu_read_unlock();
        task->ptrace = flags;
@@ -515,7 +515,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
                                break;
                        return -EIO;
                }
-               if (copy_to_user(dst, buf, retval))
+               if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
                        return -EFAULT;
                copied += retval;
                src += retval;
@@ -783,7 +783,7 @@ int ptrace_request(struct task_struct *child, long request,
        bool seized = child->ptrace & PT_SEIZED;
        int ret = -EIO;
        siginfo_t siginfo, *si;
-       void __user *datavp = (void __user *) data;
+       void __user *datavp = (__force void __user *) data;
        unsigned long __user *datalp = datavp;
        unsigned long flags;
 
@@ -1029,14 +1029,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
                goto out;
        }
 
+       if (gr_handle_ptrace(child, request)) {
+               ret = -EPERM;
+               goto out_put_task_struct;
+       }
+
        if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
                ret = ptrace_attach(child, request, addr, data);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
-               if (!ret)
+               if (!ret) {
                        arch_ptrace_attach(child);
+                       gr_audit_ptrace(child);
+               }
                goto out_put_task_struct;
        }
 
@@ -1064,7 +1071,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
        copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
        if (copied != sizeof(tmp))
                return -EIO;
-       return put_user(tmp, (unsigned long __user *)data);
+       return put_user(tmp, (__force unsigned long __user *)data);
 }
 
 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
@@ -1158,7 +1165,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
 }
 
 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
-                      compat_long_t, addr, compat_long_t, data)
+                      compat_ulong_t, addr, compat_ulong_t, data)
 {
        struct task_struct *child;
        long ret;
@@ -1174,14 +1181,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
                goto out;
        }
 
+       if (gr_handle_ptrace(child, request)) {
+               ret = -EPERM;
+               goto out_put_task_struct;
+       }
+
        if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
                ret = ptrace_attach(child, request, addr, data);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
-               if (!ret)
+               if (!ret) {
                        arch_ptrace_attach(child);
+                       gr_audit_ptrace(child);
+               }
                goto out_put_task_struct;
        }
 
index 4d559baf06e0c7171a7a86acbeed9926fc8c9d7d..053da379def456df330d33805ff64b2bdf5129fc 100644 (file)
@@ -134,12 +134,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
                      rcu_torture_count) = { 0 };
 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
                      rcu_torture_batch) = { 0 };
-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
-static atomic_t n_rcu_torture_alloc;
-static atomic_t n_rcu_torture_alloc_fail;
-static atomic_t n_rcu_torture_free;
-static atomic_t n_rcu_torture_mberror;
-static atomic_t n_rcu_torture_error;
+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
+static atomic_unchecked_t n_rcu_torture_alloc;
+static atomic_unchecked_t n_rcu_torture_alloc_fail;
+static atomic_unchecked_t n_rcu_torture_free;
+static atomic_unchecked_t n_rcu_torture_mberror;
+static atomic_unchecked_t n_rcu_torture_error;
 static long n_rcu_torture_barrier_error;
 static long n_rcu_torture_boost_ktrerror;
 static long n_rcu_torture_boost_rterror;
@@ -148,7 +148,7 @@ static long n_rcu_torture_boosts;
 static long n_rcu_torture_timers;
 static long n_barrier_attempts;
 static long n_barrier_successes;
-static atomic_long_t n_cbfloods;
+static atomic_long_unchecked_t n_cbfloods;
 static struct list_head rcu_torture_removed;
 
 static int rcu_torture_writer_state;
@@ -211,11 +211,11 @@ rcu_torture_alloc(void)
 
        spin_lock_bh(&rcu_torture_lock);
        if (list_empty(&rcu_torture_freelist)) {
-               atomic_inc(&n_rcu_torture_alloc_fail);
+               atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
                spin_unlock_bh(&rcu_torture_lock);
                return NULL;
        }
-       atomic_inc(&n_rcu_torture_alloc);
+       atomic_inc_unchecked(&n_rcu_torture_alloc);
        p = rcu_torture_freelist.next;
        list_del_init(p);
        spin_unlock_bh(&rcu_torture_lock);
@@ -228,7 +228,7 @@ rcu_torture_alloc(void)
 static void
 rcu_torture_free(struct rcu_torture *p)
 {
-       atomic_inc(&n_rcu_torture_free);
+       atomic_inc_unchecked(&n_rcu_torture_free);
        spin_lock_bh(&rcu_torture_lock);
        list_add_tail(&p->rtort_free, &rcu_torture_freelist);
        spin_unlock_bh(&rcu_torture_lock);
@@ -312,7 +312,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
        i = rp->rtort_pipe_count;
        if (i > RCU_TORTURE_PIPE_LEN)
                i = RCU_TORTURE_PIPE_LEN;
-       atomic_inc(&rcu_torture_wcount[i]);
+       atomic_inc_unchecked(&rcu_torture_wcount[i]);
        if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
                rp->rtort_mbtest = 0;
                return true;
@@ -799,7 +799,7 @@ rcu_torture_cbflood(void *arg)
        VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
        do {
                schedule_timeout_interruptible(cbflood_inter_holdoff);
-               atomic_long_inc(&n_cbfloods);
+               atomic_long_inc_unchecked(&n_cbfloods);
                WARN_ON(signal_pending(current));
                for (i = 0; i < cbflood_n_burst; i++) {
                        for (j = 0; j < cbflood_n_per_burst; j++) {
@@ -918,7 +918,7 @@ rcu_torture_writer(void *arg)
                        i = old_rp->rtort_pipe_count;
                        if (i > RCU_TORTURE_PIPE_LEN)
                                i = RCU_TORTURE_PIPE_LEN;
-                       atomic_inc(&rcu_torture_wcount[i]);
+                       atomic_inc_unchecked(&rcu_torture_wcount[i]);
                        old_rp->rtort_pipe_count++;
                        switch (synctype[torture_random(&rand) % nsynctypes]) {
                        case RTWS_DEF_FREE:
@@ -1036,7 +1036,7 @@ static void rcu_torture_timer(unsigned long unused)
                return;
        }
        if (p->rtort_mbtest == 0)
-               atomic_inc(&n_rcu_torture_mberror);
+               atomic_inc_unchecked(&n_rcu_torture_mberror);
        spin_lock(&rand_lock);
        cur_ops->read_delay(&rand);
        n_rcu_torture_timers++;
@@ -1106,7 +1106,7 @@ rcu_torture_reader(void *arg)
                        continue;
                }
                if (p->rtort_mbtest == 0)
-                       atomic_inc(&n_rcu_torture_mberror);
+                       atomic_inc_unchecked(&n_rcu_torture_mberror);
                cur_ops->read_delay(&rand);
                preempt_disable();
                pipe_count = p->rtort_pipe_count;
@@ -1173,11 +1173,11 @@ rcu_torture_stats_print(void)
                rcu_torture_current,
                rcu_torture_current_version,
                list_empty(&rcu_torture_freelist),
-               atomic_read(&n_rcu_torture_alloc),
-               atomic_read(&n_rcu_torture_alloc_fail),
-               atomic_read(&n_rcu_torture_free));
+               atomic_read_unchecked(&n_rcu_torture_alloc),
+               atomic_read_unchecked(&n_rcu_torture_alloc_fail),
+               atomic_read_unchecked(&n_rcu_torture_free));
        pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
-               atomic_read(&n_rcu_torture_mberror),
+               atomic_read_unchecked(&n_rcu_torture_mberror),
                n_rcu_torture_boost_ktrerror,
                n_rcu_torture_boost_rterror);
        pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
@@ -1189,17 +1189,17 @@ rcu_torture_stats_print(void)
                n_barrier_successes,
                n_barrier_attempts,
                n_rcu_torture_barrier_error);
-       pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
+       pr_cont("cbflood: %ld\n", atomic_long_read_unchecked(&n_cbfloods));
 
        pr_alert("%s%s ", torture_type, TORTURE_FLAG);
-       if (atomic_read(&n_rcu_torture_mberror) != 0 ||
+       if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
            n_rcu_torture_barrier_error != 0 ||
            n_rcu_torture_boost_ktrerror != 0 ||
            n_rcu_torture_boost_rterror != 0 ||
            n_rcu_torture_boost_failure != 0 ||
            i > 1) {
                pr_cont("%s", "!!! ");
-               atomic_inc(&n_rcu_torture_error);
+               atomic_inc_unchecked(&n_rcu_torture_error);
                WARN_ON_ONCE(1);
        }
        pr_cont("Reader Pipe: ");
@@ -1216,7 +1216,7 @@ rcu_torture_stats_print(void)
        pr_alert("%s%s ", torture_type, TORTURE_FLAG);
        pr_cont("Free-Block Circulation: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
-               pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
+               pr_cont(" %d", atomic_read_unchecked(&rcu_torture_wcount[i]));
        }
        pr_cont("\n");
 
@@ -1560,7 +1560,7 @@ rcu_torture_cleanup(void)
 
        rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
 
-       if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
+       if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
                rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
        else if (torture_onoff_failures())
                rcu_torture_print_module_parms(cur_ops,
@@ -1685,18 +1685,18 @@ rcu_torture_init(void)
 
        rcu_torture_current = NULL;
        rcu_torture_current_version = 0;
-       atomic_set(&n_rcu_torture_alloc, 0);
-       atomic_set(&n_rcu_torture_alloc_fail, 0);
-       atomic_set(&n_rcu_torture_free, 0);
-       atomic_set(&n_rcu_torture_mberror, 0);
-       atomic_set(&n_rcu_torture_error, 0);
+       atomic_set_unchecked(&n_rcu_torture_alloc, 0);
+       atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
+       atomic_set_unchecked(&n_rcu_torture_free, 0);
+       atomic_set_unchecked(&n_rcu_torture_mberror, 0);
+       atomic_set_unchecked(&n_rcu_torture_error, 0);
        n_rcu_torture_barrier_error = 0;
        n_rcu_torture_boost_ktrerror = 0;
        n_rcu_torture_boost_rterror = 0;
        n_rcu_torture_boost_failure = 0;
        n_rcu_torture_boosts = 0;
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
-               atomic_set(&rcu_torture_wcount[i], 0);
+               atomic_set_unchecked(&rcu_torture_wcount[i], 0);
        for_each_possible_cpu(cpu) {
                for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                        per_cpu(rcu_torture_count, cpu)[i] = 0;
index 0db5649f88179958d7ab26f982036a70c388a817..e6ec167b4c7d204be6b649d6b73e45e46e195b48 100644 (file)
@@ -42,7 +42,7 @@
 /* Forward declarations for tiny_plugin.h. */
 struct rcu_ctrlblk;
 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
-static void rcu_process_callbacks(struct softirq_action *unused);
+static void rcu_process_callbacks(void);
 static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp);
@@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
                                      false));
 }
 
-static void rcu_process_callbacks(struct softirq_action *unused)
+static __latent_entropy void rcu_process_callbacks(void)
 {
        __rcu_process_callbacks(&rcu_sched_ctrlblk);
        __rcu_process_callbacks(&rcu_bh_ctrlblk);
index 858c5656912724c64e1ea20d5497edc9e8946b7d..7efd9157421aa52cc547d36e764ec94f69909e65 100644 (file)
@@ -152,17 +152,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
                dump_stack();
        }
        if (*rcp->curtail && ULONG_CMP_GE(j, js))
-               ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
+               ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
                        3 * rcu_jiffies_till_stall_check() + 3;
        else if (ULONG_CMP_GE(j, js))
-               ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
+               ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
 }
 
 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
 {
        rcp->ticks_this_gp = 0;
        rcp->gp_start = jiffies;
-       ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
+       ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
 }
 
 static void check_cpu_stalls(void)
index 7680fc2750361a91fc8f749a006555899aa92677..b8e91615018ebb950939b5f754e4d1f7de1b7d37 100644 (file)
@@ -261,7 +261,7 @@ static void rcu_momentary_dyntick_idle(void)
                 */
                rdtp = this_cpu_ptr(&rcu_dynticks);
                smp_mb__before_atomic(); /* Earlier stuff before QS. */
-               atomic_add(2, &rdtp->dynticks);  /* QS. */
+               atomic_add_unchecked(2, &rdtp->dynticks);  /* QS. */
                smp_mb__after_atomic(); /* Later stuff after QS. */
                break;
        }
@@ -521,9 +521,9 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
        rcu_prepare_for_idle();
        /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
        smp_mb__before_atomic();  /* See above. */
-       atomic_inc(&rdtp->dynticks);
+       atomic_inc_unchecked(&rdtp->dynticks);
        smp_mb__after_atomic();  /* Force ordering with next sojourn. */
-       WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
+       WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
        rcu_dynticks_task_enter();
 
        /*
@@ -644,10 +644,10 @@ static void rcu_eqs_exit_common(long long oldval, int user)
 
        rcu_dynticks_task_exit();
        smp_mb__before_atomic();  /* Force ordering w/previous sojourn. */
-       atomic_inc(&rdtp->dynticks);
+       atomic_inc_unchecked(&rdtp->dynticks);
        /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
        smp_mb__after_atomic();  /* See above. */
-       WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+       WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
        rcu_cleanup_after_idle();
        trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
        if (!user && !is_idle_task(current)) {
@@ -768,14 +768,14 @@ void rcu_nmi_enter(void)
        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
        if (rdtp->dynticks_nmi_nesting == 0 &&
-           (atomic_read(&rdtp->dynticks) & 0x1))
+           (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
                return;
        rdtp->dynticks_nmi_nesting++;
        smp_mb__before_atomic();  /* Force delay from prior write. */
-       atomic_inc(&rdtp->dynticks);
+       atomic_inc_unchecked(&rdtp->dynticks);
        /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
        smp_mb__after_atomic();  /* See above. */
-       WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+       WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
 }
 
 /**
@@ -794,9 +794,9 @@ void rcu_nmi_exit(void)
                return;
        /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
        smp_mb__before_atomic();  /* See above. */
-       atomic_inc(&rdtp->dynticks);
+       atomic_inc_unchecked(&rdtp->dynticks);
        smp_mb__after_atomic();  /* Force delay to next write. */
-       WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
+       WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
 }
 
 /**
@@ -809,7 +809,7 @@ void rcu_nmi_exit(void)
  */
 bool notrace __rcu_is_watching(void)
 {
-       return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
+       return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
 }
 
 /**
@@ -892,7 +892,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
 static int dyntick_save_progress_counter(struct rcu_data *rdp,
                                         bool *isidle, unsigned long *maxj)
 {
-       rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
+       rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
        rcu_sysidle_check_cpu(rdp, isidle, maxj);
        if ((rdp->dynticks_snap & 0x1) == 0) {
                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
@@ -921,7 +921,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
        int *rcrmp;
        unsigned int snap;
 
-       curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
+       curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
        snap = (unsigned int)rdp->dynticks_snap;
 
        /*
@@ -984,10 +984,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
                         rdp->rsp->gp_start + jiffies_till_sched_qs) ||
            ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
                if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
-                       ACCESS_ONCE(rdp->cond_resched_completed) =
+                       ACCESS_ONCE_RW(rdp->cond_resched_completed) =
                                ACCESS_ONCE(rdp->mynode->completed);
                        smp_mb(); /* ->cond_resched_completed before *rcrmp. */
-                       ACCESS_ONCE(*rcrmp) =
+                       ACCESS_ONCE_RW(*rcrmp) =
                                ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
                        resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
                        rdp->rsp->jiffies_resched += 5; /* Enable beating. */
@@ -1009,7 +1009,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
        rsp->gp_start = j;
        smp_wmb(); /* Record start time before stall time. */
        j1 = rcu_jiffies_till_stall_check();
-       ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
+       ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
        rsp->jiffies_resched = j + j1 / 2;
 }
 
@@ -1050,7 +1050,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;
        }
-       ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
+       ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
        /*
@@ -1127,7 +1127,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
        if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
-               ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
+               ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
                                     3 * rcu_jiffies_till_stall_check() + 3;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
@@ -1211,7 +1211,7 @@ void rcu_cpu_stall_reset(void)
        struct rcu_state *rsp;
 
        for_each_rcu_flavor(rsp)
-               ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
+               ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
 }
 
 /*
@@ -1597,7 +1597,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
                raw_spin_unlock_irq(&rnp->lock);
                return 0;
        }
-       ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
+       ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
 
        if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
                /*
@@ -1638,9 +1638,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
                rdp = this_cpu_ptr(rsp->rda);
                rcu_preempt_check_blocked_tasks(rnp);
                rnp->qsmask = rnp->qsmaskinit;
-               ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
+               ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
                WARN_ON_ONCE(rnp->completed != rsp->completed);
-               ACCESS_ONCE(rnp->completed) = rsp->completed;
+               ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
                if (rnp == rdp->mynode)
                        (void)__note_gp_changes(rsp, rnp, rdp);
                rcu_preempt_boost_start_gp(rnp);
@@ -1685,7 +1685,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
        if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
                raw_spin_lock_irq(&rnp->lock);
                smp_mb__after_unlock_lock();
-               ACCESS_ONCE(rsp->gp_flags) =
+               ACCESS_ONCE_RW(rsp->gp_flags) =
                        ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
                raw_spin_unlock_irq(&rnp->lock);
        }
@@ -1731,7 +1731,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
        rcu_for_each_node_breadth_first(rsp, rnp) {
                raw_spin_lock_irq(&rnp->lock);
                smp_mb__after_unlock_lock();
-               ACCESS_ONCE(rnp->completed) = rsp->gpnum;
+               ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
                rdp = this_cpu_ptr(rsp->rda);
                if (rnp == rdp->mynode)
                        needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
@@ -1746,14 +1746,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
        rcu_nocb_gp_set(rnp, nocb);
 
        /* Declare grace period done. */
-       ACCESS_ONCE(rsp->completed) = rsp->gpnum;
+       ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
        trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
        rsp->fqs_state = RCU_GP_IDLE;
        rdp = this_cpu_ptr(rsp->rda);
        /* Advance CBs to reduce false positives below. */
        needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
        if (needgp || cpu_needs_another_gp(rsp, rdp)) {
-               ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
+               ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
                trace_rcu_grace_period(rsp->name,
                                       ACCESS_ONCE(rsp->gpnum),
                                       TPS("newreq"));
@@ -1878,7 +1878,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
                 */
                return false;
        }
-       ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
+       ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
        trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
                               TPS("newreq"));
 
@@ -2099,7 +2099,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
                rsp->qlen += rdp->qlen;
                rdp->n_cbs_orphaned += rdp->qlen;
                rdp->qlen_lazy = 0;
-               ACCESS_ONCE(rdp->qlen) = 0;
+               ACCESS_ONCE_RW(rdp->qlen) = 0;
        }
 
        /*
@@ -2344,7 +2344,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
        }
        smp_mb(); /* List handling before counting for rcu_barrier(). */
        rdp->qlen_lazy -= count_lazy;
-       ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
+       ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen - count;
        rdp->n_cbs_invoked += count;
 
        /* Reinstate batch limit if we have worked down the excess. */
@@ -2507,7 +2507,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
                raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
                return;  /* Someone beat us to it. */
        }
-       ACCESS_ONCE(rsp->gp_flags) =
+       ACCESS_ONCE_RW(rsp->gp_flags) =
                ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
        raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
        rcu_gp_kthread_wake(rsp);
@@ -2553,7 +2553,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 /*
  * Do RCU core processing for the current CPU.
  */
-static void rcu_process_callbacks(struct softirq_action *unused)
+static void rcu_process_callbacks(void)
 {
        struct rcu_state *rsp;
 
@@ -2665,7 +2665,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
        WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
        if (debug_rcu_head_queue(head)) {
                /* Probable double call_rcu(), so leak the callback. */
-               ACCESS_ONCE(head->func) = rcu_leak_callback;
+               ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
                WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
                return;
        }
@@ -2693,7 +2693,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
                local_irq_restore(flags);
                return;
        }
-       ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
+       ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen + 1;
        if (lazy)
                rdp->qlen_lazy++;
        else
@@ -2966,11 +2966,11 @@ void synchronize_sched_expedited(void)
         * counter wrap on a 32-bit system.  Quite a few more CPUs would of
         * course be required on a 64-bit system.
         */
-       if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
+       if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
                         (ulong)atomic_long_read(&rsp->expedited_done) +
                         ULONG_MAX / 8)) {
                synchronize_sched();
-               atomic_long_inc(&rsp->expedited_wrap);
+               atomic_long_inc_unchecked(&rsp->expedited_wrap);
                return;
        }
 
@@ -2978,12 +2978,12 @@ void synchronize_sched_expedited(void)
         * Take a ticket.  Note that atomic_inc_return() implies a
         * full memory barrier.
         */
-       snap = atomic_long_inc_return(&rsp->expedited_start);
+       snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
        firstsnap = snap;
        if (!try_get_online_cpus()) {
                /* CPU hotplug operation in flight, fall back to normal GP. */
                wait_rcu_gp(call_rcu_sched);
-               atomic_long_inc(&rsp->expedited_normal);
+               atomic_long_inc_unchecked(&rsp->expedited_normal);
                return;
        }
        WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
@@ -2996,7 +2996,7 @@ void synchronize_sched_expedited(void)
                for_each_cpu(cpu, cm) {
                        struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 
-                       if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
+                       if (!(atomic_add_return_unchecked(0, &rdtp->dynticks) & 0x1))
                                cpumask_clear_cpu(cpu, cm);
                }
                if (cpumask_weight(cm) == 0)
@@ -3011,14 +3011,14 @@ void synchronize_sched_expedited(void)
                             synchronize_sched_expedited_cpu_stop,
                             NULL) == -EAGAIN) {
                put_online_cpus();
-               atomic_long_inc(&rsp->expedited_tryfail);
+               atomic_long_inc_unchecked(&rsp->expedited_tryfail);
 
                /* Check to see if someone else did our work for us. */
                s = atomic_long_read(&rsp->expedited_done);
                if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
                        /* ensure test happens before caller kfree */
                        smp_mb__before_atomic(); /* ^^^ */
-                       atomic_long_inc(&rsp->expedited_workdone1);
+                       atomic_long_inc_unchecked(&rsp->expedited_workdone1);
                        free_cpumask_var(cm);
                        return;
                }
@@ -3028,7 +3028,7 @@ void synchronize_sched_expedited(void)
                        udelay(trycount * num_online_cpus());
                } else {
                        wait_rcu_gp(call_rcu_sched);
-                       atomic_long_inc(&rsp->expedited_normal);
+                       atomic_long_inc_unchecked(&rsp->expedited_normal);
                        free_cpumask_var(cm);
                        return;
                }
@@ -3038,7 +3038,7 @@ void synchronize_sched_expedited(void)
                if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
                        /* ensure test happens before caller kfree */
                        smp_mb__before_atomic(); /* ^^^ */
-                       atomic_long_inc(&rsp->expedited_workdone2);
+                       atomic_long_inc_unchecked(&rsp->expedited_workdone2);
                        free_cpumask_var(cm);
                        return;
                }
@@ -3053,14 +3053,14 @@ void synchronize_sched_expedited(void)
                if (!try_get_online_cpus()) {
                        /* CPU hotplug operation in flight, use normal GP. */
                        wait_rcu_gp(call_rcu_sched);
-                       atomic_long_inc(&rsp->expedited_normal);
+                       atomic_long_inc_unchecked(&rsp->expedited_normal);
                        free_cpumask_var(cm);
                        return;
                }
-               snap = atomic_long_read(&rsp->expedited_start);
+               snap = atomic_long_read_unchecked(&rsp->expedited_start);
                smp_mb(); /* ensure read is before try_stop_cpus(). */
        }
-       atomic_long_inc(&rsp->expedited_stoppedcpus);
+       atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
 
 all_cpus_idle:
        free_cpumask_var(cm);
@@ -3072,16 +3072,16 @@ all_cpus_idle:
         * than we did already did their update.
         */
        do {
-               atomic_long_inc(&rsp->expedited_done_tries);
+               atomic_long_inc_unchecked(&rsp->expedited_done_tries);
                s = atomic_long_read(&rsp->expedited_done);
                if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
                        /* ensure test happens before caller kfree */
                        smp_mb__before_atomic(); /* ^^^ */
-                       atomic_long_inc(&rsp->expedited_done_lost);
+                       atomic_long_inc_unchecked(&rsp->expedited_done_lost);
                        break;
                }
        } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
-       atomic_long_inc(&rsp->expedited_done_exit);
+       atomic_long_inc_unchecked(&rsp->expedited_done_exit);
 
        put_online_cpus();
 }
@@ -3287,7 +3287,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
         * ACCESS_ONCE() to prevent the compiler from speculating
         * the increment to precede the early-exit check.
         */
-       ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
+       ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
        WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
        _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
        smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
@@ -3342,7 +3342,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
        /* Increment ->n_barrier_done to prevent duplicate work. */
        smp_mb(); /* Keep increment after above mechanism. */
-       ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
+       ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
        WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
        _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
        smp_mb(); /* Keep increment before caller's subsequent code. */
@@ -3387,10 +3387,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
        rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
        init_callback_list(rdp);
        rdp->qlen_lazy = 0;
-       ACCESS_ONCE(rdp->qlen) = 0;
+       ACCESS_ONCE_RW(rdp->qlen) = 0;
        rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
        WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
-       WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
+       WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
        rdp->cpu = cpu;
        rdp->rsp = rsp;
        rcu_boot_init_nocb_percpu_data(rdp);
@@ -3423,8 +3423,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
        init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
        rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
        rcu_sysidle_init_percpu_data(rdp->dynticks);
-       atomic_set(&rdp->dynticks->dynticks,
-                  (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
+       atomic_set_unchecked(&rdp->dynticks->dynticks,
+                  (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
        raw_spin_unlock(&rnp->lock);            /* irqs remain disabled. */
 
        /* Add CPU to rcu_node bitmasks. */
index 8e7b1843896ebcc0fe13ed51da7cea68f14f0a36..9c55768d1b7a746568c7f40022ea219ccd30d267 100644 (file)
@@ -87,11 +87,11 @@ struct rcu_dynticks {
        long long dynticks_nesting; /* Track irq/process nesting level. */
                                    /* Process level is worth LLONG_MAX/2. */
        int dynticks_nmi_nesting;   /* Track NMI nesting level. */
-       atomic_t dynticks;          /* Even value for idle, else odd. */
+       atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
        long long dynticks_idle_nesting;
                                    /* irq/process nesting level from idle. */
-       atomic_t dynticks_idle;     /* Even value for idle, else odd. */
+       atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
                                    /*  "Idle" excludes userspace execution. */
        unsigned long dynticks_idle_jiffies;
                                    /* End of last non-NMI non-idle period. */
@@ -466,17 +466,17 @@ struct rcu_state {
                                                /*  _rcu_barrier(). */
        /* End of fields guarded by barrier_mutex. */
 
-       atomic_long_t expedited_start;          /* Starting ticket. */
-       atomic_long_t expedited_done;           /* Done ticket. */
-       atomic_long_t expedited_wrap;           /* # near-wrap incidents. */
-       atomic_long_t expedited_tryfail;        /* # acquisition failures. */
-       atomic_long_t expedited_workdone1;      /* # done by others #1. */
-       atomic_long_t expedited_workdone2;      /* # done by others #2. */
-       atomic_long_t expedited_normal;         /* # fallbacks to normal. */
-       atomic_long_t expedited_stoppedcpus;    /* # successful stop_cpus. */
-       atomic_long_t expedited_done_tries;     /* # tries to update _done. */
-       atomic_long_t expedited_done_lost;      /* # times beaten to _done. */
-       atomic_long_t expedited_done_exit;      /* # times exited _done loop. */
+       atomic_long_unchecked_t expedited_start;        /* Starting ticket. */
+       atomic_long_t expedited_done;                   /* Done ticket. */
+       atomic_long_unchecked_t expedited_wrap;         /* # near-wrap incidents. */
+       atomic_long_unchecked_t expedited_tryfail;      /* # acquisition failures. */
+       atomic_long_unchecked_t expedited_workdone1;    /* # done by others #1. */
+       atomic_long_unchecked_t expedited_workdone2;    /* # done by others #2. */
+       atomic_long_unchecked_t expedited_normal;       /* # fallbacks to normal. */
+       atomic_long_unchecked_t expedited_stoppedcpus;  /* # successful stop_cpus. */
+       atomic_long_unchecked_t expedited_done_tries;   /* # tries to update _done. */
+       atomic_long_unchecked_t expedited_done_lost;    /* # times beaten to _done. */
+       atomic_long_unchecked_t expedited_done_exit;    /* # times exited _done loop. */
 
        unsigned long jiffies_force_qs;         /* Time at which to invoke */
                                                /*  force_quiescent_state(). */
index 3ec85cb5d544b8588fd574a80e19bd564079533f..36879253dab30be47c2c525bbd9bc5a54b9c1c92 100644 (file)
@@ -709,7 +709,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
 {
        return !rcu_preempted_readers_exp(rnp) &&
-              ACCESS_ONCE(rnp->expmask) == 0;
+              ACCESS_ONCE_RW(rnp->expmask) == 0;
 }
 
 /*
@@ -870,7 +870,7 @@ void synchronize_rcu_expedited(void)
 
        /* Clean up and exit. */
        smp_mb(); /* ensure expedited GP seen before counter increment. */
-       ACCESS_ONCE(sync_rcu_preempt_exp_count) =
+       ACCESS_ONCE_RW(sync_rcu_preempt_exp_count) =
                                        sync_rcu_preempt_exp_count + 1;
 unlock_mb_ret:
        mutex_unlock(&sync_rcu_preempt_exp_mutex);
@@ -1426,7 +1426,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
        free_cpumask_var(cm);
 }
 
-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
        .store                  = &rcu_cpu_kthread_task,
        .thread_should_run      = rcu_cpu_kthread_should_run,
        .thread_fn              = rcu_cpu_kthread,
@@ -1900,7 +1900,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
        print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
        pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
               cpu, ticks_value, ticks_title,
-              atomic_read(&rdtp->dynticks) & 0xfff,
+              atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
               rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
               rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
               fast_no_hz);
@@ -2044,7 +2044,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
                return;
        if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
                /* Prior smp_mb__after_atomic() orders against prior enqueue. */
-               ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
+               ACCESS_ONCE_RW(rdp_leader->nocb_leader_sleep) = false;
                wake_up(&rdp_leader->nocb_wq);
        }
 }
@@ -2096,7 +2096,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 
        /* Enqueue the callback on the nocb list and update counts. */
        old_rhpp = xchg(&rdp->nocb_tail, rhtp);
-       ACCESS_ONCE(*old_rhpp) = rhp;
+       ACCESS_ONCE_RW(*old_rhpp) = rhp;
        atomic_long_add(rhcount, &rdp->nocb_q_count);
        atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
        smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
@@ -2286,7 +2286,7 @@ wait_again:
                        continue;  /* No CBs here, try next follower. */
 
                /* Move callbacks to wait-for-GP list, which is empty. */
-               ACCESS_ONCE(rdp->nocb_head) = NULL;
+               ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
                rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
                rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
                rdp->nocb_gp_count_lazy =
@@ -2413,7 +2413,7 @@ static int rcu_nocb_kthread(void *arg)
                list = ACCESS_ONCE(rdp->nocb_follower_head);
                BUG_ON(!list);
                trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
-               ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
+               ACCESS_ONCE_RW(rdp->nocb_follower_head) = NULL;
                tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
                c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
                cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
@@ -2443,8 +2443,8 @@ static int rcu_nocb_kthread(void *arg)
                        list = next;
                }
                trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
-               ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c;
-               ACCESS_ONCE(rdp->nocb_p_count_lazy) =
+               ACCESS_ONCE_RW(rdp->nocb_p_count) = rdp->nocb_p_count - c;
+               ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) =
                                                rdp->nocb_p_count_lazy - cl;
                rdp->n_nocbs_invoked += c;
        }
@@ -2465,7 +2465,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
        if (!rcu_nocb_need_deferred_wakeup(rdp))
                return;
        ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
-       ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
+       ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
        wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
 }
@@ -2588,7 +2588,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
        t = kthread_run(rcu_nocb_kthread, rdp_spawn,
                        "rcuo%c/%d", rsp->abbr, cpu);
        BUG_ON(IS_ERR(t));
-       ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
+       ACCESS_ONCE_RW(rdp_spawn->nocb_kthread) = t;
 }
 
 /*
@@ -2793,11 +2793,11 @@ static void rcu_sysidle_enter(int irq)
 
        /* Record start of fully idle period. */
        j = jiffies;
-       ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
+       ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
        smp_mb__before_atomic();
-       atomic_inc(&rdtp->dynticks_idle);
+       atomic_inc_unchecked(&rdtp->dynticks_idle);
        smp_mb__after_atomic();
-       WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
+       WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
 }
 
 /*
@@ -2868,9 +2868,9 @@ static void rcu_sysidle_exit(int irq)
 
        /* Record end of idle period. */
        smp_mb__before_atomic();
-       atomic_inc(&rdtp->dynticks_idle);
+       atomic_inc_unchecked(&rdtp->dynticks_idle);
        smp_mb__after_atomic();
-       WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
+       WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
 
        /*
         * If we are the timekeeping CPU, we are permitted to be non-idle
@@ -2915,7 +2915,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
                WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
 
        /* Pick up current idle and NMI-nesting counter and check. */
-       cur = atomic_read(&rdtp->dynticks_idle);
+       cur = atomic_read_unchecked(&rdtp->dynticks_idle);
        if (cur & 0x1) {
                *isidle = false; /* We are not idle! */
                return;
@@ -2964,7 +2964,7 @@ static void rcu_sysidle(unsigned long j)
        case RCU_SYSIDLE_NOT:
 
                /* First time all are idle, so note a short idle period. */
-               ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
+               ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
                break;
 
        case RCU_SYSIDLE_SHORT:
@@ -3002,7 +3002,7 @@ static void rcu_sysidle_cancel(void)
 {
        smp_mb();
        if (full_sysidle_state > RCU_SYSIDLE_SHORT)
-               ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
+               ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
 }
 
 /*
@@ -3054,7 +3054,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
        smp_mb();  /* grace period precedes setting inuse. */
 
        rshp = container_of(rhp, struct rcu_sysidle_head, rh);
-       ACCESS_ONCE(rshp->inuse) = 0;
+       ACCESS_ONCE_RW(rshp->inuse) = 0;
 }
 
 /*
@@ -3207,7 +3207,7 @@ static void rcu_bind_gp_kthread(void)
 static void rcu_dynticks_task_enter(void)
 {
 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
-       ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
+       ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = smp_processor_id();
 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
 }
 
@@ -3215,6 +3215,6 @@ static void rcu_dynticks_task_enter(void)
 static void rcu_dynticks_task_exit(void)
 {
 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
-       ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
+       ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = -1;
 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
 }
index 5cdc62e1beeb635a36ee87098a7f38110a651382..cc52e884ff00e67c5b115b47941f4b3a37928d05 100644 (file)
@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
                   ulong2long(rdp->completed), ulong2long(rdp->gpnum),
                   rdp->passed_quiesce, rdp->qs_pending);
        seq_printf(m, " dt=%d/%llx/%d df=%lu",
-                  atomic_read(&rdp->dynticks->dynticks),
+                  atomic_read_unchecked(&rdp->dynticks->dynticks),
                   rdp->dynticks->dynticks_nesting,
                   rdp->dynticks->dynticks_nmi_nesting,
                   rdp->dynticks_fqs);
@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
        struct rcu_state *rsp = (struct rcu_state *)m->private;
 
        seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
-                  atomic_long_read(&rsp->expedited_start),
+                  atomic_long_read_unchecked(&rsp->expedited_start),
                   atomic_long_read(&rsp->expedited_done),
-                  atomic_long_read(&rsp->expedited_wrap),
-                  atomic_long_read(&rsp->expedited_tryfail),
-                  atomic_long_read(&rsp->expedited_workdone1),
-                  atomic_long_read(&rsp->expedited_workdone2),
-                  atomic_long_read(&rsp->expedited_normal),
-                  atomic_long_read(&rsp->expedited_stoppedcpus),
-                  atomic_long_read(&rsp->expedited_done_tries),
-                  atomic_long_read(&rsp->expedited_done_lost),
-                  atomic_long_read(&rsp->expedited_done_exit));
+                  atomic_long_read_unchecked(&rsp->expedited_wrap),
+                  atomic_long_read_unchecked(&rsp->expedited_tryfail),
+                  atomic_long_read_unchecked(&rsp->expedited_workdone1),
+                  atomic_long_read_unchecked(&rsp->expedited_workdone2),
+                  atomic_long_read_unchecked(&rsp->expedited_normal),
+                  atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
+                  atomic_long_read_unchecked(&rsp->expedited_done_tries),
+                  atomic_long_read_unchecked(&rsp->expedited_done_lost),
+                  atomic_long_read_unchecked(&rsp->expedited_done_exit));
        return 0;
 }
 
index e0d31a345ee6574fa5d7c949a8165f8374a9909f..f4dafe3c89a3ab0961799125f88ec2e83dace76a 100644 (file)
@@ -342,10 +342,10 @@ int rcu_jiffies_till_stall_check(void)
         * for CONFIG_RCU_CPU_STALL_TIMEOUT.
         */
        if (till_stall_check < 3) {
-               ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
+               ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
                till_stall_check = 3;
        } else if (till_stall_check > 300) {
-               ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
+               ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
                till_stall_check = 300;
        }
        return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
@@ -501,7 +501,7 @@ static void check_holdout_task(struct task_struct *t,
            !ACCESS_ONCE(t->on_rq) ||
            (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
             !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
-               ACCESS_ONCE(t->rcu_tasks_holdout) = false;
+               ACCESS_ONCE_RW(t->rcu_tasks_holdout) = false;
                list_del_init(&t->rcu_tasks_holdout_list);
                put_task_struct(t);
                return;
@@ -589,7 +589,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
                            !is_idle_task(t)) {
                                get_task_struct(t);
                                t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
-                               ACCESS_ONCE(t->rcu_tasks_holdout) = true;
+                               ACCESS_ONCE_RW(t->rcu_tasks_holdout) = true;
                                list_add(&t->rcu_tasks_holdout_list,
                                         &rcu_tasks_holdouts);
                        }
@@ -686,7 +686,7 @@ static void rcu_spawn_tasks_kthread(void)
        t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
        BUG_ON(IS_ERR(t));
        smp_mb(); /* Ensure others see full kthread. */
-       ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
+       ACCESS_ONCE_RW(rcu_tasks_kthread_ptr) = t;
        mutex_unlock(&rcu_tasks_kthread_mutex);
 }
 
index 0bcebffc4e77d5f45571e38ddc5dcc38f40594f4..e7cd5b2f6c69f799a6e26c514b202e2de710b7b5 100644 (file)
@@ -161,8 +161,18 @@ static const struct file_operations proc_iomem_operations = {
 
 static int __init ioresources_init(void)
 {
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+       proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
+       proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+       proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
+       proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
+#endif
+#else
        proc_create("ioports", 0, NULL, &proc_ioports_operations);
        proc_create("iomem", 0, NULL, &proc_iomem_operations);
+#endif
        return 0;
 }
 __initcall(ioresources_init);
index eae160dd669d9d8d58bb911595c6391b2732edb4..c9aa22ece82ec88d15bdb35341b687546fc47fea 100644 (file)
@@ -11,7 +11,7 @@
 
 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
 static struct autogroup autogroup_default;
-static atomic_t autogroup_seq_nr;
+static atomic_unchecked_t autogroup_seq_nr;
 
 void __init autogroup_init(struct task_struct *init_task)
 {
@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
 
        kref_init(&ag->kref);
        init_rwsem(&ag->lock);
-       ag->id = atomic_inc_return(&autogroup_seq_nr);
+       ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
        ag->tg = tg;
 #ifdef CONFIG_RT_GROUP_SCHED
        /*
index 607f852b4d04ab3b70379bf42528aa40e5f588c7..486bc8768ee3d8a3c356c9ea7225f4be8ceca773 100644 (file)
@@ -205,7 +205,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
  * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
  * or number of jiffies left till timeout) if completed.
  */
-long __sched
+long __sched __intentional_overflow(-1)
 wait_for_completion_interruptible_timeout(struct completion *x,
                                          unsigned long timeout)
 {
@@ -222,7 +222,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
  *
  * Return: -ERESTARTSYS if interrupted, 0 if completed.
  */
-int __sched wait_for_completion_killable(struct completion *x)
+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
 {
        long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
        if (t == -ERESTARTSYS)
@@ -243,7 +243,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
  * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
  * or number of jiffies left till timeout) if completed.
  */
-long __sched
+long __sched __intentional_overflow(-1)
 wait_for_completion_killable_timeout(struct completion *x,
                                     unsigned long timeout)
 {
index 44dfc8b46bd08806ebdd6d1904e4e8e837bd9206..56d160d6fe88908ae9ac054d5de9b8e197ca761b 100644 (file)
@@ -1902,7 +1902,7 @@ void set_numabalancing_state(bool enabled)
 int sysctl_numa_balancing(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       struct ctl_table t;
+       ctl_table_no_const t;
        int err;
        int state = numabalancing_enabled;
 
@@ -2352,8 +2352,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
                next->active_mm = oldmm;
                atomic_inc(&oldmm->mm_count);
                enter_lazy_tlb(oldmm, next);
-       } else
+       } else {
                switch_mm(oldmm, mm, next);
+               populate_stack();
+       }
 
        if (!prev->mm) {
                prev->active_mm = NULL;
@@ -3152,6 +3154,8 @@ int can_nice(const struct task_struct *p, const int nice)
        /* convert nice value [19,-20] to rlimit style value [1,40] */
        int nice_rlim = nice_to_rlimit(nice);
 
+       gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
+
        return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
                capable(CAP_SYS_NICE));
 }
@@ -3178,7 +3182,8 @@ SYSCALL_DEFINE1(nice, int, increment)
        nice = task_nice(current) + increment;
 
        nice = clamp_val(nice, MIN_NICE, MAX_NICE);
-       if (increment < 0 && !can_nice(current, nice))
+       if (increment < 0 && (!can_nice(current, nice) ||
+                             gr_handle_chroot_nice()))
                return -EPERM;
 
        retval = security_task_setnice(current, nice);
@@ -3473,6 +3478,7 @@ recheck:
                        if (policy != p->policy && !rlim_rtprio)
                                return -EPERM;
 
+                       gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
                        /* can't increase priority */
                        if (attr->sched_priority > p->rt_priority &&
                            attr->sched_priority > rlim_rtprio)
@@ -4973,6 +4979,7 @@ void idle_task_exit(void)
 
        if (mm != &init_mm) {
                switch_mm(mm, &init_mm, current);
+               populate_stack();
                finish_arch_post_lock_switch();
        }
        mmdrop(mm);
@@ -5068,7 +5075,7 @@ static void migrate_tasks(unsigned int dead_cpu)
 
 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
 
-static struct ctl_table sd_ctl_dir[] = {
+static ctl_table_no_const sd_ctl_dir[] __read_only = {
        {
                .procname       = "sched_domain",
                .mode           = 0555,
@@ -5085,17 +5092,17 @@ static struct ctl_table sd_ctl_root[] = {
        {}
 };
 
-static struct ctl_table *sd_alloc_ctl_entry(int n)
+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
 {
-       struct ctl_table *entry =
+       ctl_table_no_const *entry =
                kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
 
        return entry;
 }
 
-static void sd_free_ctl_entry(struct ctl_table **tablep)
+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
 {
-       struct ctl_table *entry;
+       ctl_table_no_const *entry;
 
        /*
         * In the intermediate directories, both the child directory and
@@ -5103,22 +5110,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
         * will always be set. In the lowest directory the names are
         * static strings and all have proc handlers.
         */
-       for (entry = *tablep; entry->mode; entry++) {
-               if (entry->child)
-                       sd_free_ctl_entry(&entry->child);
+       for (entry = tablep; entry->mode; entry++) {
+               if (entry->child) {
+                       sd_free_ctl_entry(entry->child);
+                       pax_open_kernel();
+                       entry->child = NULL;
+                       pax_close_kernel();
+               }
                if (entry->proc_handler == NULL)
                        kfree(entry->procname);
        }
 
-       kfree(*tablep);
-       *tablep = NULL;
+       kfree(tablep);
 }
 
 static int min_load_idx = 0;
 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
 
 static void
-set_table_entry(struct ctl_table *entry,
+set_table_entry(ctl_table_no_const *entry,
                const char *procname, void *data, int maxlen,
                umode_t mode, proc_handler *proc_handler,
                bool load_idx)
@@ -5138,7 +5148,7 @@ set_table_entry(struct ctl_table *entry,
 static struct ctl_table *
 sd_alloc_ctl_domain_table(struct sched_domain *sd)
 {
-       struct ctl_table *table = sd_alloc_ctl_entry(14);
+       ctl_table_no_const *table = sd_alloc_ctl_entry(14);
 
        if (table == NULL)
                return NULL;
@@ -5176,9 +5186,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
        return table;
 }
 
-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
 {
-       struct ctl_table *entry, *table;
+       ctl_table_no_const *entry, *table;
        struct sched_domain *sd;
        int domain_num = 0, i;
        char buf[32];
@@ -5205,11 +5215,13 @@ static struct ctl_table_header *sd_sysctl_header;
 static void register_sched_domain_sysctl(void)
 {
        int i, cpu_num = num_possible_cpus();
-       struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
+       ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
        char buf[32];
 
        WARN_ON(sd_ctl_dir[0].child);
+       pax_open_kernel();
        sd_ctl_dir[0].child = entry;
+       pax_close_kernel();
 
        if (entry == NULL)
                return;
@@ -5232,8 +5244,12 @@ static void unregister_sched_domain_sysctl(void)
        if (sd_sysctl_header)
                unregister_sysctl_table(sd_sysctl_header);
        sd_sysctl_header = NULL;
-       if (sd_ctl_dir[0].child)
-               sd_free_ctl_entry(&sd_ctl_dir[0].child);
+       if (sd_ctl_dir[0].child) {
+               sd_free_ctl_entry(sd_ctl_dir[0].child);
+               pax_open_kernel();
+               sd_ctl_dir[0].child = NULL;
+               pax_close_kernel();
+       }
 }
 #else
 static void register_sched_domain_sysctl(void)
index fe331fc391f53382f4999c0cd0f13367a827b3cc..29d620eadedae63bab061946732d7c9a88fa405f 100644 (file)
@@ -2089,7 +2089,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 
 static void reset_ptenuma_scan(struct task_struct *p)
 {
-       ACCESS_ONCE(p->mm->numa_scan_seq)++;
+       ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
        p->mm->numa_scan_offset = 0;
 }
 
@@ -7651,7 +7651,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
  * run_rebalance_domains is triggered when needed from the scheduler tick.
  * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
  */
-static void run_rebalance_domains(struct softirq_action *h)
+static __latent_entropy void run_rebalance_domains(void)
 {
        struct rq *this_rq = this_rq();
        enum cpu_idle_type idle = this_rq->idle_balance ?
index 9a2a45c970e7dcbc0c146c027acc1bab713ff4ee..bb91aceb50e36727ac60e9a4298e24def37e735d 100644 (file)
@@ -1182,7 +1182,7 @@ struct sched_class {
 #ifdef CONFIG_FAIR_GROUP_SCHED
        void (*task_move_group) (struct task_struct *p, int on_rq);
 #endif
-};
+} __do_const;
 
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
index 4ef9687ac115f21aae6ce4321e2639866892389d..4f44028943e663391fe35827c8c5acb4966b5101 100644 (file)
@@ -629,7 +629,9 @@ static u32 __seccomp_phase1_filter(int this_syscall, struct seccomp_data *sd)
 
        switch (action) {
        case SECCOMP_RET_ERRNO:
-               /* Set the low-order 16-bits as a errno. */
+               /* Set low-order bits as an errno, capped at MAX_ERRNO. */
+               if (data > MAX_ERRNO)
+                       data = MAX_ERRNO;
                syscall_set_return_value(current, task_pt_regs(current),
                                         -data, 0);
                goto skip;
index 16a305295256dd2b4b50eb0e0e5e07c34a803e0e..25ad033f08d179ffbfeecfb9083546e164f747ff 100644 (file)
@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
 
 int print_fatal_signals __read_mostly;
 
-static void __user *sig_handler(struct task_struct *t, int sig)
+static __sighandler_t sig_handler(struct task_struct *t, int sig)
 {
        return t->sighand->action[sig - 1].sa.sa_handler;
 }
 
-static int sig_handler_ignored(void __user *handler, int sig)
+static int sig_handler_ignored(__sighandler_t handler, int sig)
 {
        /* Is it explicitly or implicitly ignored? */
        return handler == SIG_IGN ||
@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
 
 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
 {
-       void __user *handler;
+       __sighandler_t handler;
 
        handler = sig_handler(t, sig);
 
@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
        atomic_inc(&user->sigpending);
        rcu_read_unlock();
 
+       if (!override_rlimit)
+               gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
+
        if (override_rlimit ||
            atomic_read(&user->sigpending) <=
                        task_rlimit(t, RLIMIT_SIGPENDING)) {
@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
 
 int unhandled_signal(struct task_struct *tsk, int sig)
 {
-       void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
+       __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
        if (is_global_init(tsk))
                return 1;
        if (handler != SIG_IGN && handler != SIG_DFL)
@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
                }
        }
 
+       /* allow glibc communication via tgkill to other threads in our
+          thread group */
+       if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
+            sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
+           && gr_handle_signal(t, sig))
+               return -EPERM;
+
        return security_task_kill(t, info, sig, 0);
 }
 
@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
        return send_signal(sig, info, p, 1);
 }
 
-static int
+int
 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
        return send_signal(sig, info, t, 0);
@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
        unsigned long int flags;
        int ret, blocked, ignored;
        struct k_sigaction *action;
+       int is_unhandled = 0;
 
        spin_lock_irqsave(&t->sighand->siglock, flags);
        action = &t->sighand->action[sig-1];
@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
        }
        if (action->sa.sa_handler == SIG_DFL)
                t->signal->flags &= ~SIGNAL_UNKILLABLE;
+       if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
+               is_unhandled = 1;
        ret = specific_send_sig_info(sig, info, t);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
 
+       /* only deal with unhandled signals, java etc trigger SIGSEGV during
+          normal operation */
+       if (is_unhandled) {
+               gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
+               gr_handle_crash(t, sig);
+       }
+
        return ret;
 }
 
@@ -1310,8 +1330,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
        ret = check_kill_permission(sig, info, p);
        rcu_read_unlock();
 
-       if (!ret && sig)
+       if (!ret && sig) {
                ret = do_send_sig_info(sig, info, p, true);
+               if (!ret)
+                       gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
+       }
 
        return ret;
 }
@@ -2915,7 +2938,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
        int error = -ESRCH;
 
        rcu_read_lock();
-       p = find_task_by_vpid(pid);
+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
+       /* allow glibc communication via tgkill to other threads in our
+          thread group */
+       if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
+           sig == (SIGRTMIN+1) && tgid == info->si_pid)            
+               p = find_task_by_vpid_unrestricted(pid);
+       else
+#endif
+               p = find_task_by_vpid(pid);
        if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
                error = check_kill_permission(sig, info, p);
                /*
@@ -3248,8 +3279,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
        }
        seg = get_fs();
        set_fs(KERNEL_DS);
-       ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
-                            (stack_t __force __user *) &uoss,
+       ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
+                            (stack_t __force_user *) &uoss,
                             compat_user_stack_pointer());
        set_fs(seg);
        if (ret >= 0 && uoss_ptr)  {
index 40190f28db3590140cb903d3f596883c61faaa74..8861d40745f5cf0ada4db9fa735c03f67efaf018 100644 (file)
@@ -290,7 +290,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
                }
                smpboot_unpark_thread(plug_thread, cpu);
        }
-       list_add(&plug_thread->list, &hotplug_threads);
+       pax_list_add(&plug_thread->list, &hotplug_threads);
 out:
        mutex_unlock(&smpboot_threads_lock);
        put_online_cpus();
@@ -308,7 +308,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
 {
        get_online_cpus();
        mutex_lock(&smpboot_threads_lock);
-       list_del(&plug_thread->list);
+       pax_list_del(&plug_thread->list);
        smpboot_destroy_threads(plug_thread);
        mutex_unlock(&smpboot_threads_lock);
        put_online_cpus();
index c497fcdf0d1e672230a7bd941a4b6de9c22b6204..e8f90a9ca69c34d88175ebfad5f28bf72c8f1936 100644 (file)
@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
 EXPORT_SYMBOL(irq_stat);
 #endif
 
-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
 
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
@@ -266,7 +266,7 @@ restart:
                kstat_incr_softirqs_this_cpu(vec_nr);
 
                trace_softirq_entry(vec_nr);
-               h->action(h);
+               h->action();
                trace_softirq_exit(vec_nr);
                if (unlikely(prev_count != preempt_count())) {
                        pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
@@ -426,7 +426,7 @@ void __raise_softirq_irqoff(unsigned int nr)
        or_softirq_pending(1UL << nr);
 }
 
-void open_softirq(int nr, void (*action)(struct softirq_action *))
+void __init open_softirq(int nr, void (*action)(void))
 {
        softirq_vec[nr].action = action;
 }
@@ -478,7 +478,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 }
 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
 
-static void tasklet_action(struct softirq_action *a)
+static void tasklet_action(void)
 {
        struct tasklet_struct *list;
 
@@ -514,7 +514,7 @@ static void tasklet_action(struct softirq_action *a)
        }
 }
 
-static void tasklet_hi_action(struct softirq_action *a)
+static __latent_entropy void tasklet_hi_action(void)
 {
        struct tasklet_struct *list;
 
@@ -745,7 +745,7 @@ static struct notifier_block cpu_nfb = {
        .notifier_call = cpu_callback
 };
 
-static struct smp_hotplug_thread softirq_threads = {
+static struct smp_hotplug_thread softirq_threads __read_only = {
        .store                  = &ksoftirqd,
        .thread_should_run      = ksoftirqd_should_run,
        .thread_fn              = run_ksoftirqd,
index ea9c881098941ecd9bbb42fa69a3ddb958d2ec41..2194af5c0bf505ff1a3def5cd73863965d117ca0 100644 (file)
@@ -154,6 +154,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
                error = -EACCES;
                goto out;
        }
+
+       if (gr_handle_chroot_setpriority(p, niceval)) {
+               error = -EACCES;
+               goto out;
+       }
+
        no_nice = security_task_setnice(p, niceval);
        if (no_nice) {
                error = no_nice;
@@ -359,6 +365,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
                        goto error;
        }
 
+       if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
+               goto error;
+
+       if (!gid_eq(new->gid, old->gid)) {
+               /* make sure we generate a learn log for what will
+                  end up being a role transition after a full-learning
+                  policy is generated
+                  CAP_SETGID is required to perform a transition
+                  we may not log a CAP_SETGID check above, e.g.
+                  in the case where new rgid = old egid
+               */
+               gr_learn_cap(current, new, CAP_SETGID);
+       }
+
        if (rgid != (gid_t) -1 ||
            (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
                new->sgid = new->egid;
@@ -394,6 +414,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
        old = current_cred();
 
        retval = -EPERM;
+
+       if (gr_check_group_change(kgid, kgid, kgid))
+               goto error;
+
        if (ns_capable(old->user_ns, CAP_SETGID))
                new->gid = new->egid = new->sgid = new->fsgid = kgid;
        else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
@@ -411,7 +435,7 @@ error:
 /*
  * change the user struct in a credentials set to match the new UID
  */
-static int set_user(struct cred *new)
+int set_user(struct cred *new)
 {
        struct user_struct *new_user;
 
@@ -491,7 +515,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
                        goto error;
        }
 
+       if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
+               goto error;
+
        if (!uid_eq(new->uid, old->uid)) {
+               /* make sure we generate a learn log for what will
+                  end up being a role transition after a full-learning
+                  policy is generated
+                  CAP_SETUID is required to perform a transition
+                  we may not log a CAP_SETUID check above, e.g.
+                  in the case where new ruid = old euid
+               */
+               gr_learn_cap(current, new, CAP_SETUID);
                retval = set_user(new);
                if (retval < 0)
                        goto error;
@@ -541,6 +576,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
        old = current_cred();
 
        retval = -EPERM;
+
+       if (gr_check_crash_uid(kuid))
+               goto error;
+       if (gr_check_user_change(kuid, kuid, kuid))
+               goto error;
+
        if (ns_capable(old->user_ns, CAP_SETUID)) {
                new->suid = new->uid = kuid;
                if (!uid_eq(kuid, old->uid)) {
@@ -610,6 +651,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
                        goto error;
        }
 
+       if (gr_check_user_change(kruid, keuid, INVALID_UID))
+               goto error;
+
        if (ruid != (uid_t) -1) {
                new->uid = kruid;
                if (!uid_eq(kruid, old->uid)) {
@@ -694,6 +738,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
                        goto error;
        }
 
+       if (gr_check_group_change(krgid, kegid, INVALID_GID))
+               goto error;
+
        if (rgid != (gid_t) -1)
                new->gid = krgid;
        if (egid != (gid_t) -1)
@@ -758,12 +805,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
            uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
            ns_capable(old->user_ns, CAP_SETUID)) {
                if (!uid_eq(kuid, old->fsuid)) {
+                       if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
+                               goto error;
+
                        new->fsuid = kuid;
                        if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
                                goto change_okay;
                }
        }
 
+error:
        abort_creds(new);
        return old_fsuid;
 
@@ -796,12 +847,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
        if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
            gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
            ns_capable(old->user_ns, CAP_SETGID)) {
+               if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
+                       goto error;
+
                if (!gid_eq(kgid, old->fsgid)) {
                        new->fsgid = kgid;
                        goto change_okay;
                }
        }
 
+error:
        abort_creds(new);
        return old_fsgid;
 
@@ -1178,19 +1233,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
                return -EFAULT;
 
        down_read(&uts_sem);
-       error = __copy_to_user(&name->sysname, &utsname()->sysname,
+       error = __copy_to_user(name->sysname, &utsname()->sysname,
                               __OLD_UTS_LEN);
        error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
-       error |= __copy_to_user(&name->nodename, &utsname()->nodename,
+       error |= __copy_to_user(name->nodename, &utsname()->nodename,
                                __OLD_UTS_LEN);
        error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
-       error |= __copy_to_user(&name->release, &utsname()->release,
+       error |= __copy_to_user(name->release, &utsname()->release,
                                __OLD_UTS_LEN);
        error |= __put_user(0, name->release + __OLD_UTS_LEN);
-       error |= __copy_to_user(&name->version, &utsname()->version,
+       error |= __copy_to_user(name->version, &utsname()->version,
                                __OLD_UTS_LEN);
        error |= __put_user(0, name->version + __OLD_UTS_LEN);
-       error |= __copy_to_user(&name->machine, &utsname()->machine,
+       error |= __copy_to_user(name->machine, &utsname()->machine,
                                __OLD_UTS_LEN);
        error |= __put_user(0, name->machine + __OLD_UTS_LEN);
        up_read(&uts_sem);
@@ -1391,6 +1446,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
                         */
                        new_rlim->rlim_cur = 1;
                }
+               /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
+                  is changed to a lower value.  Since tasks can be created by the same
+                  user in between this limit change and an execve by this task, force
+                  a recheck only for this task by setting PF_NPROC_EXCEEDED
+               */
+               if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
+                       tsk->flags |= PF_NPROC_EXCEEDED;
        }
        if (!retval) {
                if (old_rlim)
index 88ea2d6e00314059b96adb0505ffc5f9c98fcf73..88acc7708522669df7a0ee1dde7626434284c5b3 100644 (file)
@@ -94,7 +94,6 @@
 
 
 #if defined(CONFIG_SYSCTL)
-
 /* External variables not in a header file. */
 extern int max_threads;
 extern int suid_dumpable;
@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
 
 /* Constants used for minimum and  maximum */
 #ifdef CONFIG_LOCKUP_DETECTOR
-static int sixty = 60;
+static int sixty __read_only = 60;
 #endif
 
-static int __maybe_unused neg_one = -1;
+static int __maybe_unused neg_one __read_only = -1;
 
-static int zero;
-static int __maybe_unused one = 1;
-static int __maybe_unused two = 2;
-static int __maybe_unused four = 4;
-static unsigned long one_ul = 1;
-static int one_hundred = 100;
+static int zero __read_only = 0;
+static int __maybe_unused one __read_only = 1;
+static int __maybe_unused two __read_only = 2;
+static int __maybe_unused three __read_only = 3;
+static int __maybe_unused four __read_only = 4;
+static unsigned long one_ul __read_only = 1;
+static int one_hundred __read_only = 100;
 #ifdef CONFIG_PRINTK
-static int ten_thousand = 10000;
+static int ten_thousand __read_only = 10000;
 #endif
 
 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
                               void __user *buffer, size_t *lenp, loff_t *ppos);
 #endif
 
-#ifdef CONFIG_PRINTK
 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos);
-#endif
 
 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp, loff_t *ppos);
@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
 
 #endif
 
+extern struct ctl_table grsecurity_table[];
+
 static struct ctl_table kern_table[];
 static struct ctl_table vm_table[];
 static struct ctl_table fs_table[];
@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
 int sysctl_legacy_va_layout;
 #endif
 
+#ifdef CONFIG_PAX_SOFTMODE
+static struct ctl_table pax_table[] = {
+       {
+               .procname       = "softmode",
+               .data           = &pax_softmode,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0600,
+               .proc_handler   = &proc_dointvec,
+       },
+
+       { }
+};
+#endif
+
 /* The default sysctl tables: */
 
 static struct ctl_table sysctl_base_table[] = {
@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
 #endif
 
 static struct ctl_table kern_table[] = {
+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
+       {
+               .procname       = "grsecurity",
+               .mode           = 0500,
+               .child          = grsecurity_table,
+       },
+#endif
+
+#ifdef CONFIG_PAX_SOFTMODE
+       {
+               .procname       = "pax",
+               .mode           = 0500,
+               .child          = pax_table,
+       },
+#endif
+
        {
                .procname       = "sched_child_runs_first",
                .data           = &sysctl_sched_child_runs_first,
@@ -649,7 +679,7 @@ static struct ctl_table kern_table[] = {
                .data           = &modprobe_path,
                .maxlen         = KMOD_PATH_LEN,
                .mode           = 0644,
-               .proc_handler   = proc_dostring,
+               .proc_handler   = proc_dostring_modpriv,
        },
        {
                .procname       = "modules_disabled",
@@ -816,16 +846,20 @@ static struct ctl_table kern_table[] = {
                .extra1         = &zero,
                .extra2         = &one,
        },
+#endif
        {
                .procname       = "kptr_restrict",
                .data           = &kptr_restrict,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax_sysadmin,
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+               .extra1         = &two,
+#else
                .extra1         = &zero,
+#endif
                .extra2         = &two,
        },
-#endif
        {
                .procname       = "ngroups_max",
                .data           = &ngroups_max,
@@ -1072,10 +1106,17 @@ static struct ctl_table kern_table[] = {
         */
        {
                .procname       = "perf_event_paranoid",
-               .data           = &sysctl_perf_event_paranoid,
-               .maxlen         = sizeof(sysctl_perf_event_paranoid),
+               .data           = &sysctl_perf_event_legitimately_concerned,
+               .maxlen         = sizeof(sysctl_perf_event_legitimately_concerned),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               /* go ahead, be a hero */
+               .proc_handler   = proc_dointvec_minmax_sysadmin,
+               .extra1         = &neg_one,
+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
+               .extra2         = &three,
+#else
+               .extra2         = &two,
+#endif
        },
        {
                .procname       = "perf_event_mlock_kb",
@@ -1340,6 +1381,13 @@ static struct ctl_table vm_table[] = {
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &zero,
        },
+       {
+               .procname       = "heap_stack_gap",
+               .data           = &sysctl_heap_stack_gap,
+               .maxlen         = sizeof(sysctl_heap_stack_gap),
+               .mode           = 0644,
+               .proc_handler   = proc_doulongvec_minmax,
+       },
 #else
        {
                .procname       = "nr_trim_pages",
@@ -1822,6 +1870,16 @@ int proc_dostring(struct ctl_table *table, int write,
                               (char __user *)buffer, lenp, ppos);
 }
 
+int proc_dostring_modpriv(struct ctl_table *table, int write,
+                 void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       if (write && !capable(CAP_SYS_MODULE))
+               return -EPERM;
+
+       return _proc_do_string(table->data, table->maxlen, write,
+                              buffer, lenp, ppos);
+}
+
 static size_t proc_skip_spaces(char **buf)
 {
        size_t ret;
@@ -1927,6 +1985,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
        len = strlen(tmp);
        if (len > *size)
                len = *size;
+       if (len > sizeof(tmp))
+               len = sizeof(tmp);
        if (copy_to_user(*buf, tmp, len))
                return -EFAULT;
        *size -= len;
@@ -2104,7 +2164,7 @@ int proc_dointvec(struct ctl_table *table, int write,
 static int proc_taint(struct ctl_table *table, int write,
                               void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       struct ctl_table t;
+       ctl_table_no_const t;
        unsigned long tmptaint = get_taint();
        int err;
 
@@ -2132,7 +2192,6 @@ static int proc_taint(struct ctl_table *table, int write,
        return err;
 }
 
-#ifdef CONFIG_PRINTK
 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -2141,7 +2200,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
 
        return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 }
-#endif
 
 struct do_proc_dointvec_minmax_conv_param {
        int *min;
@@ -2701,6 +2759,12 @@ int proc_dostring(struct ctl_table *table, int write,
        return -ENOSYS;
 }
 
+int proc_dostring_modpriv(struct ctl_table *table, int write,
+                 void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       return -ENOSYS;
+}
+
 int proc_dointvec(struct ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -2757,5 +2821,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
 EXPORT_SYMBOL(proc_dostring);
+EXPORT_SYMBOL(proc_dostring_modpriv);
 EXPORT_SYMBOL(proc_doulongvec_minmax);
 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
index 670fff88a9613e07ac52d70a5365875d93e1e29c..a247812095da1b8fe0ae1c53f78f0601571b5a97 100644 (file)
 #include <linux/fs.h>
 #include <linux/file.h>
 #include <linux/pid_namespace.h>
+#include <linux/grsecurity.h>
 #include <net/genetlink.h>
 #include <linux/atomic.h>
 
+extern int gr_is_taskstats_denied(int pid);
+
 /*
  * Maximum length of a cpumask that can be specified in
  * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
@@ -576,6 +579,9 @@ err:
 
 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
 {
+       if (gr_is_taskstats_denied(current->pid))
+               return -EACCES;
+
        if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
                return cmd_attr_register_cpumask(info);
        else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
index a7077d3ae52fe2e9bcd43bf076d748eb5bf6ef2a..dd48a494444714a80619d3f61eff2fa2c4acf0e9 100644 (file)
@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
        struct platform_device *pdev;
        int error = 0;
        int i;
-       struct k_clock alarm_clock = {
+       static struct k_clock alarm_clock = {
                .clock_getres   = alarm_clock_getres,
                .clock_get      = alarm_clock_get,
                .timer_create   = alarm_timer_create,
index d8c724cda37b3db78983661a3f93e95195f6fd7c..6b331a49cd211df1565e37c7437cbf72e8a58bbf 100644 (file)
@@ -1399,7 +1399,7 @@ void hrtimer_peek_ahead_timers(void)
        local_irq_restore(flags);
 }
 
-static void run_hrtimer_softirq(struct softirq_action *h)
+static __latent_entropy void run_hrtimer_softirq(void)
 {
        hrtimer_peek_ahead_timers();
 }
index a16b67859e2a79929331c5008f96fd7f29ebbfc1..8c5bd9d5722f0c42c9e48fb550e9b4ee3b936154 100644 (file)
@@ -1450,14 +1450,14 @@ struct k_clock clock_posix_cpu = {
 
 static __init int init_posix_cpu_timers(void)
 {
-       struct k_clock process = {
+       static struct k_clock process = {
                .clock_getres   = process_cpu_clock_getres,
                .clock_get      = process_cpu_clock_get,
                .timer_create   = process_cpu_timer_create,
                .nsleep         = process_cpu_nsleep,
                .nsleep_restart = process_cpu_nsleep_restart,
        };
-       struct k_clock thread = {
+       static struct k_clock thread = {
                .clock_getres   = thread_cpu_clock_getres,
                .clock_get      = thread_cpu_clock_get,
                .timer_create   = thread_cpu_timer_create,
index 31ea01f42e1f088786a291199cc54e9bde4658c9..7fc61ef4dce7fb5269e4ab021dc6a90f95d6833a 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/hash.h>
 #include <linux/posix-clock.h>
 #include <linux/posix-timers.h>
+#include <linux/grsecurity.h>
 #include <linux/syscalls.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
  *         which we beg off on and pass to do_sys_settimeofday().
  */
 
-static struct k_clock posix_clocks[MAX_CLOCKS];
+static struct k_clock *posix_clocks[MAX_CLOCKS];
 
 /*
  * These ones are defined below.
@@ -277,7 +278,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
  */
 static __init int init_posix_timers(void)
 {
-       struct k_clock clock_realtime = {
+       static struct k_clock clock_realtime = {
                .clock_getres   = hrtimer_get_res,
                .clock_get      = posix_clock_realtime_get,
                .clock_set      = posix_clock_realtime_set,
@@ -289,7 +290,7 @@ static __init int init_posix_timers(void)
                .timer_get      = common_timer_get,
                .timer_del      = common_timer_del,
        };
-       struct k_clock clock_monotonic = {
+       static struct k_clock clock_monotonic = {
                .clock_getres   = hrtimer_get_res,
                .clock_get      = posix_ktime_get_ts,
                .nsleep         = common_nsleep,
@@ -299,19 +300,19 @@ static __init int init_posix_timers(void)
                .timer_get      = common_timer_get,
                .timer_del      = common_timer_del,
        };
-       struct k_clock clock_monotonic_raw = {
+       static struct k_clock clock_monotonic_raw = {
                .clock_getres   = hrtimer_get_res,
                .clock_get      = posix_get_monotonic_raw,
        };
-       struct k_clock clock_realtime_coarse = {
+       static struct k_clock clock_realtime_coarse = {
                .clock_getres   = posix_get_coarse_res,
                .clock_get      = posix_get_realtime_coarse,
        };
-       struct k_clock clock_monotonic_coarse = {
+       static struct k_clock clock_monotonic_coarse = {
                .clock_getres   = posix_get_coarse_res,
                .clock_get      = posix_get_monotonic_coarse,
        };
-       struct k_clock clock_tai = {
+       static struct k_clock clock_tai = {
                .clock_getres   = hrtimer_get_res,
                .clock_get      = posix_get_tai,
                .nsleep         = common_nsleep,
@@ -321,7 +322,7 @@ static __init int init_posix_timers(void)
                .timer_get      = common_timer_get,
                .timer_del      = common_timer_del,
        };
-       struct k_clock clock_boottime = {
+       static struct k_clock clock_boottime = {
                .clock_getres   = hrtimer_get_res,
                .clock_get      = posix_get_boottime,
                .nsleep         = common_nsleep,
@@ -533,7 +534,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
                return;
        }
 
-       posix_clocks[clock_id] = *new_clock;
+       posix_clocks[clock_id] = new_clock;
 }
 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
 
@@ -579,9 +580,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
                return (id & CLOCKFD_MASK) == CLOCKFD ?
                        &clock_posix_dynamic : &clock_posix_cpu;
 
-       if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
+       if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
                return NULL;
-       return &posix_clocks[id];
+       return posix_clocks[id];
 }
 
 static int common_timer_create(struct k_itimer *new_timer)
@@ -599,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
        struct k_clock *kc = clockid_to_kclock(which_clock);
        struct k_itimer *new_timer;
        int error, new_timer_id;
-       sigevent_t event;
+       sigevent_t event = { };
        int it_id_set = IT_ID_NOT_SET;
 
        if (!kc)
@@ -1014,6 +1015,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
        if (copy_from_user(&new_tp, tp, sizeof (*tp)))
                return -EFAULT;
 
+       /* only the CLOCK_REALTIME clock can be set, all other clocks
+          have their clock_set fptr set to a nosettime dummy function
+          CLOCK_REALTIME has a NULL clock_set fptr which causes it to
+          call common_clock_set, which calls do_sys_settimeofday, which
+          we hook
+       */
+
        return kc->clock_set(which_clock, &new_tp);
 }
 
index 2c85b7724af4b0081a112e1b12cbcce4ef831117..6530536fad3410a05a3ad14024884522f2654de2 100644 (file)
@@ -173,6 +173,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
                return error;
 
        if (tz) {
+               /* we log in do_settimeofday called below, so don't log twice
+               */
+               if (!tv)
+                       gr_log_timechange();
+
                sys_tz = *tz;
                update_vsyscall_tz();
                if (firsttime) {
index 6a931852082f83a0c9c139a0b34d98e3d1483119..288c331d9506340b1a0b4695ff2c798343c789f7 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
+#include <linux/grsecurity.h>
 #include <linux/syscore_ops.h>
 #include <linux/clocksource.h>
 #include <linux/jiffies.h>
@@ -775,6 +776,8 @@ int do_settimeofday64(const struct timespec64 *ts)
        if (!timespec64_valid_strict(ts))
                return -EINVAL;
 
+       gr_log_timechange();
+
        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);
 
index 2d3f5c5049394615912b09ad16a4e639cb6cba9b..7ed7dc5a936047e02c62ea80e1f8b0ea67243444 100644 (file)
@@ -1393,7 +1393,7 @@ void update_process_times(int user_tick)
 /*
  * This function runs timers and the timer-tq in bottom half context.
  */
-static void run_timer_softirq(struct softirq_action *h)
+static __latent_entropy void run_timer_softirq(void)
 {
        struct tvec_base *base = __this_cpu_read(tvec_bases);
 
@@ -1456,7 +1456,7 @@ static void process_timeout(unsigned long __data)
  *
  * In all cases the return value is guaranteed to be non-negative.
  */
-signed long __sched schedule_timeout(signed long timeout)
+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
 {
        struct timer_list timer;
        unsigned long expire;
index 61ed862cdd376222dedfa317301f3d6c3dbd3404..3b52c65062fee1da2b485350211b2735a00698a4 100644 (file)
@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
 
 static void print_name_offset(struct seq_file *m, void *sym)
 {
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+       SEQ_printf(m, "<%p>", NULL);
+#else
        char symname[KSYM_NAME_LEN];
 
        if (lookup_symbol_name((unsigned long)sym, symname) < 0)
                SEQ_printf(m, "<%pK>", sym);
        else
                SEQ_printf(m, "%s", symname);
+#endif
 }
 
 static void
@@ -119,7 +123,11 @@ next_one:
 static void
 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
 {
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+       SEQ_printf(m, "  .base:       %p\n", NULL);
+#else
        SEQ_printf(m, "  .base:       %pK\n", base);
+#endif
        SEQ_printf(m, "  .index:      %d\n",
                        base->index);
        SEQ_printf(m, "  .resolution: %Lu nsecs\n",
@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
 {
        struct proc_dir_entry *pe;
 
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+       pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
+#else
        pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
+#endif
        if (!pe)
                return -ENOMEM;
        return 0;
index 1fb08f21302ece707ac7ea3b5210863cae8b19c2..ca4bb1ec9534ad660f39aa8606db5462cc80eed6 100644 (file)
@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
 static unsigned long nr_entries;
 static struct entry entries[MAX_ENTRIES];
 
-static atomic_t overflow_count;
+static atomic_unchecked_t overflow_count;
 
 /*
  * The entries are in a hash-table, for fast lookup:
@@ -140,7 +140,7 @@ static void reset_entries(void)
        nr_entries = 0;
        memset(entries, 0, sizeof(entries));
        memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
-       atomic_set(&overflow_count, 0);
+       atomic_set_unchecked(&overflow_count, 0);
 }
 
 static struct entry *alloc_entry(void)
@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
        if (likely(entry))
                entry->count++;
        else
-               atomic_inc(&overflow_count);
+               atomic_inc_unchecked(&overflow_count);
 
  out_unlock:
        raw_spin_unlock_irqrestore(lock, flags);
@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
 
 static void print_name_offset(struct seq_file *m, unsigned long addr)
 {
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+       seq_printf(m, "<%p>", NULL);
+#else
        char symname[KSYM_NAME_LEN];
 
        if (lookup_symbol_name(addr, symname) < 0)
-               seq_printf(m, "<%p>", (void *)addr);
+               seq_printf(m, "<%pK>", (void *)addr);
        else
                seq_printf(m, "%s", symname);
+#endif
 }
 
 static int tstats_show(struct seq_file *m, void *v)
@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
 
        seq_puts(m, "Timer Stats Version: v0.3\n");
        seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
-       if (atomic_read(&overflow_count))
-               seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
+       if (atomic_read_unchecked(&overflow_count))
+               seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
        seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
 
        for (i = 0; i < nr_entries; i++) {
@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
 {
        struct proc_dir_entry *pe;
 
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+       pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
+#else
        pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
+#endif
        if (!pe)
                return -ENOMEM;
        return 0;
index dd70993c266c38785510ab09f0315d1f1775d05b..0bf694b7755659106304ab516b53ed4534bec357 100644 (file)
@@ -482,7 +482,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
        mutex_lock(&fullstop_mutex);
        if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
                VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
-               ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
+               ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
        } else {
                pr_warn("Concurrent rmmod and shutdown illegal!\n");
        }
@@ -549,14 +549,14 @@ static int torture_stutter(void *arg)
                if (!torture_must_stop()) {
                        if (stutter > 1) {
                                schedule_timeout_interruptible(stutter - 1);
-                               ACCESS_ONCE(stutter_pause_test) = 2;
+                               ACCESS_ONCE_RW(stutter_pause_test) = 2;
                        }
                        schedule_timeout_interruptible(1);
-                       ACCESS_ONCE(stutter_pause_test) = 1;
+                       ACCESS_ONCE_RW(stutter_pause_test) = 1;
                }
                if (!torture_must_stop())
                        schedule_timeout_interruptible(stutter);
-               ACCESS_ONCE(stutter_pause_test) = 0;
+               ACCESS_ONCE_RW(stutter_pause_test) = 0;
                torture_shutdown_absorb("torture_stutter");
        } while (!torture_must_stop());
        torture_kthread_stopping("torture_stutter");
@@ -648,7 +648,7 @@ bool torture_cleanup_begin(void)
                schedule_timeout_uninterruptible(10);
                return true;
        }
-       ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
+       ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
        mutex_unlock(&fullstop_mutex);
        torture_shutdown_cleanup();
        torture_shuffle_cleanup();
index 483cecfa5c174d6a74c54d5663b8b00578759d11..ac460913174859319ac068dc38bf6c047b3528b3 100644 (file)
@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
        struct blk_trace *bt = filp->private_data;
        char buf[16];
 
-       snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
+       snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
 
        return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
 }
@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
                return 1;
 
        bt = buf->chan->private_data;
-       atomic_inc(&bt->dropped);
+       atomic_inc_unchecked(&bt->dropped);
        return 0;
 }
 
@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 
        bt->dir = dir;
        bt->dev = dev;
-       atomic_set(&bt->dropped, 0);
+       atomic_set_unchecked(&bt->dropped, 0);
        INIT_LIST_HEAD(&bt->running_list);
 
        ret = -EIO;
index 224e768bdc738da7c47aca41fcc6d9ecd4c190b4..8303c84b7c6a26c0a025da7c5c94aa3b051fbd00 100644 (file)
@@ -2372,12 +2372,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
        if (unlikely(ftrace_disabled))
                return 0;
 
+       ret = ftrace_arch_code_modify_prepare();
+       FTRACE_WARN_ON(ret);
+       if (ret)
+               return 0;
+
        ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+       FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
        if (ret) {
                ftrace_bug(ret, rec);
-               return 0;
        }
-       return 1;
+       return ret ? 0 : 1;
 }
 
 /*
@@ -4754,8 +4759,10 @@ static int ftrace_process_locs(struct module *mod,
        if (!count)
                return 0;
 
+       pax_open_kernel();
        sort(start, count, sizeof(*start),
             ftrace_cmp_ips, ftrace_swap_ips);
+       pax_close_kernel();
 
        start_pg = ftrace_allocate_pages(count);
        if (!start_pg)
@@ -5633,7 +5640,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 
                if (t->ret_stack == NULL) {
                        atomic_set(&t->tracing_graph_pause, 0);
-                       atomic_set(&t->trace_overrun, 0);
+                       atomic_set_unchecked(&t->trace_overrun, 0);
                        t->curr_ret_stack = -1;
                        /* Make sure the tasks see the -1 first: */
                        smp_wmb();
@@ -5856,7 +5863,7 @@ static void
 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
 {
        atomic_set(&t->tracing_graph_pause, 0);
-       atomic_set(&t->trace_overrun, 0);
+       atomic_set_unchecked(&t->trace_overrun, 0);
        t->ftrace_timestamp = 0;
        /* make curr_ret_stack visible before we add the ret_stack */
        smp_wmb();
index d2e151c83bd5ef58b3e09136dd26773b9d6a951c..b68c8352db32fe55d76b4255a12d326f16a66404 100644 (file)
@@ -350,9 +350,9 @@ struct buffer_data_page {
  */
 struct buffer_page {
        struct list_head list;          /* list of buffer pages */
-       local_t          write;         /* index for next write */
+       local_unchecked_t        write;         /* index for next write */
        unsigned         read;          /* index for next read */
-       local_t          entries;       /* entries on this page */
+       local_unchecked_t        entries;       /* entries on this page */
        unsigned long    real_end;      /* real end of data */
        struct buffer_data_page *page;  /* Actual data page */
 };
@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
        unsigned long                   last_overrun;
        local_t                         entries_bytes;
        local_t                         entries;
-       local_t                         overrun;
-       local_t                         commit_overrun;
+       local_unchecked_t               overrun;
+       local_unchecked_t               commit_overrun;
        local_t                         dropped_events;
        local_t                         committing;
        local_t                         commits;
@@ -1047,8 +1047,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
         *
         * We add a counter to the write field to denote this.
         */
-       old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
-       old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
+       old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
+       old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
 
        /*
         * Just make sure we have seen our old_write and synchronize
@@ -1076,8 +1076,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
                 * cmpxchg to only update if an interrupt did not already
                 * do it for us. If the cmpxchg fails, we don't care.
                 */
-               (void)local_cmpxchg(&next_page->write, old_write, val);
-               (void)local_cmpxchg(&next_page->entries, old_entries, eval);
+               (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
+               (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
 
                /*
                 * No need to worry about races with clearing out the commit.
@@ -1445,12 +1445,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
 
 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
 {
-       return local_read(&bpage->entries) & RB_WRITE_MASK;
+       return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
 }
 
 static inline unsigned long rb_page_write(struct buffer_page *bpage)
 {
-       return local_read(&bpage->write) & RB_WRITE_MASK;
+       return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
 }
 
 static int
@@ -1545,7 +1545,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
                         * bytes consumed in ring buffer from here.
                         * Increment overrun to account for the lost events.
                         */
-                       local_add(page_entries, &cpu_buffer->overrun);
+                       local_add_unchecked(page_entries, &cpu_buffer->overrun);
                        local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
                }
 
@@ -2107,7 +2107,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
                 * it is our responsibility to update
                 * the counters.
                 */
-               local_add(entries, &cpu_buffer->overrun);
+               local_add_unchecked(entries, &cpu_buffer->overrun);
                local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
 
                /*
@@ -2257,7 +2257,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
                if (tail == BUF_PAGE_SIZE)
                        tail_page->real_end = 0;
 
-               local_sub(length, &tail_page->write);
+               local_sub_unchecked(length, &tail_page->write);
                return;
        }
 
@@ -2292,7 +2292,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
                rb_event_set_padding(event);
 
                /* Set the write back to the previous setting */
-               local_sub(length, &tail_page->write);
+               local_sub_unchecked(length, &tail_page->write);
                return;
        }
 
@@ -2304,7 +2304,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 
        /* Set write to end of buffer */
        length = (tail + length) - BUF_PAGE_SIZE;
-       local_sub(length, &tail_page->write);
+       local_sub_unchecked(length, &tail_page->write);
 }
 
 /*
@@ -2330,7 +2330,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
         * about it.
         */
        if (unlikely(next_page == commit_page)) {
-               local_inc(&cpu_buffer->commit_overrun);
+               local_inc_unchecked(&cpu_buffer->commit_overrun);
                goto out_reset;
        }
 
@@ -2386,7 +2386,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
                                      cpu_buffer->tail_page) &&
                                     (cpu_buffer->commit_page ==
                                      cpu_buffer->reader_page))) {
-                               local_inc(&cpu_buffer->commit_overrun);
+                               local_inc_unchecked(&cpu_buffer->commit_overrun);
                                goto out_reset;
                        }
                }
@@ -2434,7 +2434,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                length += RB_LEN_TIME_EXTEND;
 
        tail_page = cpu_buffer->tail_page;
-       write = local_add_return(length, &tail_page->write);
+       write = local_add_return_unchecked(length, &tail_page->write);
 
        /* set write to only the index of the write */
        write &= RB_WRITE_MASK;
@@ -2458,7 +2458,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
        kmemcheck_annotate_bitfield(event, bitfield);
        rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
 
-       local_inc(&tail_page->entries);
+       local_inc_unchecked(&tail_page->entries);
 
        /*
         * If this is the first commit on the page, then update
@@ -2491,7 +2491,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 
        if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
                unsigned long write_mask =
-                       local_read(&bpage->write) & ~RB_WRITE_MASK;
+                       local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
                unsigned long event_length = rb_event_length(event);
                /*
                 * This is on the tail page. It is possible that
@@ -2501,7 +2501,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
                 */
                old_index += write_mask;
                new_index += write_mask;
-               index = local_cmpxchg(&bpage->write, old_index, new_index);
+               index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
                if (index == old_index) {
                        /* update counters */
                        local_sub(event_length, &cpu_buffer->entries_bytes);
@@ -2904,7 +2904,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
 
        /* Do the likely case first */
        if (likely(bpage->page == (void *)addr)) {
-               local_dec(&bpage->entries);
+               local_dec_unchecked(&bpage->entries);
                return;
        }
 
@@ -2916,7 +2916,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
        start = bpage;
        do {
                if (bpage->page == (void *)addr) {
-                       local_dec(&bpage->entries);
+                       local_dec_unchecked(&bpage->entries);
                        return;
                }
                rb_inc_page(cpu_buffer, &bpage);
@@ -3200,7 +3200,7 @@ static inline unsigned long
 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
 {
        return local_read(&cpu_buffer->entries) -
-               (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
+               (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
 }
 
 /**
@@ -3289,7 +3289,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
                return 0;
 
        cpu_buffer = buffer->buffers[cpu];
-       ret = local_read(&cpu_buffer->overrun);
+       ret = local_read_unchecked(&cpu_buffer->overrun);
 
        return ret;
 }
@@ -3312,7 +3312,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
                return 0;
 
        cpu_buffer = buffer->buffers[cpu];
-       ret = local_read(&cpu_buffer->commit_overrun);
+       ret = local_read_unchecked(&cpu_buffer->commit_overrun);
 
        return ret;
 }
@@ -3397,7 +3397,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
        /* if you care about this being correct, lock the buffer */
        for_each_buffer_cpu(buffer, cpu) {
                cpu_buffer = buffer->buffers[cpu];
-               overruns += local_read(&cpu_buffer->overrun);
+               overruns += local_read_unchecked(&cpu_buffer->overrun);
        }
 
        return overruns;
@@ -3568,8 +3568,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
        /*
         * Reset the reader page to size zero.
         */
-       local_set(&cpu_buffer->reader_page->write, 0);
-       local_set(&cpu_buffer->reader_page->entries, 0);
+       local_set_unchecked(&cpu_buffer->reader_page->write, 0);
+       local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
        local_set(&cpu_buffer->reader_page->page->commit, 0);
        cpu_buffer->reader_page->real_end = 0;
 
@@ -3603,7 +3603,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
         * want to compare with the last_overrun.
         */
        smp_mb();
-       overwrite = local_read(&(cpu_buffer->overrun));
+       overwrite = local_read_unchecked(&(cpu_buffer->overrun));
 
        /*
         * Here's the tricky part.
@@ -4175,8 +4175,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 
        cpu_buffer->head_page
                = list_entry(cpu_buffer->pages, struct buffer_page, list);
-       local_set(&cpu_buffer->head_page->write, 0);
-       local_set(&cpu_buffer->head_page->entries, 0);
+       local_set_unchecked(&cpu_buffer->head_page->write, 0);
+       local_set_unchecked(&cpu_buffer->head_page->entries, 0);
        local_set(&cpu_buffer->head_page->page->commit, 0);
 
        cpu_buffer->head_page->read = 0;
@@ -4186,14 +4186,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 
        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
        INIT_LIST_HEAD(&cpu_buffer->new_pages);
-       local_set(&cpu_buffer->reader_page->write, 0);
-       local_set(&cpu_buffer->reader_page->entries, 0);
+       local_set_unchecked(&cpu_buffer->reader_page->write, 0);
+       local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
        local_set(&cpu_buffer->reader_page->page->commit, 0);
        cpu_buffer->reader_page->read = 0;
 
        local_set(&cpu_buffer->entries_bytes, 0);
-       local_set(&cpu_buffer->overrun, 0);
-       local_set(&cpu_buffer->commit_overrun, 0);
+       local_set_unchecked(&cpu_buffer->overrun, 0);
+       local_set_unchecked(&cpu_buffer->commit_overrun, 0);
        local_set(&cpu_buffer->dropped_events, 0);
        local_set(&cpu_buffer->entries, 0);
        local_set(&cpu_buffer->committing, 0);
@@ -4598,8 +4598,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
                rb_init_page(bpage);
                bpage = reader->page;
                reader->page = *data_page;
-               local_set(&reader->write, 0);
-               local_set(&reader->entries, 0);
+               local_set_unchecked(&reader->write, 0);
+               local_set_unchecked(&reader->entries, 0);
                reader->read = 0;
                *data_page = bpage;
 
index 361a827b4962e8b14c0ec2a40f51eeb44d61d38b..6a319a3468e8f9aa84dcb7ee8d1d8073f4f166cc 100644 (file)
@@ -3499,7 +3499,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
        return 0;
 }
 
-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
 {
        /* do nothing if flag is already set */
        if (!!(trace_flags & mask) == !!enabled)
index 8de48bac1ce2a54aeb3948bf758212b3f0f5148b..3e5b4fa0ce8f96b5bbba0b2d2c11b702dfe6c540 100644 (file)
@@ -1271,7 +1271,7 @@ extern const char *__stop___tracepoint_str[];
 void trace_printk_init_buffers(void);
 void trace_printk_start_comm(void);
 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
 
 /*
  * Normal trace_printk() and friends allocates special buffers
index 57b67b1f24d1a141f88163c385e62be25cd275cf..66082a90daefe2e823c7a2e67c871f2c63d968b0 100644 (file)
@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
        return now;
 }
 
-static atomic64_t trace_counter;
+static atomic64_unchecked_t trace_counter;
 
 /*
  * trace_clock_counter(): simply an atomic counter.
@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
  */
 u64 notrace trace_clock_counter(void)
 {
-       return atomic64_add_return(1, &trace_counter);
+       return atomic64_inc_return_unchecked(&trace_counter);
 }
index b03a0ea77b993cf9f175ed7b44fc239832de7def..2df3168fd204a341627d3fa19560b727eb448ed2 100644 (file)
@@ -1755,7 +1755,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
        return 0;
 }
 
-struct ftrace_module_file_ops;
 static void __add_event_to_tracers(struct ftrace_event_call *call);
 
 /* Add an additional event_call dynamically */
index ba476009e5de49a89321f7b1d31f2e0d0d5bd63b..d0e47faded4cba67e48f404eeec17d661e952253 100644 (file)
@@ -133,7 +133,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
 
        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-               atomic_inc(&current->trace_overrun);
+               atomic_inc_unchecked(&current->trace_overrun);
                return -EBUSY;
        }
 
@@ -230,7 +230,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
-       trace->overrun = atomic_read(&current->trace_overrun);
+       trace->overrun = atomic_read_unchecked(&current->trace_overrun);
        trace->depth = index;
 }
 
index 7a9ba62e9fefadef3180f618e4ed86ef2c354910..2e0e4a1cffec365842dd170c0111b8f5ff71c592 100644 (file)
@@ -24,7 +24,7 @@ struct header_iter {
 static struct trace_array *mmio_trace_array;
 static bool overrun_detected;
 static unsigned long prev_overruns;
-static atomic_t dropped_count;
+static atomic_unchecked_t dropped_count;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
@@ -124,7 +124,7 @@ static void mmio_close(struct trace_iterator *iter)
 
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
-       unsigned long cnt = atomic_xchg(&dropped_count, 0);
+       unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
        unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
 
        if (over > prev_overruns)
@@ -307,7 +307,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
        event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
                                          sizeof(*entry), 0, pc);
        if (!event) {
-               atomic_inc(&dropped_count);
+               atomic_inc_unchecked(&dropped_count);
                return;
        }
        entry   = ring_buffer_event_data(event);
@@ -337,7 +337,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
        event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
                                          sizeof(*entry), 0, pc);
        if (!event) {
-               atomic_inc(&dropped_count);
+               atomic_inc_unchecked(&dropped_count);
                return;
        }
        entry   = ring_buffer_event_data(event);
index b77b9a697619f2c59f690bdf74757dc219fe3246..82f19bd8b6508e831db2f63a60a5136dd73d9b9f 100644 (file)
@@ -707,14 +707,16 @@ int register_ftrace_event(struct trace_event *event)
                        goto out;
        }
 
+       pax_open_kernel();
        if (event->funcs->trace == NULL)
-               event->funcs->trace = trace_nop_print;
+               *(void **)&event->funcs->trace = trace_nop_print;
        if (event->funcs->raw == NULL)
-               event->funcs->raw = trace_nop_print;
+               *(void **)&event->funcs->raw = trace_nop_print;
        if (event->funcs->hex == NULL)
-               event->funcs->hex = trace_nop_print;
+               *(void **)&event->funcs->hex = trace_nop_print;
        if (event->funcs->binary == NULL)
-               event->funcs->binary = trace_nop_print;
+               *(void **)&event->funcs->binary = trace_nop_print;
+       pax_close_kernel();
 
        key = event->type & (EVENT_HASHSIZE - 1);
 
index f8b45d8792f96cd4f5b3cc8428d435da0c84f96a..70ff6c881a84946c757244d6069e868c7b2ef361 100644 (file)
@@ -337,7 +337,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
                return 0;
        }
 
-       seq_buf_path(&s->seq, path, "\n");
+       seq_buf_path(&s->seq, path, "\n\\");
 
        if (unlikely(seq_buf_has_overflowed(&s->seq))) {
                s->seq.len = save_len;
index 16eddb308c336aac2636ee43c4dcdfab7bb4a3f7..758b30815a5232fecd50244e5504bac9e9d3c0a7 100644 (file)
@@ -90,7 +90,7 @@ check_stack(unsigned long ip, unsigned long *stack)
                return;
 
        /* we do not handle interrupt stacks yet */
-       if (!object_is_on_stack(stack))
+       if (!object_starts_on_stack(stack))
                return;
 
        local_irq_save(flags);
index c6ee36fcbf9071a22e4b86d7ba4b3da3bd047a32..78513f3c5e5d444a00c49add44970c71103fe2da 100644 (file)
@@ -590,6 +590,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
        int num;
 
        num = ((struct syscall_metadata *)call->data)->syscall_nr;
+       if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
+               return -EINVAL;
 
        mutex_lock(&syscall_trace_lock);
        if (!sys_perf_refcount_enter)
@@ -610,6 +612,8 @@ static void perf_sysenter_disable(struct ftrace_event_call *call)
        int num;
 
        num = ((struct syscall_metadata *)call->data)->syscall_nr;
+       if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
+               return;
 
        mutex_lock(&syscall_trace_lock);
        sys_perf_refcount_enter--;
@@ -662,6 +666,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
        int num;
 
        num = ((struct syscall_metadata *)call->data)->syscall_nr;
+       if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
+               return -EINVAL;
 
        mutex_lock(&syscall_trace_lock);
        if (!sys_perf_refcount_exit)
@@ -682,6 +688,8 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
        int num;
 
        num = ((struct syscall_metadata *)call->data)->syscall_nr;
+       if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
+               return;
 
        mutex_lock(&syscall_trace_lock);
        sys_perf_refcount_exit--;
index 4109f8320684a81af4cd9d0c7262f83812c300f2..fe1f830909de975332d57898ade3ac69f1b2baa8 100644 (file)
@@ -83,6 +83,21 @@ int create_user_ns(struct cred *new)
            !kgid_has_mapping(parent_ns, group))
                return -EPERM;
 
+#ifdef CONFIG_GRKERNSEC
+       /*
+        * This doesn't really inspire confidence:
+        * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
+        * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
+        * Increases kernel attack surface in areas developers
+        * previously cared little about ("low importance due
+        * to requiring "root" capability")
+        * To be removed when this code receives *proper* review
+        */
+       if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
+                       !capable(CAP_SETGID))
+               return -EPERM;
+#endif
+
        ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
        if (!ns)
                return -ENOMEM;
@@ -980,7 +995,7 @@ static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
        if (atomic_read(&current->mm->mm_users) > 1)
                return -EINVAL;
 
-       if (current->fs->users != 1)
+       if (atomic_read(&current->fs->users) != 1)
                return -EINVAL;
 
        if (!ns_capable(user_ns, CAP_SYS_ADMIN))
index c8eac43267e90d13aee860f9a10eb89282e408f2..4b5f08f4769b6ee158f15d6b0c551af18f6cea4d 100644 (file)
@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
 static int proc_do_uts_string(struct ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       struct ctl_table uts_table;
+       ctl_table_no_const uts_table;
        int r;
        memcpy(&uts_table, table, sizeof(uts_table));
        uts_table.data = get_uts(table, write);
index 70bf11815f844554e267e648ffa7196ee849caab..4be3c37f8243fa379d702efaa55402251d139117 100644 (file)
@@ -572,7 +572,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
 static void watchdog_nmi_disable(unsigned int cpu) { return; }
 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
 
-static struct smp_hotplug_thread watchdog_threads = {
+static struct smp_hotplug_thread watchdog_threads __read_only = {
        .store                  = &softlockup_watchdog,
        .thread_should_run      = watchdog_should_run,
        .thread_fn              = watchdog,
index beeeac9e0e3e6829faa3fe16304cb3e7adbe8850..65cbfb33a650469b1ca5e04a4deded1561a80e30 100644 (file)
@@ -4517,7 +4517,7 @@ static void rebind_workers(struct worker_pool *pool)
                WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
                worker_flags |= WORKER_REBOUND;
                worker_flags &= ~WORKER_UNBOUND;
-               ACCESS_ONCE(worker->flags) = worker_flags;
+               ACCESS_ONCE_RW(worker->flags) = worker_flags;
        }
 
        spin_unlock_irq(&pool->lock);
index 5f2ce616c0462db9b9055528110268385b2b653e..85a0b1b785fc4aa54f60121b0412ddc4843a1603 100644 (file)
@@ -910,7 +910,7 @@ config DEBUG_MUTEXES
 
 config DEBUG_WW_MUTEX_SLOWPATH
        bool "Wait/wound mutex debugging: Slowpath testing"
-       depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+       depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
        select DEBUG_LOCK_ALLOC
        select DEBUG_SPINLOCK
        select DEBUG_MUTEXES
@@ -927,7 +927,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
 
 config DEBUG_LOCK_ALLOC
        bool "Lock debugging: detect incorrect freeing of live locks"
-       depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+       depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
        select DEBUG_SPINLOCK
        select DEBUG_MUTEXES
        select LOCKDEP
@@ -941,7 +941,7 @@ config DEBUG_LOCK_ALLOC
 
 config PROVE_LOCKING
        bool "Lock debugging: prove locking correctness"
-       depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+       depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
        select LOCKDEP
        select DEBUG_SPINLOCK
        select DEBUG_MUTEXES
@@ -992,7 +992,7 @@ config LOCKDEP
 
 config LOCK_STAT
        bool "Lock usage statistics"
-       depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+       depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
        select LOCKDEP
        select DEBUG_SPINLOCK
        select DEBUG_MUTEXES
@@ -1453,6 +1453,7 @@ config LATENCYTOP
        depends on DEBUG_KERNEL
        depends on STACKTRACE_SUPPORT
        depends on PROC_FS
+       depends on !GRKERNSEC_HIDESYM
        select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
        select KALLSYMS
        select KALLSYMS_ALL
@@ -1469,7 +1470,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
 config DEBUG_STRICT_USER_COPY_CHECKS
        bool "Strict user copy size checks"
        depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
-       depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
+       depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
        help
          Enabling this option turns a certain set of sanity checks for user
          copy operations into compile time failures.
@@ -1597,7 +1598,7 @@ endmenu # runtime tests
 
 config PROVIDE_OHCI1394_DMA_INIT
        bool "Remote debugging over FireWire early on boot"
-       depends on PCI && X86
+       depends on PCI && X86 && !GRKERNSEC
        help
          If you want to debug problems which hang or crash the kernel early
          on boot and the crashing machine has a FireWire port, you can use
index 3c3b30b9e020d2e4bc02edc73575555641aad057..ca2910254d5bcb50aa0fd3bece53ae0fb714f262 100644 (file)
@@ -55,7 +55,7 @@ obj-$(CONFIG_BTREE) += btree.o
 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
+obj-y += list_debug.o
 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
 
 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
index 114d1beae0c785a358bf709dd6f9a1e7ee46ce1d..ab0350c2b76b9038392423e7fdc1094f2bfd72c2 100644 (file)
@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
 {
        unsigned long internal = ACCESS_ONCE(avg->internal);
 
-       ACCESS_ONCE(avg->internal) = internal ?
+       ACCESS_ONCE_RW(avg->internal) = internal ?
                (((internal << avg->weight) - internal) +
                        (val << avg->factor)) >> avg->weight :
                (val << avg->factor);
index 324ea9eab8c1c6f2abbe6daf546370f0cdbafa5b..46b1ae22361fbbc147f336023027ee4387b16694 100644 (file)
@@ -271,7 +271,7 @@ int __bitmap_subset(const unsigned long *bitmap1,
 }
 EXPORT_SYMBOL(__bitmap_subset);
 
-int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
+int __intentional_overflow(-1) __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
 {
        unsigned int k, lim = bits/BITS_PER_LONG;
        int w = 0;
@@ -437,7 +437,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
 {
        int c, old_c, totaldigits, ndigits, nchunks, nbits;
        u32 chunk;
-       const char __user __force *ubuf = (const char __user __force *)buf;
+       const char __user *ubuf = (const char __force_user *)buf;
 
        bitmap_zero(maskp, nmaskbits);
 
@@ -522,7 +522,7 @@ int bitmap_parse_user(const char __user *ubuf,
 {
        if (!access_ok(VERIFY_READ, ubuf, ulen))
                return -EFAULT;
-       return __bitmap_parse((const char __force *)ubuf,
+       return __bitmap_parse((const char __force_kernel *)ubuf,
                                ulen, 1, maskp, nmaskbits);
 
 }
@@ -640,7 +640,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
 {
        unsigned a, b;
        int c, old_c, totaldigits;
-       const char __user __force *ubuf = (const char __user __force *)buf;
+       const char __user *ubuf = (const char __force_user *)buf;
        int exp_digit, in_range;
 
        totaldigits = c = 0;
@@ -735,7 +735,7 @@ int bitmap_parselist_user(const char __user *ubuf,
 {
        if (!access_ok(VERIFY_READ, ubuf, ulen))
                return -EFAULT;
-       return __bitmap_parselist((const char __force *)ubuf,
+       return __bitmap_parselist((const char __force_kernel *)ubuf,
                                        ulen, 1, maskp, nmaskbits);
 }
 EXPORT_SYMBOL(bitmap_parselist_user);
index 0c3bd9552b6fc4fa5e380ac013caf5ac618b1faf..5a615a1eddd8e1656a1d7ceb140063429286f665 100644 (file)
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -145,6 +145,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
                return BUG_TRAP_TYPE_NONE;
 
        bug = find_bug(bugaddr);
+       if (!bug)
+               return BUG_TRAP_TYPE_NONE;
 
        file = NULL;
        line = 0;
index 547f7f923dbcbd24f8bb99e13ef7c13db947b8dd..a6d4ba011c1a00c587b514e10c9c65156ebf6e81 100644 (file)
@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
        if (limit > 4)
                return;
 
-       is_on_stack = object_is_on_stack(addr);
+       is_on_stack = object_starts_on_stack(addr);
        if (is_on_stack == onstack)
                return;
 
index 4382ad77777ebcb17bc4b25fe606f4371b563a5d..08aa55850adce24fc513a351f4dc0656e2b969ae 100644 (file)
@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
 EXPORT_SYMBOL(__div64_32);
 
 #ifndef div_s64_rem
-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
 {
        u64 quotient;
 
@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
  * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
  */
 #ifndef div64_u64
-u64 div64_u64(u64 dividend, u64 divisor)
+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
 {
        u32 high = divisor >> 32;
        u64 quot;
index 9722bd2dbc9bd5782e867c72f8b6893f9ab2c5ab..0d826f418ec56eb7c8def5f84acbd23cf5dbf2d1 100644 (file)
@@ -979,7 +979,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
 
 void dma_debug_add_bus(struct bus_type *bus)
 {
-       struct notifier_block *nb;
+       notifier_block_no_const *nb;
 
        if (dma_debug_disabled())
                return;
@@ -1161,7 +1161,7 @@ static void check_unmap(struct dma_debug_entry *ref)
 
 static void check_for_stack(struct device *dev, void *addr)
 {
-       if (object_is_on_stack(addr))
+       if (object_starts_on_stack(addr))
                err_printk(dev, NULL, "DMA-API: device driver maps memory from "
                                "stack [addr=%p]\n", addr);
 }
index 013a7619348125b1a9d4de74546d927acf2616c1..c28f3fcda6b87b0992b2c24583203bf21fbedbd6 100644 (file)
@@ -269,7 +269,7 @@ static void free(void *where)
                malloc_ptr = free_mem_ptr;
 }
 #else
-#define malloc(a) kmalloc(a, GFP_KERNEL)
+#define malloc(a) kmalloc((a), GFP_KERNEL)
 #define free(a) kfree(a)
 #endif
 
index 0c9216c48762a0e941910f8707c0a276f62a91ba..863bd89cfff71eb67d10f862f21613b3710a4ba6 100644 (file)
@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
        unsigned long next;
 
        phys_addr -= addr;
-       pmd = pmd_alloc(&init_mm, pud, addr);
+       pmd = pmd_alloc_kernel(&init_mm, pud, addr);
        if (!pmd)
                return -ENOMEM;
        do {
@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
        unsigned long next;
 
        phys_addr -= addr;
-       pud = pud_alloc(&init_mm, pgd, addr);
+       pud = pud_alloc_kernel(&init_mm, pgd, addr);
        if (!pud)
                return -ENOMEM;
        do {
index bd2bea963364c757e90db1e47ce557a94d053c0b..6b3c95e1ebc379e095baedf0674c15275676611a 100644 (file)
@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
        struct task_struct *p, *t;
        bool ret;
 
+       if (!mm)
+               return true;
+
        if (atomic_read(&task->signal->live) != 1)
                return false;
 
index 03d4ab349fa749cd907291cc8e36641522d86241..46f637409021f1ed1086b1cd58e650648bc25b1a 100644 (file)
@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
 
 
 static DEFINE_SPINLOCK(kobj_ns_type_lock);
-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
 
-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
 {
        enum kobj_ns_type type = ops->type;
        int error;
index c24c2f7e296fa15e5431ef7dc4a31f8551e37c15..f0296f47a2defcf61735e57d896d3919f504b482 100644 (file)
@@ -11,7 +11,9 @@
 #include <linux/bug.h>
 #include <linux/kernel.h>
 #include <linux/rculist.h>
+#include <linux/mm.h>
 
+#ifdef CONFIG_DEBUG_LIST
 /*
  * Insert a new entry between two known consecutive entries.
  *
  * the prev/next entries already!
  */
 
+static bool __list_add_debug(struct list_head *new,
+                            struct list_head *prev,
+                            struct list_head *next)
+{
+       if (unlikely(next->prev != prev)) {
+               printk(KERN_ERR "list_add corruption. next->prev should be "
+                       "prev (%p), but was %p. (next=%p).\n",
+                       prev, next->prev, next);
+               BUG();
+               return false;
+       }
+       if (unlikely(prev->next != next)) {
+               printk(KERN_ERR "list_add corruption. prev->next should be "
+                       "next (%p), but was %p. (prev=%p).\n",
+                       next, prev->next, prev);
+               BUG();
+               return false;
+       }
+       if (unlikely(new == prev || new == next)) {
+               printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
+                       new, prev, next);
+               BUG();
+               return false;
+       }
+       return true;
+}
+
 void __list_add(struct list_head *new,
-                             struct list_head *prev,
-                             struct list_head *next)
+               struct list_head *prev,
+               struct list_head *next)
 {
-       WARN(next->prev != prev,
-               "list_add corruption. next->prev should be "
-               "prev (%p), but was %p. (next=%p).\n",
-               prev, next->prev, next);
-       WARN(prev->next != next,
-               "list_add corruption. prev->next should be "
-               "next (%p), but was %p. (prev=%p).\n",
-               next, prev->next, prev);
-       WARN(new == prev || new == next,
-            "list_add double add: new=%p, prev=%p, next=%p.\n",
-            new, prev, next);
+       if (!__list_add_debug(new, prev, next))
+               return;
+
        next->prev = new;
        new->next = next;
        new->prev = prev;
@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
 }
 EXPORT_SYMBOL(__list_add);
 
-void __list_del_entry(struct list_head *entry)
+static bool __list_del_entry_debug(struct list_head *entry)
 {
        struct list_head *prev, *next;
 
        prev = entry->prev;
        next = entry->next;
 
-       if (WARN(next == LIST_POISON1,
-               "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
-               entry, LIST_POISON1) ||
-           WARN(prev == LIST_POISON2,
-               "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
-               entry, LIST_POISON2) ||
-           WARN(prev->next != entry,
-               "list_del corruption. prev->next should be %p, "
-               "but was %p\n", entry, prev->next) ||
-           WARN(next->prev != entry,
-               "list_del corruption. next->prev should be %p, "
-               "but was %p\n", entry, next->prev))
+       if (unlikely(next == LIST_POISON1)) {
+               printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
+                       entry, LIST_POISON1);
+               BUG();
+               return false;
+       }
+       if (unlikely(prev == LIST_POISON2)) {
+               printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
+                       entry, LIST_POISON2);
+               BUG();
+               return false;
+       }
+       if (unlikely(entry->prev->next != entry)) {
+               printk(KERN_ERR "list_del corruption. prev->next should be %p, "
+                       "but was %p\n", entry, prev->next);
+               BUG();
+               return false;
+       }
+       if (unlikely(entry->next->prev != entry)) {
+               printk(KERN_ERR "list_del corruption. next->prev should be %p, "
+                       "but was %p\n", entry, next->prev);
+               BUG();
+               return false;
+       }
+       return true;
+}
+
+void __list_del_entry(struct list_head *entry)
+{
+       if (!__list_del_entry_debug(entry))
                return;
 
-       __list_del(prev, next);
+       __list_del(entry->prev, entry->next);
 }
 EXPORT_SYMBOL(__list_del_entry);
 
@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
 void __list_add_rcu(struct list_head *new,
                    struct list_head *prev, struct list_head *next)
 {
-       WARN(next->prev != prev,
-               "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
-               prev, next->prev, next);
-       WARN(prev->next != next,
-               "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
-               next, prev->next, prev);
+       if (!__list_add_debug(new, prev, next))
+               return;
+
        new->next = next;
        new->prev = prev;
        rcu_assign_pointer(list_next_rcu(prev), new);
        next->prev = new;
 }
 EXPORT_SYMBOL(__list_add_rcu);
+#endif
+
+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
+{
+#ifdef CONFIG_DEBUG_LIST
+       if (!__list_add_debug(new, prev, next))
+               return;
+#endif
+
+       pax_open_kernel();
+       next->prev = new;
+       new->next = next;
+       new->prev = prev;
+       prev->next = new;
+       pax_close_kernel();
+}
+EXPORT_SYMBOL(__pax_list_add);
+
+void pax_list_del(struct list_head *entry)
+{
+#ifdef CONFIG_DEBUG_LIST
+       if (!__list_del_entry_debug(entry))
+               return;
+#endif
+
+       pax_open_kernel();
+       __list_del(entry->prev, entry->next);
+       entry->next = LIST_POISON1;
+       entry->prev = LIST_POISON2;
+       pax_close_kernel();
+}
+EXPORT_SYMBOL(pax_list_del);
+
+void pax_list_del_init(struct list_head *entry)
+{
+       pax_open_kernel();
+       __list_del(entry->prev, entry->next);
+       INIT_LIST_HEAD(entry);
+       pax_close_kernel();
+}
+EXPORT_SYMBOL(pax_list_del_init);
+
+void __pax_list_add_rcu(struct list_head *new,
+                       struct list_head *prev, struct list_head *next)
+{
+#ifdef CONFIG_DEBUG_LIST
+       if (!__list_add_debug(new, prev, next))
+               return;
+#endif
+
+       pax_open_kernel();
+       new->next = next;
+       new->prev = prev;
+       rcu_assign_pointer(list_next_rcu(prev), new);
+       next->prev = new;
+       pax_close_kernel();
+}
+EXPORT_SYMBOL(__pax_list_add_rcu);
+
+void pax_list_del_rcu(struct list_head *entry)
+{
+#ifdef CONFIG_DEBUG_LIST
+       if (!__list_del_entry_debug(entry))
+               return;
+#endif
+
+       pax_open_kernel();
+       __list_del(entry->prev, entry->next);
+       entry->next = LIST_POISON1;
+       entry->prev = LIST_POISON2;
+       pax_close_kernel();
+}
+EXPORT_SYMBOL(pax_list_del_rcu);
index d2233de9a86e564f297e01305ddbc41392b3d2ee..fa1a2f69a0cbcc3ac89770d4a2584992a87649ea 100644 (file)
 void lockref_get(struct lockref *lockref)
 {
        CMPXCHG_LOOP(
-               new.count++;
+               __lockref_inc(&new);
        ,
                return;
        );
 
        spin_lock(&lockref->lock);
-       lockref->count++;
+       __lockref_inc(lockref);
        spin_unlock(&lockref->lock);
 }
 EXPORT_SYMBOL(lockref_get);
@@ -69,7 +69,7 @@ int lockref_get_not_zero(struct lockref *lockref)
        int retval;
 
        CMPXCHG_LOOP(
-               new.count++;
+               __lockref_inc(&new);
                if (!old.count)
                        return 0;
        ,
@@ -79,7 +79,7 @@ int lockref_get_not_zero(struct lockref *lockref)
        spin_lock(&lockref->lock);
        retval = 0;
        if (lockref->count) {
-               lockref->count++;
+               __lockref_inc(lockref);
                retval = 1;
        }
        spin_unlock(&lockref->lock);
@@ -96,7 +96,7 @@ EXPORT_SYMBOL(lockref_get_not_zero);
 int lockref_get_or_lock(struct lockref *lockref)
 {
        CMPXCHG_LOOP(
-               new.count++;
+               __lockref_inc(&new);
                if (!old.count)
                        break;
        ,
@@ -106,7 +106,7 @@ int lockref_get_or_lock(struct lockref *lockref)
        spin_lock(&lockref->lock);
        if (!lockref->count)
                return 0;
-       lockref->count++;
+       __lockref_inc(lockref);
        spin_unlock(&lockref->lock);
        return 1;
 }
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(lockref_get_or_lock);
 int lockref_put_or_lock(struct lockref *lockref)
 {
        CMPXCHG_LOOP(
-               new.count--;
+               __lockref_dec(&new);
                if (old.count <= 1)
                        break;
        ,
@@ -130,7 +130,7 @@ int lockref_put_or_lock(struct lockref *lockref)
        spin_lock(&lockref->lock);
        if (lockref->count <= 1)
                return 0;
-       lockref->count--;
+       __lockref_dec(lockref);
        spin_unlock(&lockref->lock);
        return 1;
 }
@@ -157,7 +157,7 @@ int lockref_get_not_dead(struct lockref *lockref)
        int retval;
 
        CMPXCHG_LOOP(
-               new.count++;
+               __lockref_inc(&new);
                if ((int)old.count < 0)
                        return 0;
        ,
@@ -167,7 +167,7 @@ int lockref_get_not_dead(struct lockref *lockref)
        spin_lock(&lockref->lock);
        retval = 0;
        if ((int) lockref->count >= 0) {
-               lockref->count++;
+               __lockref_inc(lockref);
                retval = 1;
        }
        spin_unlock(&lockref->lock);
index 6111bcb28376bfe412830431ca32f3e4607cfc29..02e816b895ff89571f5d0bf1d540c286f172e234 100644 (file)
@@ -31,7 +31,7 @@
  * atomic_long_t can't hit 0 before we've added up all the percpu refs.
  */
 
-#define PERCPU_COUNT_BIAS      (1LU << (BITS_PER_LONG - 1))
+#define PERCPU_COUNT_BIAS      (1LU << (BITS_PER_LONG - 2))
 
 static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
 
index 3291a8e37490ecc9d9221f389d26d02c0ae1f103..346a91ecdd22fc02f49f4f96ab1a701ceac29542 100644 (file)
@@ -67,7 +67,7 @@ struct radix_tree_preload {
        int nr;
        struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
 };
-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
 
 static inline void *ptr_to_indirect(void *ptr)
 {
index 0bee183fa18faaf3314ac37244469f111408cd26..526f12f7b4a1e5ca57add665d5d0e89bf32c07a8 100644 (file)
@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
 }
 #endif
 
-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
 
 /**
  *     prandom_u32_state - seeded pseudo-random number generator.
index c16c81a3d430e84a8084de93e97900715a8e6b8e..4dcbda19d7fcf59312236ad5c687bf84508375ee 100644 (file)
@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
 
 static const struct rb_augment_callbacks dummy_callbacks = {
-       dummy_propagate, dummy_copy, dummy_rotate
+       .propagate = dummy_propagate,
+       .copy = dummy_copy,
+       .rotate = dummy_rotate
 };
 
 void rb_insert_color(struct rb_node *node, struct rb_root *root)
index 7de89f4a36cfa54dd01980898a4646f7c41ec550..00d70b720620c3288ac250383ec1f7f4141cb17b 100644 (file)
@@ -50,6 +50,6 @@ void show_mem(unsigned int filter)
                quicklist_total_size());
 #endif
 #ifdef CONFIG_MEMORY_FAILURE
-       printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
+       printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
 #endif
 }
index bb2b201d6ad0397a77df53d1841f148af4465170..46abaf9a46227f28cebab5f66a23cf8b2bc4a3d0 100644 (file)
@@ -21,7 +21,7 @@
  */
 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
 {
-       const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+       static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
        long res = 0;
 
        /*
index a28df5206d95c24d6f3b4116753747f1fb2a67e3..3d55877fcfe82135f9e5f557e9be85468c9e2300 100644 (file)
@@ -26,7 +26,7 @@
  */
 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
 {
-       const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+       static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
        long align, res = 0;
        unsigned long c;
 
index 4abda074ea458947390b84c36f3eaad7095a2ceb..b9d3765b7e2c11cf151c0ea45ee910b185570445 100644 (file)
@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
 
 void
 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-                     dma_addr_t dev_addr)
+                     dma_addr_t dev_addr, struct dma_attrs *attrs)
 {
        phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
index 4f5b1ddbcd2566b8c0e654c4c776a7b3843c3713..7cab4184dadcf084932984476b805081528cfe7d 100644 (file)
@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
        WARN(1, "Buffer overflow detected!\n");
 }
 EXPORT_SYMBOL(copy_from_user_overflow);
+
+void copy_to_user_overflow(void)
+{
+       WARN(1, "Buffer overflow detected!\n");
+}
+EXPORT_SYMBOL(copy_to_user_overflow);
index ec337f64f52ddada251c9126561e5e38a38093e5..8484eb2d5c9870e9391a5529b5fc3e74d764bc2e 100644 (file)
@@ -16,6 +16,9 @@
  * - scnprintf and vscnprintf
  */
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+#define __INCLUDED_BY_HIDESYM 1
+#endif
 #include <stdarg.h>
 #include <linux/module.h>      /* for KSYM_SYMBOL_LEN */
 #include <linux/types.h>
@@ -625,7 +628,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
 #ifdef CONFIG_KALLSYMS
        if (*fmt == 'B')
                sprint_backtrace(sym, value);
-       else if (*fmt != 'f' && *fmt != 's')
+       else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
                sprint_symbol(sym, value);
        else
                sprint_symbol_no_offset(sym, value);
@@ -1240,7 +1243,11 @@ char *address_val(char *buf, char *end, const void *addr,
        return number(buf, end, num, spec);
 }
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+int kptr_restrict __read_mostly = 2;
+#else
 int kptr_restrict __read_mostly;
+#endif
 
 /*
  * Show a '%p' thing.  A kernel extension is that the '%p' is followed
@@ -1251,8 +1258,10 @@ int kptr_restrict __read_mostly;
  *
  * - 'F' For symbolic function descriptor pointers with offset
  * - 'f' For simple symbolic function names without offset
+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
  * - 'S' For symbolic direct pointers with offset
  * - 's' For symbolic direct pointers without offset
+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
  * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
  * - 'B' For backtraced symbolic direct pointers with offset
  * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
@@ -1331,12 +1340,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 
        if (!ptr && *fmt != 'K') {
                /*
-                * Print (null) with the same width as a pointer so it makes
+                * Print (nil) with the same width as a pointer so it makes
                 * tabular output look nice.
                 */
                if (spec.field_width == -1)
                        spec.field_width = default_width;
-               return string(buf, end, "(null)", spec);
+               return string(buf, end, "(nil)", spec);
        }
 
        switch (*fmt) {
@@ -1346,6 +1355,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
                /* Fallthrough */
        case 'S':
        case 's':
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+               break;
+#else
+               return symbol_string(buf, end, ptr, spec, fmt);
+#endif
+       case 'X':
+               ptr = dereference_function_descriptor(ptr);
+       case 'A':
        case 'B':
                return symbol_string(buf, end, ptr, spec, fmt);
        case 'R':
@@ -1403,6 +1420,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
                        va_end(va);
                        return buf;
                }
+       case 'P':
+               break;
        case 'K':
                /*
                 * %pK cannot be used in IRQ context because its test
@@ -1460,6 +1479,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
                                   ((const struct file *)ptr)->f_path.dentry,
                                   spec, fmt);
        }
+
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+       /* 'P' = approved pointers to copy to userland,
+          as in the /proc/kallsyms case, as we make it display nothing
+          for non-root users, and the real contents for root users
+          'X' = approved simple symbols
+          Also ignore 'K' pointers, since we force their NULLing for non-root users
+          above
+       */
+       if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
+               printk(KERN_ALERT "grsec: kernel infoleak detected!  Please report this log to spender@grsecurity.net.\n");
+               dump_stack();
+               ptr = NULL;
+       }
+#endif
+
        spec.flags |= SMALL;
        if (spec.field_width == -1) {
                spec.field_width = default_width;
@@ -2160,11 +2195,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
        typeof(type) value;                                             \
        if (sizeof(type) == 8) {                                        \
                args = PTR_ALIGN(args, sizeof(u32));                    \
-               *(u32 *)&value = *(u32 *)args;                          \
-               *((u32 *)&value + 1) = *(u32 *)(args + 4);              \
+               *(u32 *)&value = *(const u32 *)args;                    \
+               *((u32 *)&value + 1) = *(const u32 *)(args + 4);        \
        } else {                                                        \
                args = PTR_ALIGN(args, sizeof(type));                   \
-               value = *(typeof(type) *)args;                          \
+               value = *(const typeof(type) *)args;                    \
        }                                                               \
        args += sizeof(type);                                           \
        value;                                                          \
@@ -2227,7 +2262,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
                case FORMAT_TYPE_STR: {
                        const char *str_arg = args;
                        args += strlen(str_arg) + 1;
-                       str = string(str, end, (char *)str_arg, spec);
+                       str = string(str, end, str_arg, spec);
                        break;
                }
 
index 1d1ae6b078fdd9121abbd01409f01437bb67e1e8..0f05885380b92b480a74358e8946c3ece5fd00f7 100644 (file)
@@ -341,10 +341,11 @@ config KSM
          root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
 
 config DEFAULT_MMAP_MIN_ADDR
-        int "Low address space to protect from user allocation"
+       int "Low address space to protect from user allocation"
        depends on MMU
-        default 4096
-        help
+       default 32768 if ALPHA || ARM || PARISC || SPARC32
+       default 65536
+       help
          This is the portion of low virtual memory which should be protected
          from userspace allocation.  Keeping a user from writing to low pages
          can help reduce the impact of kernel NULL pointer bugs.
@@ -375,7 +376,7 @@ config MEMORY_FAILURE
 
 config HWPOISON_INJECT
        tristate "HWPoison pages injector"
-       depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
+       depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
        select PROC_PAGE_MONITOR
 
 config NOMMU_INITIAL_TRIM_EXCESS
index 0ae0df55000bb0cac674159a20ca90345ce0c44c..82ac56b4c0d09ce703d951e2c41b966af9c05e02 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/device.h>
 #include <trace/events/writeback.h>
 
-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
 
 struct backing_dev_info default_backing_dev_info = {
        .name           = "default",
@@ -525,7 +525,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
                return err;
 
        err = bdi_register(bdi, NULL, "%.28s-%ld", name,
-                          atomic_long_inc_return(&bdi_seq));
+                          atomic_long_inc_return_unchecked(&bdi_seq));
        if (err) {
                bdi_destroy(bdi);
                return err;
index 673e4581a2e541b44b02cd8ef201772dad5311a1..71920139556e814c3d4b940cb1a5550312ae975d 100644 (file)
@@ -2097,7 +2097,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
        struct address_space *mapping = file->f_mapping;
 
        if (!mapping->a_ops->readpage)
-               return -ENOEXEC;
+               return -ENODEV;
        file_accessed(file);
        vma->vm_ops = &generic_file_vm_ops;
        return 0;
@@ -2275,6 +2275,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
                         *pos = i_size_read(inode);
 
                if (limit != RLIM_INFINITY) {
+                       gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
                        if (*pos >= limit) {
                                send_sig(SIGXFSZ, current, 0);
                                return -EFBIG;
index 2805d71cf47644858ef77f94560694de5d8b10d7..8b56e7dfc9c6998d37f847d2ba897cef3456ede0 100644 (file)
@@ -180,6 +180,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
  retry:
        vma = find_vma(mm, start);
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
+               goto out;
+#endif
+
        /*
         * Make sure the vma is shared, that it supports prefaulting,
         * and that the remapped range is valid and fully within
index 9b2afbfe67e3905da6d7413621c01e8674a4d759..647297cf51782bfadfdb454eebe6855c7a5a9072 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -274,11 +274,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
        unsigned int fault_flags = 0;
        int ret;
 
-       /* For mlock, just skip the stack guard page. */
-       if ((*flags & FOLL_MLOCK) &&
-                       (stack_guard_page_start(vma, address) ||
-                        stack_guard_page_end(vma, address + PAGE_SIZE)))
-               return -ENOENT;
        if (*flags & FOLL_WRITE)
                fault_flags |= FAULT_FLAG_WRITE;
        if (nonblocking)
@@ -444,14 +439,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
        if (!(gup_flags & FOLL_FORCE))
                gup_flags |= FOLL_NUMA;
 
-       do {
+       while (nr_pages) {
                struct page *page;
                unsigned int foll_flags = gup_flags;
                unsigned int page_increm;
 
                /* first iteration or cross vma bound */
                if (!vma || start >= vma->vm_end) {
-                       vma = find_extend_vma(mm, start);
+                       vma = find_vma(mm, start);
                        if (!vma && in_gate_area(mm, start)) {
                                int ret;
                                ret = get_gate_page(mm, start & PAGE_MASK,
@@ -463,7 +458,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                goto next_page;
                        }
 
-                       if (!vma || check_vma_flags(vma, gup_flags))
+                       if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
                                return i ? : -EFAULT;
                        if (is_vm_hugetlb_page(vma)) {
                                i = follow_hugetlb_page(mm, vma, pages, vmas,
@@ -518,7 +513,7 @@ next_page:
                i += page_increm;
                start += page_increm * PAGE_SIZE;
                nr_pages -= page_increm;
-       } while (nr_pages);
+       }
        return i;
 }
 EXPORT_SYMBOL(__get_user_pages);
index 123bcd3ed4f209ba3710d9bfcaf8725d0a105534..0de52ba4f309bc5cbb43f609b91ff747f417c661 100644 (file)
@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
                 * So no dangers, even with speculative execution.
                 */
                page = pte_page(pkmap_page_table[i]);
+               pax_open_kernel();
                pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
-
+               pax_close_kernel();
                set_page_address(page, NULL);
                need_flush = 1;
        }
@@ -259,9 +260,11 @@ start:
                }
        }
        vaddr = PKMAP_ADDR(last_pkmap_nr);
+
+       pax_open_kernel();
        set_pte_at(&init_mm, vaddr,
                   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
-
+       pax_close_kernel();
        pkmap_count[last_pkmap_nr] = 1;
        set_page_address(page, (void *)vaddr);
 
index 267e41971100794cace59255ce49fc54eed08df3..394bed91bed0e74ff147f2cdfc32c1cba1bfe574 100644 (file)
@@ -2258,6 +2258,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
                         struct ctl_table *table, int write,
                         void __user *buffer, size_t *length, loff_t *ppos)
 {
+       ctl_table_no_const t;
        struct hstate *h = &default_hstate;
        unsigned long tmp = h->max_huge_pages;
        int ret;
@@ -2265,9 +2266,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
        if (!hugepages_supported())
                return -ENOTSUPP;
 
-       table->data = &tmp;
-       table->maxlen = sizeof(unsigned long);
-       ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+       t = *table;
+       t.data = &tmp;
+       t.maxlen = sizeof(unsigned long);
+       ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
        if (ret)
                goto out;
 
@@ -2302,6 +2304,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
        struct hstate *h = &default_hstate;
        unsigned long tmp;
        int ret;
+       ctl_table_no_const hugetlb_table;
 
        if (!hugepages_supported())
                return -ENOTSUPP;
@@ -2311,9 +2314,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
        if (write && hstate_is_gigantic(h))
                return -EINVAL;
 
-       table->data = &tmp;
-       table->maxlen = sizeof(unsigned long);
-       ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+       hugetlb_table = *table;
+       hugetlb_table.data = &tmp;
+       hugetlb_table.maxlen = sizeof(unsigned long);
+       ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
        if (ret)
                goto out;
 
@@ -2798,6 +2802,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
        i_mmap_unlock_write(mapping);
 }
 
+#ifdef CONFIG_PAX_SEGMEXEC
+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       struct vm_area_struct *vma_m;
+       unsigned long address_m;
+       pte_t *ptep_m;
+
+       vma_m = pax_find_mirror_vma(vma);
+       if (!vma_m)
+               return;
+
+       BUG_ON(address >= SEGMEXEC_TASK_SIZE);
+       address_m = address + SEGMEXEC_TASK_SIZE;
+       ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
+       get_page(page_m);
+       hugepage_add_anon_rmap(page_m, vma_m, address_m);
+       set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
+}
+#endif
+
 /*
  * Hugetlb_cow() should be called with page lock of the original hugepage held.
  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
@@ -2910,6 +2935,11 @@ retry_avoidcopy:
                                make_huge_pte(vma, new_page, 1));
                page_remove_rmap(old_page);
                hugepage_add_new_anon_rmap(new_page, vma, address);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+               pax_mirror_huge_pte(vma, address, new_page);
+#endif
+
                /* Make the old page be freed below */
                new_page = old_page;
        }
@@ -3070,6 +3100,10 @@ retry:
                                && (vma->vm_flags & VM_SHARED)));
        set_huge_pte_at(mm, address, ptep, new_pte);
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       pax_mirror_huge_pte(vma, address, page);
+#endif
+
        if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
                /* Optimization, do the COW without a second fault */
                ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
@@ -3137,6 +3171,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        struct address_space *mapping;
        int need_wait_lock = 0;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       struct vm_area_struct *vma_m;
+#endif
+
        address &= huge_page_mask(h);
 
        ptep = huge_pte_offset(mm, address);
@@ -3150,6 +3188,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                VM_FAULT_SET_HINDEX(hstate_index(h));
        }
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       vma_m = pax_find_mirror_vma(vma);
+       if (vma_m) {
+               unsigned long address_m;
+
+               if (vma->vm_start > vma_m->vm_start) {
+                       address_m = address;
+                       address -= SEGMEXEC_TASK_SIZE;
+                       vma = vma_m;
+                       h = hstate_vma(vma);
+               } else
+                       address_m = address + SEGMEXEC_TASK_SIZE;
+
+               if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
+                       return VM_FAULT_OOM;
+               address_m &= HPAGE_MASK;
+               unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
+       }
+#endif
+
        ptep = huge_pte_alloc(mm, address, huge_page_size(h));
        if (!ptep)
                return VM_FAULT_OOM;
index efad241f7014805fbe39ae30f1e4c03a6046cfa6..57ae4cafcd5c9587cb1c4b1f8bb5b4a10559d679 100644 (file)
@@ -134,6 +134,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
 
 extern int __isolate_free_page(struct page *page, unsigned int order);
 extern void __free_pages_bootmem(struct page *page, unsigned int order);
+extern void free_compound_page(struct page *page);
 extern void prep_compound_page(struct page *page, unsigned long order);
 #ifdef CONFIG_MEMORY_FAILURE
 extern bool is_free_buddy_page(struct page *page);
@@ -387,7 +388,7 @@ extern u32 hwpoison_filter_enable;
 
 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
         unsigned long, unsigned long,
-        unsigned long, unsigned long);
+        unsigned long, unsigned long) __intentional_overflow(-1);
 
 extern void set_pageblock_order(void);
 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
index 3cda50c1e3942100bcd3f8715a9062fe1c6d7528..032ba6349f9ccf848224ac2de8235565991f5f07 100644 (file)
@@ -364,7 +364,7 @@ static void print_unreferenced(struct seq_file *seq,
 
        for (i = 0; i < object->trace_len; i++) {
                void *ptr = (void *)object->trace[i];
-               seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
+               seq_printf(seq, "    [<%pP>] %pA\n", ptr, ptr);
        }
 }
 
@@ -1905,7 +1905,7 @@ static int __init kmemleak_late_init(void)
                return -ENOMEM;
        }
 
-       dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
+       dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
                                     &kmemleak_fops);
        if (!dentry)
                pr_warning("Failed to create the debugfs kmemleak file\n");
index d53adf9ba84bc340d6fe8d53a26a2c626a032c48..03a24bf0e5ebbe5b9e9695a666f7492c6e164bfe 100644 (file)
@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
        set_fs(KERNEL_DS);
        pagefault_disable();
        ret = __copy_from_user_inatomic(dst,
-                       (__force const void __user *)src, size);
+                       (const void __force_user *)src, size);
        pagefault_enable();
        set_fs(old_fs);
 
@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
 
        set_fs(KERNEL_DS);
        pagefault_disable();
-       ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
+       ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
        pagefault_enable();
        set_fs(old_fs);
 
index a271adc93289f7e84a9a5e56b3b5ffb6b914db63..831d82f44291da35f10ccdf02ca1a9d98a10c4c1 100644 (file)
@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
        pgoff_t pgoff;
        unsigned long new_flags = vma->vm_flags;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       struct vm_area_struct *vma_m;
+#endif
+
        switch (behavior) {
        case MADV_NORMAL:
                new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
@@ -126,6 +130,13 @@ success:
        /*
         * vm_flags is protected by the mmap_sem held in write mode.
         */
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       vma_m = pax_find_mirror_vma(vma);
+       if (vma_m)
+               vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
+#endif
+
        vma->vm_flags = new_flags;
 
 out:
@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
                             struct vm_area_struct **prev,
                             unsigned long start, unsigned long end)
 {
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       struct vm_area_struct *vma_m;
+#endif
+
        *prev = vma;
        if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
                return -EINVAL;
@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
                zap_page_range(vma, start, end - start, &details);
        } else
                zap_page_range(vma, start, end - start, NULL);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       vma_m = pax_find_mirror_vma(vma);
+       if (vma_m) {
+               if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
+                       struct zap_details details = {
+                               .nonlinear_vma = vma_m,
+                               .last_index = ULONG_MAX,
+                       };
+                       zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
+               } else
+                       zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
+       }
+#endif
+
        return 0;
 }
 
@@ -488,6 +519,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
        if (end < start)
                return error;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
+               if (end > SEGMEXEC_TASK_SIZE)
+                       return error;
+       } else
+#endif
+
+       if (end > TASK_SIZE)
+               return error;
+
        error = 0;
        if (end == start)
                return error;
index 20c29ddff17b2b067a70e7f0ba39316b97eeb0c5..22bd8e2bb3458c5de76bf6a1bc741bc7627bc9cd 100644 (file)
@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
 
 int sysctl_memory_failure_recovery __read_mostly = 1;
 
-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
 
 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
 
@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
                pfn, t->comm, t->pid);
        si.si_signo = SIGBUS;
        si.si_errno = 0;
-       si.si_addr = (void *)addr;
+       si.si_addr = (void __user *)addr;
 #ifdef __ARCH_SI_TRAPNO
        si.si_trapno = trapno;
 #endif
@@ -786,7 +786,7 @@ static struct page_state {
        unsigned long res;
        char *msg;
        int (*action)(struct page *p, unsigned long pfn);
-} error_states[] = {
+} __do_const error_states[] = {
        { reserved,     reserved,       "reserved kernel",      me_kernel },
        /*
         * free pages are specially detected outside this table:
@@ -1094,7 +1094,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
                nr_pages = 1 << compound_order(hpage);
        else /* normal page or thp */
                nr_pages = 1;
-       atomic_long_add(nr_pages, &num_poisoned_pages);
+       atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
 
        /*
         * We need/can do nothing about count=0 pages.
@@ -1123,7 +1123,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
                        if (PageHWPoison(hpage)) {
                                if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
                                    || (p != hpage && TestSetPageHWPoison(hpage))) {
-                                       atomic_long_sub(nr_pages, &num_poisoned_pages);
+                                       atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
                                        unlock_page(hpage);
                                        return 0;
                                }
@@ -1191,14 +1191,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         */
        if (!PageHWPoison(p)) {
                printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
-               atomic_long_sub(nr_pages, &num_poisoned_pages);
+               atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
                put_page(hpage);
                res = 0;
                goto out;
        }
        if (hwpoison_filter(p)) {
                if (TestClearPageHWPoison(p))
-                       atomic_long_sub(nr_pages, &num_poisoned_pages);
+                       atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
                unlock_page(hpage);
                put_page(hpage);
                return 0;
@@ -1428,7 +1428,7 @@ int unpoison_memory(unsigned long pfn)
                        return 0;
                }
                if (TestClearPageHWPoison(p))
-                       atomic_long_dec(&num_poisoned_pages);
+                       atomic_long_dec_unchecked(&num_poisoned_pages);
                pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
                return 0;
        }
@@ -1442,7 +1442,7 @@ int unpoison_memory(unsigned long pfn)
         */
        if (TestClearPageHWPoison(page)) {
                pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
-               atomic_long_sub(nr_pages, &num_poisoned_pages);
+               atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
                freeit = 1;
                if (PageHuge(page))
                        clear_page_hwpoison_huge_page(page);
@@ -1567,11 +1567,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
                if (PageHuge(page)) {
                        set_page_hwpoison_huge_page(hpage);
                        dequeue_hwpoisoned_huge_page(hpage);
-                       atomic_long_add(1 << compound_order(hpage),
+                       atomic_long_add_unchecked(1 << compound_order(hpage),
                                        &num_poisoned_pages);
                } else {
                        SetPageHWPoison(page);
-                       atomic_long_inc(&num_poisoned_pages);
+                       atomic_long_inc_unchecked(&num_poisoned_pages);
                }
        }
        return ret;
@@ -1610,7 +1610,7 @@ static int __soft_offline_page(struct page *page, int flags)
                put_page(page);
                pr_info("soft_offline: %#lx: invalidated\n", pfn);
                SetPageHWPoison(page);
-               atomic_long_inc(&num_poisoned_pages);
+               atomic_long_inc_unchecked(&num_poisoned_pages);
                return 0;
        }
 
@@ -1659,7 +1659,7 @@ static int __soft_offline_page(struct page *page, int flags)
                        if (!is_free_buddy_page(page))
                                pr_info("soft offline: %#lx: page leaked\n",
                                        pfn);
-                       atomic_long_inc(&num_poisoned_pages);
+                       atomic_long_inc_unchecked(&num_poisoned_pages);
                }
        } else {
                pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
@@ -1729,11 +1729,11 @@ int soft_offline_page(struct page *page, int flags)
                if (PageHuge(page)) {
                        set_page_hwpoison_huge_page(hpage);
                        dequeue_hwpoisoned_huge_page(hpage);
-                       atomic_long_add(1 << compound_order(hpage),
+                       atomic_long_add_unchecked(1 << compound_order(hpage),
                                        &num_poisoned_pages);
                } else {
                        SetPageHWPoison(page);
-                       atomic_long_inc(&num_poisoned_pages);
+                       atomic_long_inc_unchecked(&num_poisoned_pages);
                }
        }
        unset_migratetype_isolate(page, MIGRATE_MOVABLE);
index 6aa7822bb64d29cdc9255cd83f55c088184d6701..3c76005c7e8f1a0ee620246ffb2862703af076a2 100644 (file)
@@ -414,6 +414,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                free_pte_range(tlb, pmd, addr);
        } while (pmd++, addr = next, addr != end);
 
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
        start &= PUD_MASK;
        if (start < floor)
                return;
@@ -428,6 +429,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
+#endif
+
 }
 
 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
@@ -447,6 +450,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                free_pmd_range(tlb, pud, addr, next, floor, ceiling);
        } while (pud++, addr = next, addr != end);
 
+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
        start &= PGDIR_MASK;
        if (start < floor)
                return;
@@ -461,6 +465,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
        pud_free_tlb(tlb, pud, start);
+#endif
+
 }
 
 /*
@@ -690,10 +696,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
         * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
         */
        if (vma->vm_ops)
-               printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
+               printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
                       vma->vm_ops->fault);
        if (vma->vm_file)
-               printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
+               printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
                       vma->vm_file->f_op->mmap);
        dump_stack();
        add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
@@ -1488,6 +1494,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
        page_add_file_rmap(page);
        set_pte_at(mm, addr, pte, mk_pte(page, prot));
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       pax_mirror_file_pte(vma, addr, page, ptl);
+#endif
+
        retval = 0;
        pte_unmap_unlock(pte, ptl);
        return retval;
@@ -1532,9 +1542,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
        if (!page_count(page))
                return -EINVAL;
        if (!(vma->vm_flags & VM_MIXEDMAP)) {
+
+#ifdef CONFIG_PAX_SEGMEXEC
+               struct vm_area_struct *vma_m;
+#endif
+
                BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
                BUG_ON(vma->vm_flags & VM_PFNMAP);
                vma->vm_flags |= VM_MIXEDMAP;
+
+#ifdef CONFIG_PAX_SEGMEXEC
+               vma_m = pax_find_mirror_vma(vma);
+               if (vma_m)
+                       vma_m->vm_flags |= VM_MIXEDMAP;
+#endif
+
        }
        return insert_page(vma, addr, page, vma->vm_page_prot);
 }
@@ -1617,6 +1639,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn)
 {
        BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
+       BUG_ON(vma->vm_mirror);
 
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return -EFAULT;
@@ -1864,7 +1887,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
 
        BUG_ON(pud_huge(*pud));
 
-       pmd = pmd_alloc(mm, pud, addr);
+       pmd = (mm == &init_mm) ?
+               pmd_alloc_kernel(mm, pud, addr) :
+               pmd_alloc(mm, pud, addr);
        if (!pmd)
                return -ENOMEM;
        do {
@@ -1884,7 +1909,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
        unsigned long next;
        int err;
 
-       pud = pud_alloc(mm, pgd, addr);
+       pud = (mm == &init_mm) ?
+               pud_alloc_kernel(mm, pgd, addr) :
+               pud_alloc(mm, pgd, addr);
        if (!pud)
                return -ENOMEM;
        do {
@@ -2006,6 +2033,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
        return ret;
 }
 
+#ifdef CONFIG_PAX_SEGMEXEC
+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       spinlock_t *ptl;
+       pte_t *pte, entry;
+
+       pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+       entry = *pte;
+       if (!pte_present(entry)) {
+               if (!pte_none(entry)) {
+                       BUG_ON(pte_file(entry));
+                       free_swap_and_cache(pte_to_swp_entry(entry));
+                       pte_clear_not_present_full(mm, address, pte, 0);
+               }
+       } else {
+               struct page *page;
+
+               flush_cache_page(vma, address, pte_pfn(entry));
+               entry = ptep_clear_flush(vma, address, pte);
+               BUG_ON(pte_dirty(entry));
+               page = vm_normal_page(vma, address, entry);
+               if (page) {
+                       update_hiwater_rss(mm);
+                       if (PageAnon(page))
+                               dec_mm_counter_fast(mm, MM_ANONPAGES);
+                       else
+                               dec_mm_counter_fast(mm, MM_FILEPAGES);
+                       page_remove_rmap(page);
+                       page_cache_release(page);
+               }
+       }
+       pte_unmap_unlock(pte, ptl);
+}
+
+/* PaX: if vma is mirrored, synchronize the mirror's PTE
+ *
+ * the ptl of the lower mapped page is held on entry and is not released on exit
+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
+ */
+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long address_m;
+       spinlock_t *ptl_m;
+       struct vm_area_struct *vma_m;
+       pmd_t *pmd_m;
+       pte_t *pte_m, entry_m;
+
+       BUG_ON(!page_m || !PageAnon(page_m));
+
+       vma_m = pax_find_mirror_vma(vma);
+       if (!vma_m)
+               return;
+
+       BUG_ON(!PageLocked(page_m));
+       BUG_ON(address >= SEGMEXEC_TASK_SIZE);
+       address_m = address + SEGMEXEC_TASK_SIZE;
+       pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
+       pte_m = pte_offset_map(pmd_m, address_m);
+       ptl_m = pte_lockptr(mm, pmd_m);
+       if (ptl != ptl_m) {
+               spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
+               if (!pte_none(*pte_m))
+                       goto out;
+       }
+
+       entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
+       page_cache_get(page_m);
+       page_add_anon_rmap(page_m, vma_m, address_m);
+       inc_mm_counter_fast(mm, MM_ANONPAGES);
+       set_pte_at(mm, address_m, pte_m, entry_m);
+       update_mmu_cache(vma_m, address_m, pte_m);
+out:
+       if (ptl != ptl_m)
+               spin_unlock(ptl_m);
+       pte_unmap(pte_m);
+       unlock_page(page_m);
+}
+
+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long address_m;
+       spinlock_t *ptl_m;
+       struct vm_area_struct *vma_m;
+       pmd_t *pmd_m;
+       pte_t *pte_m, entry_m;
+
+       BUG_ON(!page_m || PageAnon(page_m));
+
+       vma_m = pax_find_mirror_vma(vma);
+       if (!vma_m)
+               return;
+
+       BUG_ON(address >= SEGMEXEC_TASK_SIZE);
+       address_m = address + SEGMEXEC_TASK_SIZE;
+       pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
+       pte_m = pte_offset_map(pmd_m, address_m);
+       ptl_m = pte_lockptr(mm, pmd_m);
+       if (ptl != ptl_m) {
+               spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
+               if (!pte_none(*pte_m))
+                       goto out;
+       }
+
+       entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
+       page_cache_get(page_m);
+       page_add_file_rmap(page_m);
+       inc_mm_counter_fast(mm, MM_FILEPAGES);
+       set_pte_at(mm, address_m, pte_m, entry_m);
+       update_mmu_cache(vma_m, address_m, pte_m);
+out:
+       if (ptl != ptl_m)
+               spin_unlock(ptl_m);
+       pte_unmap(pte_m);
+}
+
+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long address_m;
+       spinlock_t *ptl_m;
+       struct vm_area_struct *vma_m;
+       pmd_t *pmd_m;
+       pte_t *pte_m, entry_m;
+
+       vma_m = pax_find_mirror_vma(vma);
+       if (!vma_m)
+               return;
+
+       BUG_ON(address >= SEGMEXEC_TASK_SIZE);
+       address_m = address + SEGMEXEC_TASK_SIZE;
+       pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
+       pte_m = pte_offset_map(pmd_m, address_m);
+       ptl_m = pte_lockptr(mm, pmd_m);
+       if (ptl != ptl_m) {
+               spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
+               if (!pte_none(*pte_m))
+                       goto out;
+       }
+
+       entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
+       set_pte_at(mm, address_m, pte_m, entry_m);
+out:
+       if (ptl != ptl_m)
+               spin_unlock(ptl_m);
+       pte_unmap(pte_m);
+}
+
+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
+{
+       struct page *page_m;
+       pte_t entry;
+
+       if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
+               goto out;
+
+       entry = *pte;
+       page_m  = vm_normal_page(vma, address, entry);
+       if (!page_m)
+       pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
+       else if (PageAnon(page_m)) {
+               if (pax_find_mirror_vma(vma)) {
+                       pte_unmap_unlock(pte, ptl);
+                       lock_page(page_m);
+                       pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
+                       if (pte_same(entry, *pte))
+                               pax_mirror_anon_pte(vma, address, page_m, ptl);
+                       else
+                               unlock_page(page_m);
+               }
+       } else
+               pax_mirror_file_pte(vma, address, page_m, ptl);
+
+out:
+       pte_unmap_unlock(pte, ptl);
+}
+#endif
+
 /*
  * This routine handles present pages, when users try to write
  * to a shared page. It is done by copying the page to a new address
@@ -2212,6 +2419,12 @@ gotten:
         */
        page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (likely(pte_same(*page_table, orig_pte))) {
+
+#ifdef CONFIG_PAX_SEGMEXEC
+               if (pax_find_mirror_vma(vma))
+                       BUG_ON(!trylock_page(new_page));
+#endif
+
                if (old_page) {
                        if (!PageAnon(old_page)) {
                                dec_mm_counter_fast(mm, MM_FILEPAGES);
@@ -2265,6 +2478,10 @@ gotten:
                        page_remove_rmap(old_page);
                }
 
+#ifdef CONFIG_PAX_SEGMEXEC
+               pax_mirror_anon_pte(vma, address, new_page, ptl);
+#endif
+
                /* Free the old page.. */
                new_page = old_page;
                ret |= VM_FAULT_WRITE;
@@ -2539,6 +2756,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        swap_free(entry);
        if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
                try_to_free_swap(page);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
+#endif
+
        unlock_page(page);
        if (page != swapcache) {
                /*
@@ -2562,6 +2784,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
        /* No need to invalidate - it was non-present before */
        update_mmu_cache(vma, address, page_table);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       pax_mirror_anon_pte(vma, address, page, ptl);
+#endif
+
 unlock:
        pte_unmap_unlock(page_table, ptl);
 out:
@@ -2580,40 +2807,6 @@ out_release:
        return ret;
 }
 
-/*
- * This is like a special single-page "expand_{down|up}wards()",
- * except we must first make sure that 'address{-|+}PAGE_SIZE'
- * doesn't hit another vma.
- */
-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
-{
-       address &= PAGE_MASK;
-       if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
-               struct vm_area_struct *prev = vma->vm_prev;
-
-               /*
-                * Is there a mapping abutting this one below?
-                *
-                * That's only ok if it's the same stack mapping
-                * that has gotten split..
-                */
-               if (prev && prev->vm_end == address)
-                       return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
-
-               return expand_downwards(vma, address - PAGE_SIZE);
-       }
-       if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
-               struct vm_area_struct *next = vma->vm_next;
-
-               /* As VM_GROWSDOWN but s/below/above/ */
-               if (next && next->vm_start == address + PAGE_SIZE)
-                       return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
-
-               return expand_upwards(vma, address + PAGE_SIZE);
-       }
-       return 0;
-}
-
 /*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
@@ -2624,27 +2817,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned int flags)
 {
        struct mem_cgroup *memcg;
-       struct page *page;
+       struct page *page = NULL;
        spinlock_t *ptl;
        pte_t entry;
 
-       pte_unmap(page_table);
-
-       /* Check if we need to add a guard page to the stack */
-       if (check_stack_guard_page(vma, address) < 0)
-               return VM_FAULT_SIGSEGV;
-
-       /* Use the zero-page for reads */
        if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
                entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
                                                vma->vm_page_prot));
-               page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+               ptl = pte_lockptr(mm, pmd);
+               spin_lock(ptl);
                if (!pte_none(*page_table))
                        goto unlock;
                goto setpte;
        }
 
        /* Allocate our own private page. */
+       pte_unmap(page_table);
+
        if (unlikely(anon_vma_prepare(vma)))
                goto oom;
        page = alloc_zeroed_user_highpage_movable(vma, address);
@@ -2668,6 +2857,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!pte_none(*page_table))
                goto release;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (pax_find_mirror_vma(vma))
+               BUG_ON(!trylock_page(page));
+#endif
+
        inc_mm_counter_fast(mm, MM_ANONPAGES);
        page_add_new_anon_rmap(page, vma, address);
        mem_cgroup_commit_charge(page, memcg, false);
@@ -2677,6 +2871,12 @@ setpte:
 
        /* No need to invalidate - it was non-present before */
        update_mmu_cache(vma, address, page_table);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (page)
+               pax_mirror_anon_pte(vma, address, page, ptl);
+#endif
+
 unlock:
        pte_unmap_unlock(page_table, ptl);
        return 0;
@@ -2907,6 +3107,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                return ret;
        }
        do_set_pte(vma, address, fault_page, pte, false, false);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       pax_mirror_file_pte(vma, address, fault_page, ptl);
+#endif
+
        unlock_page(fault_page);
 unlock_out:
        pte_unmap_unlock(pte, ptl);
@@ -2949,7 +3154,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                page_cache_release(fault_page);
                goto uncharge_out;
        }
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (pax_find_mirror_vma(vma))
+               BUG_ON(!trylock_page(new_page));
+#endif
+
        do_set_pte(vma, address, new_page, pte, true, true);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       pax_mirror_anon_pte(vma, address, new_page, ptl);
+#endif
+
        mem_cgroup_commit_charge(new_page, memcg, false);
        lru_cache_add_active_or_unevictable(new_page, vma);
        pte_unmap_unlock(pte, ptl);
@@ -2999,6 +3215,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                return ret;
        }
        do_set_pte(vma, address, fault_page, pte, true, false);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       pax_mirror_file_pte(vma, address, fault_page, ptl);
+#endif
+
        pte_unmap_unlock(pte, ptl);
 
        if (set_page_dirty(fault_page))
@@ -3255,6 +3476,12 @@ static int handle_pte_fault(struct mm_struct *mm,
                if (flags & FAULT_FLAG_WRITE)
                        flush_tlb_fix_spurious_fault(vma, address);
        }
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       pax_mirror_pte(vma, address, pte, pmd, ptl);
+       return 0;
+#endif
+
 unlock:
        pte_unmap_unlock(pte, ptl);
        return 0;
@@ -3274,9 +3501,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        pmd_t *pmd;
        pte_t *pte;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       struct vm_area_struct *vma_m;
+#endif
+
        if (unlikely(is_vm_hugetlb_page(vma)))
                return hugetlb_fault(mm, vma, address, flags);
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       vma_m = pax_find_mirror_vma(vma);
+       if (vma_m) {
+               unsigned long address_m;
+               pgd_t *pgd_m;
+               pud_t *pud_m;
+               pmd_t *pmd_m;
+
+               if (vma->vm_start > vma_m->vm_start) {
+                       address_m = address;
+                       address -= SEGMEXEC_TASK_SIZE;
+                       vma = vma_m;
+               } else
+                       address_m = address + SEGMEXEC_TASK_SIZE;
+
+               pgd_m = pgd_offset(mm, address_m);
+               pud_m = pud_alloc(mm, pgd_m, address_m);
+               if (!pud_m)
+                       return VM_FAULT_OOM;
+               pmd_m = pmd_alloc(mm, pud_m, address_m);
+               if (!pmd_m)
+                       return VM_FAULT_OOM;
+               if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
+                       return VM_FAULT_OOM;
+               pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
+       }
+#endif
+
        pgd = pgd_offset(mm, address);
        pud = pud_alloc(mm, pgd, address);
        if (!pud)
@@ -3411,6 +3670,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
        spin_unlock(&mm->page_table_lock);
        return 0;
 }
+
+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+       pud_t *new = pud_alloc_one(mm, address);
+       if (!new)
+               return -ENOMEM;
+
+       smp_wmb(); /* See comment in __pte_alloc */
+
+       spin_lock(&mm->page_table_lock);
+       if (pgd_present(*pgd))          /* Another has populated it */
+               pud_free(mm, new);
+       else
+               pgd_populate_kernel(mm, pgd, new);
+       spin_unlock(&mm->page_table_lock);
+       return 0;
+}
 #endif /* __PAGETABLE_PUD_FOLDED */
 
 #ifndef __PAGETABLE_PMD_FOLDED
@@ -3441,6 +3717,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
        spin_unlock(&mm->page_table_lock);
        return 0;
 }
+
+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
+{
+       pmd_t *new = pmd_alloc_one(mm, address);
+       if (!new)
+               return -ENOMEM;
+
+       smp_wmb(); /* See comment in __pte_alloc */
+
+       spin_lock(&mm->page_table_lock);
+#ifndef __ARCH_HAS_4LEVEL_HACK
+       if (pud_present(*pud))          /* Another has populated it */
+               pmd_free(mm, new);
+       else
+               pud_populate_kernel(mm, pud, new);
+#else
+       if (pgd_present(*pud))          /* Another has populated it */
+               pmd_free(mm, new);
+       else
+               pgd_populate_kernel(mm, pud, new);
+#endif /* __ARCH_HAS_4LEVEL_HACK */
+       spin_unlock(&mm->page_table_lock);
+       return 0;
+}
 #endif /* __PAGETABLE_PMD_FOLDED */
 
 static int __follow_pte(struct mm_struct *mm, unsigned long address,
@@ -3550,8 +3850,8 @@ out:
        return ret;
 }
 
-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
-                       void *buf, int len, int write)
+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+                       void *buf, size_t len, int write)
 {
        resource_size_t phys_addr;
        unsigned long prot = 0;
@@ -3577,8 +3877,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
  * Access another process' address space as given in mm.  If non-NULL, use the
  * given task for page fault accounting.
  */
-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
-               unsigned long addr, void *buf, int len, int write)
+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+               unsigned long addr, void *buf, size_t len, int write)
 {
        struct vm_area_struct *vma;
        void *old_buf = buf;
@@ -3586,7 +3886,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
        down_read(&mm->mmap_sem);
        /* ignore errors, just check how much was successfully transferred */
        while (len) {
-               int bytes, ret, offset;
+               ssize_t bytes, ret, offset;
                void *maddr;
                struct page *page = NULL;
 
@@ -3647,8 +3947,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
  *
  * The caller must hold a reference on @mm.
  */
-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-               void *buf, int len, int write)
+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
+               void *buf, size_t len, int write)
 {
        return __access_remote_vm(NULL, mm, addr, buf, len, write);
 }
@@ -3658,11 +3958,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
  * Source/target buffer must be kernel space,
  * Do not walk the page table directly, use get_user_pages
  */
-int access_process_vm(struct task_struct *tsk, unsigned long addr,
-               void *buf, int len, int write)
+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
+               void *buf, size_t len, int write)
 {
        struct mm_struct *mm;
-       int ret;
+       ssize_t ret;
 
        mm = get_task_mm(tsk);
        if (!mm)
index 0e0961b8c39ceb18a7eca485753d1a74c905d353..c9143b9bb8e1a46947eb025648fcbb5eb8ab7ab8 100644 (file)
@@ -744,6 +744,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
        unsigned long vmstart;
        unsigned long vmend;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       struct vm_area_struct *vma_m;
+#endif
+
        vma = find_vma(mm, start);
        if (!vma || vma->vm_start > start)
                return -EFAULT;
@@ -787,6 +791,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
                err = vma_replace_policy(vma, new_pol);
                if (err)
                        goto out;
+
+#ifdef CONFIG_PAX_SEGMEXEC
+               vma_m = pax_find_mirror_vma(vma);
+               if (vma_m) {
+                       err = vma_replace_policy(vma_m, new_pol);
+                       if (err)
+                               goto out;
+               }
+#endif
+
        }
 
  out:
@@ -1201,6 +1215,17 @@ static long do_mbind(unsigned long start, unsigned long len,
 
        if (end < start)
                return -EINVAL;
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (mm->pax_flags & MF_PAX_SEGMEXEC) {
+               if (end > SEGMEXEC_TASK_SIZE)
+                       return -EINVAL;
+       } else
+#endif
+
+       if (end > TASK_SIZE)
+               return -EINVAL;
+
        if (end == start)
                return 0;
 
@@ -1426,8 +1451,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
         */
        tcred = __task_cred(task);
        if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
-           !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
-           !capable(CAP_SYS_NICE)) {
+           !uid_eq(cred->uid,  tcred->suid) && !capable(CAP_SYS_NICE)) {
                rcu_read_unlock();
                err = -EPERM;
                goto out_put;
@@ -1458,6 +1482,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
                goto out;
        }
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+       if (mm != current->mm &&
+           (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
+               mmput(mm);
+               err = -EPERM;
+               goto out;
+       }
+#endif
+
        err = do_migrate_pages(mm, old, new,
                capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
 
index 344cdf692fc8060b20022a559dd222fded2a6e12..073995009a1b93fe5efd74559c4beac6c41fcfe0 100644 (file)
@@ -1503,8 +1503,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
         */
        tcred = __task_cred(task);
        if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
-           !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
-           !capable(CAP_SYS_NICE)) {
+           !uid_eq(cred->uid,  tcred->suid) && !capable(CAP_SYS_NICE)) {
                rcu_read_unlock();
                err = -EPERM;
                goto out;
index 73cf0987088c36647fbb805278978bb656ff1fda..ab547c73af7b952961d089f60bb6c24d75c7a206 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/pagevec.h>
 #include <linux/mempolicy.h>
 #include <linux/syscalls.h>
+#include <linux/security.h>
 #include <linux/sched.h>
 #include <linux/export.h>
 #include <linux/rmap.h>
@@ -613,7 +614,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
 {
        unsigned long nstart, end, tmp;
        struct vm_area_struct * vma, * prev;
-       int error;
+       int error = 0;
 
        VM_BUG_ON(start & ~PAGE_MASK);
        VM_BUG_ON(len != PAGE_ALIGN(len));
@@ -622,6 +623,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
                return -EINVAL;
        if (end == start)
                return 0;
+       if (end > TASK_SIZE)
+               return -EINVAL;
+
        vma = find_vma(current->mm, start);
        if (!vma || vma->vm_start > start)
                return -ENOMEM;
@@ -633,6 +637,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
        for (nstart = start ; ; ) {
                vm_flags_t newflags;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+               if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
+                       break;
+#endif
+
                /* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
 
                newflags = vma->vm_flags & ~VM_LOCKED;
@@ -746,6 +755,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
        locked += current->mm->locked_vm;
 
        /* check against resource limits */
+       gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
        if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
                error = do_mlock(start, len, 1);
 
@@ -783,6 +793,11 @@ static int do_mlockall(int flags)
        for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
                vm_flags_t newflags;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+               if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
+                       break;
+#endif
+
                newflags = vma->vm_flags & ~VM_LOCKED;
                if (flags & MCL_CURRENT)
                        newflags |= VM_LOCKED;
@@ -814,8 +829,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
        lock_limit >>= PAGE_SHIFT;
 
        ret = -ENOMEM;
-       down_write(&current->mm->mmap_sem);
 
+       gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
+
+       down_write(&current->mm->mmap_sem);
        if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
            capable(CAP_IPC_LOCK))
                ret = do_mlockall(flags);
index e5cc3ca1d869f30998ec0e740a4646b8c3c029a5..bb9333f395434cb693741b044104730392821a1c 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -41,6 +41,7 @@
 #include <linux/notifier.h>
 #include <linux/memory.h>
 #include <linux/printk.h>
+#include <linux/random.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
 #define arch_rebalance_pgtables(addr, len)             (addr)
 #endif
 
+static inline void verify_mm_writelocked(struct mm_struct *mm)
+{
+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
+       if (unlikely(down_read_trylock(&mm->mmap_sem))) {
+               up_read(&mm->mmap_sem);
+               BUG();
+       }
+#endif
+}
+
 static void unmap_region(struct mm_struct *mm,
                struct vm_area_struct *vma, struct vm_area_struct *prev,
                unsigned long start, unsigned long end);
@@ -76,16 +87,25 @@ static void unmap_region(struct mm_struct *mm,
  *             x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
  *
  */
-pgprot_t protection_map[16] = {
+pgprot_t protection_map[16] __read_only = {
        __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
        __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
 };
 
-pgprot_t vm_get_page_prot(unsigned long vm_flags)
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
 {
-       return __pgprot(pgprot_val(protection_map[vm_flags &
+       pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
                                (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
                        pgprot_val(arch_vm_get_page_prot(vm_flags)));
+
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
+       if (!(__supported_pte_mask & _PAGE_NX) &&
+           (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
+           (vm_flags & (VM_READ | VM_WRITE)))
+               prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
+#endif
+
+       return prot;
 }
 EXPORT_SYMBOL(vm_get_page_prot);
 
@@ -114,6 +134,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
 /*
  * Make sure vm_committed_as in one cacheline and not cacheline shared with
  * other variables. It can be updated by several CPUs frequently.
@@ -274,6 +295,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
        struct vm_area_struct *next = vma->vm_next;
 
        might_sleep();
+       BUG_ON(vma->vm_mirror);
        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);
        if (vma->vm_file)
@@ -287,6 +309,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len);
 
 SYSCALL_DEFINE1(brk, unsigned long, brk)
 {
+       unsigned long rlim;
        unsigned long retval;
        unsigned long newbrk, oldbrk;
        struct mm_struct *mm = current->mm;
@@ -317,7 +340,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
         * segment grow beyond its set limit the in case where the limit is
         * not page aligned -Ram Gupta
         */
-       if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
+       rlim = rlimit(RLIMIT_DATA);
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+       /* force a minimum 16MB brk heap on setuid/setgid binaries */
+       if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
+               rlim = 4096 * PAGE_SIZE;
+#endif
+       if (check_data_rlimit(rlim, brk, mm->start_brk,
                              mm->end_data, mm->start_data))
                goto out;
 
@@ -978,6 +1007,12 @@ static int
 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
        struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
 {
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
+               return 0;
+#endif
+
        if (is_mergeable_vma(vma, file, vm_flags) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                if (vma->vm_pgoff == vm_pgoff)
@@ -997,6 +1032,12 @@ static int
 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
        struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
 {
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
+               return 0;
+#endif
+
        if (is_mergeable_vma(vma, file, vm_flags) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                pgoff_t vm_pglen;
@@ -1046,6 +1087,13 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
        struct vm_area_struct *area, *next;
        int err;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
+       struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
+
+       BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
+#endif
+
        /*
         * We later require that vma->vm_flags == vm_flags,
         * so this tests vma->vm_flags & VM_SPECIAL, too.
@@ -1061,6 +1109,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
        if (next && next->vm_end == end)                /* cases 6, 7, 8 */
                next = next->vm_next;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (prev)
+               prev_m = pax_find_mirror_vma(prev);
+       if (area)
+               area_m = pax_find_mirror_vma(area);
+       if (next)
+               next_m = pax_find_mirror_vma(next);
+#endif
+
        /*
         * Can it merge with the predecessor?
         */
@@ -1080,9 +1137,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                                                        /* cases 1, 6 */
                        err = vma_adjust(prev, prev->vm_start,
                                next->vm_end, prev->vm_pgoff, NULL);
-               } else                                  /* cases 2, 5, 7 */
+
+#ifdef CONFIG_PAX_SEGMEXEC
+                       if (!err && prev_m)
+                               err = vma_adjust(prev_m, prev_m->vm_start,
+                                       next_m->vm_end, prev_m->vm_pgoff, NULL);
+#endif
+
+               } else {                                /* cases 2, 5, 7 */
                        err = vma_adjust(prev, prev->vm_start,
                                end, prev->vm_pgoff, NULL);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+                       if (!err && prev_m)
+                               err = vma_adjust(prev_m, prev_m->vm_start,
+                                               end_m, prev_m->vm_pgoff, NULL);
+#endif
+
+               }
                if (err)
                        return NULL;
                khugepaged_enter_vma_merge(prev, vm_flags);
@@ -1096,12 +1168,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                        mpol_equal(policy, vma_policy(next)) &&
                        can_vma_merge_before(next, vm_flags,
                                        anon_vma, file, pgoff+pglen)) {
-               if (prev && addr < prev->vm_end)        /* case 4 */
+               if (prev && addr < prev->vm_end) {      /* case 4 */
                        err = vma_adjust(prev, prev->vm_start,
                                addr, prev->vm_pgoff, NULL);
-               else                                    /* cases 3, 8 */
+
+#ifdef CONFIG_PAX_SEGMEXEC
+                       if (!err && prev_m)
+                               err = vma_adjust(prev_m, prev_m->vm_start,
+                                               addr_m, prev_m->vm_pgoff, NULL);
+#endif
+
+               } else {                                /* cases 3, 8 */
                        err = vma_adjust(area, addr, next->vm_end,
                                next->vm_pgoff - pglen, NULL);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+                       if (!err && area_m)
+                               err = vma_adjust(area_m, addr_m, next_m->vm_end,
+                                               next_m->vm_pgoff - pglen, NULL);
+#endif
+
+               }
                if (err)
                        return NULL;
                khugepaged_enter_vma_merge(area, vm_flags);
@@ -1210,8 +1297,10 @@ none:
 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
                                                struct file *file, long pages)
 {
-       const unsigned long stack_flags
-               = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
+
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
+#endif
 
        mm->total_vm += pages;
 
@@ -1219,7 +1308,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
                mm->shared_vm += pages;
                if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
                        mm->exec_vm += pages;
-       } else if (flags & stack_flags)
+       } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
                mm->stack_vm += pages;
 }
 #endif /* CONFIG_PROC_FS */
@@ -1249,6 +1338,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
                locked += mm->locked_vm;
                lock_limit = rlimit(RLIMIT_MEMLOCK);
                lock_limit >>= PAGE_SHIFT;
+               gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        return -EAGAIN;
        }
@@ -1275,7 +1365,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
         * (the exception is when the underlying filesystem is noexec
         *  mounted, in which case we dont add PROT_EXEC.)
         */
-       if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
+       if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
                if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
                        prot |= PROT_EXEC;
 
@@ -1301,7 +1391,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
        /* Obtain the address to map to. we verify (or select) it and ensure
         * that it represents a valid section of the address space.
         */
-       addr = get_unmapped_area(file, addr, len, pgoff, flags);
+       addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
        if (addr & ~PAGE_MASK)
                return addr;
 
@@ -1312,6 +1402,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
        vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
                        mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 
+#ifdef CONFIG_PAX_MPROTECT
+       if (mm->pax_flags & MF_PAX_MPROTECT) {
+
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
+               if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
+                   mm->binfmt->handle_mmap)
+                       mm->binfmt->handle_mmap(file);
+#endif
+
+#ifndef CONFIG_PAX_MPROTECT_COMPAT
+               if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
+                       gr_log_rwxmmap(file);
+
+#ifdef CONFIG_PAX_EMUPLT
+                       vm_flags &= ~VM_EXEC;
+#else
+                       return -EPERM;
+#endif
+
+               }
+
+               if (!(vm_flags & VM_EXEC))
+                       vm_flags &= ~VM_MAYEXEC;
+#else
+               if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
+                       vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
+#endif
+               else
+                       vm_flags &= ~VM_MAYWRITE;
+       }
+#endif
+
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
+       if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
+               vm_flags &= ~VM_PAGEEXEC;
+#endif
+
        if (flags & MAP_LOCKED)
                if (!can_do_mlock())
                        return -EPERM;
@@ -1399,6 +1526,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                        vm_flags |= VM_NORESERVE;
        }
 
+       if (!gr_acl_handle_mmap(file, prot))
+               return -EACCES;
+       
        addr = mmap_region(file, addr, len, vm_flags, pgoff);
        if (!IS_ERR_VALUE(addr) &&
            ((vm_flags & VM_LOCKED) ||
@@ -1492,7 +1622,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
        vm_flags_t vm_flags = vma->vm_flags;
 
        /* If it was private or non-writable, the write bit is already clear */
-       if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
+       if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
                return 0;
 
        /* The backer wishes to know when pages are first written to? */
@@ -1543,7 +1673,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
        struct rb_node **rb_link, *rb_parent;
        unsigned long charged = 0;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       struct vm_area_struct *vma_m = NULL;
+#endif
+
+       /*
+        * mm->mmap_sem is required to protect against another thread
+        * changing the mappings in case we sleep.
+        */
+       verify_mm_writelocked(mm);
+
        /* Check against address space limit. */
+
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
+#endif
+
        if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
                unsigned long nr_pages;
 
@@ -1562,11 +1707,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 
        /* Clear old maps */
        error = -ENOMEM;
-munmap_back:
        if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
                if (do_munmap(mm, addr, len))
                        return -ENOMEM;
-               goto munmap_back;
+               BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
        }
 
        /*
@@ -1597,6 +1741,16 @@ munmap_back:
                goto unacct_error;
        }
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
+               vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+               if (!vma_m) {
+                       error = -ENOMEM;
+                       goto free_vma;
+               }
+       }
+#endif
+
        vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
@@ -1627,6 +1781,13 @@ munmap_back:
                if (error)
                        goto unmap_and_free_vma;
 
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
+               if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
+                       vma->vm_flags |= VM_PAGEEXEC;
+                       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+               }
+#endif
+
                /* Can addr have changed??
                 *
                 * Answer: Yes, several device drivers can do it in their
@@ -1645,6 +1806,12 @@ munmap_back:
        }
 
        vma_link(mm, vma, prev, rb_link, rb_parent);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (vma_m)
+               BUG_ON(pax_mirror_vma(vma_m, vma));
+#endif
+
        /* Once vma denies write, undo our temporary denial count */
        if (file) {
                if (vm_flags & VM_SHARED)
@@ -1657,6 +1824,7 @@ out:
        perf_event_mmap(vma);
 
        vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
+       track_exec_limit(mm, addr, addr + len, vm_flags);
        if (vm_flags & VM_LOCKED) {
                if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
                                        vma == get_gate_vma(current->mm)))
@@ -1694,6 +1862,12 @@ allow_write_and_free_vma:
        if (vm_flags & VM_DENYWRITE)
                allow_write_access(file);
 free_vma:
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (vma_m)
+               kmem_cache_free(vm_area_cachep, vma_m);
+#endif
+
        kmem_cache_free(vm_area_cachep, vma);
 unacct_error:
        if (charged)
@@ -1701,7 +1875,63 @@ unacct_error:
        return error;
 }
 
-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
+{
+       if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
+               return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
+
+       return 0;
+}
+#endif
+
+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
+{
+       if (!vma) {
+#ifdef CONFIG_STACK_GROWSUP
+               if (addr > sysctl_heap_stack_gap)
+                       vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
+               else
+                       vma = find_vma(current->mm, 0);
+               if (vma && (vma->vm_flags & VM_GROWSUP))
+                       return false;
+#endif
+               return true;
+       }
+
+       if (addr + len > vma->vm_start)
+               return false;
+
+       if (vma->vm_flags & VM_GROWSDOWN)
+               return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
+#ifdef CONFIG_STACK_GROWSUP
+       else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
+               return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
+#endif
+       else if (offset)
+               return offset <= vma->vm_start - addr - len;
+
+       return true;
+}
+
+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
+{
+       if (vma->vm_start < len)
+               return -ENOMEM;
+
+       if (!(vma->vm_flags & VM_GROWSDOWN)) {
+               if (offset <= vma->vm_start - len)
+                       return vma->vm_start - len - offset;
+               else
+                       return -ENOMEM;
+       }
+
+       if (sysctl_heap_stack_gap <= vma->vm_start - len)
+               return vma->vm_start - len - sysctl_heap_stack_gap;
+       return -ENOMEM;
+}
+
+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
 {
        /*
         * We implement the search by looking for an rbtree node that
@@ -1749,11 +1979,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
                        }
                }
 
-               gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+               gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
 check_current:
                /* Check if current node has a suitable gap */
                if (gap_start > high_limit)
                        return -ENOMEM;
+
+               if (gap_end - gap_start > info->threadstack_offset)
+                       gap_start += info->threadstack_offset;
+               else
+                       gap_start = gap_end;
+
+               if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
+                       if (gap_end - gap_start > sysctl_heap_stack_gap)
+                               gap_start += sysctl_heap_stack_gap;
+                       else
+                               gap_start = gap_end;
+               }
+               if (vma->vm_flags & VM_GROWSDOWN) {
+                       if (gap_end - gap_start > sysctl_heap_stack_gap)
+                               gap_end -= sysctl_heap_stack_gap;
+                       else
+                               gap_end = gap_start;
+               }
                if (gap_end >= low_limit && gap_end - gap_start >= length)
                        goto found;
 
@@ -1803,7 +2051,7 @@ found:
        return gap_start;
 }
 
-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
@@ -1857,6 +2105,24 @@ check_current:
                gap_end = vma->vm_start;
                if (gap_end < low_limit)
                        return -ENOMEM;
+
+               if (gap_end - gap_start > info->threadstack_offset)
+                       gap_end -= info->threadstack_offset;
+               else
+                       gap_end = gap_start;
+
+               if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
+                       if (gap_end - gap_start > sysctl_heap_stack_gap)
+                               gap_start += sysctl_heap_stack_gap;
+                       else
+                               gap_start = gap_end;
+               }
+               if (vma->vm_flags & VM_GROWSDOWN) {
+                       if (gap_end - gap_start > sysctl_heap_stack_gap)
+                               gap_end -= sysctl_heap_stack_gap;
+                       else
+                               gap_end = gap_start;
+               }
                if (gap_start <= high_limit && gap_end - gap_start >= length)
                        goto found;
 
@@ -1920,6 +2186,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct vm_unmapped_area_info info;
+       unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 
        if (len > TASK_SIZE - mmap_min_addr)
                return -ENOMEM;
@@ -1927,11 +2194,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        if (flags & MAP_FIXED)
                return addr;
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   check_heap_stack_gap(vma, addr, len, offset))
                        return addr;
        }
 
@@ -1940,6 +2211,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = 0;
+       info.threadstack_offset = offset;
        return vm_unmapped_area(&info);
 }
 #endif
@@ -1958,6 +2230,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;
+       unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 
        /* requested length too big for entire address space */
        if (len > TASK_SIZE - mmap_min_addr)
@@ -1966,12 +2239,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        if (flags & MAP_FIXED)
                return addr;
 
+#ifdef CONFIG_PAX_RANDMMAP
+       if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                               (!vma || addr + len <= vma->vm_start))
+                               check_heap_stack_gap(vma, addr, len, offset))
                        return addr;
        }
 
@@ -1980,6 +2257,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        info.low_limit = max(PAGE_SIZE, mmap_min_addr);
        info.high_limit = mm->mmap_base;
        info.align_mask = 0;
+       info.threadstack_offset = offset;
        addr = vm_unmapped_area(&info);
 
        /*
@@ -1992,6 +2270,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_PAX_RANDMMAP
+               if (mm->pax_flags & MF_PAX_RANDMMAP)
+                       info.low_limit += mm->delta_mmap;
+#endif
+
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }
@@ -2092,6 +2376,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
        return vma;
 }
 
+#ifdef CONFIG_PAX_SEGMEXEC
+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
+{
+       struct vm_area_struct *vma_m;
+
+       BUG_ON(!vma || vma->vm_start >= vma->vm_end);
+       if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
+               BUG_ON(vma->vm_mirror);
+               return NULL;
+       }
+       BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
+       vma_m = vma->vm_mirror;
+       BUG_ON(!vma_m || vma_m->vm_mirror != vma);
+       BUG_ON(vma->vm_file != vma_m->vm_file);
+       BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
+       BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
+       BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
+       BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
+       return vma_m;
+}
+#endif
+
 /*
  * Verify that the stack growth is acceptable and
  * update accounting. This is shared with both the
@@ -2109,8 +2415,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 
        /* Stack limit test */
        actual_size = size;
-       if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
-               actual_size -= PAGE_SIZE;
+       gr_learn_resource(current, RLIMIT_STACK, actual_size, 1);
        if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
                return -ENOMEM;
 
@@ -2121,6 +2426,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
                locked = mm->locked_vm + grow;
                limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
                limit >>= PAGE_SHIFT;
+               gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
                if (locked > limit && !capable(CAP_IPC_LOCK))
                        return -ENOMEM;
        }
@@ -2150,37 +2456,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
  * vma is the last one with address > vma->vm_end.  Have to extend vma.
  */
+#ifndef CONFIG_IA64
+static
+#endif
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
        int error;
+       bool locknext;
 
        if (!(vma->vm_flags & VM_GROWSUP))
                return -EFAULT;
 
+       /* Also guard against wrapping around to address 0. */
+       if (address < PAGE_ALIGN(address+1))
+               address = PAGE_ALIGN(address+1);
+       else
+               return -ENOMEM;
+
        /*
         * We must make sure the anon_vma is allocated
         * so that the anon_vma locking is not a noop.
         */
        if (unlikely(anon_vma_prepare(vma)))
                return -ENOMEM;
+       locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
+       if (locknext && anon_vma_prepare(vma->vm_next))
+               return -ENOMEM;
        vma_lock_anon_vma(vma);
+       if (locknext)
+               vma_lock_anon_vma(vma->vm_next);
 
        /*
         * vma->vm_start/vm_end cannot change under us because the caller
         * is required to hold the mmap_sem in read mode.  We need the
-        * anon_vma lock to serialize against concurrent expand_stacks.
-        * Also guard against wrapping around to address 0.
+        * anon_vma locks to serialize against concurrent expand_stacks
+        * and expand_upwards.
         */
-       if (address < PAGE_ALIGN(address+4))
-               address = PAGE_ALIGN(address+4);
-       else {
-               vma_unlock_anon_vma(vma);
-               return -ENOMEM;
-       }
        error = 0;
 
        /* Somebody else might have raced and expanded it already */
-       if (address > vma->vm_end) {
+       if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
+               error = -ENOMEM;
+       else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
                unsigned long size, grow;
 
                size = address - vma->vm_start;
@@ -2215,6 +2532,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                        }
                }
        }
+       if (locknext)
+               vma_unlock_anon_vma(vma->vm_next);
        vma_unlock_anon_vma(vma);
        khugepaged_enter_vma_merge(vma, vma->vm_flags);
        validate_mm(vma->vm_mm);
@@ -2229,6 +2548,8 @@ int expand_downwards(struct vm_area_struct *vma,
                                   unsigned long address)
 {
        int error;
+       bool lockprev = false;
+       struct vm_area_struct *prev;
 
        /*
         * We must make sure the anon_vma is allocated
@@ -2242,6 +2563,15 @@ int expand_downwards(struct vm_area_struct *vma,
        if (error)
                return error;
 
+       prev = vma->vm_prev;
+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
+       lockprev = prev && (prev->vm_flags & VM_GROWSUP);
+#endif
+       if (lockprev && anon_vma_prepare(prev))
+               return -ENOMEM;
+       if (lockprev)
+               vma_lock_anon_vma(prev);
+
        vma_lock_anon_vma(vma);
 
        /*
@@ -2251,9 +2581,17 @@ int expand_downwards(struct vm_area_struct *vma,
         */
 
        /* Somebody else might have raced and expanded it already */
-       if (address < vma->vm_start) {
+       if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
+               error = -ENOMEM;
+       else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
                unsigned long size, grow;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+               struct vm_area_struct *vma_m;
+
+               vma_m = pax_find_mirror_vma(vma);
+#endif
+
                size = vma->vm_end - address;
                grow = (vma->vm_start - address) >> PAGE_SHIFT;
 
@@ -2278,13 +2616,27 @@ int expand_downwards(struct vm_area_struct *vma,
                                vma->vm_pgoff -= grow;
                                anon_vma_interval_tree_post_update_vma(vma);
                                vma_gap_update(vma);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+                               if (vma_m) {
+                                       anon_vma_interval_tree_pre_update_vma(vma_m);
+                                       vma_m->vm_start -= grow << PAGE_SHIFT;
+                                       vma_m->vm_pgoff -= grow;
+                                       anon_vma_interval_tree_post_update_vma(vma_m);
+                                       vma_gap_update(vma_m);
+                               }
+#endif
+
                                spin_unlock(&vma->vm_mm->page_table_lock);
 
+                               track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
                                perf_event_mmap(vma);
                        }
                }
        }
        vma_unlock_anon_vma(vma);
+       if (lockprev)
+               vma_unlock_anon_vma(prev);
        khugepaged_enter_vma_merge(vma, vma->vm_flags);
        validate_mm(vma->vm_mm);
        return error;
@@ -2384,6 +2736,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
        do {
                long nrpages = vma_pages(vma);
 
+#ifdef CONFIG_PAX_SEGMEXEC
+               if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
+                       vma = remove_vma(vma);
+                       continue;
+               }
+#endif
+
                if (vma->vm_flags & VM_ACCOUNT)
                        nr_accounted += nrpages;
                vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
@@ -2428,6 +2787,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
        insertion_point = (prev ? &prev->vm_next : &mm->mmap);
        vma->vm_prev = NULL;
        do {
+
+#ifdef CONFIG_PAX_SEGMEXEC
+               if (vma->vm_mirror) {
+                       BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
+                       vma->vm_mirror->vm_mirror = NULL;
+                       vma->vm_mirror->vm_flags &= ~VM_EXEC;
+                       vma->vm_mirror = NULL;
+               }
+#endif
+
                vma_rb_erase(vma, &mm->mm_rb);
                mm->map_count--;
                tail_vma = vma;
@@ -2455,14 +2824,33 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        struct vm_area_struct *new;
        int err = -ENOMEM;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       struct vm_area_struct *vma_m, *new_m = NULL;
+       unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
+#endif
+
        if (is_vm_hugetlb_page(vma) && (addr &
                                        ~(huge_page_mask(hstate_vma(vma)))))
                return -EINVAL;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       vma_m = pax_find_mirror_vma(vma);
+#endif
+
        new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!new)
                goto out_err;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (vma_m) {
+               new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+               if (!new_m) {
+                       kmem_cache_free(vm_area_cachep, new);
+                       goto out_err;
+               }
+       }
+#endif
+
        /* most fields are the same, copy all, and then fixup */
        *new = *vma;
 
@@ -2475,6 +2863,22 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
                new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
        }
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (vma_m) {
+               *new_m = *vma_m;
+               INIT_LIST_HEAD(&new_m->anon_vma_chain);
+               new_m->vm_mirror = new;
+               new->vm_mirror = new_m;
+
+               if (new_below)
+                       new_m->vm_end = addr_m;
+               else {
+                       new_m->vm_start = addr_m;
+                       new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
+               }
+       }
+#endif
+
        err = vma_dup_policy(vma, new);
        if (err)
                goto out_free_vma;
@@ -2495,6 +2899,38 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        else
                err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (!err && vma_m) {
+               struct mempolicy *pol = vma_policy(new);
+
+               if (anon_vma_clone(new_m, vma_m))
+                       goto out_free_mpol;
+
+               mpol_get(pol);
+               set_vma_policy(new_m, pol);
+
+               if (new_m->vm_file)
+                       get_file(new_m->vm_file);
+
+               if (new_m->vm_ops && new_m->vm_ops->open)
+                       new_m->vm_ops->open(new_m);
+
+               if (new_below)
+                       err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
+                               ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
+               else
+                       err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
+
+               if (err) {
+                       if (new_m->vm_ops && new_m->vm_ops->close)
+                               new_m->vm_ops->close(new_m);
+                       if (new_m->vm_file)
+                               fput(new_m->vm_file);
+                       mpol_put(pol);
+               }
+       }
+#endif
+
        /* Success. */
        if (!err)
                return 0;
@@ -2504,10 +2940,18 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
                new->vm_ops->close(new);
        if (new->vm_file)
                fput(new->vm_file);
-       unlink_anon_vmas(new);
  out_free_mpol:
        mpol_put(vma_policy(new));
  out_free_vma:
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (new_m) {
+               unlink_anon_vmas(new_m);
+               kmem_cache_free(vm_area_cachep, new_m);
+       }
+#endif
+
+       unlink_anon_vmas(new);
        kmem_cache_free(vm_area_cachep, new);
  out_err:
        return err;
@@ -2520,6 +2964,15 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
              unsigned long addr, int new_below)
 {
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (mm->pax_flags & MF_PAX_SEGMEXEC) {
+               BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
+               if (mm->map_count >= sysctl_max_map_count-1)
+                       return -ENOMEM;
+       } else
+#endif
+
        if (mm->map_count >= sysctl_max_map_count)
                return -ENOMEM;
 
@@ -2531,11 +2984,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
  * work.  This now handles partial unmappings.
  * Jeremy Fitzhardinge <jeremy@goop.org>
  */
+#ifdef CONFIG_PAX_SEGMEXEC
 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+{
+       int ret = __do_munmap(mm, start, len);
+       if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
+               return ret;
+
+       return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
+}
+
+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+#else
+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+#endif
 {
        unsigned long end;
        struct vm_area_struct *vma, *prev, *last;
 
+       /*
+        * mm->mmap_sem is required to protect against another thread
+        * changing the mappings in case we sleep.
+        */
+       verify_mm_writelocked(mm);
+
        if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
                return -EINVAL;
 
@@ -2613,6 +3085,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
        /* Fix up all other VM information */
        remove_vma_list(mm, vma);
 
+       track_exec_limit(mm, start, end, 0UL);
+
        return 0;
 }
 
@@ -2621,6 +3095,13 @@ int vm_munmap(unsigned long start, size_t len)
        int ret;
        struct mm_struct *mm = current->mm;
 
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
+           (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
+               return -EINVAL;
+#endif
+
        down_write(&mm->mmap_sem);
        ret = do_munmap(mm, start, len);
        up_write(&mm->mmap_sem);
@@ -2634,16 +3115,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
        return vm_munmap(addr, len);
 }
 
-static inline void verify_mm_writelocked(struct mm_struct *mm)
-{
-#ifdef CONFIG_DEBUG_VM
-       if (unlikely(down_read_trylock(&mm->mmap_sem))) {
-               WARN_ON(1);
-               up_read(&mm->mmap_sem);
-       }
-#endif
-}
-
 /*
  *  this is really a simplified "do_mmap".  it only handles
  *  anonymous maps.  eventually we may be able to do some
@@ -2657,6 +3128,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
        struct rb_node **rb_link, *rb_parent;
        pgoff_t pgoff = addr >> PAGE_SHIFT;
        int error;
+       unsigned long charged;
 
        len = PAGE_ALIGN(len);
        if (!len)
@@ -2664,10 +3136,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
 
        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+       if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
+               flags &= ~VM_EXEC;
+
+#ifdef CONFIG_PAX_MPROTECT
+               if (mm->pax_flags & MF_PAX_MPROTECT)
+                       flags &= ~VM_MAYEXEC;
+#endif
+
+       }
+#endif
+
        error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
        if (error & ~PAGE_MASK)
                return error;
 
+       charged = len >> PAGE_SHIFT;
+
        error = mlock_future_check(mm, mm->def_flags, len);
        if (error)
                return error;
@@ -2681,21 +3167,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
        /*
         * Clear old maps.  this also does some error checking for us
         */
- munmap_back:
        if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
                if (do_munmap(mm, addr, len))
                        return -ENOMEM;
-               goto munmap_back;
+               BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
        }
 
        /* Check against address space limits *after* clearing old maps... */
-       if (!may_expand_vm(mm, len >> PAGE_SHIFT))
+       if (!may_expand_vm(mm, charged))
                return -ENOMEM;
 
        if (mm->map_count > sysctl_max_map_count)
                return -ENOMEM;
 
-       if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
+       if (security_vm_enough_memory_mm(mm, charged))
                return -ENOMEM;
 
        /* Can we just expand an old private anonymous mapping? */
@@ -2709,7 +3194,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
         */
        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma) {
-               vm_unacct_memory(len >> PAGE_SHIFT);
+               vm_unacct_memory(charged);
                return -ENOMEM;
        }
 
@@ -2723,10 +3208,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
        vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
        perf_event_mmap(vma);
-       mm->total_vm += len >> PAGE_SHIFT;
+       mm->total_vm += charged;
        if (flags & VM_LOCKED)
-               mm->locked_vm += (len >> PAGE_SHIFT);
+               mm->locked_vm += charged;
        vma->vm_flags |= VM_SOFTDIRTY;
+       track_exec_limit(mm, addr, addr + len, flags);
        return addr;
 }
 
@@ -2788,6 +3274,7 @@ void exit_mmap(struct mm_struct *mm)
        while (vma) {
                if (vma->vm_flags & VM_ACCOUNT)
                        nr_accounted += vma_pages(vma);
+               vma->vm_mirror = NULL;
                vma = remove_vma(vma);
        }
        vm_unacct_memory(nr_accounted);
@@ -2805,6 +3292,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
        struct vm_area_struct *prev;
        struct rb_node **rb_link, *rb_parent;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       struct vm_area_struct *vma_m = NULL;
+#endif
+
+       if (security_mmap_addr(vma->vm_start))
+               return -EPERM;
+
        /*
         * The vm_pgoff of a purely anonymous vma should be irrelevant
         * until its first write fault, when page's anon_vma and index
@@ -2828,7 +3322,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
             security_vm_enough_memory_mm(mm, vma_pages(vma)))
                return -ENOMEM;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
+               vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+               if (!vma_m)
+                       return -ENOMEM;
+       }
+#endif
+
        vma_link(mm, vma, prev, rb_link, rb_parent);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (vma_m)
+               BUG_ON(pax_mirror_vma(vma_m, vma));
+#endif
+
        return 0;
 }
 
@@ -2847,6 +3355,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
        struct rb_node **rb_link, *rb_parent;
        bool faulted_in_anon_vma = true;
 
+       BUG_ON(vma->vm_mirror);
+
        /*
         * If anonymous vma has not yet been faulted, update new pgoff
         * to match new location, to increase its chance of merging.
@@ -2911,6 +3421,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
        return NULL;
 }
 
+#ifdef CONFIG_PAX_SEGMEXEC
+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
+{
+       struct vm_area_struct *prev_m;
+       struct rb_node **rb_link_m, *rb_parent_m;
+       struct mempolicy *pol_m;
+
+       BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
+       BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
+       BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
+       *vma_m = *vma;
+       INIT_LIST_HEAD(&vma_m->anon_vma_chain);
+       if (anon_vma_clone(vma_m, vma))
+               return -ENOMEM;
+       pol_m = vma_policy(vma_m);
+       mpol_get(pol_m);
+       set_vma_policy(vma_m, pol_m);
+       vma_m->vm_start += SEGMEXEC_TASK_SIZE;
+       vma_m->vm_end += SEGMEXEC_TASK_SIZE;
+       vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
+       vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
+       if (vma_m->vm_file)
+               get_file(vma_m->vm_file);
+       if (vma_m->vm_ops && vma_m->vm_ops->open)
+               vma_m->vm_ops->open(vma_m);
+       BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
+       vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
+       vma_m->vm_mirror = vma;
+       vma->vm_mirror = vma_m;
+       return 0;
+}
+#endif
+
 /*
  * Return true if the calling process may expand its vm space by the passed
  * number of pages
@@ -2922,6 +3465,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
 
        lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
 
+       gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
        if (cur + npages > lim)
                return 0;
        return 1;
@@ -3004,6 +3548,22 @@ static struct vm_area_struct *__install_special_mapping(
        vma->vm_start = addr;
        vma->vm_end = addr + len;
 
+#ifdef CONFIG_PAX_MPROTECT
+       if (mm->pax_flags & MF_PAX_MPROTECT) {
+#ifndef CONFIG_PAX_MPROTECT_COMPAT
+               if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
+                       return ERR_PTR(-EPERM);
+               if (!(vm_flags & VM_EXEC))
+                       vm_flags &= ~VM_MAYEXEC;
+#else
+               if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
+                       vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
+#endif
+               else
+                       vm_flags &= ~VM_MAYWRITE;
+       }
+#endif
+
        vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
index ace93454ce8ebe10f0b3cf5278aafd0a0d5fe4df..63320dcc8159a340f3c925a61a1a5f0696bbc847 100644 (file)
 #include <linux/migrate.h>
 #include <linux/perf_event.h>
 #include <linux/ksm.h>
+#include <linux/sched/sysctl.h>
+
+#ifdef CONFIG_PAX_MPROTECT
+#include <linux/elf.h>
+#include <linux/binfmts.h>
+#endif
+
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
 
 /*
  * For a prot_numa update we only hold mmap_sem for read so there is a
@@ -251,6 +259,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
        return pages;
 }
 
+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
+/* called while holding the mmap semaphor for writing except stack expansion */
+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
+{
+       unsigned long oldlimit, newlimit = 0UL;
+
+       if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
+               return;
+
+       spin_lock(&mm->page_table_lock);
+       oldlimit = mm->context.user_cs_limit;
+       if ((prot & VM_EXEC) && oldlimit < end)
+               /* USER_CS limit moved up */
+               newlimit = end;
+       else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
+               /* USER_CS limit moved down */
+               newlimit = start;
+
+       if (newlimit) {
+               mm->context.user_cs_limit = newlimit;
+
+#ifdef CONFIG_SMP
+               wmb();
+               cpus_clear(mm->context.cpu_user_cs_mask);
+               cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
+#endif
+
+               set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
+       }
+       spin_unlock(&mm->page_table_lock);
+       if (newlimit == end) {
+               struct vm_area_struct *vma = find_vma(mm, oldlimit);
+
+               for (; vma && vma->vm_start < end; vma = vma->vm_next)
+                       if (is_vm_hugetlb_page(vma))
+                               hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
+                       else
+                               change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
+       }
+}
+#endif
+
 int
 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
        unsigned long start, unsigned long end, unsigned long newflags)
@@ -263,11 +313,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
        int error;
        int dirty_accountable = 0;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       struct vm_area_struct *vma_m = NULL;
+       unsigned long start_m, end_m;
+
+       start_m = start + SEGMEXEC_TASK_SIZE;
+       end_m = end + SEGMEXEC_TASK_SIZE;
+#endif
+
        if (newflags == oldflags) {
                *pprev = vma;
                return 0;
        }
 
+       if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
+               struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
+
+               if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
+                       return -ENOMEM;
+
+               if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
+                       return -ENOMEM;
+       }
+
        /*
         * If we make a private mapping writable we increase our commit;
         * but (without finer accounting) cannot reduce our commit if we
@@ -284,6 +352,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
                }
        }
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
+               if (start != vma->vm_start) {
+                       error = split_vma(mm, vma, start, 1);
+                       if (error)
+                               goto fail;
+                       BUG_ON(!*pprev || (*pprev)->vm_next == vma);
+                       *pprev = (*pprev)->vm_next;
+               }
+
+               if (end != vma->vm_end) {
+                       error = split_vma(mm, vma, end, 0);
+                       if (error)
+                               goto fail;
+               }
+
+               if (pax_find_mirror_vma(vma)) {
+                       error = __do_munmap(mm, start_m, end_m - start_m);
+                       if (error)
+                               goto fail;
+               } else {
+                       vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+                       if (!vma_m) {
+                               error = -ENOMEM;
+                               goto fail;
+                       }
+                       vma->vm_flags = newflags;
+                       error = pax_mirror_vma(vma_m, vma);
+                       if (error) {
+                               vma->vm_flags = oldflags;
+                               goto fail;
+                       }
+               }
+       }
+#endif
+
        /*
         * First try to merge with previous and/or next vma.
         */
@@ -314,7 +418,19 @@ success:
         * vm_flags and vm_page_prot are protected by the mmap_sem
         * held in write mode.
         */
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
+               pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
+#endif
+
        vma->vm_flags = newflags;
+
+#ifdef CONFIG_PAX_MPROTECT
+       if (mm->binfmt && mm->binfmt->handle_mprotect)
+               mm->binfmt->handle_mprotect(vma, newflags);
+#endif
+
        dirty_accountable = vma_wants_writenotify(vma);
        vma_set_page_prot(vma);
 
@@ -350,6 +466,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
        end = start + len;
        if (end <= start)
                return -ENOMEM;
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
+               if (end > SEGMEXEC_TASK_SIZE)
+                       return -EINVAL;
+       } else
+#endif
+
+       if (end > TASK_SIZE)
+               return -EINVAL;
+
        if (!arch_validate_prot(prot))
                return -EINVAL;
 
@@ -357,7 +484,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC:
         */
-       if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
+       if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
                prot |= PROT_EXEC;
 
        vm_flags = calc_vm_prot_bits(prot);
@@ -389,6 +516,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
        if (start > vma->vm_start)
                prev = vma;
 
+#ifdef CONFIG_PAX_MPROTECT
+       if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
+               current->mm->binfmt->handle_mprotect(vma, vm_flags);
+#endif
+
        for (nstart = start ; ; ) {
                unsigned long newflags;
 
@@ -399,6 +531,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
 
                /* newflags >> 4 shift VM_MAY% in place of VM_% */
                if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
+                       if (prot & (PROT_WRITE | PROT_EXEC))
+                               gr_log_rwxmprotect(vma);
+
+                       error = -EACCES;
+                       goto out;
+               }
+
+               if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
                        error = -EACCES;
                        goto out;
                }
@@ -413,6 +553,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
                error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
                if (error)
                        goto out;
+
+               track_exec_limit(current->mm, nstart, tmp, vm_flags);
+
                nstart = tmp;
 
                if (nstart < prev->vm_end)
index 17fa018f5f3909316db297df9019a94880392f24..6f7892bbec4eb217f1b17feba95b7e5dc2dff035 100644 (file)
@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                        continue;
                pte = ptep_get_and_clear(mm, old_addr, old_pte);
                pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
+
+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
+               if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
+                       pte = pte_exprotect(pte);
+#endif
+
                pte = move_soft_dirty_pte(pte);
                set_pte_at(mm, new_addr, new_pte, pte);
        }
@@ -346,6 +352,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
        if (is_vm_hugetlb_page(vma))
                goto Einval;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (pax_find_mirror_vma(vma))
+               goto Einval;
+#endif
+
        /* We can't remap across vm area boundaries */
        if (old_len > vma->vm_end - addr)
                goto Efault;
@@ -401,20 +412,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;
        unsigned long map_flags;
+       unsigned long pax_task_size = TASK_SIZE;
 
        if (new_addr & ~PAGE_MASK)
                goto out;
 
-       if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (mm->pax_flags & MF_PAX_SEGMEXEC)
+               pax_task_size = SEGMEXEC_TASK_SIZE;
+#endif
+
+       pax_task_size -= PAGE_SIZE;
+
+       if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
                goto out;
 
        /* Check if the location we're moving into overlaps the
         * old location at all, and fail if it does.
         */
-       if ((new_addr <= addr) && (new_addr+new_len) > addr)
-               goto out;
-
-       if ((addr <= new_addr) && (addr+old_len) > new_addr)
+       if (addr + old_len > new_addr && new_addr + new_len > addr)
                goto out;
 
        ret = do_munmap(mm, new_addr, new_len);
@@ -483,6 +499,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;
        bool locked = false;
+       unsigned long pax_task_size = TASK_SIZE;
 
        if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
                return ret;
@@ -504,6 +521,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
        if (!new_len)
                return ret;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (mm->pax_flags & MF_PAX_SEGMEXEC)
+               pax_task_size = SEGMEXEC_TASK_SIZE;
+#endif
+
+       pax_task_size -= PAGE_SIZE;
+
+       if (new_len > pax_task_size || addr > pax_task_size-new_len ||
+           old_len > pax_task_size || addr > pax_task_size-old_len)
+               return ret;
+
        down_write(&current->mm->mmap_sem);
 
        if (flags & MREMAP_FIXED) {
@@ -554,6 +582,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
                                new_addr = addr;
                        }
                        ret = addr;
+                       track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
                        goto out;
                }
        }
@@ -577,7 +606,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
                        goto out;
                }
 
+               map_flags = vma->vm_flags;
                ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
+               if (!(ret & ~PAGE_MASK)) {
+                       track_exec_limit(current->mm, addr, addr + old_len, 0UL);
+                       track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
+               }
        }
 out:
        if (ret & ~PAGE_MASK)
index ae5baae8e212bad3052b896c953eb4cf290cd1c4..cbb2ed55732b5c4a34f50bf3cb2dec959bc7986c 100644 (file)
@@ -71,7 +71,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
-int heap_stack_gap = 0;
 
 atomic_long_t mmap_pages_allocated;
 
@@ -857,15 +856,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 }
 EXPORT_SYMBOL(find_vma);
 
-/*
- * find a VMA
- * - we don't extend stack VMAs under NOMMU conditions
- */
-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
-{
-       return find_vma(mm, addr);
-}
-
 /*
  * expand a stack to a given address
  * - not supported under NOMMU conditions
@@ -1560,6 +1550,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 
        /* most fields are the same, copy all, and then fixup */
        *new = *vma;
+       INIT_LIST_HEAD(&new->anon_vma_chain);
        *region = *vma->vm_region;
        new->vm_region = region;
 
@@ -1990,8 +1981,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(generic_file_remap_pages);
 
-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
-               unsigned long addr, void *buf, int len, int write)
+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+               unsigned long addr, void *buf, size_t len, int write)
 {
        struct vm_area_struct *vma;
 
@@ -2032,8 +2023,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
  *
  * The caller must hold a reference on @mm.
  */
-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-               void *buf, int len, int write)
+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
+               void *buf, size_t len, int write)
 {
        return __access_remote_vm(NULL, mm, addr, buf, len, write);
 }
@@ -2042,7 +2033,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
  * Access another process' address space.
  * - source/target buffer must be kernel space
  */
-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
 {
        struct mm_struct *mm;
 
index 6f4335238e33311de251a647fe725d06d5897060..e44bf41d84f89dad161537daeed447f882bed489 100644 (file)
@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
  *   card's bdi_dirty may rush to many times higher than bdi_setpoint.
  * - the bdi dirty thresh drops quickly due to change of JBOD workload
  */
-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
                                        unsigned long thresh,
                                        unsigned long bg_thresh,
                                        unsigned long dirty,
index 8bbef06de720b0d7916830269f9030c77d35930e..a8d1989937980a9ec45d136eded0a2b7236d84be 100644 (file)
@@ -60,6 +60,7 @@
 #include <linux/hugetlb.h>
 #include <linux/sched/rt.h>
 #include <linux/page_owner.h>
+#include <linux/random.h>
 
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -358,7 +359,7 @@ out:
  * This usage means that zero-order pages may not be compound.
  */
 
-static void free_compound_page(struct page *page)
+void free_compound_page(struct page *page)
 {
        __free_pages_ok(page, compound_order(page));
 }
@@ -511,7 +512,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
                __mod_zone_freepage_state(zone, (1 << order), migratetype);
 }
 #else
-struct page_ext_operations debug_guardpage_ops = { NULL, };
+struct page_ext_operations debug_guardpage_ops = { .need = NULL, .init = NULL };
 static inline void set_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype) {}
 static inline void clear_page_guard(struct zone *zone, struct page *page,
@@ -802,6 +803,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
        int i;
        int bad = 0;
 
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+       unsigned long index = 1UL << order;
+#endif
+
        VM_BUG_ON_PAGE(PageTail(page), page);
        VM_BUG_ON_PAGE(PageHead(page) && compound_order(page) != order, page);
 
@@ -823,6 +828,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
                debug_check_no_obj_freed(page_address(page),
                                           PAGE_SIZE << order);
        }
+
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+       for (; index; --index)
+               sanitize_highpage(page + index - 1);
+#endif
+
        arch_free_page(page, order);
        kernel_map_pages(page, 1 << order, 0);
 
@@ -846,6 +857,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        local_irq_restore(flags);
 }
 
+#ifdef CONFIG_PAX_LATENT_ENTROPY
+bool __meminitdata extra_latent_entropy;
+
+static int __init setup_pax_extra_latent_entropy(char *str)
+{
+       extra_latent_entropy = true;
+       return 0;
+}
+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
+
+volatile u64 latent_entropy __latent_entropy;
+EXPORT_SYMBOL(latent_entropy);
+#endif
+
 void __init __free_pages_bootmem(struct page *page, unsigned int order)
 {
        unsigned int nr_pages = 1 << order;
@@ -861,6 +886,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
        __ClearPageReserved(p);
        set_page_count(p, 0);
 
+#ifdef CONFIG_PAX_LATENT_ENTROPY
+       if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
+               u64 hash = 0;
+               size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
+               const u64 *data = lowmem_page_address(page);
+
+               for (index = 0; index < end; index++)
+                       hash ^= hash + data[index];
+               latent_entropy ^= hash;
+               add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
+       }
+#endif
+
        page_zone(page)->managed_pages += nr_pages;
        set_page_refcounted(page);
        __free_pages(page, order);
@@ -986,8 +1024,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
        arch_alloc_page(page, order);
        kernel_map_pages(page, 1 << order, 1);
 
+#ifndef CONFIG_PAX_MEMORY_SANITIZE
        if (gfp_flags & __GFP_ZERO)
                prep_zero_page(page, order, gfp_flags);
+#endif
 
        if (order && (gfp_flags & __GFP_COMP))
                prep_compound_page(page, order);
@@ -1700,7 +1740,7 @@ again:
        }
 
        __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
-       if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
+       if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
            !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
                set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
 
@@ -2021,7 +2061,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
        do {
                mod_zone_page_state(zone, NR_ALLOC_BATCH,
                        high_wmark_pages(zone) - low_wmark_pages(zone) -
-                       atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
+                       atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
                clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
        } while (zone++ != preferred_zone);
 }
@@ -5781,7 +5821,7 @@ static void __setup_per_zone_wmarks(void)
 
                __mod_zone_page_state(zone, NR_ALLOC_BATCH,
                        high_wmark_pages(zone) - low_wmark_pages(zone) -
-                       atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
+                       atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
 
                setup_zone_migrate_reserve(zone);
                spin_unlock_irqrestore(&zone->lock, flags);
index d39e2f4e335c36497520b02859c02eb7f46f220f..de5f4b478efbe39bf53523d07530441a974abcce 100644 (file)
@@ -131,7 +131,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
 static unsigned int pcpu_high_unit_cpu __read_mostly;
 
 /* the address of the first chunk which starts with the kernel static area */
-void *pcpu_base_addr __read_mostly;
+void *pcpu_base_addr __read_only;
 EXPORT_SYMBOL_GPL(pcpu_base_addr);
 
 static const int *pcpu_unit_map __read_mostly;         /* cpu -> unit */
index 5077afcd9e116b16b17c7b0ed51930d570f88701..846c9ef21d679c3fe97ae403e2aeb857e5ccf28c 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/uio.h>
 #include <linux/sched.h>
 #include <linux/highmem.h>
+#include <linux/security.h>
 #include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/syscalls.h>
@@ -157,19 +158,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
        ssize_t iov_len;
        size_t total_len = iov_iter_count(iter);
 
+       return -ENOSYS; // PaX: until properly audited
+
        /*
         * Work out how many pages of struct pages we're going to need
         * when eventually calling get_user_pages
         */
        for (i = 0; i < riovcnt; i++) {
                iov_len = rvec[i].iov_len;
-               if (iov_len > 0) {
-                       nr_pages_iov = ((unsigned long)rvec[i].iov_base
-                                       + iov_len)
-                               / PAGE_SIZE - (unsigned long)rvec[i].iov_base
-                               / PAGE_SIZE + 1;
-                       nr_pages = max(nr_pages, nr_pages_iov);
-               }
+               if (iov_len <= 0)
+                       continue;
+               nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
+                               (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
+               nr_pages = max(nr_pages, nr_pages_iov);
        }
 
        if (nr_pages == 0)
@@ -197,6 +198,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
                goto free_proc_pages;
        }
 
+       if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
+               rc = -EPERM;
+               goto put_task_struct;
+       }
+
        mm = mm_access(task, PTRACE_MODE_ATTACH);
        if (!mm || IS_ERR(mm)) {
                rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
index 71cd5bd0c17d760c6f6ab1af5991165a1ac05844..e259089907a18fe27b1dac20a07aa050832fde80 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -166,6 +166,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
        struct anon_vma *anon_vma = vma->anon_vma;
        struct anon_vma_chain *avc;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+       struct anon_vma_chain *avc_m = NULL;
+#endif
+
        might_sleep();
        if (unlikely(!anon_vma)) {
                struct mm_struct *mm = vma->vm_mm;
@@ -175,6 +179,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
                if (!avc)
                        goto out_enomem;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+               avc_m = anon_vma_chain_alloc(GFP_KERNEL);
+               if (!avc_m)
+                       goto out_enomem_free_avc;
+#endif
+
                anon_vma = find_mergeable_anon_vma(vma);
                allocated = NULL;
                if (!anon_vma) {
@@ -188,6 +198,19 @@ int anon_vma_prepare(struct vm_area_struct *vma)
                /* page_table_lock to protect against threads */
                spin_lock(&mm->page_table_lock);
                if (likely(!vma->anon_vma)) {
+
+#ifdef CONFIG_PAX_SEGMEXEC
+                       struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
+
+                       if (vma_m) {
+                               BUG_ON(vma_m->anon_vma);
+                               vma_m->anon_vma = anon_vma;
+                               anon_vma_chain_link(vma_m, avc_m, anon_vma);
+                               anon_vma->degree++;
+                               avc_m = NULL;
+                       }
+#endif
+
                        vma->anon_vma = anon_vma;
                        anon_vma_chain_link(vma, avc, anon_vma);
                        /* vma reference or self-parent link for new root */
@@ -200,12 +223,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
 
                if (unlikely(allocated))
                        put_anon_vma(allocated);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+               if (unlikely(avc_m))
+                       anon_vma_chain_free(avc_m);
+#endif
+
                if (unlikely(avc))
                        anon_vma_chain_free(avc);
        }
        return 0;
 
  out_enomem_free_avc:
+
+#ifdef CONFIG_PAX_SEGMEXEC
+       if (avc_m)
+               anon_vma_chain_free(avc_m);
+#endif
+
        anon_vma_chain_free(avc);
  out_enomem:
        return -ENOMEM;
@@ -249,7 +284,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
  * good chance of avoiding scanning the whole hierarchy when it searches where
  * page is mapped.
  */
-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
 {
        struct anon_vma_chain *avc, *pavc;
        struct anon_vma *root = NULL;
@@ -296,7 +331,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
  * the corresponding VMA in the parent process is attached to.
  * Returns 0 on success, non-zero on failure.
  */
-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
 {
        struct anon_vma_chain *avc;
        struct anon_vma *anon_vma;
@@ -416,8 +451,10 @@ static void anon_vma_ctor(void *data)
 void __init anon_vma_init(void)
 {
        anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
-                       0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
-       anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
+                       0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
+                       anon_vma_ctor);
+       anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
+                       SLAB_PANIC|SLAB_NO_SANITIZE);
 }
 
 /*
index 993e6ba689ccd442aa33e42489e71f0e558d1943..a962ba314d34ecf1aac762fef2659c1867eee60f 100644 (file)
@@ -33,7 +33,7 @@
 #include <linux/swap.h>
 #include <linux/aio.h>
 
-static struct vfsmount *shm_mnt;
+struct vfsmount *shm_mnt;
 
 #ifdef CONFIG_SHMEM
 /*
@@ -80,7 +80,7 @@ static struct vfsmount *shm_mnt;
 #define BOGO_DIRENT_SIZE 20
 
 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
-#define SHORT_SYMLINK_LEN 128
+#define SHORT_SYMLINK_LEN 64
 
 /*
  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
@@ -2558,6 +2558,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
 static int shmem_xattr_validate(const char *name)
 {
        struct { const char *prefix; size_t len; } arr[] = {
+
+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
+               { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
+#endif
+
                { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
                { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
        };
@@ -2613,6 +2618,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
        if (err)
                return err;
 
+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
+       if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
+               if (strcmp(name, XATTR_NAME_PAX_FLAGS))
+                       return -EOPNOTSUPP;
+               if (size > 8)
+                       return -EINVAL;
+       }
+#endif
+
        return simple_xattr_set(&info->xattrs, name, value, size, flags);
 }
 
@@ -2996,8 +3010,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
        int err = -ENOMEM;
 
        /* Round up to L1_CACHE_BYTES to resist false sharing */
-       sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
-                               L1_CACHE_BYTES), GFP_KERNEL);
+       sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
        if (!sbinfo)
                return -ENOMEM;
 
index 65b5dcb6f67107d6e46515196cfdf5cfbfd8a70b..d53d866eaa1a03321e1b97e2d5dc73b8044a1f4e 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -314,10 +314,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
                if ((x)->max_freeable < i)                              \
                        (x)->max_freeable = i;                          \
        } while (0)
-#define STATS_INC_ALLOCHIT(x)  atomic_inc(&(x)->allochit)
-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
-#define STATS_INC_FREEHIT(x)   atomic_inc(&(x)->freehit)
-#define STATS_INC_FREEMISS(x)  atomic_inc(&(x)->freemiss)
+#define STATS_INC_ALLOCHIT(x)  atomic_inc_unchecked(&(x)->allochit)
+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
+#define STATS_INC_FREEHIT(x)   atomic_inc_unchecked(&(x)->freehit)
+#define STATS_INC_FREEMISS(x)  atomic_inc_unchecked(&(x)->freemiss)
+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
 #else
 #define        STATS_INC_ACTIVE(x)     do { } while (0)
 #define        STATS_DEC_ACTIVE(x)     do { } while (0)
@@ -334,6 +336,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
 #define STATS_INC_ALLOCMISS(x) do { } while (0)
 #define STATS_INC_FREEHIT(x)   do { } while (0)
 #define STATS_INC_FREEMISS(x)  do { } while (0)
+#define STATS_INC_SANITIZED(x) do { } while (0)
+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
 #endif
 
 #if DEBUG
@@ -450,7 +454,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
  *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
  */
 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
-                                       const struct page *page, void *obj)
+                                       const struct page *page, const void *obj)
 {
        u32 offset = (obj - page->s_mem);
        return reciprocal_divide(offset, cache->reciprocal_buffer_size);
@@ -1438,7 +1442,7 @@ void __init kmem_cache_init(void)
         * structures first.  Without this, further allocations will bug.
         */
        kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
-                               kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
+                               kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
        slab_state = PARTIAL_NODE;
 
        slab_early_init = 0;
@@ -2059,7 +2063,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
 
        cachep = find_mergeable(size, align, flags, name, ctor);
        if (cachep) {
-               cachep->refcount++;
+               atomic_inc(&cachep->refcount);
 
                /*
                 * Adjust the object sizes so that we clear
@@ -3357,6 +3361,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
        struct array_cache *ac = cpu_cache_get(cachep);
 
        check_irq_off();
+
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+       if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
+               STATS_INC_NOT_SANITIZED(cachep);
+       else {
+               memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
+
+               if (cachep->ctor)
+                       cachep->ctor(objp);
+
+               STATS_INC_SANITIZED(cachep);
+       }
+#endif
+
        kmemleak_free_recursive(objp, cachep->flags);
        objp = cache_free_debugcheck(cachep, objp, caller);
 
@@ -3469,7 +3487,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
        return kmem_cache_alloc_node_trace(cachep, flags, node, size);
 }
 
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
 {
        return __do_kmalloc_node(size, flags, node, _RET_IP_);
 }
@@ -3489,7 +3507,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
  * @flags: the type of memory to allocate (see kmalloc).
  * @caller: function caller for debug tracking of the caller
  */
-static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
+static __always_inline void * __size_overflow(1) __do_kmalloc(size_t size, gfp_t flags,
                                          unsigned long caller)
 {
        struct kmem_cache *cachep;
@@ -3562,6 +3580,7 @@ void kfree(const void *objp)
 
        if (unlikely(ZERO_OR_NULL_PTR(objp)))
                return;
+       VM_BUG_ON(!virt_addr_valid(objp));
        local_irq_save(flags);
        kfree_debugcheck(objp);
        c = virt_to_cache(objp);
@@ -3984,14 +4003,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
        }
        /* cpu stats */
        {
-               unsigned long allochit = atomic_read(&cachep->allochit);
-               unsigned long allocmiss = atomic_read(&cachep->allocmiss);
-               unsigned long freehit = atomic_read(&cachep->freehit);
-               unsigned long freemiss = atomic_read(&cachep->freemiss);
+               unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
+               unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
+               unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
+               unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
 
                seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
                           allochit, allocmiss, freehit, freemiss);
        }
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+       {
+               unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
+               unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
+
+               seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
+       }
+#endif
 #endif
 }
 
@@ -4199,13 +4226,69 @@ static const struct file_operations proc_slabstats_operations = {
 static int __init slab_proc_init(void)
 {
 #ifdef CONFIG_DEBUG_SLAB_LEAK
-       proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
+       proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
 #endif
        return 0;
 }
 module_init(slab_proc_init);
 #endif
 
+bool is_usercopy_object(const void *ptr)
+{
+       struct page *page;
+       struct kmem_cache *cachep;
+
+       if (ZERO_OR_NULL_PTR(ptr))
+               return false;
+
+       if (!slab_is_available())
+               return false;
+
+       if (!virt_addr_valid(ptr))
+               return false;
+
+       page = virt_to_head_page(ptr);
+
+       if (!PageSlab(page))
+               return false;
+
+       cachep = page->slab_cache;
+       return cachep->flags & SLAB_USERCOPY;
+}
+
+#ifdef CONFIG_PAX_USERCOPY
+const char *check_heap_object(const void *ptr, unsigned long n)
+{
+       struct page *page;
+       struct kmem_cache *cachep;
+       unsigned int objnr;
+       unsigned long offset;
+
+       if (ZERO_OR_NULL_PTR(ptr))
+               return "<null>";
+
+       if (!virt_addr_valid(ptr))
+               return NULL;
+
+       page = virt_to_head_page(ptr);
+
+       if (!PageSlab(page))
+               return NULL;
+
+       cachep = page->slab_cache;
+       if (!(cachep->flags & SLAB_USERCOPY))
+               return cachep->name;
+
+       objnr = obj_to_index(cachep, page, ptr);
+       BUG_ON(objnr >= cachep->num);
+       offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
+       if (offset <= cachep->object_size && n <= cachep->object_size - offset)
+               return NULL;
+
+       return cachep->name;
+}
+#endif
+
 /**
  * ksize - get the actual amount of memory allocated for a given object
  * @objp: Pointer to the object
index 1cf4005482dd1db91925c65dc394ea3c587f6a4f..10ad563ddcbcbf1769ed4a8bab4ab5053bb06a00 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -22,7 +22,7 @@ struct kmem_cache {
        unsigned int align;     /* Alignment as calculated */
        unsigned long flags;    /* Active flags on the slab */
        const char *name;       /* Slab name for sysfs */
-       int refcount;           /* Use counter */
+       atomic_t refcount;      /* Use counter */
        void (*ctor)(void *);   /* Called on object slot creation */
        struct list_head list;  /* List of all slab caches on the system */
 };
@@ -66,6 +66,20 @@ extern struct list_head slab_caches;
 /* The slab cache that manages slab cache information */
 extern struct kmem_cache *kmem_cache;
 
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+#ifdef CONFIG_X86_64
+#define PAX_MEMORY_SANITIZE_VALUE      '\xfe'
+#else
+#define PAX_MEMORY_SANITIZE_VALUE      '\xff'
+#endif
+enum pax_sanitize_mode {
+       PAX_SANITIZE_SLAB_OFF = 0,
+       PAX_SANITIZE_SLAB_FAST,
+       PAX_SANITIZE_SLAB_FULL,
+};
+extern enum pax_sanitize_mode pax_sanitize_slab;
+#endif
+
 unsigned long calculate_alignment(unsigned long flags,
                unsigned long align, unsigned long size);
 
@@ -116,7 +130,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
 
 /* Legal flag mask for kmem_cache_create(), for various configurations */
 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
-                        SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
+                        SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
+                        SLAB_USERCOPY | SLAB_NO_SANITIZE)
 
 #if defined(CONFIG_DEBUG_SLAB)
 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
@@ -300,6 +315,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
                return s;
 
        page = virt_to_head_page(x);
+
+       BUG_ON(!PageSlab(page));
+
        cachep = page->slab_cache;
        if (slab_equal_or_root(cachep, s))
                return cachep;
index e03dd6f2a27212768fb3f623115cea9f3579a06a..c4758387dd54bcbf4040653998cae16b63971b27 100644 (file)
 
 #include "slab.h"
 
-enum slab_state slab_state;
+enum slab_state slab_state __read_only;
 LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
 struct kmem_cache *kmem_cache;
 
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
+static int __init pax_sanitize_slab_setup(char *str)
+{
+       if (!str)
+               return 0;
+
+       if (!strcmp(str, "0") || !strcmp(str, "off")) {
+               pr_info("PaX slab sanitization: %s\n", "disabled");
+               pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
+       } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
+               pr_info("PaX slab sanitization: %s\n", "fast");
+               pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
+       } else if (!strcmp(str, "full")) {
+               pr_info("PaX slab sanitization: %s\n", "full");
+               pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
+       } else
+               pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
+
+       return 0;
+}
+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
+#endif
+
 /*
  * Set of flags that will prevent slab merging
  */
@@ -44,7 +68,7 @@ struct kmem_cache *kmem_cache;
  * Merge control. If this is set then no merging of slab caches will occur.
  * (Could be removed. This was introduced to pacify the merge skeptics.)
  */
-static int slab_nomerge;
+static int slab_nomerge = 1;
 
 static int __init setup_slab_nomerge(char *str)
 {
@@ -218,7 +242,7 @@ int slab_unmergeable(struct kmem_cache *s)
        /*
         * We may have set a slab to be unmergeable during bootstrap.
         */
-       if (s->refcount < 0)
+       if (atomic_read(&s->refcount) < 0)
                return 1;
 
        return 0;
@@ -322,7 +346,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
        if (err)
                goto out_free_cache;
 
-       s->refcount = 1;
+       atomic_set(&s->refcount, 1);
        list_add(&s->list, &slab_caches);
 out:
        if (err)
@@ -386,6 +410,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
         */
        flags &= CACHE_CREATE_MASK;
 
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+       if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
+               flags |= SLAB_NO_SANITIZE;
+       else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
+               flags &= ~SLAB_NO_SANITIZE;
+#endif
+
        s = __kmem_cache_alias(name, size, align, flags, ctor);
        if (s)
                goto out_unlock;
@@ -505,8 +536,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 
        mutex_lock(&slab_mutex);
 
-       s->refcount--;
-       if (s->refcount)
+       if (!atomic_dec_and_test(&s->refcount))
                goto out_unlock;
 
        if (memcg_cleanup_cache_params(s) != 0)
@@ -526,7 +556,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
                rcu_barrier();
 
        memcg_free_cache_params(s);
-#ifdef SLAB_SUPPORTS_SYSFS
+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
        sysfs_slab_remove(s);
 #else
        slab_kmem_cache_release(s);
@@ -582,7 +612,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
                panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
                                        name, size, err);
 
-       s->refcount = -1;       /* Exempt from merging for now */
+       atomic_set(&s->refcount, -1);   /* Exempt from merging for now */
 }
 
 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
@@ -595,7 +625,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
 
        create_boot_cache(s, name, size, flags);
        list_add(&s->list, &slab_caches);
-       s->refcount = 1;
+       atomic_set(&s->refcount, 1);
        return s;
 }
 
@@ -607,6 +637,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
 EXPORT_SYMBOL(kmalloc_dma_caches);
 #endif
 
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
+EXPORT_SYMBOL(kmalloc_usercopy_caches);
+#endif
+
 /*
  * Conversion table for small slabs sizes / 8 to the index in the
  * kmalloc array. This is necessary for slabs < 192 since we have non power
@@ -671,6 +706,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
                return kmalloc_dma_caches[index];
 
 #endif
+
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+       if (unlikely((flags & GFP_USERCOPY)))
+               return kmalloc_usercopy_caches[index];
+
+#endif
+
        return kmalloc_caches[index];
 }
 
@@ -727,7 +769,7 @@ void __init create_kmalloc_caches(unsigned long flags)
        for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
                if (!kmalloc_caches[i]) {
                        kmalloc_caches[i] = create_kmalloc_cache(NULL,
-                                                       1 << i, flags);
+                                                       1 << i, SLAB_USERCOPY | flags);
                }
 
                /*
@@ -736,10 +778,10 @@ void __init create_kmalloc_caches(unsigned long flags)
                 * earlier power of two caches
                 */
                if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
-                       kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
+                       kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
 
                if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
-                       kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
+                       kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
        }
 
        /* Kmalloc array is now usable */
@@ -772,6 +814,23 @@ void __init create_kmalloc_caches(unsigned long flags)
                }
        }
 #endif
+
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+       for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
+               struct kmem_cache *s = kmalloc_caches[i];
+
+               if (s) {
+                       int size = kmalloc_size(i);
+                       char *n = kasprintf(GFP_NOWAIT,
+                                "usercopy-kmalloc-%d", size);
+
+                       BUG_ON(!n);
+                       kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
+                               size, SLAB_USERCOPY | flags);
+               }
+       }
+#endif
+
 }
 #endif /* !CONFIG_SLOB */
 
@@ -830,6 +889,9 @@ static void print_slabinfo_header(struct seq_file *m)
        seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
                 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
        seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+       seq_puts(m, " : pax <sanitized> <not_sanitized>");
+#endif
 #endif
        seq_putc(m, '\n');
 }
@@ -964,7 +1026,7 @@ static int __init slab_proc_init(void)
 module_init(slab_proc_init);
 #endif /* CONFIG_SLABINFO */
 
-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
+static __always_inline void * __size_overflow(2) __do_krealloc(const void *p, size_t new_size,
                                           gfp_t flags)
 {
        void *ret;
index 96a86206a26b2ce6ea4fc8a71441a5e4d4ded26a..46b3f1247c2a50869dbb2fb7cec1212e74c32295 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
 /*
  * Return the size of a slob block.
  */
-static slobidx_t slob_units(slob_t *s)
+static slobidx_t slob_units(const slob_t *s)
 {
        if (s->units > 0)
                return s->units;
@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
 /*
  * Return the next free slob block pointer after this one.
  */
-static slob_t *slob_next(slob_t *s)
+static slob_t *slob_next(const slob_t *s)
 {
        slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
        slobidx_t next;
@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
 /*
  * Returns true if s is the last free block in its page.
  */
-static int slob_last(slob_t *s)
+static int slob_last(const slob_t *s)
 {
        return !((unsigned long)slob_next(s) & ~PAGE_MASK);
 }
 
-static void *slob_new_pages(gfp_t gfp, int order, int node)
+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
 {
-       void *page;
+       struct page *page;
 
 #ifdef CONFIG_NUMA
        if (node != NUMA_NO_NODE)
@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
        if (!page)
                return NULL;
 
-       return page_address(page);
+       __SetPageSlab(page);
+       return page;
 }
 
-static void slob_free_pages(void *b, int order)
+static void slob_free_pages(struct page *sp, int order)
 {
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += 1 << order;
-       free_pages((unsigned long)b, order);
+       __ClearPageSlab(sp);
+       page_mapcount_reset(sp);
+       sp->private = 0;
+       __free_pages(sp, order);
 }
 
 /*
@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 
        /* Not enough space: must allocate a new page */
        if (!b) {
-               b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
-               if (!b)
+               sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
+               if (!sp)
                        return NULL;
-               sp = virt_to_page(b);
-               __SetPageSlab(sp);
+               b = page_address(sp);
 
                spin_lock_irqsave(&slob_lock, flags);
                sp->units = SLOB_UNITS(PAGE_SIZE);
                sp->freelist = b;
+               sp->private = 0;
                INIT_LIST_HEAD(&sp->lru);
                set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
                set_slob_page_free(sp, slob_list);
@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 /*
  * slob_free: entry point into the slob allocator.
  */
-static void slob_free(void *block, int size)
+static void slob_free(struct kmem_cache *c, void *block, int size)
 {
        struct page *sp;
        slob_t *prev, *next, *b = (slob_t *)block;
@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
                if (slob_page_free(sp))
                        clear_slob_page_free(sp);
                spin_unlock_irqrestore(&slob_lock, flags);
-               __ClearPageSlab(sp);
-               page_mapcount_reset(sp);
-               slob_free_pages(b, 0);
+               slob_free_pages(sp, 0);
                return;
        }
 
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+       if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
+               memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
+#endif
+
        if (!slob_page_free(sp)) {
                /* This slob page is about to become partially free. Easy! */
                sp->units = units;
@@ -424,11 +431,10 @@ out:
  */
 
 static __always_inline void *
-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
 {
-       unsigned int *m;
-       int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
-       void *ret;
+       slob_t *m;
+       void *ret = NULL;
 
        gfp &= gfp_allowed_mask;
 
@@ -442,27 +448,45 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 
                if (!m)
                        return NULL;
-               *m = size;
+               BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
+               BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
+               m[0].units = size;
+               m[1].units = align;
                ret = (void *)m + align;
 
                trace_kmalloc_node(caller, ret,
                                   size, size + align, gfp, node);
        } else {
                unsigned int order = get_order(size);
+               struct page *page;
 
                if (likely(order))
                        gfp |= __GFP_COMP;
-               ret = slob_new_pages(gfp, order, node);
+               page = slob_new_pages(gfp, order, node);
+               if (page) {
+                       ret = page_address(page);
+                       page->private = size;
+               }
 
                trace_kmalloc_node(caller, ret,
                                   size, PAGE_SIZE << order, gfp, node);
        }
 
-       kmemleak_alloc(ret, size, 1, gfp);
        return ret;
 }
 
-void *__kmalloc(size_t size, gfp_t gfp)
+static __always_inline void *
+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
+{
+       int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+       void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
+
+       if (!ZERO_OR_NULL_PTR(ret))
+               kmemleak_alloc(ret, size, 1, gfp);
+       return ret;
+}
+
+void * __size_overflow(1) __kmalloc(size_t size, gfp_t gfp)
 {
        return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
 }
@@ -491,34 +515,112 @@ void kfree(const void *block)
                return;
        kmemleak_free(block);
 
+       VM_BUG_ON(!virt_addr_valid(block));
        sp = virt_to_page(block);
-       if (PageSlab(sp)) {
+       VM_BUG_ON(!PageSlab(sp));
+       if (!sp->private) {
                int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
-               unsigned int *m = (unsigned int *)(block - align);
-               slob_free(m, *m + align);
-       } else
+               slob_t *m = (slob_t *)(block - align);
+               slob_free(NULL, m, m[0].units + align);
+       } else {
+               __ClearPageSlab(sp);
+               page_mapcount_reset(sp);
+               sp->private = 0;
                __free_pages(sp, compound_order(sp));
+       }
 }
 EXPORT_SYMBOL(kfree);
 
+bool is_usercopy_object(const void *ptr)
+{
+       if (!slab_is_available())
+               return false;
+
+       // PAX: TODO
+
+       return false;
+}
+
+#ifdef CONFIG_PAX_USERCOPY
+const char *check_heap_object(const void *ptr, unsigned long n)
+{
+       struct page *page;
+       const slob_t *free;
+       const void *base;
+       unsigned long flags;
+
+       if (ZERO_OR_NULL_PTR(ptr))
+               return "<null>";
+
+       if (!virt_addr_valid(ptr))
+               return NULL;
+
+       page = virt_to_head_page(ptr);
+       if (!PageSlab(page))
+               return NULL;
+
+       if (page->private) {
+               base = page;
+               if (base <= ptr && n <= page->private - (ptr - base))
+                       return NULL;
+               return "<slob>";
+       }
+
+       /* some tricky double walking to find the chunk */
+       spin_lock_irqsave(&slob_lock, flags);
+       base = (void *)((unsigned long)ptr & PAGE_MASK);
+       free = page->freelist;
+
+       while (!slob_last(free) && (void *)free <= ptr) {
+               base = free + slob_units(free);
+               free = slob_next(free);
+       }
+
+       while (base < (void *)free) {
+               slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
+               int size = SLOB_UNIT * SLOB_UNITS(m + align);
+               int offset;
+
+               if (ptr < base + align)
+                       break;
+
+               offset = ptr - base - align;
+               if (offset >= m) {
+                       base += size;
+                       continue;
+               }
+
+               if (n > m - offset)
+                       break;
+
+               spin_unlock_irqrestore(&slob_lock, flags);
+               return NULL;
+       }
+
+       spin_unlock_irqrestore(&slob_lock, flags);
+       return "<slob>";
+}
+#endif
+
 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
 size_t ksize(const void *block)
 {
        struct page *sp;
        int align;
-       unsigned int *m;
+       slob_t *m;
 
        BUG_ON(!block);
        if (unlikely(block == ZERO_SIZE_PTR))
                return 0;
 
        sp = virt_to_page(block);
-       if (unlikely(!PageSlab(sp)))
-               return PAGE_SIZE << compound_order(sp);
+       VM_BUG_ON(!PageSlab(sp));
+       if (sp->private)
+               return sp->private;
 
        align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
-       m = (unsigned int *)(block - align);
-       return SLOB_UNITS(*m) * SLOB_UNIT;
+       m = (slob_t *)(block - align);
+       return SLOB_UNITS(m[0].units) * SLOB_UNIT;
 }
 EXPORT_SYMBOL(ksize);
 
@@ -534,23 +636,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
 
 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
-       void *b;
+       void *b = NULL;
 
        flags &= gfp_allowed_mask;
 
        lockdep_trace_alloc(flags);
 
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+       b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
+#else
        if (c->size < PAGE_SIZE) {
                b = slob_alloc(c->size, flags, c->align, node);
                trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
                                            SLOB_UNITS(c->size) * SLOB_UNIT,
                                            flags, node);
        } else {
-               b = slob_new_pages(flags, get_order(c->size), node);
+               struct page *sp;
+
+               sp = slob_new_pages(flags, get_order(c->size), node);
+               if (sp) {
+                       b = page_address(sp);
+                       sp->private = c->size;
+               }
                trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
                                            PAGE_SIZE << get_order(c->size),
                                            flags, node);
        }
+#endif
 
        if (b && c->ctor)
                c->ctor(b);
@@ -567,7 +679,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t gfp, int node)
 {
        return __do_kmalloc_node(size, gfp, node, _RET_IP_);
 }
@@ -580,12 +692,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
-static void __kmem_cache_free(void *b, int size)
+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
 {
-       if (size < PAGE_SIZE)
-               slob_free(b, size);
+       struct page *sp;
+
+       sp = virt_to_page(b);
+       BUG_ON(!PageSlab(sp));
+       if (!sp->private)
+               slob_free(c, b, size);
        else
-               slob_free_pages(b, get_order(size));
+               slob_free_pages(sp, get_order(size));
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
@@ -593,22 +709,36 @@ static void kmem_rcu_free(struct rcu_head *head)
        struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
        void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
 
-       __kmem_cache_free(b, slob_rcu->size);
+       __kmem_cache_free(NULL, b, slob_rcu->size);
 }
 
 void kmem_cache_free(struct kmem_cache *c, void *b)
 {
+       int size = c->size;
+
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+       if (size + c->align < PAGE_SIZE) {
+               size += c->align;
+               b -= c->align;
+       }
+#endif
+
        kmemleak_free_recursive(b, c->flags);
        if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
                struct slob_rcu *slob_rcu;
-               slob_rcu = b + (c->size - sizeof(struct slob_rcu));
-               slob_rcu->size = c->size;
+               slob_rcu = b + (size - sizeof(struct slob_rcu));
+               slob_rcu->size = size;
                call_rcu(&slob_rcu->head, kmem_rcu_free);
        } else {
-               __kmem_cache_free(b, c->size);
+               __kmem_cache_free(c, b, size);
        }
 
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+       trace_kfree(_RET_IP_, b);
+#else
        trace_kmem_cache_free(_RET_IP_, b);
+#endif
+
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
index fe376fe1f4fe3ce5526997ef700da9eee5c714a5..2f5757c7a84ef67832e14a06169fd1de9027d742 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -197,7 +197,7 @@ struct track {
 
 enum track_item { TRACK_ALLOC, TRACK_FREE };
 
-#ifdef CONFIG_SYSFS
+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
 static int sysfs_slab_add(struct kmem_cache *);
 static int sysfs_slab_alias(struct kmem_cache *, const char *);
 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
@@ -535,7 +535,7 @@ static void print_track(const char *s, struct track *t)
        if (!t->addr)
                return;
 
-       pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
+       pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
               s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
 #ifdef CONFIG_STACKTRACE
        {
@@ -2652,6 +2652,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
 
        slab_free_hook(s, x);
 
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+       if (!(s->flags & SLAB_NO_SANITIZE)) {
+               memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
+               if (s->ctor)
+                       s->ctor(x);
+       }
+#endif
+
 redo:
        /*
         * Determine the currently cpus per cpu slab.
@@ -2989,6 +2997,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
        s->inuse = size;
 
        if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+               (!(flags & SLAB_NO_SANITIZE)) ||
+#endif
                s->ctor)) {
                /*
                 * Relocate free pointer after the object if it is not
@@ -3243,7 +3254,7 @@ static int __init setup_slub_min_objects(char *str)
 
 __setup("slub_min_objects=", setup_slub_min_objects);
 
-void *__kmalloc(size_t size, gfp_t flags)
+void * __size_overflow(1) __kmalloc(size_t size, gfp_t flags)
 {
        struct kmem_cache *s;
        void *ret;
@@ -3279,7 +3290,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
        return ptr;
 }
 
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
 {
        struct kmem_cache *s;
        void *ret;
@@ -3308,6 +3319,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
+bool is_usercopy_object(const void *ptr)
+{
+       struct page *page;
+       struct kmem_cache *s;
+
+       if (ZERO_OR_NULL_PTR(ptr))
+               return false;
+
+       if (!slab_is_available())
+               return false;
+
+       if (!virt_addr_valid(ptr))
+               return false;
+
+       page = virt_to_head_page(ptr);
+
+       if (!PageSlab(page))
+               return false;
+
+       s = page->slab_cache;
+       return s->flags & SLAB_USERCOPY;
+}
+
+#ifdef CONFIG_PAX_USERCOPY
+const char *check_heap_object(const void *ptr, unsigned long n)
+{
+       struct page *page;
+       struct kmem_cache *s;
+       unsigned long offset;
+
+       if (ZERO_OR_NULL_PTR(ptr))
+               return "<null>";
+
+       if (!virt_addr_valid(ptr))
+               return NULL;
+
+       page = virt_to_head_page(ptr);
+
+       if (!PageSlab(page))
+               return NULL;
+
+       s = page->slab_cache;
+       if (!(s->flags & SLAB_USERCOPY))
+               return s->name;
+
+       offset = (ptr - page_address(page)) % s->size;
+       if (offset <= s->object_size && n <= s->object_size - offset)
+               return NULL;
+
+       return s->name;
+}
+#endif
+
 size_t ksize(const void *object)
 {
        struct page *page;
@@ -3336,6 +3400,7 @@ void kfree(const void *x)
        if (unlikely(ZERO_OR_NULL_PTR(x)))
                return;
 
+       VM_BUG_ON(!virt_addr_valid(x));
        page = virt_to_head_page(x);
        if (unlikely(!PageSlab(page))) {
                BUG_ON(!PageCompound(page));
@@ -3631,7 +3696,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
                int i;
                struct kmem_cache *c;
 
-               s->refcount++;
+               atomic_inc(&s->refcount);
 
                /*
                 * Adjust the object sizes so that we clear
@@ -3650,7 +3715,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
                }
 
                if (sysfs_slab_alias(s, name)) {
-                       s->refcount--;
+                       atomic_dec(&s->refcount);
                        s = NULL;
                }
        }
@@ -3767,7 +3832,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 }
 #endif
 
-#ifdef CONFIG_SYSFS
+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
 static int count_inuse(struct page *page)
 {
        return page->inuse;
@@ -4048,7 +4113,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
                len += sprintf(buf + len, "%7ld ", l->count);
 
                if (l->addr)
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+                       len += sprintf(buf + len, "%pS", NULL);
+#else
                        len += sprintf(buf + len, "%pS", (void *)l->addr);
+#endif
                else
                        len += sprintf(buf + len, "<not-available>");
 
@@ -4150,12 +4219,12 @@ static void __init resiliency_test(void)
        validate_slab_cache(kmalloc_caches[9]);
 }
 #else
-#ifdef CONFIG_SYSFS
+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
 static void resiliency_test(void) {};
 #endif
 #endif
 
-#ifdef CONFIG_SYSFS
+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
 enum slab_stat_type {
        SL_ALL,                 /* All slabs */
        SL_PARTIAL,             /* Only partially allocated slabs */
@@ -4392,13 +4461,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 {
        if (!s->ctor)
                return 0;
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+       return sprintf(buf, "%pS\n", NULL);
+#else
        return sprintf(buf, "%pS\n", s->ctor);
+#endif
 }
 SLAB_ATTR_RO(ctor);
 
 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
 {
-       return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
+       return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
 }
 SLAB_ATTR_RO(aliases);
 
@@ -4486,6 +4559,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
 SLAB_ATTR_RO(cache_dma);
 #endif
 
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
+{
+       return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
+}
+SLAB_ATTR_RO(usercopy);
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
+{
+       return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
+}
+SLAB_ATTR_RO(sanitize);
+#endif
+
 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
 {
        return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
@@ -4541,7 +4630,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
         * as well as cause other issues like converting a mergeable
         * cache into an umergeable one.
         */
-       if (s->refcount > 1)
+       if (atomic_read(&s->refcount) > 1)
                return -EINVAL;
 
        s->flags &= ~SLAB_TRACE;
@@ -4661,7 +4750,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
                                                        size_t length)
 {
-       if (s->refcount > 1)
+       if (atomic_read(&s->refcount) > 1)
                return -EINVAL;
 
        s->flags &= ~SLAB_FAILSLAB;
@@ -4831,6 +4920,12 @@ static struct attribute *slab_attrs[] = {
 #ifdef CONFIG_ZONE_DMA
        &cache_dma_attr.attr,
 #endif
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+       &usercopy_attr.attr,
+#endif
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+       &sanitize_attr.attr,
+#endif
 #ifdef CONFIG_NUMA
        &remote_node_defrag_ratio_attr.attr,
 #endif
@@ -5075,6 +5170,7 @@ static char *create_unique_id(struct kmem_cache *s)
        return name;
 }
 
+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
 static int sysfs_slab_add(struct kmem_cache *s)
 {
        int err;
@@ -5148,6 +5244,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
        kobject_del(&s->kobj);
        kobject_put(&s->kobj);
 }
+#endif
 
 /*
  * Need to buffer aliases during bootup until sysfs becomes
@@ -5161,6 +5258,7 @@ struct saved_alias {
 
 static struct saved_alias *alias_list;
 
+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
 {
        struct saved_alias *al;
@@ -5183,6 +5281,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
        alias_list = al;
        return 0;
 }
+#endif
 
 static int __init slab_sysfs_init(void)
 {
index 4cba9c2783a147077150505dcf6114cf4592843a..b4f9fcc3f3b8659df19391dfbda8806fc17a3b30 100644 (file)
@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
                void *p = vmemmap_alloc_block(PAGE_SIZE, node);
                if (!p)
                        return NULL;
-               pud_populate(&init_mm, pud, p);
+               pud_populate_kernel(&init_mm, pud, p);
        }
        return pud;
 }
@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
                void *p = vmemmap_alloc_block(PAGE_SIZE, node);
                if (!p)
                        return NULL;
-               pgd_populate(&init_mm, pgd, p);
+               pgd_populate_kernel(&init_mm, pgd, p);
        }
        return pgd;
 }
index d1b48b691ac8c20040a262337cc7e0cbf566420f..6e8590ee980036957a7ba12714c25f44d5991b54 100644 (file)
@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 
        for (i = 0; i < PAGES_PER_SECTION; i++) {
                if (PageHWPoison(&memmap[i])) {
-                       atomic_long_sub(1, &num_poisoned_pages);
+                       atomic_long_sub_unchecked(1, &num_poisoned_pages);
                        ClearPageHWPoison(&memmap[i]);
                }
        }
index 8a12b33936b45c1c49a312ea04e08338472b98fe..7068e78c26e0eeaa618f8c2ae9ec1f4fdf8abe5c 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -31,6 +31,7 @@
 #include <linux/memcontrol.h>
 #include <linux/gfp.h>
 #include <linux/uio.h>
+#include <linux/hugetlb.h>
 
 #include "internal.h"
 
@@ -77,6 +78,8 @@ static void __put_compound_page(struct page *page)
 
        __page_cache_release(page);
        dtor = get_compound_page_dtor(page);
+       if (!PageHuge(page))
+               BUG_ON(dtor != free_compound_page);
        (*dtor)(page);
 }
 
index 63f55ccb9b260d695ac8e9ae0635014a2c5746af..31874e6e6f9d116bbd5d7ea0b45d60c3590f34a7 100644 (file)
@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
 
 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
 /* Activity counter to indicate that a swapon or swapoff has occurred */
-static atomic_t proc_poll_event = ATOMIC_INIT(0);
+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
 
 static inline unsigned char swap_count(unsigned char ent)
 {
@@ -1944,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
        spin_unlock(&swap_lock);
 
        err = 0;
-       atomic_inc(&proc_poll_event);
+       atomic_inc_unchecked(&proc_poll_event);
        wake_up_interruptible(&proc_poll_wait);
 
 out_dput:
@@ -1961,8 +1961,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
 
        poll_wait(file, &proc_poll_wait, wait);
 
-       if (seq->poll_event != atomic_read(&proc_poll_event)) {
-               seq->poll_event = atomic_read(&proc_poll_event);
+       if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
+               seq->poll_event = atomic_read_unchecked(&proc_poll_event);
                return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
        }
 
@@ -2060,7 +2060,7 @@ static int swaps_open(struct inode *inode, struct file *file)
                return ret;
 
        seq = file->private_data;
-       seq->poll_event = atomic_read(&proc_poll_event);
+       seq->poll_event = atomic_read_unchecked(&proc_poll_event);
        return 0;
 }
 
@@ -2520,7 +2520,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
                (frontswap_map) ? "FS" : "");
 
        mutex_unlock(&swapon_mutex);
-       atomic_inc(&proc_poll_event);
+       atomic_inc_unchecked(&proc_poll_event);
        wake_up_interruptible(&proc_poll_wait);
 
        if (S_ISREG(inode->i_mode))
index fec39d4509a958763685fb6b70499590f9e363fb..3e6032513be502623a5ad7c496d538634586d0ea 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -195,6 +195,12 @@ struct task_struct *task_of_stack(struct task_struct *task,
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
        mm->mmap_base = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_PAX_RANDMMAP
+       if (mm->pax_flags & MF_PAX_RANDMMAP)
+               mm->mmap_base += mm->delta_mmap;
+#endif
+
        mm->get_unmapped_area = arch_get_unmapped_area;
 }
 #endif
@@ -371,6 +377,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
        if (!mm->arg_end)
                goto out_mm;    /* Shh! No looking before we're done */
 
+       if (gr_acl_handle_procpidmem(task))
+               goto out_mm;
+
        len = mm->arg_end - mm->arg_start;
 
        if (len > buflen)
index 39c338896416bd9f70d197ec30ddd003d935c65e..7d976d49aaa9256f840c0e1c08d8b1b8644c57c7 100644 (file)
@@ -39,20 +39,65 @@ struct vfree_deferred {
        struct work_struct wq;
 };
 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
+static DEFINE_PER_CPU(struct vfree_deferred, vunmap_deferred);
+
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
+struct stack_deferred_llist {
+       struct llist_head list;
+       void *stack;
+       void *lowmem_stack;
+};
+
+struct stack_deferred {
+       struct stack_deferred_llist list;
+       struct work_struct wq;
+};
+
+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
+#endif
 
 static void __vunmap(const void *, int);
 
-static void free_work(struct work_struct *w)
+static void vfree_work(struct work_struct *w)
+{
+       struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
+       struct llist_node *llnode = llist_del_all(&p->list);
+       while (llnode) {
+               void *x = llnode;
+               llnode = llist_next(llnode);
+               __vunmap(x, 1);
+       }
+}
+
+static void vunmap_work(struct work_struct *w)
 {
        struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
        struct llist_node *llnode = llist_del_all(&p->list);
        while (llnode) {
                void *p = llnode;
                llnode = llist_next(llnode);
-               __vunmap(p, 1);
+               __vunmap(p, 0);
        }
 }
 
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
+static void unmap_work(struct work_struct *w)
+{
+       struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
+       struct llist_node *llnode = llist_del_all(&p->list.list);
+       while (llnode) {
+               struct stack_deferred_llist *x =
+                       llist_entry((struct llist_head *)llnode,
+                                    struct stack_deferred_llist, list);
+               void *stack = ACCESS_ONCE(x->stack);
+               void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
+               llnode = llist_next(llnode);
+               __vunmap(stack, 0);
+               free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
+       }
+}
+#endif
+
 /*** Page table manipulation functions ***/
 
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
@@ -61,8 +106,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
 
        pte = pte_offset_kernel(pmd, addr);
        do {
-               pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
-               WARN_ON(!pte_none(ptent) && !pte_present(ptent));
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+               if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
+                       BUG_ON(!pte_exec(*pte));
+                       set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
+                       continue;
+               }
+#endif
+
+               {
+                       pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
+                       WARN_ON(!pte_none(ptent) && !pte_present(ptent));
+               }
        } while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
@@ -122,16 +178,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
        pte = pte_alloc_kernel(pmd, addr);
        if (!pte)
                return -ENOMEM;
+
+       pax_open_kernel();
        do {
                struct page *page = pages[*nr];
 
-               if (WARN_ON(!pte_none(*pte)))
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+               if (pgprot_val(prot) & _PAGE_NX)
+#endif
+
+               if (!pte_none(*pte)) {
+                       pax_close_kernel();
+                       WARN_ON(1);
                        return -EBUSY;
-               if (WARN_ON(!page))
+               }
+               if (!page) {
+                       pax_close_kernel();
+                       WARN_ON(1);
                        return -ENOMEM;
+               }
                set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
                (*nr)++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
+       pax_close_kernel();
        return 0;
 }
 
@@ -141,7 +210,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
        pmd_t *pmd;
        unsigned long next;
 
-       pmd = pmd_alloc(&init_mm, pud, addr);
+       pmd = pmd_alloc_kernel(&init_mm, pud, addr);
        if (!pmd)
                return -ENOMEM;
        do {
@@ -158,7 +227,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
        pud_t *pud;
        unsigned long next;
 
-       pud = pud_alloc(&init_mm, pgd, addr);
+       pud = pud_alloc_kernel(&init_mm, pgd, addr);
        if (!pud)
                return -ENOMEM;
        do {
@@ -218,6 +287,12 @@ int is_vmalloc_or_module_addr(const void *x)
        if (addr >= MODULES_VADDR && addr < MODULES_END)
                return 1;
 #endif
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+       if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
+               return 1;
+#endif
+
        return is_vmalloc_addr(x);
 }
 
@@ -238,8 +313,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
 
        if (!pgd_none(*pgd)) {
                pud_t *pud = pud_offset(pgd, addr);
+#ifdef CONFIG_X86
+               if (!pud_large(*pud))
+#endif
                if (!pud_none(*pud)) {
                        pmd_t *pmd = pmd_offset(pud, addr);
+#ifdef CONFIG_X86
+                       if (!pmd_large(*pmd))
+#endif
                        if (!pmd_none(*pmd)) {
                                pte_t *ptep, pte;
 
@@ -341,7 +422,7 @@ static void purge_vmap_area_lazy(void);
  * Allocate a region of KVA of the specified size and alignment, within the
  * vstart and vend.
  */
-static struct vmap_area *alloc_vmap_area(unsigned long size,
+static struct vmap_area * __size_overflow(1) alloc_vmap_area(unsigned long size,
                                unsigned long align,
                                unsigned long vstart, unsigned long vend,
                                int node, gfp_t gfp_mask)
@@ -1182,13 +1263,27 @@ void __init vmalloc_init(void)
        for_each_possible_cpu(i) {
                struct vmap_block_queue *vbq;
                struct vfree_deferred *p;
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
+               struct stack_deferred *p2;
+#endif
 
                vbq = &per_cpu(vmap_block_queue, i);
                spin_lock_init(&vbq->lock);
                INIT_LIST_HEAD(&vbq->free);
+
                p = &per_cpu(vfree_deferred, i);
                init_llist_head(&p->list);
-               INIT_WORK(&p->wq, free_work);
+               INIT_WORK(&p->wq, vfree_work);
+
+               p = &per_cpu(vunmap_deferred, i);
+               init_llist_head(&p->list);
+               INIT_WORK(&p->wq, vunmap_work);
+
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
+               p2 = &per_cpu(stack_deferred, i);
+               init_llist_head(&p2->list.list);
+               INIT_WORK(&p2->wq, unmap_work);
+#endif
        }
 
        /* Import existing vmlist entries. */
@@ -1313,6 +1408,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
        struct vm_struct *area;
 
        BUG_ON(in_interrupt());
+
+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
+       if (flags & VM_KERNEXEC) {
+               if (start != VMALLOC_START || end != VMALLOC_END)
+                       return NULL;
+               start = (unsigned long)MODULES_EXEC_VADDR;
+               end = (unsigned long)MODULES_EXEC_END;
+       }
+#endif
+
        if (flags & VM_IOREMAP)
                align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
 
@@ -1511,13 +1616,37 @@ EXPORT_SYMBOL(vfree);
  */
 void vunmap(const void *addr)
 {
-       BUG_ON(in_interrupt());
-       might_sleep();
-       if (addr)
+       if (!addr)
+               return;
+
+       if (unlikely(in_interrupt())) {
+               struct vfree_deferred *p = this_cpu_ptr(&vunmap_deferred);
+               if (llist_add((struct llist_node *)addr, &p->list))
+                       schedule_work(&p->wq);
+       } else {
+               might_sleep();
                __vunmap(addr, 0);
+       }
 }
 EXPORT_SYMBOL(vunmap);
 
+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
+void unmap_process_stacks(struct task_struct *task)
+{
+       if (unlikely(in_interrupt())) {
+               struct stack_deferred *p = this_cpu_ptr(&stack_deferred);
+               struct stack_deferred_llist *list = task->stack;
+               list->stack = task->stack;
+               list->lowmem_stack = task->lowmem_stack;
+               if (llist_add((struct llist_node *)&list->list, &p->list.list))
+                       schedule_work(&p->wq);
+       } else {
+               __vunmap(task->stack, 0);
+               free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
+       }
+}
+#endif
+
 /**
  *     vmap  -  map an array of pages into virtually contiguous space
  *     @pages:         array of page pointers
@@ -1538,6 +1667,11 @@ void *vmap(struct page **pages, unsigned int count,
        if (count > totalram_pages)
                return NULL;
 
+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
+       if (!(pgprot_val(prot) & _PAGE_NX))
+               flags |= VM_KERNEXEC;
+#endif
+
        area = get_vm_area_caller((count << PAGE_SHIFT), flags,
                                        __builtin_return_address(0));
        if (!area)
@@ -1640,6 +1774,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
        if (!size || (size >> PAGE_SHIFT) > totalram_pages)
                goto fail;
 
+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
+       if (!(pgprot_val(prot) & _PAGE_NX))
+               area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
+                                         VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
+       else
+#endif
+
        area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
                                  start, end, node, gfp_mask, caller);
        if (!area)
@@ -1816,10 +1957,9 @@ EXPORT_SYMBOL(vzalloc_node);
  *     For tight control over page level allocator and protection flags
  *     use __vmalloc() instead.
  */
-
 void *vmalloc_exec(unsigned long size)
 {
-       return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+       return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
                              NUMA_NO_NODE, __builtin_return_address(0));
 }
 
@@ -2126,6 +2266,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
 {
        struct vm_struct *area;
 
+       BUG_ON(vma->vm_mirror);
+
        size = PAGE_ALIGN(size);
 
        if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
@@ -2608,7 +2750,11 @@ static int s_show(struct seq_file *m, void *p)
                v->addr, v->addr + v->size, v->size);
 
        if (v->caller)
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+               seq_printf(m, " %pK", v->caller);
+#else
                seq_printf(m, " %pS", v->caller);
+#endif
 
        if (v->nr_pages)
                seq_printf(m, " pages=%d", v->nr_pages);
index cdac77398880ced092e4fdfc0956105d43f2e050..7dd324eebb9666306ce9e277992f26b7faade4cb 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/mm_inline.h>
 #include <linux/page_ext.h>
 #include <linux/page_owner.h>
+#include <linux/grsecurity.h>
 
 #include "internal.h"
 
@@ -83,7 +84,7 @@ void vm_events_fold_cpu(int cpu)
  *
  * vm_stat contains the global counters
  */
-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
 EXPORT_SYMBOL(vm_stat);
 
 #ifdef CONFIG_SMP
@@ -435,7 +436,7 @@ static int fold_diff(int *diff)
 
        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                if (diff[i]) {
-                       atomic_long_add(diff[i], &vm_stat[i]);
+                       atomic_long_add_unchecked(diff[i], &vm_stat[i]);
                        changes++;
        }
        return changes;
@@ -473,7 +474,7 @@ static int refresh_cpu_vm_stats(void)
                        v = this_cpu_xchg(p->vm_stat_diff[i], 0);
                        if (v) {
 
-                               atomic_long_add(v, &zone->vm_stat[i]);
+                               atomic_long_add_unchecked(v, &zone->vm_stat[i]);
                                global_diff[i] += v;
 #ifdef CONFIG_NUMA
                                /* 3 seconds idle till flush */
@@ -537,7 +538,7 @@ void cpu_vm_stats_fold(int cpu)
 
                                v = p->vm_stat_diff[i];
                                p->vm_stat_diff[i] = 0;
-                               atomic_long_add(v, &zone->vm_stat[i]);
+                               atomic_long_add_unchecked(v, &zone->vm_stat[i]);
                                global_diff[i] += v;
                        }
        }
@@ -557,8 +558,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
                if (pset->vm_stat_diff[i]) {
                        int v = pset->vm_stat_diff[i];
                        pset->vm_stat_diff[i] = 0;
-                       atomic_long_add(v, &zone->vm_stat[i]);
-                       atomic_long_add(v, &vm_stat[i]);
+                       atomic_long_add_unchecked(v, &zone->vm_stat[i]);
+                       atomic_long_add_unchecked(v, &vm_stat[i]);
                }
 }
 #endif
@@ -1291,10 +1292,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
        stat_items_size += sizeof(struct vm_event_state);
 #endif
 
-       v = kmalloc(stat_items_size, GFP_KERNEL);
+       v = kzalloc(stat_items_size, GFP_KERNEL);
        m->private = v;
        if (!v)
                return ERR_PTR(-ENOMEM);
+
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+        if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+                && !in_group_p(grsec_proc_gid)
+#endif
+        )
+               return (unsigned long *)m->private + *pos;
+#endif
+#endif
+
        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                v[i] = global_page_state(i);
        v += NR_VM_ZONE_STAT_ITEMS;
@@ -1526,10 +1539,16 @@ static int __init setup_vmstat(void)
        cpu_notifier_register_done();
 #endif
 #ifdef CONFIG_PROC_FS
-       proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
-       proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
-       proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
-       proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
+       {
+               mode_t gr_mode = S_IRUGO;
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+               gr_mode = S_IRUSR;
+#endif
+               proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
+               proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
+               proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
+               proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
+       }
 #endif
        return 0;
 }
index 64c6bed4a3d3aa4545f596a1a146f1d053db949c..b79a5deae767dfae77980382260294f553f802ea 100644 (file)
@@ -481,7 +481,7 @@ out:
        return NOTIFY_DONE;
 }
 
-static struct notifier_block vlan_notifier_block __read_mostly = {
+static struct notifier_block vlan_notifier_block = {
        .notifier_call = vlan_device_event,
 };
 
@@ -556,8 +556,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        break;
-               if ((args.u.name_type >= 0) &&
-                   (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
+               if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
                        struct vlan_net *vn;
 
                        vn = net_generic(net, vlan_net_id);
index 8ac8a5cc214331253e591fe26f6c3b39a00d5023..991defc6a4bf6d6846bdec6a2fed7d7013a11cb3 100644 (file)
@@ -238,7 +238,7 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
-struct rtnl_link_ops vlan_link_ops __read_mostly = {
+struct rtnl_link_ops vlan_link_ops = {
        .kind           = "vlan",
        .maxtype        = IFLA_VLAN_MAX,
        .policy         = vlan_policy,
index e86a9bea1d160ccc1a739eee576d0bdbf0483956..e91f70e490176cfcda960f92d12ae438b643b61e 100644 (file)
@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
                                       len - inline_len);
                        } else {
                                err = copy_from_user(ename + inline_len,
-                                                    uidata, len - inline_len);
+                                                    (char __force_user *)uidata, len - inline_len);
                                if (err) {
                                        err = -EFAULT;
                                        goto out_err;
@@ -1570,7 +1570,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
                        kernel_buf = 1;
                        indata = data;
                } else
-                       indata = (__force char *)udata;
+                       indata = (__force_kernel char *)udata;
                /*
                 * response header len is 11
                 * PDU Header(7) + IO Size (4)
@@ -1645,7 +1645,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
                        kernel_buf = 1;
                        odata = data;
                } else
-                       odata = (char *)udata;
+                       odata = (char __force_kernel *)udata;
                req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
                                       P9_ZC_HDR_SZ, kernel_buf, "dqd",
                                       fid->fid, offset, rsize);
index 6ab36aea77275ef342b88f8e66e0e5c294086fd0..6f1841b7ffcc674ebfdf08c5ac1d132bdb071ac3 100644 (file)
@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
 void v9fs_register_trans(struct p9_trans_module *m)
 {
        spin_lock(&v9fs_trans_lock);
-       list_add_tail(&m->list, &v9fs_trans_list);
+       pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
        spin_unlock(&v9fs_trans_lock);
 }
 EXPORT_SYMBOL(v9fs_register_trans);
@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
 void v9fs_unregister_trans(struct p9_trans_module *m)
 {
        spin_lock(&v9fs_trans_lock);
-       list_del_init(&m->list);
+       pax_list_del_init((struct list_head *)&m->list);
        spin_unlock(&v9fs_trans_lock);
 }
 EXPORT_SYMBOL(v9fs_unregister_trans);
index 80d08f6664cbb5611eef291b959facd5332a61ef..de63fd19fe09be5b05c88d9b63b3cc4b22aafe50 100644 (file)
@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
        oldfs = get_fs();
        set_fs(get_ds());
        /* The cast to a user pointer is valid due to the set_fs() */
-       ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
+       ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
        set_fs(oldfs);
 
        if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
index af46bc49e1e9946ce1ff4ff0fc772ce14a231670..f9adfcdfd5c9ae8ec8fe3d4ce495ea005990dabb 100644 (file)
@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
        struct proc_dir_entry *p;
        int rc = -ENOMEM;
 
-       atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
+       atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
        if (!atalk_proc_dir)
                goto out;
 
index 876fbe83e2e4db5ded907ea9938c435edc059e0c..8bbea9f758e6d3471ec791c792ee606af4057f26 100644 (file)
@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
        if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
                return 1;
        atm_return(vcc, truesize);
-       atomic_inc(&vcc->stats->rx_drop);
+       atomic_inc_unchecked(&vcc->stats->rx_drop);
        return 0;
 }
 EXPORT_SYMBOL(atm_charge);
@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
                }
        }
        atm_return(vcc, guess);
-       atomic_inc(&vcc->stats->rx_drop);
+       atomic_inc_unchecked(&vcc->stats->rx_drop);
        return NULL;
 }
 EXPORT_SYMBOL(atm_alloc_charge);
@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
 
 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
 {
-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
        __SONET_ITEMS
 #undef __HANDLE_ITEM
 }
@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
 
 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
 {
-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
        __SONET_ITEMS
 #undef __HANDLE_ITEM
 }
index 4b98f897044aa6a364392bc1ec5b68a2a672a2d2..5a2f6cbf1e50cb6bc60eab3fd94799aef5d82b34 100644 (file)
@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
 }
 
 static struct lane2_ops lane2_ops = {
-       lane2_resolve,          /* resolve,             spec 3.1.3 */
-       lane2_associate_req,    /* associate_req,       spec 3.1.4 */
-       NULL                    /* associate indicator, spec 3.1.5 */
+       .resolve = lane2_resolve,
+       .associate_req = lane2_associate_req,
+       .associate_indicator = NULL
 };
 
 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
index 4149db1b7885127db5a3518bf5c944ab5ba9728f..f2ab682cf673fdca16188fd6a27b9a8b5c48cb18 100644 (file)
@@ -48,7 +48,7 @@ struct lane2_ops {
                              const u8 *tlvs, u32 sizeoftlvs);
        void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
                                     const u8 *tlvs, u32 sizeoftlvs);
-};
+} __no_const;
 
 /*
  * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
index d1b2d9a0314412570f770ce4630ee08e8f5b229c..d549f7fc7c25957a6804807b7bc3f9f0bd23719c 100644 (file)
@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
 
 
 static struct in_cache_ops ingress_ops = {
-       in_cache_add_entry,               /* add_entry       */
-       in_cache_get,                     /* get             */
-       in_cache_get_with_mask,           /* get_with_mask   */
-       in_cache_get_by_vcc,              /* get_by_vcc      */
-       in_cache_put,                     /* put             */
-       in_cache_remove_entry,            /* remove_entry    */
-       cache_hit,                        /* cache_hit       */
-       clear_count_and_expired,          /* clear_count     */
-       check_resolving_entries,          /* check_resolving */
-       refresh_entries,                  /* refresh         */
-       in_destroy_cache                  /* destroy_cache   */
+       .add_entry = in_cache_add_entry,
+       .get = in_cache_get,
+       .get_with_mask = in_cache_get_with_mask,
+       .get_by_vcc = in_cache_get_by_vcc,
+       .put = in_cache_put,
+       .remove_entry = in_cache_remove_entry,
+       .cache_hit = cache_hit,
+       .clear_count = clear_count_and_expired,
+       .check_resolving = check_resolving_entries,
+       .refresh = refresh_entries,
+       .destroy_cache = in_destroy_cache
 };
 
 static struct eg_cache_ops egress_ops = {
-       eg_cache_add_entry,               /* add_entry        */
-       eg_cache_get_by_cache_id,         /* get_by_cache_id  */
-       eg_cache_get_by_tag,              /* get_by_tag       */
-       eg_cache_get_by_vcc,              /* get_by_vcc       */
-       eg_cache_get_by_src_ip,           /* get_by_src_ip    */
-       eg_cache_put,                     /* put              */
-       eg_cache_remove_entry,            /* remove_entry     */
-       update_eg_cache_entry,            /* update           */
-       clear_expired,                    /* clear_expired    */
-       eg_destroy_cache                  /* destroy_cache    */
+       .add_entry = eg_cache_add_entry,
+       .get_by_cache_id = eg_cache_get_by_cache_id,
+       .get_by_tag = eg_cache_get_by_tag,
+       .get_by_vcc = eg_cache_get_by_vcc,
+       .get_by_src_ip = eg_cache_get_by_src_ip,
+       .put = eg_cache_put,
+       .remove_entry = eg_cache_remove_entry,
+       .update = update_eg_cache_entry,
+       .clear_expired = clear_expired,
+       .destroy_cache = eg_destroy_cache
 };
 
 
index bbb6461a4b7fbe9a9d2b2d957bca7974331d42aa..cf040169ed44f61cf269aa1afe428233536d6441 100644 (file)
@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
   const struct k_atm_aal_stats *stats)
 {
        seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
-                  atomic_read(&stats->tx), atomic_read(&stats->tx_err),
-                  atomic_read(&stats->rx), atomic_read(&stats->rx_err),
-                  atomic_read(&stats->rx_drop));
+                  atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
+                  atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
+                  atomic_read_unchecked(&stats->rx_drop));
 }
 
 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
index 0447d5d0b63983b139bda2853eaa9980640dd32d..3cf47285557bdb5590dff5cd5cd2b57a17bfdf0d 100644 (file)
@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
 static void copy_aal_stats(struct k_atm_aal_stats *from,
     struct atm_aal_stats *to)
 {
-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
        __AAL_STAT_ITEMS
 #undef __HANDLE_ITEM
 }
@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
 static void subtract_aal_stats(struct k_atm_aal_stats *from,
     struct atm_aal_stats *to)
 {
-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
        __AAL_STAT_ITEMS
 #undef __HANDLE_ITEM
 }
index 919a5ce47515b17c63976bc7ee001b657760c606..cc6b444b371c7bdec8d2f45e89f0ec0a55db97ac 100644 (file)
@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
 {
        char path[sizeof("net/ax25/") + IFNAMSIZ];
        int k;
-       struct ctl_table *table;
+       ctl_table_no_const *table;
 
        table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
        if (!table)
index 1e8053976e83dd8bcb03ee37b029fe6f7140d3e5..676c37ac9149539047a86dcbb2d71c9aa60d4bfe 100644 (file)
@@ -313,7 +313,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
 
        /* randomize initial seqno to avoid collision */
        get_random_bytes(&random_seqno, sizeof(random_seqno));
-       atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
+       atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
 
        hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
        ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
@@ -918,9 +918,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
        batadv_ogm_packet->tvlv_len = htons(tvlv_len);
 
        /* change sequence number to network order */
-       seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
+       seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
        batadv_ogm_packet->seqno = htonl(seqno);
-       atomic_inc(&hard_iface->bat_iv.ogm_seqno);
+       atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
 
        batadv_iv_ogm_slide_own_bcast_window(hard_iface);
 
@@ -1597,7 +1597,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
                return;
 
        /* could be changed by schedule_own_packet() */
-       if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
+       if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
 
        if (ogm_packet->flags & BATADV_DIRECTLINK)
                has_directlink_flag = true;
index 00f9e144cc97b1afe7cefc5b682524103799e4b7..e1c720353be57452325b18d789f0ff25e35ae18b 100644 (file)
@@ -450,7 +450,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
        frag_header.packet_type = BATADV_UNICAST_FRAG;
        frag_header.version = BATADV_COMPAT_VERSION;
        frag_header.ttl = BATADV_TTL;
-       frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
+       frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
        frag_header.reserved = 0;
        frag_header.no = 0;
        frag_header.total_size = htons(skb->len);
index 5467955eb27c6c3e069db480f8a0ebc97fcaf107..75ad4e36abc7551e87223ac18b262f8e5bf1c33f 100644 (file)
@@ -296,7 +296,7 @@ send:
                                primary_if->net_dev->dev_addr);
 
                /* set broadcast sequence number */
-               seqno = atomic_inc_return(&bat_priv->bcast_seqno);
+               seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
                bcast_packet->seqno = htonl(seqno);
 
                batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
@@ -761,7 +761,7 @@ static int batadv_softif_init_late(struct net_device *dev)
        atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
 
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
-       atomic_set(&bat_priv->bcast_seqno, 1);
+       atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
        atomic_set(&bat_priv->tt.vn, 0);
        atomic_set(&bat_priv->tt.local_changes, 0);
        atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
@@ -775,7 +775,7 @@ static int batadv_softif_init_late(struct net_device *dev)
 
        /* randomize initial seqno to avoid collision */
        get_random_bytes(&random_seqno, sizeof(random_seqno));
-       atomic_set(&bat_priv->frag_seqno, random_seqno);
+       atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
 
        bat_priv->primary_if = NULL;
        bat_priv->num_ifaces = 0;
@@ -983,7 +983,7 @@ int batadv_softif_is_valid(const struct net_device *net_dev)
        return 0;
 }
 
-struct rtnl_link_ops batadv_link_ops __read_mostly = {
+struct rtnl_link_ops batadv_link_ops = {
        .kind           = "batadv",
        .priv_size      = sizeof(struct batadv_priv),
        .setup          = batadv_softif_init_early,
index 8854c05622a9bae2b8f30b0cf91686e8c629a849..ee5d54972d2084780bdd9e1c6f04ec78f6a9f304 100644 (file)
@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
 struct batadv_hard_iface_bat_iv {
        unsigned char *ogm_buff;
        int ogm_buff_len;
-       atomic_t ogm_seqno;
+       atomic_unchecked_t ogm_seqno;
 };
 
 /**
@@ -768,7 +768,7 @@ struct batadv_priv {
        atomic_t bonding;
        atomic_t fragmentation;
        atomic_t packet_size_max;
-       atomic_t frag_seqno;
+       atomic_unchecked_t frag_seqno;
 #ifdef CONFIG_BATMAN_ADV_BLA
        atomic_t bridge_loop_avoidance;
 #endif
@@ -787,7 +787,7 @@ struct batadv_priv {
 #endif
        uint32_t isolation_mark;
        uint32_t isolation_mark_mask;
-       atomic_t bcast_seqno;
+       atomic_unchecked_t bcast_seqno;
        atomic_t bcast_queue_left;
        atomic_t batman_queue_left;
        char num_ifaces;
index 2c245fdf319a60022328e8f3918251db526f88f8..dccf54337b7472c246f09aa6faa29aaa28e8652b 100644 (file)
@@ -1067,7 +1067,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }
 
-               len = min_t(unsigned int, len, sizeof(uf));
+               len = min((size_t)len, sizeof(uf));
                if (copy_from_user(&uf, optval, len)) {
                        err = -EFAULT;
                        break;
index d04dc009573691cde515d1db82c13cbfd11a459f..d25d576f483649e05f909c71ce88d00d6e1ca9ba 100644 (file)
@@ -3524,8 +3524,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
                        break;
 
                case L2CAP_CONF_RFC:
-                       if (olen == sizeof(rfc))
-                               memcpy(&rfc, (void *)val, olen);
+                       if (olen != sizeof(rfc))
+                               break;
+
+                       memcpy(&rfc, (void *)val, olen);
 
                        if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
                            rfc.mode != chan->mode)
index f65caf41953f866fdea55b8eb342fafc9dc8a1c7..c07110cf23b32981c2d3f6ac8a1e65d88c9c807f 100644 (file)
@@ -634,7 +634,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
        struct sock *sk = sock->sk;
        struct l2cap_chan *chan = l2cap_pi(sk)->chan;
        struct l2cap_options opts;
-       int len, err = 0;
+       int err = 0;
+       size_t len = optlen;
        u32 opt;
 
        BT_DBG("sk %p", sk);
@@ -661,7 +662,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
                opts.max_tx   = chan->max_tx;
                opts.txwin_size = chan->tx_win;
 
-               len = min_t(unsigned int, sizeof(opts), optlen);
+               len = min(sizeof(opts), len);
                if (copy_from_user((char *) &opts, optval, len)) {
                        err = -EFAULT;
                        break;
@@ -748,7 +749,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
        struct bt_security sec;
        struct bt_power pwr;
        struct l2cap_conn *conn;
-       int len, err = 0;
+       int err = 0;
+       size_t len = optlen;
        u32 opt;
 
        BT_DBG("sk %p", sk);
@@ -772,7 +774,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
 
                sec.level = BT_SECURITY_LOW;
 
-               len = min_t(unsigned int, sizeof(sec), optlen);
+               len = min(sizeof(sec), len);
                if (copy_from_user((char *) &sec, optval, len)) {
                        err = -EFAULT;
                        break;
@@ -868,7 +870,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
 
                pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
 
-               len = min_t(unsigned int, sizeof(pwr), optlen);
+               len = min(sizeof(pwr), len);
                if (copy_from_user((char *) &pwr, optval, len)) {
                        err = -EFAULT;
                        break;
index 2348176401a0b19ad3b5e9129999381f43da50a0..b9b6cf2b876cc8c7f7b7083b3f1487c09aa78138 100644 (file)
@@ -687,7 +687,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
        struct sock *sk = sock->sk;
        struct bt_security sec;
        int err = 0;
-       size_t len;
+       size_t len = optlen;
        u32 opt;
 
        BT_DBG("sk %p", sk);
@@ -709,7 +709,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
 
                sec.level = BT_SECURITY_LOW;
 
-               len = min_t(unsigned int, sizeof(sec), optlen);
+               len = min(sizeof(sec), len);
                if (copy_from_user((char *) &sec, optval, len)) {
                        err = -EFAULT;
                        break;
index 8e385a0ae60e0bd6e4b1ed7615905a7170c4a8bc..a5bdd8ef104702038667a9750271ca3b36706339 100644 (file)
@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
        BT_DBG("tty %p id %d", tty, tty->index);
 
        BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
-              dev->channel, dev->port.count);
+              dev->channel, atomic_read(&dev->port.count));
 
        err = tty_port_open(&dev->port, tty, filp);
        if (err)
@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
        struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
 
        BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
-                                               dev->port.count);
+                                               atomic_read(&dev->port.count));
 
        tty_port_close(&dev->port, tty, filp);
 }
index 44425aff7cba15f93c659fd0502a7b650d050b5e..4ee730e6eab0a4a6d9ac941ec74bf97e5b4c18bd 100644 (file)
@@ -147,6 +147,8 @@ static int __init br_init(void)
 {
        int err;
 
+       BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
+
        err = stp_proto_register(&br_stp_proto);
        if (err < 0) {
                pr_err("bridge: can't register sap for STP\n");
index 9f5eb55a4d3a7e4b51cc3fa3fa0783a52e6f98fa..45ab9c5d7ab49d1fbefa6a0d69d66b064d45ba6e 100644 (file)
@@ -566,7 +566,7 @@ static struct rtnl_af_ops br_af_ops = {
        .get_link_af_size       = br_get_link_af_size,
 };
 
-struct rtnl_link_ops br_link_ops __read_mostly = {
+struct rtnl_link_ops br_link_ops = {
        .kind                   = "bridge",
        .priv_size              = sizeof(struct net_bridge),
        .setup                  = br_dev_setup,
index d9a8c05d995d14466d2cef02fcd571d4ad009272..8dadc6c65ff3e5b8eaa9f762f08e5e66a7f7366e 100644 (file)
@@ -1533,7 +1533,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
                        tmp.valid_hooks = t->table->valid_hooks;
                }
                mutex_unlock(&ebt_mutex);
-               if (copy_to_user(user, &tmp, *len) != 0) {
+               if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
                        BUGPRINT("c2u Didn't work\n");
                        ret = -EFAULT;
                        break;
@@ -2339,7 +2339,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
                        goto out;
                tmp.valid_hooks = t->valid_hooks;
 
-               if (copy_to_user(user, &tmp, *len) != 0) {
+               if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
                        ret = -EFAULT;
                        break;
                }
@@ -2350,7 +2350,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
                tmp.entries_size = t->table->entries_size;
                tmp.valid_hooks = t->table->valid_hooks;
 
-               if (copy_to_user(user, &tmp, *len) != 0) {
+               if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
                        ret = -EFAULT;
                        break;
                }
index f5afda1abc76fe908e8cce160ad032870972424c..dcf770a6302678630da00d213c977e2235faf933 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/pkt_sched.h>
+#include <linux/sched.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfpkt.h>
 #include <net/caif/cfctrl.h>
@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
        memset(&dev_info, 0, sizeof(dev_info));
        dev_info.id = 0xff;
        cfsrvl_init(&this->serv, 0, &dev_info, false);
-       atomic_set(&this->req_seq_no, 1);
-       atomic_set(&this->rsp_seq_no, 1);
+       atomic_set_unchecked(&this->req_seq_no, 1);
+       atomic_set_unchecked(&this->rsp_seq_no, 1);
        this->serv.layer.receive = cfctrl_recv;
        sprintf(this->serv.layer.name, "ctrl");
        this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
                              struct cfctrl_request_info *req)
 {
        spin_lock_bh(&ctrl->info_list_lock);
-       atomic_inc(&ctrl->req_seq_no);
-       req->sequence_no = atomic_read(&ctrl->req_seq_no);
+       atomic_inc_unchecked(&ctrl->req_seq_no);
+       req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
        list_add_tail(&req->list, &ctrl->list);
        spin_unlock_bh(&ctrl->info_list_lock);
 }
@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
                        if (p != first)
                                pr_warn("Requests are not received in order\n");
 
-                       atomic_set(&ctrl->rsp_seq_no,
+                       atomic_set_unchecked(&ctrl->rsp_seq_no,
                                         p->sequence_no);
                        list_del(&p->list);
                        goto out;
index 67a4a36febd1a6bf554a36b4690272777cb008a5..8d280680ba4175ecb29e6bdf95336aab82eaa240 100644 (file)
@@ -515,7 +515,7 @@ static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
 };
 
 
-static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
+static struct rtnl_link_ops ipcaif_link_ops = {
        .kind           = "caif",
        .priv_size      = sizeof(struct chnl_net),
        .setup          = ipcaif_net_setup,
index 66e08040ced7557ba19e7535815804bbcffca12a..93bcf0511e4bf2599cb5da44e356ca8f0b772aed 100644 (file)
@@ -259,6 +259,9 @@ int can_send(struct sk_buff *skb, int loop)
                goto inval_skb;
        }
 
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
 
@@ -881,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
 };
 
 /* notifier block for netdevice event */
-static struct notifier_block can_netdev_notifier __read_mostly = {
+static struct notifier_block can_netdev_notifier = {
        .notifier_call = can_notifier,
 };
 
index ee9ffd9565526eb0336fba37e17f0a31b2dd3c34..dfdf3d403762b6864751c6402a516adc71ac5dad 100644 (file)
@@ -1619,7 +1619,7 @@ static int __init bcm_module_init(void)
        }
 
        /* create /proc/net/can-bcm directory */
-       proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
+       proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
        return 0;
 }
 
index 295f62e62eb34bf4050eb0657c48426e583b65b4..0c3b09ed38cb7ab411b4a837642842fc8fba7adc 100644 (file)
@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
                 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
 
 static HLIST_HEAD(cgw_list);
-static struct notifier_block notifier;
 
 static struct kmem_cache *cgw_cache __read_mostly;
 
@@ -947,6 +946,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
        return err;
 }
 
+static struct notifier_block notifier = {
+       .notifier_call = cgw_notifier
+};
+
 static __init int cgw_module_init(void)
 {
        /* sanitize given module parameter */
@@ -962,7 +965,6 @@ static __init int cgw_module_init(void)
                return -ENOMEM;
 
        /* set notifier */
-       notifier.notifier_call = cgw_notifier;
        register_netdevice_notifier(&notifier);
 
        if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
index 1a19b985a8685b0aff4450acda4a8d4f429568bc..df2b4ec1798d49b479bcdea37947496f8affa243 100644 (file)
@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
 void can_init_proc(void)
 {
        /* create /proc/net/can directory */
-       can_dir = proc_mkdir("can", init_net.proc_net);
+       can_dir = proc_mkdir_restrict("can", init_net.proc_net);
 
        if (!can_dir) {
                printk(KERN_INFO "can: failed to create /proc/net/can . "
index 33a2f201e460e1585f82db94c8f3f90f01bbacdf..371bd0986dc22e5f24a89fd828b62b7a9b54e478 100644 (file)
@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
 #define MAX_ADDR_STR_LEN       64      /* 54 is enough */
 
 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
-static atomic_t addr_str_seq = ATOMIC_INIT(0);
+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
 
 static struct page *zero_page;         /* used in certain error cases */
 
@@ -199,7 +199,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
        struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
        struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
 
-       i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
+       i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
        s = addr_str[i];
 
        switch (ss->ss_family) {
index 94d3d5e978832cba85b7212988f735780ceed9dd..2bd2649dd8081a39b7975d7dd6a15a30b60a17da 100644 (file)
@@ -93,20 +93,20 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
 
 #define CMSG_COMPAT_FIRSTHDR(msg)                      \
        (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ?     \
-        (struct compat_cmsghdr __user *)((msg)->msg_control) :         \
+        (struct compat_cmsghdr __force_user *)((msg)->msg_control) :           \
         (struct compat_cmsghdr __user *)NULL)
 
 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
        ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
         (ucmlen) <= (unsigned long) \
         ((mhdr)->msg_controllen - \
-         ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
+         ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
 
 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
                struct compat_cmsghdr __user *cmsg, int cmsg_len)
 {
        char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
-       if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
+       if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
                        msg->msg_controllen)
                return NULL;
        return (struct compat_cmsghdr __user *)ptr;
@@ -196,7 +196,7 @@ Efault:
 
 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
 {
-       struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
+       struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
        struct compat_cmsghdr cmhdr;
        struct compat_timeval ctv;
        struct compat_timespec cts[3];
@@ -252,7 +252,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
 
 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
 {
-       struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
+       struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
        int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
        int fdnum = scm->fp->count;
        struct file **fp = scm->fp->fp;
@@ -340,7 +340,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
                return -EFAULT;
        old_fs = get_fs();
        set_fs(KERNEL_DS);
-       err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
+       err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
        set_fs(old_fs);
 
        return err;
@@ -401,7 +401,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
        len = sizeof(ktime);
        old_fs = get_fs();
        set_fs(KERNEL_DS);
-       err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
+       err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
        set_fs(old_fs);
 
        if (!err) {
@@ -544,7 +544,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
        case MCAST_JOIN_GROUP:
        case MCAST_LEAVE_GROUP:
        {
-               struct compat_group_req __user *gr32 = (void *)optval;
+               struct compat_group_req __user *gr32 = (void __user *)optval;
                struct group_req __user *kgr =
                        compat_alloc_user_space(sizeof(struct group_req));
                u32 interface;
@@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
        case MCAST_BLOCK_SOURCE:
        case MCAST_UNBLOCK_SOURCE:
        {
-               struct compat_group_source_req __user *gsr32 = (void *)optval;
+               struct compat_group_source_req __user *gsr32 = (void __user *)optval;
                struct group_source_req __user *kgsr = compat_alloc_user_space(
                        sizeof(struct group_source_req));
                u32 interface;
@@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
        }
        case MCAST_MSFILTER:
        {
-               struct compat_group_filter __user *gf32 = (void *)optval;
+               struct compat_group_filter __user *gf32 = (void __user *)optval;
                struct group_filter __user *kgf;
                u32 interface, fmode, numsrc;
 
@@ -624,7 +624,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
        char __user *optval, int __user *optlen,
        int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
 {
-       struct compat_group_filter __user *gf32 = (void *)optval;
+       struct compat_group_filter __user *gf32 = (void __user *)optval;
        struct group_filter __user *kgf;
        int __user      *koptlen;
        u32 interface, fmode, numsrc;
@@ -768,7 +768,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
 
        if (call < SYS_SOCKET || call > SYS_SENDMMSG)
                return -EINVAL;
-       if (copy_from_user(a, args, nas[call]))
+       if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
                return -EFAULT;
        a0 = a[0];
        a1 = a[1];
index df493d68330c03d1cb5b59e40d31294d7f45b3f8..1145766099e1470a14c2603c0f0fe5c220a7413f 100644 (file)
@@ -302,7 +302,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
        }
 
        kfree_skb(skb);
-       atomic_inc(&sk->sk_drops);
+       atomic_inc_unchecked(&sk->sk_drops);
        sk_mem_reclaim_partial(sk);
 
        return err;
index 4ff46f8054d411a41644d9276d695ed91daa950e..e877e7804ebf4ffe90537fad28dd0bafd4a933bc 100644 (file)
@@ -1680,14 +1680,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
        if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
                if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
-                       atomic_long_inc(&dev->rx_dropped);
+                       atomic_long_inc_unchecked(&dev->rx_dropped);
                        kfree_skb(skb);
                        return NET_RX_DROP;
                }
        }
 
        if (unlikely(!is_skb_forwardable(dev, skb))) {
-               atomic_long_inc(&dev->rx_dropped);
+               atomic_long_inc_unchecked(&dev->rx_dropped);
                kfree_skb(skb);
                return NET_RX_DROP;
        }
@@ -2958,7 +2958,7 @@ recursion_alert:
 drop:
        rcu_read_unlock_bh();
 
-       atomic_long_inc(&dev->tx_dropped);
+       atomic_long_inc_unchecked(&dev->tx_dropped);
        kfree_skb_list(skb);
        return rc;
 out:
@@ -3301,7 +3301,7 @@ enqueue:
 
        local_irq_restore(flags);
 
-       atomic_long_inc(&skb->dev->rx_dropped);
+       atomic_long_inc_unchecked(&skb->dev->rx_dropped);
        kfree_skb(skb);
        return NET_RX_DROP;
 }
@@ -3378,7 +3378,7 @@ int netif_rx_ni(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(netif_rx_ni);
 
-static void net_tx_action(struct softirq_action *h)
+static __latent_entropy void net_tx_action(void)
 {
        struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 
@@ -3711,7 +3711,7 @@ ncls:
                        ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
        } else {
 drop:
-               atomic_long_inc(&skb->dev->rx_dropped);
+               atomic_long_inc_unchecked(&skb->dev->rx_dropped);
                kfree_skb(skb);
                /* Jamal, now you will not able to escape explaining
                 * me how you were going to use this. :-)
@@ -4599,7 +4599,7 @@ out_unlock:
        return work;
 }
 
-static void net_rx_action(struct softirq_action *h)
+static __latent_entropy void net_rx_action(void)
 {
        struct softnet_data *sd = this_cpu_ptr(&softnet_data);
        unsigned long time_limit = jiffies + 2;
@@ -6610,8 +6610,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
        } else {
                netdev_stats_to_stats64(storage, &dev->stats);
        }
-       storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
-       storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
+       storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
+       storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
        return storage;
 }
 EXPORT_SYMBOL(dev_get_stats);
index b94b1d29350603e19c3db8e6fd740d3f89440771..da3ed7c4e08e311654bf7d4c69ca49bd8f1d100e 100644 (file)
@@ -368,8 +368,13 @@ void dev_load(struct net *net, const char *name)
        no_module = !dev;
        if (no_module && capable(CAP_NET_ADMIN))
                no_module = request_module("netdev-%s", name);
-       if (no_module && capable(CAP_SYS_MODULE))
+       if (no_module && capable(CAP_SYS_MODULE)) {
+#ifdef CONFIG_GRKERNSEC_MODHARDEN
+               ___request_module(true, "grsec_modharden_netdev", "%s", name);
+#else
                request_module("%s", name);
+#endif
+       }
 }
 EXPORT_SYMBOL(dev_load);
 
index ec9baea10c16c78200ec1809a5491f3aa01e83de..dd6195d5e8271fb5e1e50d25e77c3d19ffef6e36 100644 (file)
@@ -533,7 +533,11 @@ do_pass:
 
                /* Unkown instruction. */
                default:
-                       goto err;
+                       WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n",
+                                      fp->code, fp->jt, fp->jf, fp->k);
+                       kfree(addrs);
+                       BUG();
+                       return -EINVAL;
                }
 
                insn++;
@@ -577,7 +581,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
        u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
        int pc, ret = 0;
 
-       BUILD_BUG_ON(BPF_MEMWORDS > 16);
+       BUILD_BUG_ON(BPF_MEMWORDS != 16);
 
        masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
        if (!masks)
@@ -992,7 +996,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
        if (!fp)
                return -ENOMEM;
 
-       memcpy(fp->insns, fprog->filter, fsize);
+       memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
 
        fp->len = fprog->len;
        /* Since unattached filters are not copied back to user
index 1033725be40bd8f254ce27680e3b8abd09ad1546..340f65dc0580507aef8536b309e402324b4da0c7 100644 (file)
@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
 static int flow_entry_valid(struct flow_cache_entry *fle,
                                struct netns_xfrm *xfrm)
 {
-       if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
+       if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
                return 0;
        if (fle->object && !fle->object->ops->check(fle->object))
                return 0;
@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
                        hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
                        fcp->hash_count++;
                }
-       } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
+       } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
                flo = fle->object;
                if (!flo)
                        goto ret_object;
@@ -263,7 +263,7 @@ nocache:
        }
        flo = resolver(net, key, family, dir, flo, ctx);
        if (fle) {
-               fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
+               fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
                if (!IS_ERR(flo))
                        fle->object = flo;
                else
index 8d614c93f86a233a5cb3864c600d9c68fe5f9ea1..55752ea7405602418828d8cb9fd3e04c9e279243 100644 (file)
@@ -2802,7 +2802,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        int size, ret;
-       struct ctl_table tmp = *ctl;
+       ctl_table_no_const tmp = *ctl;
 
        tmp.extra1 = &zero;
        tmp.extra2 = &unres_qlen_max;
@@ -2864,7 +2864,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
                                           void __user *buffer,
                                           size_t *lenp, loff_t *ppos)
 {
-       struct ctl_table tmp = *ctl;
+       ctl_table_no_const tmp = *ctl;
        int ret;
 
        tmp.extra1 = &zero;
index 2bf83299600a4ac8eb56069295bbc755709642b1..2eb142303dcc6d67137fb5da85aaa5967c14aeeb 100644 (file)
@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
        struct rtnl_link_stats64 temp;
        const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
 
-       seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
+       if (gr_proc_is_restricted())
+               seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
+                  "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
+                  dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+                  0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
+       else
+               seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
                   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
                   dev->name, stats->rx_bytes, stats->rx_packets,
                   stats->rx_errors,
@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
        return 0;
 }
 
-static const struct seq_operations dev_seq_ops = {
+const struct seq_operations dev_seq_ops = {
        .start = dev_seq_start,
        .next  = dev_seq_next,
        .stop  = dev_seq_stop,
@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
 
 static int softnet_seq_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &softnet_seq_ops);
+       return seq_open_restrict(file, &softnet_seq_ops);
 }
 
 static const struct file_operations softnet_seq_fops = {
@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
                else
                        seq_printf(seq, "%04x", ntohs(pt->type));
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+               seq_printf(seq, " %-8s %pf\n",
+                          pt->dev ? pt->dev->name : "", NULL);
+#else
                seq_printf(seq, " %-8s %pf\n",
                           pt->dev ? pt->dev->name : "", pt->func);
+#endif
        }
 
        return 0;
index 999341244434309b6275fe6230a9f1bf7c57d2b2..2a4672bc1253c22bb232ddf3bf69cd3f2ba65efb 100644 (file)
@@ -279,7 +279,7 @@ static ssize_t carrier_changes_show(struct device *dev,
 {
        struct net_device *netdev = to_net_dev(dev);
        return sprintf(buf, fmt_dec,
-                      atomic_read(&netdev->carrier_changes));
+                      atomic_read_unchecked(&netdev->carrier_changes));
 }
 static DEVICE_ATTR_RO(carrier_changes);
 
index ce780c722e48ca2bf35534895ef0b7dc0a9541d5..6d296b336e8d2e8c6a966a03ef0e549d5a741214 100644 (file)
@@ -448,7 +448,7 @@ static int __register_pernet_operations(struct list_head *list,
        int error;
        LIST_HEAD(net_exit_list);
 
-       list_add_tail(&ops->list, list);
+       pax_list_add_tail((struct list_head *)&ops->list, list);
        if (ops->init || (ops->id && ops->size)) {
                for_each_net(net) {
                        error = ops_init(ops, net);
@@ -461,7 +461,7 @@ static int __register_pernet_operations(struct list_head *list,
 
 out_undo:
        /* If I have an error cleanup all namespaces I initialized */
-       list_del(&ops->list);
+       pax_list_del((struct list_head *)&ops->list);
        ops_exit_list(ops, &net_exit_list);
        ops_free_list(ops, &net_exit_list);
        return error;
@@ -472,7 +472,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
        struct net *net;
        LIST_HEAD(net_exit_list);
 
-       list_del(&ops->list);
+       pax_list_del((struct list_head *)&ops->list);
        for_each_net(net)
                list_add_tail(&net->exit_list, &net_exit_list);
        ops_exit_list(ops, &net_exit_list);
@@ -606,7 +606,7 @@ int register_pernet_device(struct pernet_operations *ops)
        mutex_lock(&net_mutex);
        error = register_pernet_operations(&pernet_list, ops);
        if (!error && (first_device == &pernet_list))
-               first_device = &ops->list;
+               first_device = (struct list_head *)&ops->list;
        mutex_unlock(&net_mutex);
        return error;
 }
index e0ad5d16c9c56947163d81201af07e26d1d3017c..04fa7f780e5fa02dd195d1c0e0aef9b87320a92e 100644 (file)
@@ -377,7 +377,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;
-       static atomic_t ip_ident;
+       static atomic_unchecked_t ip_ident;
        struct ipv6hdr *ip6h;
 
        udp_len = len + sizeof(*udph);
@@ -448,7 +448,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
                put_unaligned(0x45, (unsigned char *)iph);
                iph->tos      = 0;
                put_unaligned(htons(ip_len), &(iph->tot_len));
-               iph->id       = htons(atomic_inc_return(&ip_ident));
+               iph->id       = htons(atomic_inc_return_unchecked(&ip_ident));
                iph->frag_off = 0;
                iph->ttl      = 64;
                iph->protocol = IPPROTO_UDP;
index 352d183ecba347432a9a4b8b3cc6c6fd0861858d..1bddfaf2f7bd65980c01eaf7a97b424d8393c400 100644 (file)
@@ -3755,7 +3755,7 @@ static int __net_init pg_net_init(struct net *net)
        pn->net = net;
        INIT_LIST_HEAD(&pn->pktgen_threads);
        pn->pktgen_exiting = false;
-       pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
+       pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
        if (!pn->proc_dir) {
                pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
                return -ENODEV;
index 76ec6c52e3a3fdffd5965858b58e472a99876cdd..9cfb81cf0c7a7644b1e7dbe9a1114613cd5489f7 100644 (file)
@@ -60,7 +60,7 @@ struct rtnl_link {
        rtnl_doit_func          doit;
        rtnl_dumpit_func        dumpit;
        rtnl_calcit_func        calcit;
-};
+} __no_const;
 
 static DEFINE_MUTEX(rtnl_mutex);
 
@@ -306,10 +306,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
         * to use the ops for creating device. So do not
         * fill up dellink as well. That disables rtnl_dellink.
         */
-       if (ops->setup && !ops->dellink)
-               ops->dellink = unregister_netdevice_queue;
+       if (ops->setup && !ops->dellink) {
+               pax_open_kernel();
+               *(void **)&ops->dellink = unregister_netdevice_queue;
+               pax_close_kernel();
+       }
 
-       list_add_tail(&ops->list, &link_ops);
+       pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
        return 0;
 }
 EXPORT_SYMBOL_GPL(__rtnl_link_register);
@@ -356,7 +359,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
        for_each_net(net) {
                __rtnl_kill_links(net, ops);
        }
-       list_del(&ops->list);
+       pax_list_del((struct list_head *)&ops->list);
 }
 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
 
@@ -1035,7 +1038,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
            (dev->ifalias &&
             nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
            nla_put_u32(skb, IFLA_CARRIER_CHANGES,
-                       atomic_read(&dev->carrier_changes)))
+                       atomic_read_unchecked(&dev->carrier_changes)))
                goto nla_put_failure;
 
        if (1) {
@@ -2094,6 +2097,10 @@ replay:
                if (IS_ERR(dest_net))
                        return PTR_ERR(dest_net);
 
+               err = -EPERM;
+               if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
+                       goto out;
+
                dev = rtnl_create_link(dest_net, ifname, name_assign_type, ops, tb);
                if (IS_ERR(dev)) {
                        err = PTR_ERR(dev);
index 3b6899b7d810d569057b051162b51fde7c51cba8..cf3623863591d0e114b3987d35b1c11bcdc7399d 100644 (file)
@@ -209,7 +209,7 @@ EXPORT_SYMBOL(__scm_send);
 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
 {
        struct cmsghdr __user *cm
-               = (__force struct cmsghdr __user *)msg->msg_control;
+               = (struct cmsghdr __force_user *)msg->msg_control;
        struct cmsghdr cmhdr;
        int cmlen = CMSG_LEN(len);
        int err;
@@ -232,7 +232,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
        err = -EFAULT;
        if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
                goto out;
-       if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
+       if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
                goto out;
        cmlen = CMSG_SPACE(len);
        if (msg->msg_controllen < cmlen)
@@ -248,7 +248,7 @@ EXPORT_SYMBOL(put_cmsg);
 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
 {
        struct cmsghdr __user *cm
-               = (__force struct cmsghdr __user*)msg->msg_control;
+               = (struct cmsghdr __force_user *)msg->msg_control;
 
        int fdmax = 0;
        int fdnum = scm->fp->count;
@@ -268,7 +268,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
        if (fdnum < fdmax)
                fdmax = fdnum;
 
-       for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
+       for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
             i++, cmfptr++)
        {
                struct socket *sock;
index 62c67bebcaf5cb5f57ef8579778517ec1a2af593..01893a0a4b67e26fe8da1368f00f7b315bf901c8 100644 (file)
@@ -2123,7 +2123,7 @@ EXPORT_SYMBOL(__skb_checksum);
 __wsum skb_checksum(const struct sk_buff *skb, int offset,
                    int len, __wsum csum)
 {
-       const struct skb_checksum_ops ops = {
+       static const struct skb_checksum_ops ops = {
                .update  = csum_partial_ext,
                .combine = csum_block_add_ext,
        };
@@ -3363,12 +3363,14 @@ void __init skb_init(void)
        skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
                                              sizeof(struct sk_buff),
                                              0,
-                                             SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+                                             SLAB_HWCACHE_ALIGN|SLAB_PANIC|
+                                             SLAB_NO_SANITIZE,
                                              NULL);
        skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
                                                sizeof(struct sk_buff_fclones),
                                                0,
-                                               SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+                                               SLAB_HWCACHE_ALIGN|SLAB_PANIC|
+                                               SLAB_NO_SANITIZE,
                                                NULL);
 }
 
index 1c7a33db1314f3e2a7ded154b9ea498023d2d2e4..a3817e2a6553efc742e42ffc19220b7896e85eb2 100644 (file)
@@ -441,7 +441,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        struct sk_buff_head *list = &sk->sk_receive_queue;
 
        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
-               atomic_inc(&sk->sk_drops);
+               atomic_inc_unchecked(&sk->sk_drops);
                trace_sock_rcvqueue_full(sk, skb);
                return -ENOMEM;
        }
@@ -451,7 +451,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                return err;
 
        if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
-               atomic_inc(&sk->sk_drops);
+               atomic_inc_unchecked(&sk->sk_drops);
                return -ENOBUFS;
        }
 
@@ -464,7 +464,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        skb_dst_force(skb);
 
        spin_lock_irqsave(&list->lock, flags);
-       skb->dropcount = atomic_read(&sk->sk_drops);
+       skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
        __skb_queue_tail(list, skb);
        spin_unlock_irqrestore(&list->lock, flags);
 
@@ -484,7 +484,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
        skb->dev = NULL;
 
        if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
-               atomic_inc(&sk->sk_drops);
+               atomic_inc_unchecked(&sk->sk_drops);
                goto discard_and_relse;
        }
        if (nested)
@@ -502,7 +502,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
                mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
        } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
                bh_unlock_sock(sk);
-               atomic_inc(&sk->sk_drops);
+               atomic_inc_unchecked(&sk->sk_drops);
                goto discard_and_relse;
        }
 
@@ -888,6 +888,7 @@ set_rcvbuf:
                }
                break;
 
+#ifndef GRKERNSEC_BPF_HARDEN
        case SO_ATTACH_BPF:
                ret = -EINVAL;
                if (optlen == sizeof(u32)) {
@@ -900,7 +901,7 @@ set_rcvbuf:
                        ret = sk_attach_bpf(ufd, sk);
                }
                break;
-
+#endif
        case SO_DETACH_FILTER:
                ret = sk_detach_filter(sk);
                break;
@@ -1004,12 +1005,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                struct timeval tm;
        } v;
 
-       int lv = sizeof(int);
-       int len;
+       unsigned int lv = sizeof(int);
+       unsigned int len;
 
        if (get_user(len, optlen))
                return -EFAULT;
-       if (len < 0)
+       if (len > INT_MAX)
                return -EINVAL;
 
        memset(&v, 0, sizeof(v));
@@ -1147,11 +1148,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 
        case SO_PEERNAME:
        {
-               char address[128];
+               char address[_K_SS_MAXSIZE];
 
                if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
                        return -ENOTCONN;
-               if (lv < len)
+               if (lv < len || sizeof address < len)
                        return -EINVAL;
                if (copy_to_user(optval, address, len))
                        return -EFAULT;
@@ -1236,7 +1237,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 
        if (len > lv)
                len = lv;
-       if (copy_to_user(optval, &v, len))
+       if (len > sizeof(v) || copy_to_user(optval, &v, len))
                return -EFAULT;
 lenout:
        if (put_user(len, optlen))
@@ -2349,7 +2350,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
         */
        smp_wmb();
        atomic_set(&sk->sk_refcnt, 1);
-       atomic_set(&sk->sk_drops, 0);
+       atomic_set_unchecked(&sk->sk_drops, 0);
 }
 EXPORT_SYMBOL(sock_init_data);
 
@@ -2477,6 +2478,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
                       int level, int type)
 {
+       struct sock_extended_err ee;
        struct sock_exterr_skb *serr;
        struct sk_buff *skb;
        int copied, err;
@@ -2498,7 +2500,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
        sock_recv_timestamp(msg, sk, skb);
 
        serr = SKB_EXT_ERR(skb);
-       put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
+       ee = serr->ee;
+       put_cmsg(msg, level, type, sizeof ee, &ee);
 
        msg->msg_flags |= MSG_ERRQUEUE;
        err = copied;
index ad704c757bb4a6e48c685fe0478ce6525b08a3ab..ca48aff690ba580c12914a98bf254e2bc69474aa 100644 (file)
@@ -9,26 +9,33 @@
 #include <linux/inet_diag.h>
 #include <linux/sock_diag.h>
 
-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
 static DEFINE_MUTEX(sock_diag_table_mutex);
 
 int sock_diag_check_cookie(void *sk, __u32 *cookie)
 {
+#ifndef CONFIG_GRKERNSEC_HIDESYM
        if ((cookie[0] != INET_DIAG_NOCOOKIE ||
             cookie[1] != INET_DIAG_NOCOOKIE) &&
            ((u32)(unsigned long)sk != cookie[0] ||
             (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
                return -ESTALE;
        else
+#endif
                return 0;
 }
 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
 
 void sock_diag_save_cookie(void *sk, __u32 *cookie)
 {
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+       cookie[0] = 0;
+       cookie[1] = 0;
+#else
        cookie[0] = (u32)(unsigned long)sk;
        cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
+#endif
 }
 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
 
@@ -110,8 +117,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
        mutex_lock(&sock_diag_table_mutex);
        if (sock_diag_handlers[hndl->family])
                err = -EBUSY;
-       else
+       else {
+               pax_open_kernel();
                sock_diag_handlers[hndl->family] = hndl;
+               pax_close_kernel();
+       }
        mutex_unlock(&sock_diag_table_mutex);
 
        return err;
@@ -127,7 +137,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
 
        mutex_lock(&sock_diag_table_mutex);
        BUG_ON(sock_diag_handlers[family] != hnld);
+       pax_open_kernel();
        sock_diag_handlers[family] = NULL;
+       pax_close_kernel();
        mutex_unlock(&sock_diag_table_mutex);
 }
 EXPORT_SYMBOL_GPL(sock_diag_unregister);
index 31baba2a71ce15e49450f69dae81e7d3be1ff3f2..754e2e583479050e517cec13a2e791dba602f2e0 100644 (file)
@@ -25,6 +25,8 @@
 static int zero = 0;
 static int one = 1;
 static int ushort_max = USHRT_MAX;
+static int min_sndbuf = SOCK_MIN_SNDBUF;
+static int min_rcvbuf = SOCK_MIN_RCVBUF;
 
 static int net_msg_warn;       /* Unused, but still a sysctl */
 
@@ -34,7 +36,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
 {
        unsigned int orig_size, size;
        int ret, i;
-       struct ctl_table tmp = {
+       ctl_table_no_const tmp = {
                .data = &size,
                .maxlen = sizeof(size),
                .mode = table->mode
@@ -202,7 +204,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        char id[IFNAMSIZ];
-       struct ctl_table tbl = {
+       ctl_table_no_const tbl = {
                .data = id,
                .maxlen = IFNAMSIZ,
        };
@@ -220,7 +222,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
 static int proc_do_rss_key(struct ctl_table *table, int write,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       struct ctl_table fake_table;
+       ctl_table_no_const fake_table;
        char buf[NETDEV_RSS_KEY_LEN * 3];
 
        snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
@@ -237,7 +239,7 @@ static struct ctl_table net_core_table[] = {
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
+               .extra1         = &min_sndbuf,
        },
        {
                .procname       = "rmem_max",
@@ -245,7 +247,7 @@ static struct ctl_table net_core_table[] = {
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
+               .extra1         = &min_rcvbuf,
        },
        {
                .procname       = "wmem_default",
@@ -253,7 +255,7 @@ static struct ctl_table net_core_table[] = {
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
+               .extra1         = &min_sndbuf,
        },
        {
                .procname       = "rmem_default",
@@ -261,7 +263,7 @@ static struct ctl_table net_core_table[] = {
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
+               .extra1         = &min_rcvbuf,
        },
        {
                .procname       = "dev_weight",
@@ -284,7 +286,7 @@ static struct ctl_table net_core_table[] = {
                .mode           = 0444,
                .proc_handler   = proc_do_rss_key,
        },
-#ifdef CONFIG_BPF_JIT
+#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
        {
                .procname       = "bpf_jit_enable",
                .data           = &bpf_jit_enable,
@@ -400,13 +402,12 @@ static struct ctl_table netns_core_table[] = {
 
 static __net_init int sysctl_core_net_init(struct net *net)
 {
-       struct ctl_table *tbl;
+       ctl_table_no_const *tbl = NULL;
 
        net->core.sysctl_somaxconn = SOMAXCONN;
 
-       tbl = netns_core_table;
        if (!net_eq(net, &init_net)) {
-               tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
+               tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
                if (tbl == NULL)
                        goto err_dup;
 
@@ -416,17 +417,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
                if (net->user_ns != &init_user_ns) {
                        tbl[0].procname = NULL;
                }
-       }
-
-       net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
+               net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
+       } else
+               net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
        if (net->core.sysctl_hdr == NULL)
                goto err_reg;
 
        return 0;
 
 err_reg:
-       if (tbl != netns_core_table)
-               kfree(tbl);
+       kfree(tbl);
 err_dup:
        return -ENOMEM;
 }
@@ -441,7 +441,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
        kfree(tbl);
 }
 
-static __net_initdata struct pernet_operations sysctl_core_ops = {
+static __net_initconst struct pernet_operations sysctl_core_ops = {
        .init = sysctl_core_net_init,
        .exit = sysctl_core_net_exit,
 };
index 810228646de38f9fe26eb2c75a84fbc000840f7b..a0c2755680d02ae16ca7107ae5640db0e5ac089d 100644 (file)
@@ -466,6 +466,7 @@ static struct proto dn_proto = {
        .sysctl_rmem            = sysctl_decnet_rmem,
        .max_header             = DN_MAX_NSP_DATA_HEADER + 64,
        .obj_size               = sizeof(struct dn_sock),
+       .slab_flags             = SLAB_USERCOPY,
 };
 
 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
index 4400da7739dafb3c3d42e52829086b9d3e418505..34299720c761ea865a51dc5bdf180b0ac3d4946b 100644 (file)
@@ -201,7 +201,7 @@ static struct dn_dev_sysctl_table {
                .extra1 = &min_t3,
                .extra2 = &max_t3
        },
-       {0}
+       { }
        },
 };
 
index 5325b541c526d9d5e92b7481302dde6ce5e7d65c..a0d4d69ff31f260674f3fcca372874e5ca660781 100644 (file)
@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
 
        if (len > *lenp) len = *lenp;
 
-       if (copy_to_user(buffer, addr, len))
+       if (len > sizeof addr || copy_to_user(buffer, addr, len))
                return -EFAULT;
 
        *lenp = len;
@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
 
        if (len > *lenp) len = *lenp;
 
-       if (copy_to_user(buffer, devname, len))
+       if (len > sizeof devname || copy_to_user(buffer, devname, len))
                return -EFAULT;
 
        *lenp = len;
index a2c7e4c0ac1ed8929f14786a09965663333f9619..3dc9f67a78380c5c57d736d9f74c040766b15d17 100644 (file)
@@ -102,7 +102,7 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
-static struct rtnl_link_ops hsr_link_ops __read_mostly = {
+static struct rtnl_link_ops hsr_link_ops = {
        .kind           = "hsr",
        .maxtype        = IFLA_HSR_MAX,
        .policy         = hsr_policy,
index 27eaa65e88e16ad51a70c33711f6f91d0e8fd61c..70832172c446f328b1dfdbfc8049cea47a5f441c 100644 (file)
@@ -642,7 +642,7 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
        dev_put(real_dev);
 }
 
-static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
+static struct rtnl_link_ops lowpan_link_ops = {
        .kind           = "lowpan",
        .priv_size      = sizeof(struct lowpan_dev_info),
        .setup          = lowpan_setup,
index 9d980ed3ffe2c5ed5d1683f9ee1a962751706e21..7d01e12356df6e56dd061a8f13262f65a9e1f4ee 100644 (file)
@@ -435,14 +435,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
 
 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
 {
-       struct ctl_table *table;
+       ctl_table_no_const *table = NULL;
        struct ctl_table_header *hdr;
        struct netns_ieee802154_lowpan *ieee802154_lowpan =
                net_ieee802154_lowpan(net);
 
-       table = lowpan_frags_ns_ctl_table;
        if (!net_eq(net, &init_net)) {
-               table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
+               table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
                                GFP_KERNEL);
                if (table == NULL)
                        goto err_alloc;
@@ -457,9 +456,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
                /* Don't export sysctls to unprivileged users */
                if (net->user_ns != &init_user_ns)
                        table[0].procname = NULL;
-       }
-
-       hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
+               hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
+       } else
+               hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
        if (hdr == NULL)
                goto err_reg;
 
@@ -467,8 +466,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
        return 0;
 
 err_reg:
-       if (!net_eq(net, &init_net))
-               kfree(table);
+       kfree(table);
 err_alloc:
        return -ENOMEM;
 }
index a44773c8346c13c24535448f7e33105c894ac279..a6ae41542009cbb872286ab426baf1ce6f241b00 100644 (file)
@@ -1392,7 +1392,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
                return ip_recv_error(sk, msg, len, addr_len);
 #if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == AF_INET6)
-               return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
+               return pingv6_ops->ipv6_recv_error(sk, msg, len, addr_len);
 #endif
        return -EINVAL;
 }
index 214882e7d6deea0114531124689027dfdde83df6..ec032f695eef76a501eae1d6368cf8354d62334a 100644 (file)
@@ -69,7 +69,8 @@
 
 static struct ipv4_devconf ipv4_devconf = {
        .data = {
-               [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
+               [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
+               [IPV4_DEVCONF_RP_FILTER - 1] = 1,
                [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
                [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
                [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
@@ -80,7 +81,8 @@ static struct ipv4_devconf ipv4_devconf = {
 
 static struct ipv4_devconf ipv4_devconf_dflt = {
        .data = {
-               [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
+               [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
+               [IPV4_DEVCONF_RP_FILTER - 1] = 1,
                [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
                [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
                [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
@@ -1548,7 +1550,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
                idx = 0;
                head = &net->dev_index_head[h];
                rcu_read_lock();
-               cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
+               cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
                          net->dev_base_seq;
                hlist_for_each_entry_rcu(dev, head, index_hlist) {
                        if (idx < s_idx)
@@ -1866,7 +1868,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
                idx = 0;
                head = &net->dev_index_head[h];
                rcu_read_lock();
-               cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
+               cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
                          net->dev_base_seq;
                hlist_for_each_entry_rcu(dev, head, index_hlist) {
                        if (idx < s_idx)
@@ -2101,7 +2103,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
        DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
 
-static struct devinet_sysctl_table {
+static const struct devinet_sysctl_table {
        struct ctl_table_header *sysctl_header;
        struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
 } devinet_sysctl = {
@@ -2233,7 +2235,7 @@ static __net_init int devinet_init_net(struct net *net)
        int err;
        struct ipv4_devconf *all, *dflt;
 #ifdef CONFIG_SYSCTL
-       struct ctl_table *tbl = ctl_forward_entry;
+       ctl_table_no_const *tbl = NULL;
        struct ctl_table_header *forw_hdr;
 #endif
 
@@ -2251,7 +2253,7 @@ static __net_init int devinet_init_net(struct net *net)
                        goto err_alloc_dflt;
 
 #ifdef CONFIG_SYSCTL
-               tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
+               tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
                if (tbl == NULL)
                        goto err_alloc_ctl;
 
@@ -2271,7 +2273,10 @@ static __net_init int devinet_init_net(struct net *net)
                goto err_reg_dflt;
 
        err = -ENOMEM;
-       forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
+       if (!net_eq(net, &init_net))
+               forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
+       else
+               forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
        if (forw_hdr == NULL)
                goto err_reg_ctl;
        net->ipv4.forw_hdr = forw_hdr;
@@ -2287,8 +2292,7 @@ err_reg_ctl:
 err_reg_dflt:
        __devinet_sysctl_unregister(all);
 err_reg_all:
-       if (tbl != ctl_forward_entry)
-               kfree(tbl);
+       kfree(tbl);
 err_alloc_ctl:
 #endif
        if (dflt != &ipv4_devconf_dflt)
index 23104a3f29245abeb682089185bd103ae4794cb2..9f5570bbbf818806cbc21fe2db36cac421c410d2 100644 (file)
@@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
                fib_sync_up(dev);
 #endif
-               atomic_inc(&net->ipv4.dev_addr_genid);
+               atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
                rt_cache_flush(dev_net(dev));
                break;
        case NETDEV_DOWN:
                fib_del_ifaddr(ifa, NULL);
-               atomic_inc(&net->ipv4.dev_addr_genid);
+               atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
                if (ifa->ifa_dev->ifa_list == NULL) {
                        /* Last address was deleted from this interface.
                         * Disable IP.
@@ -1060,7 +1060,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
                fib_sync_up(dev);
 #endif
-               atomic_inc(&net->ipv4.dev_addr_genid);
+               atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
                rt_cache_flush(net);
                break;
        case NETDEV_DOWN:
index f99f41bd15b83072d7a67cd452a29ce550e0593e..1879da92c21c014c32f3529fe57bac75fd8eb0a9 100644 (file)
@@ -770,7 +770,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
        nh->nh_saddr = inet_select_addr(nh->nh_dev,
                                        nh->nh_gw,
                                        nh->nh_parent->fib_scope);
-       nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
+       nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
 
        return nh->nh_saddr;
 }
index e34dccbc4d70bd26f7cec88c031dcea31124314a..4eeba4e497a02b2945fd61fba8ee10bf5731a861 100644 (file)
@@ -71,6 +71,20 @@ static inline void inet_diag_unlock_handler(
        mutex_unlock(&inet_diag_table_mutex);
 }
 
+static size_t inet_sk_attr_size(void)
+{
+       return    nla_total_size(sizeof(struct tcp_info))
+               + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
+               + nla_total_size(1) /* INET_DIAG_TOS */
+               + nla_total_size(1) /* INET_DIAG_TCLASS */
+               + nla_total_size(sizeof(struct inet_diag_meminfo))
+               + nla_total_size(sizeof(struct inet_diag_msg))
+               + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
+               + nla_total_size(TCP_CA_NAME_MAX)
+               + nla_total_size(sizeof(struct tcpvegas_info))
+               + 64;
+}
+
 int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                              struct sk_buff *skb, struct inet_diag_req_v2 *req,
                              struct user_namespace *user_ns,                   
@@ -324,9 +338,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
        if (err)
                goto out;
 
-       rep = nlmsg_new(sizeof(struct inet_diag_msg) +
-                       sizeof(struct inet_diag_meminfo) +
-                       sizeof(struct tcp_info) + 64, GFP_KERNEL);
+       rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
        if (!rep) {
                err = -ENOMEM;
                goto out;
index 9111a4e221557173df0ce08e95632ee059d00b61..357690540287dd16b3e929c6ab86a885b298fc9a 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/wait.h>
+#include <linux/security.h>
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_hashtables.h>
@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
        return inet_ehashfn(net, laddr, lport, faddr, fport);
 }
 
+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
+
 /*
  * Allocate and initialize a new local port bind bucket.
  * The bindhash mutex for snum's hash chain must be held here.
@@ -554,6 +557,8 @@ ok:
                        twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
                spin_unlock(&head->lock);
 
+               gr_update_task_in_ip_table(inet_sk(sk));
+
                if (tw) {
                        inet_twsk_deschedule(tw, death_row);
                        while (twrefcnt) {
index 241afd743d2ccfda93bfb42b928020f2d9dc0486..31b95d513c20588be49b00f2ee559768e814fdd9 100644 (file)
@@ -461,7 +461,7 @@ relookup:
        if (p) {
                p->daddr = *daddr;
                atomic_set(&p->refcnt, 1);
-               atomic_set(&p->rid, 0);
+               atomic_set_unchecked(&p->rid, 0);
                p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
                p->rate_tokens = 0;
                /* 60*HZ is arbitrary, but chosen enough high so that the first
index 145a50c4d56630a5fc97283d85c3fa29e10ab476..5dd8cc5fbd2ae6f548af94a03969523bb8b5bb72 100644 (file)
@@ -268,7 +268,7 @@ static int ip_frag_too_far(struct ipq *qp)
                return 0;
 
        start = qp->rid;
-       end = atomic_inc_return(&peer->rid);
+       end = atomic_inc_return_unchecked(&peer->rid);
        qp->rid = end;
 
        rc = qp->q.fragments && (end - start) > max;
@@ -748,12 +748,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
 
 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
 {
-       struct ctl_table *table;
+       ctl_table_no_const *table = NULL;
        struct ctl_table_header *hdr;
 
-       table = ip4_frags_ns_ctl_table;
        if (!net_eq(net, &init_net)) {
-               table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
+               table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
                if (table == NULL)
                        goto err_alloc;
 
@@ -767,9 +766,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
                /* Don't export sysctls to unprivileged users */
                if (net->user_ns != &init_user_ns)
                        table[0].procname = NULL;
-       }
+               hdr = register_net_sysctl(net, "net/ipv4", table);
+       } else
+               hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
 
-       hdr = register_net_sysctl(net, "net/ipv4", table);
        if (hdr == NULL)
                goto err_reg;
 
@@ -777,8 +777,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
        return 0;
 
 err_reg:
-       if (!net_eq(net, &init_net))
-               kfree(table);
+       kfree(table);
 err_alloc:
        return -ENOMEM;
 }
index 4f4bf5b99686ee8f13cfd353b7dca307f9260dbc..2c936fe735e40d7e7dd9e703534f5ef9b30bba6a 100644 (file)
@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
 module_param(log_ecn_error, bool, 0644);
 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 
-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
+static struct rtnl_link_ops ipgre_link_ops;
 static int ipgre_tunnel_init(struct net_device *dev);
 
 static int ipgre_net_id __read_mostly;
@@ -816,7 +816,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
        [IFLA_GRE_ENCAP_DPORT]  = { .type = NLA_U16 },
 };
 
-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
+static struct rtnl_link_ops ipgre_link_ops = {
        .kind           = "gre",
        .maxtype        = IFLA_GRE_MAX,
        .policy         = ipgre_policy,
@@ -830,7 +830,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
        .fill_info      = ipgre_fill_info,
 };
 
-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
+static struct rtnl_link_ops ipgre_tap_ops = {
        .kind           = "gretap",
        .maxtype        = IFLA_GRE_MAX,
        .policy         = ipgre_policy,
index 3d4da2c16b6a3c6d6bc41d8fec0ed182037b0801..40f9c295f2b30388c9ac940345a841bf1fd7e710 100644 (file)
 #include <linux/mroute.h>
 #include <linux/netlink.h>
 
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+extern int grsec_enable_blackhole;
+#endif
+
 /*
  *     Process Router Attention IP option (RFC 2113)
  */
@@ -223,6 +227,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
                        if (!raw) {
                                if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                                        IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+                                       if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
+#endif
                                        icmp_send(skb, ICMP_DEST_UNREACH,
                                                  ICMP_PROT_UNREACH, 0);
                                }
index 6b85adb05003cb775b538aacb81a37ec398d8b3c..cd7e5d3586ce33d48460eaaa00c48b90f87577d4 100644 (file)
@@ -1193,7 +1193,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
                len = min_t(unsigned int, len, opt->optlen);
                if (put_user(len, optlen))
                        return -EFAULT;
-               if (copy_to_user(optval, opt->__data, len))
+               if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
+                   copy_to_user(optval, opt->__data, len))
                        return -EFAULT;
                return 0;
        }
@@ -1324,7 +1325,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
                if (sk->sk_type != SOCK_STREAM)
                        return -ENOPROTOOPT;
 
-               msg.msg_control = (__force void *) optval;
+               msg.msg_control = (__force_kernel void *) optval;
                msg.msg_controllen = len;
                msg.msg_flags = flags;
 
index 1a7e979e80ba356f685ecfe020b98855f19db0a3..fd05aa4ef4a62b97f51cbb451f6af3745dcb71b9 100644 (file)
@@ -45,7 +45,7 @@
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 
-static struct rtnl_link_ops vti_link_ops __read_mostly;
+static struct rtnl_link_ops vti_link_ops;
 
 static int vti_net_id __read_mostly;
 static int vti_tunnel_init(struct net_device *dev);
@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
        [IFLA_VTI_REMOTE]       = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
 };
 
-static struct rtnl_link_ops vti_link_ops __read_mostly = {
+static struct rtnl_link_ops vti_link_ops = {
        .kind           = "vti",
        .maxtype        = IFLA_VTI_MAX,
        .policy         = vti_policy,
index 7fa18bc7e47fd3199c18247f19390d61f6e83013..bea16aff03b251ac6649ffe1d417b4f4d1e23e3e 100644 (file)
@@ -333,7 +333,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
 
        mm_segment_t oldfs = get_fs();
        set_fs(get_ds());
-       res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
+       res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
        set_fs(oldfs);
        return res;
 }
@@ -344,7 +344,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
 
        mm_segment_t oldfs = get_fs();
        set_fs(get_ds());
-       res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
+       res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
        set_fs(oldfs);
        return res;
 }
@@ -355,7 +355,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
 
        mm_segment_t oldfs = get_fs();
        set_fs(get_ds());
-       res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
+       res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
        set_fs(oldfs);
        return res;
 }
index 40403114f00a115a6726e0a1b322aa01ca4db0b0..c35c647efdb45c466e941bfddc3a07b4749a3efd 100644 (file)
@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 static int ipip_net_id __read_mostly;
 
 static int ipip_tunnel_init(struct net_device *dev);
-static struct rtnl_link_ops ipip_link_ops __read_mostly;
+static struct rtnl_link_ops ipip_link_ops;
 
 static int ipip_err(struct sk_buff *skb, u32 info)
 {
@@ -487,7 +487,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
        [IFLA_IPTUN_ENCAP_DPORT]        = { .type = NLA_U16 },
 };
 
-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
+static struct rtnl_link_ops ipip_link_ops = {
        .kind           = "ipip",
        .maxtype        = IFLA_IPTUN_MAX,
        .policy         = ipip_policy,
index f95b6f93814b95b2c810eff8d4573a996f9a9f63..2ee209760adcc32aef8b68c8dfd8c2a2f5851f44 100644 (file)
@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
 #endif
 
 static int get_info(struct net *net, void __user *user,
-                    const int *len, int compat)
+                    int len, int compat)
 {
        char name[XT_TABLE_MAXNAMELEN];
        struct xt_table *t;
        int ret;
 
-       if (*len != sizeof(struct arpt_getinfo)) {
-               duprintf("length %u != %Zu\n", *len,
+       if (len != sizeof(struct arpt_getinfo)) {
+               duprintf("length %u != %Zu\n", len,
                         sizeof(struct arpt_getinfo));
                return -EINVAL;
        }
@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
                info.size = private->size;
                strcpy(info.name, name);
 
-               if (copy_to_user(user, &info, *len) != 0)
+               if (copy_to_user(user, &info, len) != 0)
                        ret = -EFAULT;
                else
                        ret = 0;
@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
 
        switch (cmd) {
        case ARPT_SO_GET_INFO:
-               ret = get_info(sock_net(sk), user, len, 1);
+               ret = get_info(sock_net(sk), user, *len, 1);
                break;
        case ARPT_SO_GET_ENTRIES:
                ret = compat_get_entries(sock_net(sk), user, len);
@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
 
        switch (cmd) {
        case ARPT_SO_GET_INFO:
-               ret = get_info(sock_net(sk), user, len, 0);
+               ret = get_info(sock_net(sk), user, *len, 0);
                break;
 
        case ARPT_SO_GET_ENTRIES:
index 99e810f84671bbdb80e33c6915cc7be49ba0f1bb..3711b818c2297fd5a7472a2d549f4d018a5e7754 100644 (file)
@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
 #endif
 
 static int get_info(struct net *net, void __user *user,
-                    const int *len, int compat)
+                    int len, int compat)
 {
        char name[XT_TABLE_MAXNAMELEN];
        struct xt_table *t;
        int ret;
 
-       if (*len != sizeof(struct ipt_getinfo)) {
-               duprintf("length %u != %zu\n", *len,
+       if (len != sizeof(struct ipt_getinfo)) {
+               duprintf("length %u != %zu\n", len,
                         sizeof(struct ipt_getinfo));
                return -EINVAL;
        }
@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
                info.size = private->size;
                strcpy(info.name, name);
 
-               if (copy_to_user(user, &info, *len) != 0)
+               if (copy_to_user(user, &info, len) != 0)
                        ret = -EFAULT;
                else
                        ret = 0;
@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
 
        switch (cmd) {
        case IPT_SO_GET_INFO:
-               ret = get_info(sock_net(sk), user, len, 1);
+               ret = get_info(sock_net(sk), user, *len, 1);
                break;
        case IPT_SO_GET_ENTRIES:
                ret = compat_get_entries(sock_net(sk), user, len);
@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
 
        switch (cmd) {
        case IPT_SO_GET_INFO:
-               ret = get_info(sock_net(sk), user, len, 0);
+               ret = get_info(sock_net(sk), user, *len, 0);
                break;
 
        case IPT_SO_GET_ENTRIES:
index e90f83a3415b464014cecf0f072b2188f6889b7a..3e6accaf64fb699fc46f1088c1b90449cb3b8a41 100644 (file)
@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
        spin_lock_init(&cn->lock);
 
 #ifdef CONFIG_PROC_FS
-       cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
+       cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
        if (!cn->procdir) {
                pr_err("Unable to proc dir entry\n");
                return -ENOMEM;
index 0ae28f517a9b5354e51f64dd8fc116023b323de4..d32b56559fb8eb6726dcc87716de1a27ef525c9e 100644 (file)
@@ -59,7 +59,7 @@ struct ping_table {
 };
 
 static struct ping_table ping_table;
-struct pingv6_ops pingv6_ops;
+struct pingv6_ops *pingv6_ops;
 EXPORT_SYMBOL_GPL(pingv6_ops);
 
 static u16 ping_port_rover;
@@ -358,7 +358,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
                                return -ENODEV;
                        }
                }
-               has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
+               has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
                                                    scoped);
                rcu_read_unlock();
 
@@ -566,7 +566,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
                }
 #if IS_ENABLED(CONFIG_IPV6)
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
-               harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
+               harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
 #endif
        }
 
@@ -584,7 +584,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
                                      info, (u8 *)icmph);
 #if IS_ENABLED(CONFIG_IPV6)
                } else if (family == AF_INET6) {
-                       pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
+                       pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
                                                   info, (u8 *)icmph);
 #endif
                }
@@ -919,10 +919,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                }
 
                if (inet6_sk(sk)->rxopt.all)
-                       pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
+                       pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
                if (skb->protocol == htons(ETH_P_IPV6) &&
                    inet6_sk(sk)->rxopt.all)
-                       pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
+                       pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
                else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
                        ip_cmsg_recv(msg, skb);
 #endif
@@ -1117,7 +1117,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
                from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
                0, sock_i_ino(sp),
                atomic_read(&sp->sk_refcnt), sp,
-               atomic_read(&sp->sk_drops));
+               atomic_read_unchecked(&sp->sk_drops));
 }
 
 static int ping_v4_seq_show(struct seq_file *seq, void *v)
index 0bb68df5055d2d3f92cb06e829a997b44b512d65..59405fc367ec6ed1b2dc4e66d652264417de4f3d 100644 (file)
@@ -324,7 +324,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
 int raw_rcv(struct sock *sk, struct sk_buff *skb)
 {
        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
-               atomic_inc(&sk->sk_drops);
+               atomic_inc_unchecked(&sk->sk_drops);
                kfree_skb(skb);
                return NET_RX_DROP;
        }
@@ -774,16 +774,20 @@ static int raw_init(struct sock *sk)
 
 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
 {
+       struct icmp_filter filter;
+
        if (optlen > sizeof(struct icmp_filter))
                optlen = sizeof(struct icmp_filter);
-       if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
+       if (copy_from_user(&filter, optval, optlen))
                return -EFAULT;
+       raw_sk(sk)->filter = filter;
        return 0;
 }
 
 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
 {
        int len, ret = -EFAULT;
+       struct icmp_filter filter;
 
        if (get_user(len, optlen))
                goto out;
@@ -793,8 +797,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
        if (len > sizeof(struct icmp_filter))
                len = sizeof(struct icmp_filter);
        ret = -EFAULT;
-       if (put_user(len, optlen) ||
-           copy_to_user(optval, &raw_sk(sk)->filter, len))
+       filter = raw_sk(sk)->filter;
+       if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
                goto out;
        ret = 0;
 out:   return ret;
@@ -1023,7 +1027,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
                0, 0L, 0,
                from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
                0, sock_i_ino(sp),
-               atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
+               atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
 }
 
 static int raw_seq_show(struct seq_file *seq, void *v)
index 52e1f2bf0ca2ff2fe3d85086465068da4bbb43f2..e736cb47d9d9afe8c378677b123bee69e9786e43 100644 (file)
@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
 
 static int rt_cache_seq_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &rt_cache_seq_ops);
+       return seq_open_restrict(file, &rt_cache_seq_ops);
 }
 
 static const struct file_operations rt_cache_seq_fops = {
@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
 
 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &rt_cpu_seq_ops);
+       return seq_open_restrict(file, &rt_cpu_seq_ops);
 }
 
 static const struct file_operations rt_cpu_seq_fops = {
@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
 
 static int rt_acct_proc_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, rt_acct_proc_show, NULL);
+       return single_open_restrict(file, rt_acct_proc_show, NULL);
 }
 
 static const struct file_operations rt_acct_proc_fops = {
@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
 
 #define IP_IDENTS_SZ 2048u
 struct ip_ident_bucket {
-       atomic_t        id;
+       atomic_unchecked_t      id;
        u32             stamp32;
 };
 
-static struct ip_ident_bucket *ip_idents __read_mostly;
+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
 
 /* In order to protect privacy, we add a perturbation to identifiers
  * if one generator is seldom used. This makes hard for an attacker
@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
        if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
                delta = prandom_u32_max(now - old);
 
-       return atomic_add_return(segs + delta, &bucket->id) - segs;
+       return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
 }
 EXPORT_SYMBOL(ip_idents_reserve);
 
@@ -2628,34 +2628,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
                .maxlen         = sizeof(int),
                .mode           = 0200,
                .proc_handler   = ipv4_sysctl_rtcache_flush,
+               .extra1         = &init_net,
        },
        { },
 };
 
 static __net_init int sysctl_route_net_init(struct net *net)
 {
-       struct ctl_table *tbl;
+       ctl_table_no_const *tbl = NULL;
 
-       tbl = ipv4_route_flush_table;
        if (!net_eq(net, &init_net)) {
-               tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
+               tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
                if (tbl == NULL)
                        goto err_dup;
 
                /* Don't export sysctls to unprivileged users */
                if (net->user_ns != &init_user_ns)
                        tbl[0].procname = NULL;
-       }
-       tbl[0].extra1 = net;
+               tbl[0].extra1 = net;
+               net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
+       } else
+               net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
 
-       net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
        if (net->ipv4.route_hdr == NULL)
                goto err_reg;
        return 0;
 
 err_reg:
-       if (tbl != ipv4_route_flush_table)
-               kfree(tbl);
+       kfree(tbl);
 err_dup:
        return -ENOMEM;
 }
@@ -2678,8 +2678,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
 
 static __net_init int rt_genid_init(struct net *net)
 {
-       atomic_set(&net->ipv4.rt_genid, 0);
-       atomic_set(&net->fnhe_genid, 0);
+       atomic_set_unchecked(&net->ipv4.rt_genid, 0);
+       atomic_set_unchecked(&net->fnhe_genid, 0);
        get_random_bytes(&net->ipv4.dev_addr_genid,
                         sizeof(net->ipv4.dev_addr_genid));
        return 0;
@@ -2722,11 +2722,7 @@ int __init ip_rt_init(void)
 {
        int rc = 0;
 
-       ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
-       if (!ip_idents)
-               panic("IP: failed to allocate ip_idents\n");
-
-       prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
+       prandom_bytes(ip_idents, sizeof(ip_idents));
 
 #ifdef CONFIG_IP_ROUTE_CLASSID
        ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
index e0ee384a448fb0e6eb5b957d98dbcb272ea97edb..e2688d9154c34339389247bca7ed87859c0426b7 100644 (file)
@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
                container_of(table->data, struct net, ipv4.ip_local_ports.range);
        int ret;
        int range[2];
-       struct ctl_table tmp = {
+       ctl_table_no_const tmp = {
                .data = &range,
                .maxlen = sizeof(range),
                .mode = table->mode,
@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
        int ret;
        gid_t urange[2];
        kgid_t low, high;
-       struct ctl_table tmp = {
+       ctl_table_no_const tmp = {
                .data = &urange,
                .maxlen = sizeof(urange),
                .mode = table->mode,
@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
                                       void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        char val[TCP_CA_NAME_MAX];
-       struct ctl_table tbl = {
+       ctl_table_no_const tbl = {
                .data = val,
                .maxlen = TCP_CA_NAME_MAX,
        };
@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
                                                 void __user *buffer, size_t *lenp,
                                                 loff_t *ppos)
 {
-       struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
+       ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
        int ret;
 
        tbl.data = kmalloc(tbl.maxlen, GFP_USER);
@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
                                           void __user *buffer, size_t *lenp,
                                           loff_t *ppos)
 {
-       struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
+       ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
        int ret;
 
        tbl.data = kmalloc(tbl.maxlen, GFP_USER);
@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
                                 void __user *buffer, size_t *lenp,
                                 loff_t *ppos)
 {
-       struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
+       ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
        struct tcp_fastopen_context *ctxt;
        int ret;
        u32  user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
@@ -881,13 +881,12 @@ static struct ctl_table ipv4_net_table[] = {
 
 static __net_init int ipv4_sysctl_init_net(struct net *net)
 {
-       struct ctl_table *table;
+       ctl_table_no_const *table = NULL;
 
-       table = ipv4_net_table;
        if (!net_eq(net, &init_net)) {
                int i;
 
-               table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
+               table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
                if (table == NULL)
                        goto err_alloc;
 
@@ -896,7 +895,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
                        table[i].data += (void *)net - (void *)&init_net;
        }
 
-       net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
+       if (!net_eq(net, &init_net))
+               net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
+       else
+               net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
        if (net->ipv4.ipv4_hdr == NULL)
                goto err_reg;
 
index 075ab4d5af5e46e7a5a177a324228df592cbe5e3..623bb9d88bdd0105fabffb38e81177bdfb23f337 100644 (file)
@@ -766,7 +766,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
         * without any lock. We want to make sure compiler wont store
         * intermediate values in this location.
         */
-       ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
+       ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
                                                sk->sk_max_pacing_rate);
 }
 
@@ -4528,7 +4528,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
  * simplifies code)
  */
 static void
-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
             struct sk_buff *head, struct sk_buff *tail,
             u32 start, u32 end)
 {
@@ -5506,6 +5506,7 @@ discard:
            tcp_paws_reject(&tp->rx_opt, 0))
                goto discard_and_undo;
 
+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
        if (th->syn) {
                /* We see SYN without ACK. It is attempt of
                 * simultaneous connect with crossed SYNs.
@@ -5556,6 +5557,7 @@ discard:
                goto discard;
 #endif
        }
+#endif
        /* "fifth, if neither of the SYN or RST bits is set then
         * drop the segment and return."
         */
@@ -5602,7 +5604,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                        goto discard;
 
                if (th->syn) {
-                       if (th->fin)
+                       if (th->fin || th->urg || th->psh)
                                goto discard;
                        if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
                                return 1;
index d22f54482babf8bbd41972596d01326e4b06f060..62f6787ae41a5bdd477f9ed0ed185e2e94d3d54e 100644 (file)
@@ -89,6 +89,10 @@ int sysctl_tcp_tw_reuse __read_mostly;
 int sysctl_tcp_low_latency __read_mostly;
 EXPORT_SYMBOL(sysctl_tcp_low_latency);
 
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+extern int grsec_enable_blackhole;
+#endif
+
 #ifdef CONFIG_TCP_MD5SIG
 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
                               __be32 daddr, __be32 saddr, const struct tcphdr *th);
@@ -1473,6 +1477,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
        return 0;
 
 reset:
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+       if (!grsec_enable_blackhole)
+#endif
        tcp_v4_send_reset(rsk, skb);
 discard:
        kfree_skb(skb);
@@ -1637,12 +1644,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
        TCP_SKB_CB(skb)->sacked  = 0;
 
        sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
-       if (!sk)
+       if (!sk) {
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+               ret = 1;
+#endif
                goto no_tcp_socket;
-
+       }
 process:
-       if (sk->sk_state == TCP_TIME_WAIT)
+       if (sk->sk_state == TCP_TIME_WAIT) {
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+               ret = 2;
+#endif
                goto do_time_wait;
+       }
 
        if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
                NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
@@ -1698,6 +1712,10 @@ csum_error:
 bad_packet:
                TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
        } else {
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+               if (!grsec_enable_blackhole || (ret == 1 &&
+                   (skb->dev->flags & IFF_LOOPBACK)))
+#endif
                tcp_v4_send_reset(NULL, skb);
        }
 
index 63d2680b65db36c93737f8c72df66263dfde06bf..2db9d6bbbd509106ad76b184ea8ff608f7a8f835 100644 (file)
 #include <net/inet_common.h>
 #include <net/xfrm.h>
 
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+extern int grsec_enable_blackhole;
+#endif
+
 int sysctl_tcp_syncookies __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_syncookies);
 
@@ -739,7 +743,10 @@ embryonic_reset:
                 * avoid becoming vulnerable to outside attack aiming at
                 * resetting legit local connections.
                 */
-               req->rsk_ops->send_reset(sk, skb);
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+               if (!grsec_enable_blackhole)
+#endif
+                       req->rsk_ops->send_reset(sk, skb);
        } else if (fastopen) { /* received a valid RST pkt */
                reqsk_fastopen_remove(sk, req, true);
                tcp_reset(sk);
index ebf5ff57526eab65018005c47a907545e61cacf6..4d1ff32deb50dcbd06a4eec1c6f4d617785c840c 100644 (file)
@@ -236,7 +236,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
                if (cnt + width >= len)
                        break;
 
-               if (copy_to_user(buf + cnt, tbuf, width))
+               if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
                        return -EFAULT;
                cnt += width;
        }
index 1829c7fbc77e4ded47744a60e0b55c11c635e82d..c0b3d5203e80d82f927397dacc5f87da73ab8364 100644 (file)
 #include <linux/gfp.h>
 #include <net/tcp.h>
 
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+extern int grsec_lastack_retries;
+#endif
+
 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
@@ -191,6 +195,13 @@ static int tcp_write_timeout(struct sock *sk)
                }
        }
 
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+       if ((sk->sk_state == TCP_LAST_ACK) &&
+           (grsec_lastack_retries > 0) &&
+           (grsec_lastack_retries < retry_until))
+               retry_until = grsec_lastack_retries;
+#endif
+
        if (retransmits_timed_out(sk, retry_until,
                                  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
                /* Has it gone just too far? */
index 13b4dcf86ef610d1fcc1b26f7f69f5a6bbd31686..b866a2aae861bd19eb092833c675c3684e4e639a 100644 (file)
@@ -87,6 +87,7 @@
 #include <linux/types.h>
 #include <linux/fcntl.h>
 #include <linux/module.h>
+#include <linux/security.h>
 #include <linux/socket.h>
 #include <linux/sockios.h>
 #include <linux/igmp.h>
 #include <net/busy_poll.h>
 #include "udp_impl.h"
 
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+extern int grsec_enable_blackhole;
+#endif
+
 struct udp_table udp_table __read_mostly;
 EXPORT_SYMBOL(udp_table);
 
@@ -608,6 +613,9 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
        return true;
 }
 
+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
+
 /*
  * This routine is called by the ICMP module when it gets some
  * sort of error condition.  If err < 0 then the socket should
@@ -945,9 +953,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                dport = usin->sin_port;
                if (dport == 0)
                        return -EINVAL;
+
+               err = gr_search_udp_sendmsg(sk, usin);
+               if (err)
+                       return err;
        } else {
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -EDESTADDRREQ;
+
+               err = gr_search_udp_sendmsg(sk, NULL);
+               if (err)
+                       return err;
+
                daddr = inet->inet_daddr;
                dport = inet->inet_dport;
                /* Open fast path for connected socket.
@@ -1195,7 +1212,7 @@ static unsigned int first_packet_length(struct sock *sk)
                                 IS_UDPLITE(sk));
                UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
                                 IS_UDPLITE(sk));
-               atomic_inc(&sk->sk_drops);
+               atomic_inc_unchecked(&sk->sk_drops);
                __skb_unlink(skb, rcvq);
                __skb_queue_tail(&list_kill, skb);
        }
@@ -1275,6 +1292,10 @@ try_again:
        if (!skb)
                goto out;
 
+       err = gr_search_udp_recvmsg(sk, skb);
+       if (err)
+               goto out_free;
+
        ulen = skb->len - sizeof(struct udphdr);
        copied = len;
        if (copied > ulen)
@@ -1307,7 +1328,7 @@ try_again:
        if (unlikely(err)) {
                trace_kfree_skb(skb, udp_recvmsg);
                if (!peeked) {
-                       atomic_inc(&sk->sk_drops);
+                       atomic_inc_unchecked(&sk->sk_drops);
                        UDP_INC_STATS_USER(sock_net(sk),
                                           UDP_MIB_INERRORS, is_udplite);
                }
@@ -1605,7 +1626,7 @@ csum_error:
        UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
        UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-       atomic_inc(&sk->sk_drops);
+       atomic_inc_unchecked(&sk->sk_drops);
        kfree_skb(skb);
        return -1;
 }
@@ -1624,7 +1645,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
                        skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
 
                if (!skb1) {
-                       atomic_inc(&sk->sk_drops);
+                       atomic_inc_unchecked(&sk->sk_drops);
                        UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
                                         IS_UDPLITE(sk));
                        UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
@@ -1830,6 +1851,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                goto csum_error;
 
        UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+       if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
+#endif
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
        /*
@@ -2416,7 +2440,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
                from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
                0, sock_i_ino(sp),
                atomic_read(&sp->sk_refcnt), sp,
-               atomic_read(&sp->sk_drops));
+               atomic_read_unchecked(&sp->sk_drops));
 }
 
 int udp4_seq_show(struct seq_file *seq, void *v)
index 6156f68a1e90b53f7504a1e6f729b60c29d52b3a..d6ab46dbeec09d21292b216ec42478bc1dd6adf2 100644 (file)
@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
        fl4->flowi4_tos = iph->tos;
 }
 
-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
+static int xfrm4_garbage_collect(struct dst_ops *ops)
 {
        struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
 
-       xfrm4_policy_afinfo.garbage_collect(net);
+       xfrm_garbage_collect_deferred(net);
        return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
 }
 
@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
 
 static int __net_init xfrm4_net_init(struct net *net)
 {
-       struct ctl_table *table;
+       ctl_table_no_const *table = NULL;
        struct ctl_table_header *hdr;
 
-       table = xfrm4_policy_table;
        if (!net_eq(net, &init_net)) {
-               table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
+               table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
                if (!table)
                        goto err_alloc;
 
                table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
-       }
-
-       hdr = register_net_sysctl(net, "net/ipv4", table);
+               hdr = register_net_sysctl(net, "net/ipv4", table);
+       } else
+               hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
        if (!hdr)
                goto err_reg;
 
@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
        return 0;
 
 err_reg:
-       if (!net_eq(net, &init_net))
-               kfree(table);
+       kfree(table);
 err_alloc:
        return -ENOMEM;
 }
index dac94195a4b95efa31d78f125a877d9b98db9a05..534fa31ae714c69816cbfc81007bcab9cf9227ee 100644 (file)
@@ -171,7 +171,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
        .hop_limit              = IPV6_DEFAULT_HOPLIMIT,
        .mtu6                   = IPV6_MIN_MTU,
        .accept_ra              = 1,
-       .accept_redirects       = 1,
+       .accept_redirects       = 0,
        .autoconf               = 1,
        .force_mld_version      = 0,
        .mldv1_unsolicited_report_interval = 10 * HZ,
@@ -208,7 +208,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
        .hop_limit              = IPV6_DEFAULT_HOPLIMIT,
        .mtu6                   = IPV6_MIN_MTU,
        .accept_ra              = 1,
-       .accept_redirects       = 1,
+       .accept_redirects       = 0,
        .autoconf               = 1,
        .force_mld_version      = 0,
        .mldv1_unsolicited_report_interval = 10 * HZ,
@@ -604,7 +604,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
                idx = 0;
                head = &net->dev_index_head[h];
                rcu_read_lock();
-               cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
+               cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
                          net->dev_base_seq;
                hlist_for_each_entry_rcu(dev, head, index_hlist) {
                        if (idx < s_idx)
@@ -2420,7 +2420,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
                p.iph.ihl = 5;
                p.iph.protocol = IPPROTO_IPV6;
                p.iph.ttl = 64;
-               ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
+               ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
 
                if (ops->ndo_do_ioctl) {
                        mm_segment_t oldfs = get_fs();
@@ -3569,16 +3569,23 @@ static const struct file_operations if6_fops = {
        .release        = seq_release_net,
 };
 
+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
+extern void unregister_ipv6_seq_ops_addr(void);
+
 static int __net_init if6_proc_net_init(struct net *net)
 {
-       if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
+       register_ipv6_seq_ops_addr(&if6_seq_ops);
+       if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
+               unregister_ipv6_seq_ops_addr();
                return -ENOMEM;
+       }
        return 0;
 }
 
 static void __net_exit if6_proc_net_exit(struct net *net)
 {
        remove_proc_entry("if_inet6", net->proc_net);
+       unregister_ipv6_seq_ops_addr();
 }
 
 static struct pernet_operations if6_proc_net_ops = {
@@ -4194,7 +4201,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
        s_ip_idx = ip_idx = cb->args[2];
 
        rcu_read_lock();
-       cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
+       cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
        for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
                idx = 0;
                head = &net->dev_index_head[h];
@@ -4840,7 +4847,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
                rt_genid_bump_ipv6(net);
                break;
        }
-       atomic_inc(&net->ipv6.dev_addr_genid);
+       atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
 }
 
 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
@@ -4860,7 +4867,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
        int *valp = ctl->data;
        int val = *valp;
        loff_t pos = *ppos;
-       struct ctl_table lctl;
+       ctl_table_no_const lctl;
        int ret;
 
        /*
@@ -4945,7 +4952,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
        int *valp = ctl->data;
        int val = *valp;
        loff_t pos = *ppos;
-       struct ctl_table lctl;
+       ctl_table_no_const lctl;
        int ret;
 
        /*
index e8c4400f23e9b4afe47fcc45bb761d82354fb6a6..a4cd5daeecc9d787cb5cbd277c741f9e8840c690 100644 (file)
@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
        net->ipv6.sysctl.icmpv6_time = 1*HZ;
        net->ipv6.sysctl.flowlabel_consistency = 1;
        net->ipv6.sysctl.auto_flowlabels = 0;
-       atomic_set(&net->ipv6.fib6_sernum, 1);
+       atomic_set_unchecked(&net->ipv6.fib6_sernum, 1);
 
        err = ipv6_init_mibs(net);
        if (err)
index 49f5e73db1224549c7f3fc156209a85ed6ce4056..ae02d548c4d5d8ede9f2c5fc1029e7b0d4a856b8 100644 (file)
@@ -941,5 +941,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
                   0,
                   sock_i_ino(sp),
                   atomic_read(&sp->sk_refcnt), sp,
-                  atomic_read(&sp->sk_drops));
+                  atomic_read_unchecked(&sp->sk_drops));
 }
index d674152b6ede5d6134f1a89cb8ebaa6a81308c73..fb5a01db9f4330485aa06d5c3e7df699d1c83d66 100644 (file)
@@ -1005,7 +1005,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
 
 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
 {
-       struct ctl_table *table;
+       ctl_table_no_const *table;
 
        table = kmemdup(ipv6_icmp_table_template,
                        sizeof(ipv6_icmp_table_template),
index f1c6d5e9832253f15683fca6417255cee78618af..faabef6bd4f27998f893ec26a9da05ddf6cad26a 100644 (file)
@@ -99,9 +99,9 @@ static int fib6_new_sernum(struct net *net)
        int new, old;
 
        do {
-               old = atomic_read(&net->ipv6.fib6_sernum);
+               old = atomic_read_unchecked(&net->ipv6.fib6_sernum);
                new = old < INT_MAX ? old + 1 : 1;
-       } while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
+       } while (atomic_cmpxchg_unchecked(&net->ipv6.fib6_sernum,
                                old, new) != old);
        return new;
 }
index 01ccc28a686f8cd6bc2b6c69b6d48548b627fbdc..66861c7e3b101921d3fc748b36b2e9e5e94f586f 100644 (file)
@@ -71,8 +71,8 @@ struct ip6gre_net {
        struct net_device *fb_tunnel_dev;
 };
 
-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
+static struct rtnl_link_ops ip6gre_link_ops;
+static struct rtnl_link_ops ip6gre_tap_ops;
 static int ip6gre_tunnel_init(struct net_device *dev);
 static void ip6gre_tunnel_setup(struct net_device *dev);
 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
@@ -1289,7 +1289,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
 }
 
 
-static struct inet6_protocol ip6gre_protocol __read_mostly = {
+static struct inet6_protocol ip6gre_protocol = {
        .handler     = ip6gre_rcv,
        .err_handler = ip6gre_err,
        .flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
@@ -1650,7 +1650,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
        [IFLA_GRE_FLAGS]       = { .type = NLA_U32 },
 };
 
-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
+static struct rtnl_link_ops ip6gre_link_ops = {
        .kind           = "ip6gre",
        .maxtype        = IFLA_GRE_MAX,
        .policy         = ip6gre_policy,
@@ -1664,7 +1664,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
        .fill_info      = ip6gre_fill_info,
 };
 
-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
+static struct rtnl_link_ops ip6gre_tap_ops = {
        .kind           = "ip6gretap",
        .maxtype        = IFLA_GRE_MAX,
        .policy         = ip6gre_policy,
index 92b3da571980670e4f343a278eec49521057be1e..77837b88c64e56665473e94cb271a86915613107 100644 (file)
@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
 
 static int ip6_tnl_dev_init(struct net_device *dev);
 static void ip6_tnl_dev_setup(struct net_device *dev);
-static struct rtnl_link_ops ip6_link_ops __read_mostly;
+static struct rtnl_link_ops ip6_link_ops;
 
 static int ip6_tnl_net_id __read_mostly;
 struct ip6_tnl_net {
@@ -1771,7 +1771,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
        [IFLA_IPTUN_PROTO]              = { .type = NLA_U8 },
 };
 
-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
+static struct rtnl_link_ops ip6_link_ops = {
        .kind           = "ip6tnl",
        .maxtype        = IFLA_IPTUN_MAX,
        .policy         = ip6_tnl_policy,
index ace10d0b3aacb8e484e78f4f9a69113727ba3f65..97a8b49a23deaac83f23161a95fb02cf12140a63 100644 (file)
@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
 
 static int vti6_dev_init(struct net_device *dev);
 static void vti6_dev_setup(struct net_device *dev);
-static struct rtnl_link_ops vti6_link_ops __read_mostly;
+static struct rtnl_link_ops vti6_link_ops;
 
 static int vti6_net_id __read_mostly;
 struct vti6_net {
@@ -1004,7 +1004,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
        [IFLA_VTI_OKEY]         = { .type = NLA_U32 },
 };
 
-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
+static struct rtnl_link_ops vti6_link_ops = {
        .kind           = "vti6",
        .maxtype        = IFLA_VTI_MAX,
        .policy         = vti6_policy,
index 66980d8d98d1f5b3ef7a50dc33cb9b617f25604d..8aef0d1c49b0ffa579cfdd780f83c1cb40d73644 100644 (file)
@@ -989,7 +989,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                if (sk->sk_type != SOCK_STREAM)
                        return -ENOPROTOOPT;
 
-               msg.msg_control = optval;
+               msg.msg_control = (void __force_kernel *)optval;
                msg.msg_controllen = len;
                msg.msg_flags = flags;
 
index e080fbbbc0e5ce8d4d71014eed149e6b34250b62..412b3cfaaf6ef9a4c50777ce9582c318ff42d152 100644 (file)
@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
 #endif
 
 static int get_info(struct net *net, void __user *user,
-                    const int *len, int compat)
+                    int len, int compat)
 {
        char name[XT_TABLE_MAXNAMELEN];
        struct xt_table *t;
        int ret;
 
-       if (*len != sizeof(struct ip6t_getinfo)) {
-               duprintf("length %u != %zu\n", *len,
+       if (len != sizeof(struct ip6t_getinfo)) {
+               duprintf("length %u != %zu\n", len,
                         sizeof(struct ip6t_getinfo));
                return -EINVAL;
        }
@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
                info.size = private->size;
                strcpy(info.name, name);
 
-               if (copy_to_user(user, &info, *len) != 0)
+               if (copy_to_user(user, &info, len) != 0)
                        ret = -EFAULT;
                else
                        ret = 0;
@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
 
        switch (cmd) {
        case IP6T_SO_GET_INFO:
-               ret = get_info(sock_net(sk), user, len, 1);
+               ret = get_info(sock_net(sk), user, *len, 1);
                break;
        case IP6T_SO_GET_ENTRIES:
                ret = compat_get_entries(sock_net(sk), user, len);
@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
 
        switch (cmd) {
        case IP6T_SO_GET_INFO:
-               ret = get_info(sock_net(sk), user, len, 0);
+               ret = get_info(sock_net(sk), user, *len, 0);
                break;
 
        case IP6T_SO_GET_ENTRIES:
index 6f187c8d8a1bdf4ab27ec71ea05e865fcf782371..34b367f946aa6776a921e5394a6ec90dfa56361f 100644 (file)
@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
 
 static int nf_ct_frag6_sysctl_register(struct net *net)
 {
-       struct ctl_table *table;
+       ctl_table_no_const *table = NULL;
        struct ctl_table_header *hdr;
 
-       table = nf_ct_frag6_sysctl_table;
        if (!net_eq(net, &init_net)) {
-               table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
+               table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
                                GFP_KERNEL);
                if (table == NULL)
                        goto err_alloc;
@@ -112,9 +111,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
                table[2].data = &net->nf_frag.frags.high_thresh;
                table[2].extra1 = &net->nf_frag.frags.low_thresh;
                table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
-       }
-
-       hdr = register_net_sysctl(net, "net/netfilter", table);
+               hdr = register_net_sysctl(net, "net/netfilter", table);
+       } else
+               hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
        if (hdr == NULL)
                goto err_reg;
 
@@ -122,8 +121,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
        return 0;
 
 err_reg:
-       if (!net_eq(net, &init_net))
-               kfree(table);
+       kfree(table);
 err_alloc:
        return -ENOMEM;
 }
index fe7e3e403499729e52f3f08b182fbec7db570e90..47aba96a6103f70665aa3bf2d82f786e8b1f7852 100644 (file)
@@ -242,6 +242,24 @@ static struct pernet_operations ping_v6_net_ops = {
 };
 #endif
 
+static struct pingv6_ops real_pingv6_ops = {
+       .ipv6_recv_error                = ipv6_recv_error,
+       .ip6_datagram_recv_common_ctl   = ip6_datagram_recv_common_ctl,
+       .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
+       .icmpv6_err_convert             = icmpv6_err_convert,
+       .ipv6_icmp_error                = ipv6_icmp_error,
+       .ipv6_chk_addr                  = ipv6_chk_addr,
+};
+
+static struct pingv6_ops dummy_pingv6_ops = {
+       .ipv6_recv_error                = dummy_ipv6_recv_error,
+       .ip6_datagram_recv_common_ctl   = dummy_ip6_datagram_recv_ctl,
+       .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
+       .icmpv6_err_convert             = dummy_icmpv6_err_convert,
+       .ipv6_icmp_error                = dummy_ipv6_icmp_error,
+       .ipv6_chk_addr                  = dummy_ipv6_chk_addr,
+};
+
 int __init pingv6_init(void)
 {
 #ifdef CONFIG_PROC_FS
@@ -249,13 +267,7 @@ int __init pingv6_init(void)
        if (ret)
                return ret;
 #endif
-       pingv6_ops.ipv6_recv_error = ipv6_recv_error;
-       pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
-       pingv6_ops.ip6_datagram_recv_specific_ctl =
-               ip6_datagram_recv_specific_ctl;
-       pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
-       pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
-       pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
+       pingv6_ops = &real_pingv6_ops;
        return inet6_register_protosw(&pingv6_protosw);
 }
 
@@ -264,14 +276,9 @@ int __init pingv6_init(void)
  */
 void pingv6_exit(void)
 {
-       pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
-       pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
-       pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
-       pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
-       pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
-       pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
 #ifdef CONFIG_PROC_FS
        unregister_pernet_subsys(&ping_v6_net_ops);
 #endif
+       pingv6_ops = &dummy_pingv6_ops;
        inet6_unregister_protosw(&pingv6_protosw);
 }
index 679253d0af8427008f72832e0c1b61341de45bd6..70b653cf309839e082cf394abc5edfe147e3991c 100644 (file)
@@ -310,7 +310,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
        if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
                goto proc_snmp6_fail;
 
-       net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
+       net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
        if (!net->mib.proc_net_devsnmp6)
                goto proc_dev_snmp6_fail;
        return 0;
index ee25631f8c293db3db95a0992fa2b319872afb30..3c3ac5de1f659639ab55d3e639023891a9581f60 100644 (file)
@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
        if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
            skb_checksum_complete(skb)) {
-               atomic_inc(&sk->sk_drops);
+               atomic_inc_unchecked(&sk->sk_drops);
                kfree_skb(skb);
                return NET_RX_DROP;
        }
@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
        struct raw6_sock *rp = raw6_sk(sk);
 
        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
-               atomic_inc(&sk->sk_drops);
+               atomic_inc_unchecked(&sk->sk_drops);
                kfree_skb(skb);
                return NET_RX_DROP;
        }
@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
 
        if (inet->hdrincl) {
                if (skb_checksum_complete(skb)) {
-                       atomic_inc(&sk->sk_drops);
+                       atomic_inc_unchecked(&sk->sk_drops);
                        kfree_skb(skb);
                        return NET_RX_DROP;
                }
@@ -609,7 +609,7 @@ out:
        return err;
 }
 
-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
                        struct flowi6 *fl6, struct dst_entry **dstp,
                        unsigned int flags)
 {
@@ -916,12 +916,15 @@ do_confirm:
 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
                               char __user *optval, int optlen)
 {
+       struct icmp6_filter filter;
+
        switch (optname) {
        case ICMPV6_FILTER:
                if (optlen > sizeof(struct icmp6_filter))
                        optlen = sizeof(struct icmp6_filter);
-               if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
+               if (copy_from_user(&filter, optval, optlen))
                        return -EFAULT;
+               raw6_sk(sk)->filter = filter;
                return 0;
        default:
                return -ENOPROTOOPT;
@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
                               char __user *optval, int __user *optlen)
 {
        int len;
+       struct icmp6_filter filter;
 
        switch (optname) {
        case ICMPV6_FILTER:
@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
                        len = sizeof(struct icmp6_filter);
                if (put_user(len, optlen))
                        return -EFAULT;
-               if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
+               filter = raw6_sk(sk)->filter;
+               if (len > sizeof filter || copy_to_user(optval, &filter, len))
                        return -EFAULT;
                return 0;
        default:
index d7d70e69973b7455fbf2b5cb88a09d428f9c45ea..bd5e9fc3fc88fc6fd227f042a4fd59e863d369b0 100644 (file)
@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
 
 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
 {
-       struct ctl_table *table;
+       ctl_table_no_const *table = NULL;
        struct ctl_table_header *hdr;
 
-       table = ip6_frags_ns_ctl_table;
        if (!net_eq(net, &init_net)) {
-               table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
+               table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
                if (table == NULL)
                        goto err_alloc;
 
@@ -645,9 +644,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
                /* Don't export sysctls to unprivileged users */
                if (net->user_ns != &init_user_ns)
                        table[0].procname = NULL;
-       }
+               hdr = register_net_sysctl(net, "net/ipv6", table);
+       } else
+               hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
 
-       hdr = register_net_sysctl(net, "net/ipv6", table);
        if (hdr == NULL)
                goto err_reg;
 
@@ -655,8 +655,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
        return 0;
 
 err_reg:
-       if (!net_eq(net, &init_net))
-               kfree(table);
+       kfree(table);
 err_alloc:
        return -ENOMEM;
 }
index 1528d8404cd20b89e49be5215cc309ef96fbcd3a..f3939608a7b1ae91429adce7d98e8d74b08f5f60 100644 (file)
@@ -2978,7 +2978,7 @@ struct ctl_table ipv6_route_table_template[] = {
 
 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
 {
-       struct ctl_table *table;
+       ctl_table_no_const *table;
 
        table = kmemdup(ipv6_route_table_template,
                        sizeof(ipv6_route_table_template),
index cdbfe5af6187c5c604462b70b26a268f4041e289..e13eb31dba6adeb6c5fd111d02dde01306b794a4 100644 (file)
@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
 static void ipip6_dev_free(struct net_device *dev);
 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
                      __be32 *v4dst);
-static struct rtnl_link_ops sit_link_ops __read_mostly;
+static struct rtnl_link_ops sit_link_ops;
 
 static int sit_net_id __read_mostly;
 struct sit_net {
@@ -1751,7 +1751,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
                unregister_netdevice_queue(dev, head);
 }
 
-static struct rtnl_link_ops sit_link_ops __read_mostly = {
+static struct rtnl_link_ops sit_link_ops = {
        .kind           = "sit",
        .maxtype        = IFLA_IPTUN_MAX,
        .policy         = ipip6_policy,
index c5c10fafcfe2e068fe33adc8367206a16e40c7bf..2577d51c8f9c06e84e7fc518ba8752beabe2ed37 100644 (file)
@@ -78,7 +78,7 @@ static struct ctl_table ipv6_rotable[] = {
 
 static int __net_init ipv6_sysctl_net_init(struct net *net)
 {
-       struct ctl_table *ipv6_table;
+       ctl_table_no_const *ipv6_table;
        struct ctl_table *ipv6_route_table;
        struct ctl_table *ipv6_icmp_table;
        int err;
index 9c0b54e87b472390c080857f886a2af4a7a300f8..5e7bd8f806a96339ed7bc3c6ab09723a4af0124e 100644 (file)
@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
        }
 }
 
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+extern int grsec_enable_blackhole;
+#endif
+
 static void tcp_v6_hash(struct sock *sk)
 {
        if (sk->sk_state != TCP_CLOSE) {
@@ -1343,6 +1347,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
        return 0;
 
 reset:
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+       if (!grsec_enable_blackhole)
+#endif
        tcp_v6_send_reset(sk, skb);
 discard:
        if (opt_skb)
@@ -1443,12 +1450,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 
        sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
                                inet6_iif(skb));
-       if (!sk)
+       if (!sk) {
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+               ret = 1;
+#endif
                goto no_tcp_socket;
+       }
 
 process:
-       if (sk->sk_state == TCP_TIME_WAIT)
+       if (sk->sk_state == TCP_TIME_WAIT) {
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+               ret = 2;
+#endif
                goto do_time_wait;
+       }
 
        if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
                NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
@@ -1499,6 +1514,10 @@ csum_error:
 bad_packet:
                TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
        } else {
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+               if (!grsec_enable_blackhole || (ret == 1 &&
+                   (skb->dev->flags & IFF_LOOPBACK)))
+#endif
                tcp_v6_send_reset(NULL, skb);
        }
 
index 189dc4ae3ecac1b140a7208c4b6de0b956e0b710..458bec0127f241774d0aed206c5a86a74dcd76fa 100644 (file)
@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
                               udp_ipv6_hash_secret + net_hash_mix(net));
 }
 
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+extern int grsec_enable_blackhole;
+#endif
+
 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
 {
        const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
@@ -448,7 +452,7 @@ try_again:
        if (unlikely(err)) {
                trace_kfree_skb(skb, udpv6_recvmsg);
                if (!peeked) {
-                       atomic_inc(&sk->sk_drops);
+                       atomic_inc_unchecked(&sk->sk_drops);
                        if (is_udp4)
                                UDP_INC_STATS_USER(sock_net(sk),
                                                   UDP_MIB_INERRORS,
@@ -714,7 +718,7 @@ csum_error:
        UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
        UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-       atomic_inc(&sk->sk_drops);
+       atomic_inc_unchecked(&sk->sk_drops);
        kfree_skb(skb);
        return -1;
 }
@@ -753,7 +757,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
                if (likely(skb1 == NULL))
                        skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
                if (!skb1) {
-                       atomic_inc(&sk->sk_drops);
+                       atomic_inc_unchecked(&sk->sk_drops);
                        UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
                                          IS_UDPLITE(sk));
                        UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
@@ -937,6 +941,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                goto csum_error;
 
        UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+       if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
+#endif
        icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 
        kfree_skb(skb);
index 48bf5a06847bd59db7834758b22aa9208d727940..691985a7f06a63f6859ff20ad1ce7468525c6c7a 100644 (file)
@@ -223,11 +223,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
        }
 }
 
-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
+static int xfrm6_garbage_collect(struct dst_ops *ops)
 {
        struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
 
-       xfrm6_policy_afinfo.garbage_collect(net);
+       xfrm_garbage_collect_deferred(net);
        return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
 }
 
@@ -340,19 +340,19 @@ static struct ctl_table xfrm6_policy_table[] = {
 
 static int __net_init xfrm6_net_init(struct net *net)
 {
-       struct ctl_table *table;
+       ctl_table_no_const *table = NULL;
        struct ctl_table_header *hdr;
 
-       table = xfrm6_policy_table;
        if (!net_eq(net, &init_net)) {
-               table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
+               table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
                if (!table)
                        goto err_alloc;
 
                table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
-       }
+               hdr = register_net_sysctl(net, "net/ipv6", table);
+       } else
+               hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
 
-       hdr = register_net_sysctl(net, "net/ipv6", table);
        if (!hdr)
                goto err_reg;
 
@@ -360,8 +360,7 @@ static int __net_init xfrm6_net_init(struct net *net)
        return 0;
 
 err_reg:
-       if (!net_eq(net, &init_net))
-               kfree(table);
+       kfree(table);
 err_alloc:
        return -ENOMEM;
 }
index c1d247ebe916685ba2d48985462e6592f12d4a5b..9e5949d06ef5ebca4f8b2827687df1674e39ab9e 100644 (file)
@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
        struct proc_dir_entry *p;
        int rc = -ENOMEM;
 
-       ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
+       ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
 
        if (!ipx_proc_dir)
                goto out;
index 4efe486baee62f849c2b878c5c6ca90a510209ab..dee966ee3221ced08d046eaac2cef5fb361262d8 100644 (file)
@@ -310,10 +310,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
        add_wait_queue(&port->open_wait, &wait);
 
        pr_debug("%s(%d):block_til_ready before block on %s open_count=%d\n",
-                __FILE__, __LINE__, tty->driver->name, port->count);
+                __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
 
        spin_lock_irqsave(&port->lock, flags);
-       port->count--;
+       atomic_dec(&port->count);
        port->blocked_open++;
        spin_unlock_irqrestore(&port->lock, flags);
 
@@ -348,7 +348,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
                }
 
                pr_debug("%s(%d):block_til_ready blocking on %s open_count=%d\n",
-                        __FILE__, __LINE__, tty->driver->name, port->count);
+                        __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
 
                schedule();
        }
@@ -358,12 +358,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
 
        spin_lock_irqsave(&port->lock, flags);
        if (!tty_hung_up_p(filp))
-               port->count++;
+               atomic_inc(&port->count);
        port->blocked_open--;
        spin_unlock_irqrestore(&port->lock, flags);
 
        pr_debug("%s(%d):block_til_ready after blocking on %s open_count=%d\n",
-                __FILE__, __LINE__, tty->driver->name, port->count);
+                __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
 
        if (!retval)
                port->flags |= ASYNC_NORMAL_ACTIVE;
@@ -433,12 +433,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
 
        /* ++ is not atomic, so this should be protected - Jean II */
        spin_lock_irqsave(&self->port.lock, flags);
-       self->port.count++;
+       atomic_inc(&self->port.count);
        spin_unlock_irqrestore(&self->port.lock, flags);
        tty_port_tty_set(&self->port, tty);
 
        pr_debug("%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
-                self->line, self->port.count);
+                self->line, atomic_read(&self->port.count));
 
        /* Not really used by us, but lets do it anyway */
        self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
@@ -961,7 +961,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
                tty_kref_put(port->tty);
        }
        port->tty = NULL;
-       port->count = 0;
+       atomic_set(&port->count, 0);
        spin_unlock_irqrestore(&port->lock, flags);
 
        wake_up_interruptible(&port->open_wait);
@@ -1308,7 +1308,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
        seq_putc(m, '\n');
 
        seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
-       seq_printf(m, "Open count: %d\n", self->port.count);
+       seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
        seq_printf(m, "Max data size: %d\n", self->max_data_size);
        seq_printf(m, "Max header size: %d\n", self->max_header_size);
 
index b9ac598e2116345a7b05365c74adf95bbe5abd35..f88cc5627cf922f146036deb14fad572ebe99dd7 100644 (file)
@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
 {
        int i;
 
-       proc_irda = proc_mkdir("irda", init_net.proc_net);
+       proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
        if (proc_irda == NULL)
                return;
 
index 2e9953b2db8402dd71c88691f263db00f2cba3e2..ed06350cf775e812985a73081d718c9a68c7f8da 100644 (file)
@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
 {
        char name[12];
 
-       sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
+       sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
        while (__iucv_get_sock_by_name(name)) {
                sprintf(name, "%08x",
-                       atomic_inc_return(&iucv_sk_list.autobind_name));
+                       atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
        }
        memcpy(iucv->src_name, name, 8);
 }
index 2a6a1fdd62c059eb857312d10fee38fa1c6bc86f..6c112b0e0a14052a8c772d7f187b6dd8c6e38129 100644 (file)
@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
        return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata iucv_cpu_notifier = {
+static struct notifier_block iucv_cpu_notifier = {
        .notifier_call = iucv_cpu_notify,
 };
 
index f8ac939d52b4b83ce3720e274f33461347029661..1e189bf0629934ffd91987eda2ec56521ceaf186 100644 (file)
@@ -3049,10 +3049,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
 static u32 get_acqseq(void)
 {
        u32 res;
-       static atomic_t acqseq;
+       static atomic_unchecked_t acqseq;
 
        do {
-               res = atomic_inc_return(&acqseq);
+               res = atomic_inc_return_unchecked(&acqseq);
        } while (!res);
        return res;
 }
index 781b3a226ba73204aa9ff68923ae2385a88e7205..73a74342d620e19e72978a2752992f5b7a6c879a 100644 (file)
@@ -42,12 +42,12 @@ struct l2tp_eth {
        struct sock             *tunnel_sock;
        struct l2tp_session     *session;
        struct list_head        list;
-       atomic_long_t           tx_bytes;
-       atomic_long_t           tx_packets;
-       atomic_long_t           tx_dropped;
-       atomic_long_t           rx_bytes;
-       atomic_long_t           rx_packets;
-       atomic_long_t           rx_errors;
+       atomic_long_unchecked_t tx_bytes;
+       atomic_long_unchecked_t tx_packets;
+       atomic_long_unchecked_t tx_dropped;
+       atomic_long_unchecked_t rx_bytes;
+       atomic_long_unchecked_t rx_packets;
+       atomic_long_unchecked_t rx_errors;
 };
 
 /* via l2tp_session_priv() */
@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
 
        if (likely(ret == NET_XMIT_SUCCESS)) {
-               atomic_long_add(len, &priv->tx_bytes);
-               atomic_long_inc(&priv->tx_packets);
+               atomic_long_add_unchecked(len, &priv->tx_bytes);
+               atomic_long_inc_unchecked(&priv->tx_packets);
        } else {
-               atomic_long_inc(&priv->tx_dropped);
+               atomic_long_inc_unchecked(&priv->tx_dropped);
        }
        return NETDEV_TX_OK;
 }
@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
 {
        struct l2tp_eth *priv = netdev_priv(dev);
 
-       stats->tx_bytes   = atomic_long_read(&priv->tx_bytes);
-       stats->tx_packets = atomic_long_read(&priv->tx_packets);
-       stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
-       stats->rx_bytes   = atomic_long_read(&priv->rx_bytes);
-       stats->rx_packets = atomic_long_read(&priv->rx_packets);
-       stats->rx_errors  = atomic_long_read(&priv->rx_errors);
+       stats->tx_bytes   = atomic_long_read_unchecked(&priv->tx_bytes);
+       stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
+       stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
+       stats->rx_bytes   = atomic_long_read_unchecked(&priv->rx_bytes);
+       stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
+       stats->rx_errors  = atomic_long_read_unchecked(&priv->rx_errors);
        return stats;
 }
 
@@ -167,15 +167,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
        nf_reset(skb);
 
        if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
-               atomic_long_inc(&priv->rx_packets);
-               atomic_long_add(data_len, &priv->rx_bytes);
+               atomic_long_inc_unchecked(&priv->rx_packets);
+               atomic_long_add_unchecked(data_len, &priv->rx_bytes);
        } else {
-               atomic_long_inc(&priv->rx_errors);
+               atomic_long_inc_unchecked(&priv->rx_errors);
        }
        return;
 
 error:
-       atomic_long_inc(&priv->rx_errors);
+       atomic_long_inc_unchecked(&priv->rx_errors);
        kfree_skb(skb);
 }
 
index 1a3c7e0f5d0de3c1d35759e1b2ae1bfa7849af84..80f8b0cdd8f17ad7cd053a6c89ec9a180a973805 100644 (file)
@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
        int rc = -ENOMEM;
        struct proc_dir_entry *p;
 
-       llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
+       llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
        if (!llc_proc_dir)
                goto out;
 
index e75d5c53e97ba9cbd9640de2d4a3798d33711609..429fc9540a8890f94530331679249a3308a627ad 100644 (file)
@@ -543,7 +543,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
                        ret = ieee80211_vif_use_channel(sdata, chandef,
                                        IEEE80211_CHANCTX_EXCLUSIVE);
                }
-       } else if (local->open_count == local->monitors) {
+       } else if (local_read(&local->open_count) == local->monitors) {
                local->_oper_chandef = *chandef;
                ieee80211_hw_config(local, 0);
        }
@@ -3416,7 +3416,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
                else
                        local->probe_req_reg--;
 
-               if (!local->open_count)
+               if (!local_read(&local->open_count))
                        break;
 
                ieee80211_queue_work(&local->hw, &local->reconfig_filter);
@@ -3551,8 +3551,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
        if (chanctx_conf) {
                *chandef = sdata->vif.bss_conf.chandef;
                ret = 0;
-       } else if (local->open_count > 0 &&
-                  local->open_count == local->monitors &&
+       } else if (local_read(&local->open_count) > 0 &&
+                  local_read(&local->open_count) == local->monitors &&
                   sdata->vif.type == NL80211_IFTYPE_MONITOR) {
                if (local->use_chanctx)
                        *chandef = local->monitor_chandef;
index cc6e964d98370e8d07b640f6d8c01ddfbce2be1b..029a3a37342b8ebfdee5d3516a50ce26ce2f2fb7 100644 (file)
@@ -29,6 +29,7 @@
 #include <net/ieee80211_radiotap.h>
 #include <net/cfg80211.h>
 #include <net/mac80211.h>
+#include <asm/local.h>
 #include "key.h"
 #include "sta_info.h"
 #include "debug.h"
@@ -1114,7 +1115,7 @@ struct ieee80211_local {
        /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
        spinlock_t queue_stop_reason_lock;
 
-       int open_count;
+       local_t open_count;
        int monitors, cooked_mntrs;
        /* number of interfaces with corresponding FIF_ flags */
        int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
index 41735539087380c8e84735ef52efd1f2d265bbed..e3b5a3fa2cabeae4d47401ef06c84c4cfee16ea5 100644 (file)
@@ -543,7 +543,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
                break;
        }
 
-       if (local->open_count == 0) {
+       if (local_read(&local->open_count) == 0) {
                res = drv_start(local);
                if (res)
                        goto err_del_bss;
@@ -590,7 +590,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
                        res = drv_add_interface(local, sdata);
                        if (res)
                                goto err_stop;
-               } else if (local->monitors == 0 && local->open_count == 0) {
+               } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
                        res = ieee80211_add_virtual_monitor(local);
                        if (res)
                                goto err_stop;
@@ -700,7 +700,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
                atomic_inc(&local->iff_promiscs);
 
        if (coming_up)
-               local->open_count++;
+               local_inc(&local->open_count);
 
        if (hw_reconf_flags)
                ieee80211_hw_config(local, hw_reconf_flags);
@@ -738,7 +738,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
  err_del_interface:
        drv_remove_interface(local, sdata);
  err_stop:
-       if (!local->open_count)
+       if (!local_read(&local->open_count))
                drv_stop(local);
  err_del_bss:
        sdata->bss = NULL;
@@ -906,7 +906,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        }
 
        if (going_down)
-               local->open_count--;
+               local_dec(&local->open_count);
 
        switch (sdata->vif.type) {
        case NL80211_IFTYPE_AP_VLAN:
@@ -968,7 +968,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        }
        spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
-       if (local->open_count == 0)
+       if (local_read(&local->open_count) == 0)
                ieee80211_clear_tx_pending(local);
 
        /*
@@ -1011,7 +1011,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        if (cancel_scan)
                flush_delayed_work(&local->scan_work);
 
-       if (local->open_count == 0) {
+       if (local_read(&local->open_count) == 0) {
                ieee80211_stop_device(local);
 
                /* no reconfiguring after stop! */
@@ -1022,7 +1022,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        ieee80211_configure_filter(local);
        ieee80211_hw_config(local, hw_reconf_flags);
 
-       if (local->monitors == local->open_count)
+       if (local->monitors == local_read(&local->open_count))
                ieee80211_add_virtual_monitor(local);
 }
 
index 6ab99da38db92f72c5bec089e8e6ec34267d62b0..f9502d49d925db811527f123eb88ab997d60f4a8 100644 (file)
@@ -175,7 +175,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
                changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
                             IEEE80211_CONF_CHANGE_POWER);
 
-       if (changed && local->open_count) {
+       if (changed && local_read(&local->open_count)) {
                ret = drv_config(local, changed);
                /*
                 * Goal:
index 4a95fe3cffbc9bd9e6d2ff6853b36ebe904b94a4..0bfd71312670cbfa85c8d608d866b0bd50fee109 100644 (file)
@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
        struct ieee80211_sub_if_data *sdata;
        struct sta_info *sta;
 
-       if (!local->open_count)
+       if (!local_read(&local->open_count))
                goto suspend;
 
        ieee80211_scan_cancel(local);
@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
        cancel_work_sync(&local->dynamic_ps_enable_work);
        del_timer_sync(&local->dynamic_ps_timer);
 
-       local->wowlan = wowlan && local->open_count;
+       local->wowlan = wowlan && local_read(&local->open_count);
        if (local->wowlan) {
                int err = drv_suspend(local, wowlan);
                if (err < 0) {
@@ -126,7 +126,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
        WARN_ON(!list_empty(&local->chanctx_list));
 
        /* stop hardware - this must stop RX */
-       if (local->open_count)
+       if (local_read(&local->open_count))
                ieee80211_stop_device(local);
 
  suspend:
index d53355b011f5cf6407367e3c5f1a3e513d1cc08e..21f583aca5ccca6b0e07183c663bbd6a87a052f5 100644 (file)
@@ -724,7 +724,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
 
        ASSERT_RTNL();
 
-       if (local->open_count)
+       if (local_read(&local->open_count))
                return -EBUSY;
 
        if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
index 974ebe70f5b0139c0dd2aec7335201dec51e6aca..57bcd3cdcb795205fcff12ebe5e081ea419802b9 100644 (file)
@@ -1757,7 +1757,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        }
 #endif
        /* everything else happens only if HW was up & running */
-       if (!local->open_count)
+       if (!local_read(&local->open_count))
                goto wake_up;
 
        /*
@@ -1987,7 +1987,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        local->in_reconfig = false;
        barrier();
 
-       if (local->monitors == local->open_count && local->monitors > 0)
+       if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
                ieee80211_add_virtual_monitor(local);
 
        /*
index b02660fa9eb00cd28aeb01f10b76f6604eae8096..c0f791cf26d9f110a7c4dd7173511928c1f7d4ee 100644 (file)
@@ -1122,6 +1122,16 @@ config NETFILTER_XT_MATCH_ESP
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_MATCH_GRADM
+       tristate '"gradm" match support'
+       depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
+       depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
+       ---help---
+         The gradm match allows to match on grsecurity RBAC being enabled.
+         It is useful when iptables rules are applied early on bootup to
+         prevent connections to the machine (except from a trusted host)
+         while the RBAC system is disabled.
+
 config NETFILTER_XT_MATCH_HASHLIMIT
        tristate '"hashlimit" match support'
        depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
index 89f73a9e98741702f2ea324e49b7b2f75bfb69fd..e4e5bd98ee82ded3f9f62b028f8bea1c43894e08 100644 (file)
@@ -139,6 +139,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
index d259da3ce67a6b18f6e4a345a5729876e373efc0..6a32b2c4a08e3107c2d3fde05f3bdb6460a8aa0e 100644 (file)
@@ -1952,7 +1952,7 @@ done:
        return ret;
 }
 
-static struct nf_sockopt_ops so_set __read_mostly = {
+static struct nf_sockopt_ops so_set = {
        .pf             = PF_INET,
        .get_optmin     = SO_IP_SET,
        .get_optmax     = SO_IP_SET + 1,
index b0f7b626b56da755222c0a1bd9a3a7b276ba32c8..0541842b735e623689f27e9ce17f19c614cec264 100644 (file)
@@ -572,7 +572,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
        /* Increase the refcnt counter of the dest */
        ip_vs_dest_hold(dest);
 
-       conn_flags = atomic_read(&dest->conn_flags);
+       conn_flags = atomic_read_unchecked(&dest->conn_flags);
        if (cp->protocol != IPPROTO_UDP)
                conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
        flags = cp->flags;
@@ -922,7 +922,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
 
        cp->control = NULL;
        atomic_set(&cp->n_control, 0);
-       atomic_set(&cp->in_pkts, 0);
+       atomic_set_unchecked(&cp->in_pkts, 0);
 
        cp->packet_xmit = NULL;
        cp->app = NULL;
@@ -1229,7 +1229,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
 
        /* Don't drop the entry if its number of incoming packets is not
           located in [0, 8] */
-       i = atomic_read(&cp->in_pkts);
+       i = atomic_read_unchecked(&cp->in_pkts);
        if (i > 8 || i < 0) return 0;
 
        if (!todrop_rate[i]) return 0;
index b87ca32efa0b4e6edc7f251c2c32c4ba3b55659c..76c77993680e349c4eac45e56b1273dc1d5df13d 100644 (file)
@@ -568,7 +568,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
                ret = cp->packet_xmit(skb, cp, pd->pp, iph);
                /* do not touch skb anymore */
 
-               atomic_inc(&cp->in_pkts);
+               atomic_inc_unchecked(&cp->in_pkts);
                ip_vs_conn_put(cp);
                return ret;
        }
@@ -1723,7 +1723,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
        if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
                pkts = sysctl_sync_threshold(ipvs);
        else
-               pkts = atomic_add_return(1, &cp->in_pkts);
+               pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
 
        if (ipvs->sync_state & IP_VS_STATE_MASTER)
                ip_vs_sync_conn(net, cp, pkts);
index b8295a430a5600d35b6de4163ba3b98e75c5f28c..17ff579abdd7cbb71614c8b1832c9c9c14a61461 100644 (file)
@@ -799,7 +799,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
                 */
                ip_vs_rs_hash(ipvs, dest);
        }
-       atomic_set(&dest->conn_flags, conn_flags);
+       atomic_set_unchecked(&dest->conn_flags, conn_flags);
 
        /* bind the service */
        old_svc = rcu_dereference_protected(dest->svc, 1);
@@ -1664,7 +1664,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
  *     align with netns init in ip_vs_control_net_init()
  */
 
-static struct ctl_table vs_vars[] = {
+static ctl_table_no_const vs_vars[] __read_only = {
        {
                .procname       = "amemthresh",
                .maxlen         = sizeof(int),
@@ -1999,7 +1999,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
                                           "      %-7s %-6d %-10d %-10d\n",
                                           &dest->addr.in6,
                                           ntohs(dest->port),
-                                          ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
+                                          ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
                                           atomic_read(&dest->weight),
                                           atomic_read(&dest->activeconns),
                                           atomic_read(&dest->inactconns));
@@ -2010,7 +2010,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
                                           "%-7s %-6d %-10d %-10d\n",
                                           ntohl(dest->addr.ip),
                                           ntohs(dest->port),
-                                          ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
+                                          ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
                                           atomic_read(&dest->weight),
                                           atomic_read(&dest->activeconns),
                                           atomic_read(&dest->inactconns));
@@ -2499,7 +2499,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
 
                        entry.addr = dest->addr.ip;
                        entry.port = dest->port;
-                       entry.conn_flags = atomic_read(&dest->conn_flags);
+                       entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
                        entry.weight = atomic_read(&dest->weight);
                        entry.u_threshold = dest->u_threshold;
                        entry.l_threshold = dest->l_threshold;
@@ -3039,7 +3039,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
        if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
            nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
            nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
-                       (atomic_read(&dest->conn_flags) &
+                       (atomic_read_unchecked(&dest->conn_flags) &
                         IP_VS_CONN_F_FWD_MASK)) ||
            nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
                        atomic_read(&dest->weight)) ||
@@ -3672,7 +3672,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
 {
        int idx;
        struct netns_ipvs *ipvs = net_ipvs(net);
-       struct ctl_table *tbl;
+       ctl_table_no_const *tbl;
 
        atomic_set(&ipvs->dropentry, 0);
        spin_lock_init(&ipvs->dropentry_lock);
index 127f14046c519d9aa0d0cb7596b8bcf676b5a55b..553d652c126a6be720418505620074915c77cd92 100644 (file)
@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
  *      IPVS LBLC sysctl table
  */
 #ifdef CONFIG_SYSCTL
-static struct ctl_table vs_vars_table[] = {
+static ctl_table_no_const vs_vars_table[] __read_only = {
        {
                .procname       = "lblc_expiration",
                .data           = NULL,
index 2229d2d8bbe0afe97e4d8fa4c2bfc95a6a93ee72..b32b785cb3b5802b5fbcd9ecc348bdb7cc49d376 100644 (file)
@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
  *      IPVS LBLCR sysctl table
  */
 
-static struct ctl_table vs_vars_table[] = {
+static ctl_table_no_const vs_vars_table[] __read_only = {
        {
                .procname       = "lblcr_expiration",
                .data           = NULL,
index c47ffd7a0a709cb73834c84652f251960f25db79..d233a81b6a4cd1222c79361e4ee81e3bca46fe51 100644 (file)
@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
        cp = cp->control;
        if (cp) {
                if (cp->flags & IP_VS_CONN_F_TEMPLATE)
-                       pkts = atomic_add_return(1, &cp->in_pkts);
+                       pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
                else
                        pkts = sysctl_sync_threshold(ipvs);
                ip_vs_sync_conn(net, cp->control, pkts);
@@ -771,7 +771,7 @@ control:
        if (!cp)
                return;
        if (cp->flags & IP_VS_CONN_F_TEMPLATE)
-               pkts = atomic_add_return(1, &cp->in_pkts);
+               pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
        else
                pkts = sysctl_sync_threshold(ipvs);
        goto sloop;
@@ -900,7 +900,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
 
        if (opt)
                memcpy(&cp->in_seq, opt, sizeof(*opt));
-       atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
+       atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
        cp->state = state;
        cp->old_state = cp->state;
        /*
index 3aedbda7658a4fbbbd54fcc683a09f089ffe6de5..6a6356756d553bc6498d083469763c42c16d8348 100644 (file)
@@ -1214,7 +1214,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                else
                        rc = NF_ACCEPT;
                /* do not touch skb anymore */
-               atomic_inc(&cp->in_pkts);
+               atomic_inc_unchecked(&cp->in_pkts);
                goto out;
        }
 
@@ -1307,7 +1307,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
                else
                        rc = NF_ACCEPT;
                /* do not touch skb anymore */
-               atomic_inc(&cp->in_pkts);
+               atomic_inc_unchecked(&cp->in_pkts);
                goto out;
        }
 
index a4b5e2a435acb4c2fafaf26ccb4fce349a151f9a..13b1de309a96947aef9160655bba718dda817547 100644 (file)
@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
 #ifdef CONFIG_SYSCTL
 static int nf_conntrack_acct_init_sysctl(struct net *net)
 {
-       struct ctl_table *table;
+       ctl_table_no_const *table;
 
        table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
                        GFP_KERNEL);
index 46d1b26a468ed0d4cd0c9f30c02398f51c922db0..b7f3b76b5bf94ef5a844ce7d86b90b2848bc78cc 100644 (file)
@@ -1734,6 +1734,10 @@ void nf_conntrack_init_end(void)
 #define DYING_NULLS_VAL                ((1<<30)+1)
 #define TEMPLATE_NULLS_VAL     ((1<<30)+2)
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
+#endif
+
 int nf_conntrack_init_net(struct net *net)
 {
        int ret = -ENOMEM;
@@ -1759,7 +1763,11 @@ int nf_conntrack_init_net(struct net *net)
        if (!net->ct.stat)
                goto err_pcpu_lists;
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+       net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
+#else
        net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
+#endif
        if (!net->ct.slabname)
                goto err_slabname;
 
index 4e78c57b818f7d2cc387c793fe5a100e03fe067e..ec8fb748f00a6c90c5dcac1f598b1018e42fbf94 100644 (file)
@@ -264,7 +264,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
 #ifdef CONFIG_SYSCTL
 static int nf_conntrack_event_init_sysctl(struct net *net)
 {
-       struct ctl_table *table;
+       ctl_table_no_const *table;
 
        table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
                        GFP_KERNEL);
index bd9d31537905e4e372da427ac29ce5948f2af34c..989947e41bdaeb1e1eff46be3d536d59ce207f61 100644 (file)
@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
 
 static int nf_conntrack_helper_init_sysctl(struct net *net)
 {
-       struct ctl_table *table;
+       ctl_table_no_const *table;
 
        table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
                        GFP_KERNEL);
index b65d5864b6d948a0f902bc95af1b3b2de149c7f9..beec9021d796a241292e7246e2ddba6da11ee63e 100644 (file)
@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
 
 static void
 nf_ct_unregister_sysctl(struct ctl_table_header **header,
-                       struct ctl_table **table,
+                       ctl_table_no_const **table,
                        unsigned int users)
 {
        if (users > 0)
index fc823fa5dcf53794bc8977cb5502d1dac92938e2..8311af318abc48624e6903668920bf13e341eeb9 100644 (file)
@@ -468,7 +468,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
 
 static int nf_conntrack_standalone_init_sysctl(struct net *net)
 {
-       struct ctl_table *table;
+       ctl_table_no_const *table;
 
        table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
                        GFP_KERNEL);
index 7a394df0deb7686fa7fe4da162ff77d3c03435c5..bd91a8aa4edb2f58328fb1cac280e2523da038b0 100644 (file)
@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
 #ifdef CONFIG_SYSCTL
 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
 {
-       struct ctl_table *table;
+       ctl_table_no_const *table;
 
        table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
                        GFP_KERNEL);
index 43c926cae9c00de1306144abcddcf64f510c2864..a5731d8f76f9a2abbe476690a1325815b396d522 100644 (file)
@@ -362,7 +362,7 @@ static const struct file_operations nflog_file_ops = {
 
 #ifdef CONFIG_SYSCTL
 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
 
 static int nf_log_proc_dostring(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -393,13 +393,15 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
                rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
                mutex_unlock(&nf_log_mutex);
        } else {
+               ctl_table_no_const nf_log_table = *table;
+
                mutex_lock(&nf_log_mutex);
                logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
                if (!logger)
-                       table->data = "NONE";
+                       nf_log_table.data = "NONE";
                else
-                       table->data = logger->name;
-               r = proc_dostring(table, write, buffer, lenp, ppos);
+                       nf_log_table.data = logger->name;
+               r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
                mutex_unlock(&nf_log_mutex);
        }
 
index c68c1e58b3628930495c5fe5a24f00f15adc1537..8b5d6705c79e0ac3f64a30ec434d63c858845473 100644 (file)
@@ -43,7 +43,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
                }
        }
 
-       list_add(&reg->list, &nf_sockopts);
+       pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
 out:
        mutex_unlock(&nf_sockopt_mutex);
        return ret;
@@ -53,7 +53,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
 {
        mutex_lock(&nf_sockopt_mutex);
-       list_del(&reg->list);
+       pax_list_del((struct list_head *)&reg->list);
        mutex_unlock(&nf_sockopt_mutex);
 }
 EXPORT_SYMBOL(nf_unregister_sockopt);
index 11d85b3813f26ae87f465d7f4d09ae36a6d0167e..7fcc420e8b50d89c6fb9b7c962d1a464242ae1af 100644 (file)
@@ -83,7 +83,7 @@ static int nfnl_log_net_id __read_mostly;
 struct nfnl_log_net {
        spinlock_t instances_lock;
        struct hlist_head instance_table[INSTANCE_BUCKETS];
-       atomic_t global_seq;
+       atomic_unchecked_t global_seq;
 };
 
 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
@@ -563,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
        /* global sequence number */
        if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
            nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
-                        htonl(atomic_inc_return(&log->global_seq))))
+                        htonl(atomic_inc_return_unchecked(&log->global_seq))))
                goto nla_put_failure;
 
        if (data_len) {
index 178696852bde39d25bb5fdcaa7961511e6b4ec25..1ec6dac21a4f9988fa25c4959cb87f86bcea1a17 100644 (file)
@@ -869,11 +869,11 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
 {
        struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
 
-       hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net);
+       hashlimit_net->ipt_hashlimit = proc_mkdir_restrict("ipt_hashlimit", net->proc_net);
        if (!hashlimit_net->ipt_hashlimit)
                return -ENOMEM;
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-       hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net);
+       hashlimit_net->ip6t_hashlimit = proc_mkdir_restrict("ip6t_hashlimit", net->proc_net);
        if (!hashlimit_net->ip6t_hashlimit) {
                remove_proc_entry("ipt_hashlimit", net->proc_net);
                return -ENOMEM;
index a9faae89f95533a53da4651efb2f737e3a0952b4..1ea30e002e5a9eea85b6b59b44564bc7ab028148 100644 (file)
@@ -615,7 +615,7 @@ static int __net_init recent_proc_net_init(struct net *net)
 {
        struct recent_net *recent_net = recent_pernet(net);
 
-       recent_net->xt_recent = proc_mkdir("xt_recent", net->proc_net);
+       recent_net->xt_recent = proc_mkdir_restrict("xt_recent", net->proc_net);
        if (!recent_net->xt_recent)
                return -ENOMEM;
        return 0;
index 11de55e7a868950c6b9ad5bbfa0a0561eb8787d7..f25e448afda20a6f5df3d04be747c1b204df78cd 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/module.h>
 
 struct xt_statistic_priv {
-       atomic_t count;
+       atomic_unchecked_t count;
 } ____cacheline_aligned_in_smp;
 
 MODULE_LICENSE("GPL");
@@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
                break;
        case XT_STATISTIC_MODE_NTH:
                do {
-                       oval = atomic_read(&info->master->count);
+                       oval = atomic_read_unchecked(&info->master->count);
                        nval = (oval == info->u.nth.every) ? 0 : oval + 1;
-               } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
+               } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
                if (nval == 0)
                        ret = !ret;
                break;
@@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
        info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
        if (info->master == NULL)
                return -ENOMEM;
-       atomic_set(&info->master->count, info->u.nth.count);
+       atomic_set_unchecked(&info->master->count, info->u.nth.count);
 
        return 0;
 }
index 75532efa51cd6d54389366b0f4cc7a4de34e0fbc..7a461cdf465b56f388e92c4732a404d0a493bf52 100644 (file)
@@ -273,7 +273,7 @@ static void netlink_overrun(struct sock *sk)
                        sk->sk_error_report(sk);
                }
        }
-       atomic_inc(&sk->sk_drops);
+       atomic_inc_unchecked(&sk->sk_drops);
 }
 
 static void netlink_rcv_wake(struct sock *sk)
@@ -3007,7 +3007,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
                           sk_wmem_alloc_get(s),
                           nlk->cb_running,
                           atomic_read(&s->sk_refcnt),
-                          atomic_read(&s->sk_drops),
+                          atomic_read_unchecked(&s->sk_drops),
                           sock_i_ino(s)
                        );
 
index 6a55f71055051957685b11e8950ba1e4197dce20..e88c9374a9da11dd0b847c9669164e4dd4850e78 100644 (file)
@@ -124,7 +124,7 @@ static const struct net_device_ops internal_dev_netdev_ops = {
        .ndo_get_stats64 = internal_dev_get_stats,
 };
 
-static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
+static struct rtnl_link_ops internal_dev_link_ops = {
        .kind = "openvswitch",
 };
 
index 2034c6d9cb5a51f3094747e768c93522024bea96..1a24f035c4bcb784a43cd4d47acf080ac837ea2a 100644 (file)
@@ -305,10 +305,10 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
         * netdev-stats can be directly read over netlink-ioctl.
         */
 
-       stats->rx_errors  = atomic_long_read(&vport->err_stats.rx_errors);
-       stats->tx_errors  = atomic_long_read(&vport->err_stats.tx_errors);
-       stats->tx_dropped = atomic_long_read(&vport->err_stats.tx_dropped);
-       stats->rx_dropped = atomic_long_read(&vport->err_stats.rx_dropped);
+       stats->rx_errors  = atomic_long_read_unchecked(&vport->err_stats.rx_errors);
+       stats->tx_errors  = atomic_long_read_unchecked(&vport->err_stats.tx_errors);
+       stats->tx_dropped = atomic_long_read_unchecked(&vport->err_stats.tx_dropped);
+       stats->rx_dropped = atomic_long_read_unchecked(&vport->err_stats.rx_dropped);
 
        for_each_possible_cpu(i) {
                const struct pcpu_sw_netstats *percpu_stats;
@@ -539,19 +539,19 @@ static void ovs_vport_record_error(struct vport *vport,
 {
        switch (err_type) {
        case VPORT_E_RX_DROPPED:
-               atomic_long_inc(&vport->err_stats.rx_dropped);
+               atomic_long_inc_unchecked(&vport->err_stats.rx_dropped);
                break;
 
        case VPORT_E_RX_ERROR:
-               atomic_long_inc(&vport->err_stats.rx_errors);
+               atomic_long_inc_unchecked(&vport->err_stats.rx_errors);
                break;
 
        case VPORT_E_TX_DROPPED:
-               atomic_long_inc(&vport->err_stats.tx_dropped);
+               atomic_long_inc_unchecked(&vport->err_stats.tx_dropped);
                break;
 
        case VPORT_E_TX_ERROR:
-               atomic_long_inc(&vport->err_stats.tx_errors);
+               atomic_long_inc_unchecked(&vport->err_stats.tx_errors);
                break;
        }
 
index 8a057d7a86b67e24ca35bf6091847f37d6d2c7ec..b5710c804e4bde5dd5e0821b1a1f0d0ab5befba5 100644 (file)
@@ -71,10 +71,10 @@ int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
 /* The following definitions are for implementers of vport devices: */
 
 struct vport_err_stats {
-       atomic_long_t rx_dropped;
-       atomic_long_t rx_errors;
-       atomic_long_t tx_dropped;
-       atomic_long_t tx_errors;
+       atomic_long_unchecked_t rx_dropped;
+       atomic_long_unchecked_t rx_errors;
+       atomic_long_unchecked_t tx_dropped;
+       atomic_long_unchecked_t tx_errors;
 };
 /**
  * struct vport_portids - array of netlink portids of a vport.
index 9cfe2e1dd8b5099bbac7d824241bd0d06cb01cf7..1b7caf81926f4cdac7afa4634953bb9588b01a28 100644 (file)
@@ -269,7 +269,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
 
        return ret;
 drop:
-       atomic_long_inc(&dev->tx_dropped);
+       atomic_long_inc_unchecked(&dev->tx_dropped);
        kfree_skb(skb);
        return NET_XMIT_DROP;
 }
@@ -1839,7 +1839,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 
        spin_lock(&sk->sk_receive_queue.lock);
        po->stats.stats1.tp_packets++;
-       skb->dropcount = atomic_read(&sk->sk_drops);
+       skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        spin_unlock(&sk->sk_receive_queue.lock);
        sk->sk_data_ready(sk);
@@ -1848,7 +1848,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 drop_n_acct:
        spin_lock(&sk->sk_receive_queue.lock);
        po->stats.stats1.tp_drops++;
-       atomic_inc(&sk->sk_drops);
+       atomic_inc_unchecked(&sk->sk_drops);
        spin_unlock(&sk->sk_receive_queue.lock);
 
 drop_n_restore:
@@ -3485,7 +3485,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
        case PACKET_HDRLEN:
                if (len > sizeof(int))
                        len = sizeof(int);
-               if (copy_from_user(&val, optval, len))
+               if (len > sizeof(val) || copy_from_user(&val, optval, len))
                        return -EFAULT;
                switch (val) {
                case TPACKET_V1:
@@ -3531,7 +3531,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
                len = lv;
        if (put_user(len, optlen))
                return -EFAULT;
-       if (copy_to_user(optval, data, len))
+       if (len > sizeof(st) || copy_to_user(optval, data, len))
                return -EFAULT;
        return 0;
 }
index 5d3f2b7507d45a9b78ba0fd412cc3a8f57ad9178..8a4dbb758e1aef096c1ae5dccb82deb72aad9afd 100644 (file)
@@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
 
        case PNS_PEP_CTRL_REQ:
                if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
-                       atomic_inc(&sk->sk_drops);
+                       atomic_inc_unchecked(&sk->sk_drops);
                        break;
                }
                __skb_pull(skb, 4);
@@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
                }
 
                if (pn->rx_credits == 0) {
-                       atomic_inc(&sk->sk_drops);
+                       atomic_inc_unchecked(&sk->sk_drops);
                        err = -ENOBUFS;
                        break;
                }
@@ -579,7 +579,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
                }
 
                if (pn->rx_credits == 0) {
-                       atomic_inc(&sk->sk_drops);
+                       atomic_inc_unchecked(&sk->sk_drops);
                        err = NET_RX_DROP;
                        break;
                }
index 008214a3d5eb5b90ecbccb3bea9f532a87f630c9..bb68240566cc463bd681bf5d86be0d0f0b248497 100644 (file)
@@ -611,7 +611,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
                        from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
                        sock_i_ino(sk),
                        atomic_read(&sk->sk_refcnt), sk,
-                       atomic_read(&sk->sk_drops));
+                       atomic_read_unchecked(&sk->sk_drops));
        }
        seq_pad(seq, '\n');
        return 0;
index c02a8c4bc11fd319e08608315a3bec864a462c5a..3c5b600204ef05a860eb2f1f3a8f3eff188b0986 100644 (file)
@@ -67,7 +67,7 @@ static int proc_local_port_range(struct ctl_table *table, int write,
 {
        int ret;
        int range[2] = {local_port_range[0], local_port_range[1]};
-       struct ctl_table tmp = {
+       ctl_table_no_const tmp = {
                .data = &range,
                .maxlen = sizeof(range),
                .mode = table->mode,
index e5b65acd650b54622b19aa9b879de6b2425a4a66..f3b6fb7376eb0d78e43b023ff9b62f11024027ba 100644 (file)
@@ -78,7 +78,7 @@
  * finds that the saved generation number is smaller than the global generation
  * number, it wakes up the process.
  */
-static atomic_t                rds_cong_generation = ATOMIC_INIT(0);
+static atomic_unchecked_t              rds_cong_generation = ATOMIC_INIT(0);
 
 /*
  * Congestion monitoring
@@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
        rdsdebug("waking map %p for %pI4\n",
          map, &map->m_addr);
        rds_stats_inc(s_cong_update_received);
-       atomic_inc(&rds_cong_generation);
+       atomic_inc_unchecked(&rds_cong_generation);
        if (waitqueue_active(&map->m_waitq))
                wake_up(&map->m_waitq);
        if (waitqueue_active(&rds_poll_waitq))
@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
 
 int rds_cong_updated_since(unsigned long *recent)
 {
-       unsigned long gen = atomic_read(&rds_cong_generation);
+       unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
 
        if (likely(*recent == gen))
                return 0;
index c36d713229e0f5c5a1b43fe227a1e04480ba100d..af45bf24059de0c7d2da4119ffe9957b6ecf755e 100644 (file)
@@ -128,7 +128,7 @@ struct rds_ib_connection {
        /* sending acks */
        unsigned long           i_ack_flags;
 #ifdef KERNEL_HAS_ATOMIC64
-       atomic64_t              i_ack_next;     /* next ACK to send */
+       atomic64_unchecked_t    i_ack_next;     /* next ACK to send */
 #else
        spinlock_t              i_ack_lock;     /* protect i_ack_next */
        u64                     i_ack_next;     /* next ACK to send */
index 31b74f5e61adbd37535b636b1499c384bdd992f5..dc1fbfa69839e77c1ff3f624d0ccf2cde509f80d 100644 (file)
@@ -717,7 +717,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
        /* Clear the ACK state */
        clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
 #ifdef KERNEL_HAS_ATOMIC64
-       atomic64_set(&ic->i_ack_next, 0);
+       atomic64_set_unchecked(&ic->i_ack_next, 0);
 #else
        ic->i_ack_next = 0;
 #endif
index 1b981a4e42c214d575a838b096da368a7f0316c6..ae44b0cbf5140fb996c3c514318360fc8e0f0adb 100644 (file)
@@ -581,7 +581,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
                                int ack_required)
 {
-       atomic64_set(&ic->i_ack_next, seq);
+       atomic64_set_unchecked(&ic->i_ack_next, seq);
        if (ack_required) {
                smp_mb__before_atomic();
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
@@ -593,7 +593,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        smp_mb__after_atomic();
 
-       return atomic64_read(&ic->i_ack_next);
+       return atomic64_read_unchecked(&ic->i_ack_next);
 }
 #endif
 
index cbe6674e31ee52f6c9fd4380e3f0942127ede7c9..dc9eb89ef96173bf37a6deb70ff3534574a5b95e 100644 (file)
@@ -134,7 +134,7 @@ struct rds_iw_connection {
        /* sending acks */
        unsigned long           i_ack_flags;
 #ifdef KERNEL_HAS_ATOMIC64
-       atomic64_t              i_ack_next;     /* next ACK to send */
+       atomic64_unchecked_t    i_ack_next;     /* next ACK to send */
 #else
        spinlock_t              i_ack_lock;     /* protect i_ack_next */
        u64                     i_ack_next;     /* next ACK to send */
index a91e1db62ee6a1833e65372989b0644d980899e6..cf3053febdb48939c9f4b7e38a78261441cf4c46 100644 (file)
@@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
        /* Clear the ACK state */
        clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
 #ifdef KERNEL_HAS_ATOMIC64
-       atomic64_set(&ic->i_ack_next, 0);
+       atomic64_set_unchecked(&ic->i_ack_next, 0);
 #else
        ic->i_ack_next = 0;
 #endif
index a817705ce2d0e9246388c2c65d77b4c486a8feba..dba8d0864f18046ee87a168d49cc159518fa2916 100644 (file)
@@ -88,7 +88,9 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
                        int *unpinned);
 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
 
-static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
+static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst,
+                            struct rds_iw_device **rds_iwdev,
+                            struct rdma_cm_id **cm_id)
 {
        struct rds_iw_device *iwdev;
        struct rds_iw_cm_id *i_cm_id;
@@ -112,15 +114,15 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd
                                src_addr->sin_port,
                                dst_addr->sin_addr.s_addr,
                                dst_addr->sin_port,
-                               rs->rs_bound_addr,
-                               rs->rs_bound_port,
-                               rs->rs_conn_addr,
-                               rs->rs_conn_port);
+                               src->sin_addr.s_addr,
+                               src->sin_port,
+                               dst->sin_addr.s_addr,
+                               dst->sin_port);
 #ifdef WORKING_TUPLE_DETECTION
-                       if (src_addr->sin_addr.s_addr == rs->rs_bound_addr &&
-                           src_addr->sin_port == rs->rs_bound_port &&
-                           dst_addr->sin_addr.s_addr == rs->rs_conn_addr &&
-                           dst_addr->sin_port == rs->rs_conn_port) {
+                       if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr &&
+                           src_addr->sin_port == src->sin_port &&
+                           dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr &&
+                           dst_addr->sin_port == dst->sin_port) {
 #else
                        /* FIXME - needs to compare the local and remote
                         * ipaddr/port tuple, but the ipaddr is the only
@@ -128,7 +130,7 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd
                         * zero'ed.  It doesn't appear to be properly populated
                         * during connection setup...
                         */
-                       if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) {
+                       if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) {
 #endif
                                spin_unlock_irq(&iwdev->spinlock);
                                *rds_iwdev = iwdev;
@@ -180,19 +182,13 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
 {
        struct sockaddr_in *src_addr, *dst_addr;
        struct rds_iw_device *rds_iwdev_old;
-       struct rds_sock rs;
        struct rdma_cm_id *pcm_id;
        int rc;
 
        src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
        dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
 
-       rs.rs_bound_addr = src_addr->sin_addr.s_addr;
-       rs.rs_bound_port = src_addr->sin_port;
-       rs.rs_conn_addr = dst_addr->sin_addr.s_addr;
-       rs.rs_conn_port = dst_addr->sin_port;
-
-       rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id);
+       rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id);
        if (rc)
                rds_iw_remove_cm_id(rds_iwdev, cm_id);
 
@@ -598,9 +594,17 @@ void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
        struct rds_iw_device *rds_iwdev;
        struct rds_iw_mr *ibmr = NULL;
        struct rdma_cm_id *cm_id;
+       struct sockaddr_in src = {
+               .sin_addr.s_addr = rs->rs_bound_addr,
+               .sin_port = rs->rs_bound_port,
+       };
+       struct sockaddr_in dst = {
+               .sin_addr.s_addr = rs->rs_conn_addr,
+               .sin_port = rs->rs_conn_port,
+       };
        int ret;
 
-       ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id);
+       ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id);
        if (ret || !cm_id) {
                ret = -ENODEV;
                goto out;
index a66d1794b2d0472e511a179ae9872c2766fb8dd8..cf1e2582692bb33ada05082791f91e1658ad0b33 100644 (file)
@@ -412,7 +412,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
                                int ack_required)
 {
-       atomic64_set(&ic->i_ack_next, seq);
+       atomic64_set_unchecked(&ic->i_ack_next, seq);
        if (ack_required) {
                smp_mb__before_atomic();
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
@@ -424,7 +424,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        smp_mb__after_atomic();
 
-       return atomic64_read(&ic->i_ack_next);
+       return atomic64_read_unchecked(&ic->i_ack_next);
 }
 #endif
 
index c2a5eef41343c816f70e6dc16fc7a2fa9ea2d684..90ba1fd23612ba9500d0b70845a292400f2fa7c7 100644 (file)
@@ -448,7 +448,7 @@ struct rds_transport {
        void (*sync_mr)(void *trans_private, int direction);
        void (*free_mr)(void *trans_private, int invalidate);
        void (*flush_mrs)(void);
-};
+} __do_const;
 
 struct rds_sock {
        struct sock             rs_sk;
index edac9ef2bc8b1c2060a2030deb4225d14c2fcc89..16bcb988b26f80d2757830c108b66139db9f7d09 100644 (file)
@@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
        int val = 1;
 
        set_fs(KERNEL_DS);
-       sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
+       sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
                              sizeof(val));
        set_fs(oldfs);
 }
index 53b17ca0dff5a5618d92b2f6aeb876284707959b..45463e7804f56fd714db4d32f2248ee6dec033dd 100644 (file)
@@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
 
        oldfs = get_fs();
        set_fs(KERNEL_DS);
-       sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
+       sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
                              sizeof(val));
        set_fs(oldfs);
 }
index 7b1670489638e565c7ccea71961f8532986c94cb..9628ec6bfd7d61b67a3d02c046e7701eac11933f 100644 (file)
@@ -40,7 +40,7 @@ static const struct proto_ops rxrpc_rpc_ops;
 __be32 rxrpc_epoch;
 
 /* current debugging ID */
-atomic_t rxrpc_debug_id;
+atomic_unchecked_t rxrpc_debug_id;
 
 /* count of skbs currently in use */
 atomic_t rxrpc_n_skbs;
index c6be17a959a6e4981ecfff38af85805df6d8b26e..2a6200ae9d39e7ba999e1b8c372b06103b5a8b99 100644 (file)
@@ -223,7 +223,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
 
        _enter("{%d,%d,%d,%d},",
               call->acks_hard, call->acks_unacked,
-              atomic_read(&call->sequence),
+              atomic_read_unchecked(&call->sequence),
               CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
 
        stop = 0;
@@ -247,7 +247,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
 
                        /* each Tx packet has a new serial number */
                        sp->hdr.serial =
-                               htonl(atomic_inc_return(&call->conn->serial));
+                               htonl(atomic_inc_return_unchecked(&call->conn->serial));
 
                        hdr = (struct rxrpc_header *) txb->head;
                        hdr->serial = sp->hdr.serial;
@@ -451,7 +451,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
  */
 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
 {
-       rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
+       rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
 }
 
 /*
@@ -677,7 +677,7 @@ process_further:
 
                latest = ntohl(sp->hdr.serial);
                hard = ntohl(ack.firstPacket);
-               tx = atomic_read(&call->sequence);
+               tx = atomic_read_unchecked(&call->sequence);
 
                _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
                       latest,
@@ -1209,7 +1209,7 @@ void rxrpc_process_call(struct work_struct *work)
        goto maybe_reschedule;
 
 send_ACK_with_skew:
-       ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
+       ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
                            ntohl(ack.serial));
 send_ACK:
        mtu = call->conn->trans->peer->if_mtu;
@@ -1221,7 +1221,7 @@ send_ACK:
        ackinfo.rxMTU   = htonl(rxrpc_rx_mtu);
        ackinfo.jumbo_max = htonl(rxrpc_rx_jumbo_max);
 
-       hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
+       hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
        _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
               ntohl(hdr.serial),
               ntohs(ack.maxSkew),
@@ -1239,7 +1239,7 @@ send_ACK:
 send_message:
        _debug("send message");
 
-       hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
+       hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
        _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
 send_message_2:
 
index a9e05db0f5d5900e93f87a8567e7533a1745c82a..194e793104887d07b1921d2e99a6126a6e6dc5be 100644 (file)
@@ -232,7 +232,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
        spin_lock_init(&call->lock);
        rwlock_init(&call->state_lock);
        atomic_set(&call->usage, 1);
-       call->debug_id = atomic_inc_return(&rxrpc_debug_id);
+       call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
        call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
 
        memset(&call->sock_node, 0xed, sizeof(call->sock_node));
index 6631f4f1e39be713029c8b9b504db4ea741fb3e6..bfdf056e076254e898eff4ae55efd136e268bb51 100644 (file)
@@ -210,7 +210,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
                rwlock_init(&conn->lock);
                spin_lock_init(&conn->state_lock);
                atomic_set(&conn->usage, 1);
-               conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
+               conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
                conn->avail_calls = RXRPC_MAXCALLS;
                conn->size_align = 4;
                conn->header_size = sizeof(struct rxrpc_header);
index e7ed43a54c41f4e60461af753a68407dfb153bd6..6afa1409dea8faf0e54a2eda61c344ee558e7272 100644 (file)
@@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
 
        len = iov[0].iov_len + iov[1].iov_len;
 
-       hdr.serial = htonl(atomic_inc_return(&conn->serial));
+       hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
        _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
 
        ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
index 481f89f93789a147fd5979e894e62145f4d9d767..ceeaf8ded913db20ff024b111d57b22347c469e8 100644 (file)
@@ -331,9 +331,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
        /* track the latest serial number on this connection for ACK packet
         * information */
        serial = ntohl(sp->hdr.serial);
-       hi_serial = atomic_read(&call->conn->hi_serial);
+       hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
        while (serial > hi_serial)
-               hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
+               hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
                                           serial);
 
        /* request ACK generation for any ACK or DATA packet that requests
index ba9fd36d3f156452c470a93a0b04e428672daa35..9bbfe014e0d6267511506d8d0fa5bbfc6dd1f377 100644 (file)
@@ -272,8 +272,8 @@ struct rxrpc_connection {
        int                     error;          /* error code for local abort */
        int                     debug_id;       /* debug ID for printks */
        unsigned int            call_counter;   /* call ID counter */
-       atomic_t                serial;         /* packet serial number counter */
-       atomic_t                hi_serial;      /* highest serial number received */
+       atomic_unchecked_t      serial;         /* packet serial number counter */
+       atomic_unchecked_t      hi_serial;      /* highest serial number received */
        u8                      avail_calls;    /* number of calls available */
        u8                      size_align;     /* data size alignment (for security) */
        u8                      header_size;    /* rxrpc + security header size */
@@ -346,7 +346,7 @@ struct rxrpc_call {
        spinlock_t              lock;
        rwlock_t                state_lock;     /* lock for state transition */
        atomic_t                usage;
-       atomic_t                sequence;       /* Tx data packet sequence counter */
+       atomic_unchecked_t      sequence;       /* Tx data packet sequence counter */
        u32                     abort_code;     /* local/remote abort code */
        enum {                                  /* current state of call */
                RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
@@ -431,7 +431,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
  */
 extern atomic_t rxrpc_n_skbs;
 extern __be32 rxrpc_epoch;
-extern atomic_t rxrpc_debug_id;
+extern atomic_unchecked_t rxrpc_debug_id;
 extern struct workqueue_struct *rxrpc_workqueue;
 
 /*
index 87f7135d238b498f9208543cf4608c1cb78a59d3..74d3703782541d510a9bb676fa92b912f74bbf78 100644 (file)
@@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
                spin_lock_init(&local->lock);
                rwlock_init(&local->services_lock);
                atomic_set(&local->usage, 1);
-               local->debug_id = atomic_inc_return(&rxrpc_debug_id);
+               local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
                memcpy(&local->srx, srx, sizeof(*srx));
        }
 
index e1a9373e59799fd2a9cd998fbdc4399d2d021f6a..a67f207b3aecb7f0bb80461ab66f35ac85f84bc6 100644 (file)
@@ -680,14 +680,14 @@ static int rxrpc_send_data(struct kiocb *iocb,
                                        memset(skb_put(skb, pad), 0, pad);
                        }
 
-                       seq = atomic_inc_return(&call->sequence);
+                       seq = atomic_inc_return_unchecked(&call->sequence);
 
                        sp->hdr.epoch = conn->epoch;
                        sp->hdr.cid = call->cid;
                        sp->hdr.callNumber = call->call_id;
                        sp->hdr.seq = htonl(seq);
                        sp->hdr.serial =
-                               htonl(atomic_inc_return(&conn->serial));
+                               htonl(atomic_inc_return_unchecked(&conn->serial));
                        sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
                        sp->hdr.userStatus = 0;
                        sp->hdr.securityIndex = conn->security_ix;
index bebaa43484bcdbf72bbaaa764559051aeb8efb99..26445919452ffa960a30e7b9a01fbf40e48333d4 100644 (file)
@@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
                INIT_LIST_HEAD(&peer->error_targets);
                spin_lock_init(&peer->lock);
                atomic_set(&peer->usage, 1);
-               peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
+               peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
                memcpy(&peer->srx, srx, sizeof(*srx));
 
                rxrpc_assess_MTU_size(peer);
index 38047f713f2cf0fb2b60261039c475bd7fe19f7e..9f48511a5a0face574ebc5c4ef9f75340f1f6d29 100644 (file)
@@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
                   atomic_read(&conn->usage),
                   rxrpc_conn_states[conn->state],
                   key_serial(conn->key),
-                  atomic_read(&conn->serial),
-                  atomic_read(&conn->hi_serial));
+                  atomic_read_unchecked(&conn->serial),
+                  atomic_read_unchecked(&conn->hi_serial));
 
        return 0;
 }
index 1976dec84f297cfb126df6bcd53129f1d518001b..aa70be54fecfab69cab2e70de71f9dad84c93e0d 100644 (file)
@@ -51,7 +51,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
                spin_lock_init(&trans->client_lock);
                rwlock_init(&trans->conn_lock);
                atomic_set(&trans->usage, 1);
-               trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
+               trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
 
                if (peer->srx.transport.family == AF_INET) {
                        switch (peer->srx.transport_type) {
index f226709ebd8f159b4fef700307728accb4d2bf66..0e735a8fbcbf154853ccd716ba4334d937f36040 100644 (file)
@@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
 
        len = iov[0].iov_len + iov[1].iov_len;
 
-       hdr.serial = htonl(atomic_inc_return(&conn->serial));
+       hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
        _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
 
        ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
@@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
 
        len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
 
-       hdr->serial = htonl(atomic_inc_return(&conn->serial));
+       hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
        _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
 
        ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
index 6efca30894aad4294824d2ba64f732239a921517..1259f82bce84985ab5ff6e1fedc1e29c0257c2d3 100644 (file)
@@ -349,7 +349,7 @@ void netif_carrier_on(struct net_device *dev)
        if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
-               atomic_inc(&dev->carrier_changes);
+               atomic_inc_unchecked(&dev->carrier_changes);
                linkwatch_fire_event(dev);
                if (netif_running(dev))
                        __netdev_watchdog_up(dev);
@@ -368,7 +368,7 @@ void netif_carrier_off(struct net_device *dev)
        if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
-               atomic_inc(&dev->carrier_changes);
+               atomic_inc_unchecked(&dev->carrier_changes);
                linkwatch_fire_event(dev);
        }
 }
index 0e4198ee237086e01e546432b38ee49d046837a1..f94193e25fef2036411f7300eb60281988a3c0fb 100644 (file)
@@ -972,7 +972,7 @@ static const struct inet6_protocol sctpv6_protocol = {
        .flags        = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
 };
 
-static struct sctp_af sctp_af_inet6 = {
+static struct sctp_af sctp_af_inet6 __read_only = {
        .sa_family         = AF_INET6,
        .sctp_xmit         = sctp_v6_xmit,
        .setsockopt        = ipv6_setsockopt,
@@ -1002,7 +1002,7 @@ static struct sctp_af sctp_af_inet6 = {
 #endif
 };
 
-static struct sctp_pf sctp_pf_inet6 = {
+static struct sctp_pf sctp_pf_inet6 __read_only = {
        .event_msgname = sctp_inet6_event_msgname,
        .skb_msgname   = sctp_inet6_skb_msgname,
        .af_supported  = sctp_inet6_af_supported,
@@ -1029,7 +1029,7 @@ void sctp_v6_pf_init(void)
 
 void sctp_v6_pf_exit(void)
 {
-       list_del(&sctp_af_inet6.list);
+       pax_list_del(&sctp_af_inet6.list);
 }
 
 /* Initialize IPv6 support and register with socket layer.  */
index 8f34b27d5775f053ffde8a763f724c0d8b4f6e1f..b10538527f3986370607f87931ec74dd5903f2d4 100644 (file)
@@ -836,8 +836,10 @@ int sctp_register_af(struct sctp_af *af)
                return 0;
        }
 
+       pax_open_kernel();
        INIT_LIST_HEAD(&af->list);
-       list_add_tail(&af->list, &sctp_address_families);
+       pax_close_kernel();
+       pax_list_add_tail(&af->list, &sctp_address_families);
        return 1;
 }
 
@@ -967,7 +969,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
 
 static struct sctp_af sctp_af_inet;
 
-static struct sctp_pf sctp_pf_inet = {
+static struct sctp_pf sctp_pf_inet __read_only = {
        .event_msgname = sctp_inet_event_msgname,
        .skb_msgname   = sctp_inet_skb_msgname,
        .af_supported  = sctp_inet_af_supported,
@@ -1039,7 +1041,7 @@ static const struct net_protocol sctp_protocol = {
 };
 
 /* IPv4 address related functions.  */
-static struct sctp_af sctp_af_inet = {
+static struct sctp_af sctp_af_inet __read_only = {
        .sa_family         = AF_INET,
        .sctp_xmit         = sctp_v4_xmit,
        .setsockopt        = ip_setsockopt,
@@ -1123,7 +1125,7 @@ static void sctp_v4_pf_init(void)
 
 static void sctp_v4_pf_exit(void)
 {
-       list_del(&sctp_af_inet.list);
+       pax_list_del(&sctp_af_inet.list);
 }
 
 static int sctp_v4_protosw_init(void)
index fef2acdf4a2e675c55dc9fbf2124d132499b89e3..c705c4f5be06ddcedc0b7baebcff1a60afb142ef 100644 (file)
@@ -439,7 +439,7 @@ static void sctp_generate_sack_event(unsigned long data)
        sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
 }
 
-sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
+sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
        NULL,
        sctp_generate_t1_cookie_event,
        sctp_generate_t1_init_event,
index aafe94bf292e73ecb765a31ae3456c3c11fe932f..40b016f032cce8ecf14cbe819486e95969ffec9c 100644 (file)
@@ -2205,11 +2205,13 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
 {
        struct sctp_association *asoc;
        struct sctp_ulpevent *event;
+       struct sctp_event_subscribe subscribe;
 
        if (optlen > sizeof(struct sctp_event_subscribe))
                return -EINVAL;
-       if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
+       if (copy_from_user(&subscribe, optval, optlen))
                return -EFAULT;
+       sctp_sk(sk)->subscribe = subscribe;
 
        if (sctp_sk(sk)->subscribe.sctp_data_io_event)
                pr_warn_ratelimited(DEPRECATED "%s (pid %d) "
@@ -4378,13 +4380,16 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
                                  int __user *optlen)
 {
+       struct sctp_event_subscribe subscribe;
+
        if (len <= 0)
                return -EINVAL;
        if (len > sizeof(struct sctp_event_subscribe))
                len = sizeof(struct sctp_event_subscribe);
        if (put_user(len, optlen))
                return -EFAULT;
-       if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
+       subscribe = sctp_sk(sk)->subscribe;
+       if (copy_to_user(optval, &subscribe, len))
                return -EFAULT;
        return 0;
 }
@@ -4402,6 +4407,8 @@ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
  */
 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
 {
+       __u32 autoclose;
+
        /* Applicable to UDP-style socket only */
        if (sctp_style(sk, TCP))
                return -EOPNOTSUPP;
@@ -4410,7 +4417,8 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
        len = sizeof(int);
        if (put_user(len, optlen))
                return -EFAULT;
-       if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
+       autoclose = sctp_sk(sk)->autoclose;
+       if (copy_to_user(optval, &autoclose, sizeof(int)))
                return -EFAULT;
        return 0;
 }
@@ -4784,12 +4792,15 @@ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
  */
 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
 {
+       struct sctp_initmsg initmsg;
+
        if (len < sizeof(struct sctp_initmsg))
                return -EINVAL;
        len = sizeof(struct sctp_initmsg);
        if (put_user(len, optlen))
                return -EFAULT;
-       if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
+       initmsg = sctp_sk(sk)->initmsg;
+       if (copy_to_user(optval, &initmsg, len))
                return -EFAULT;
        return 0;
 }
@@ -4830,6 +4841,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
                              ->addr_to_user(sp, &temp);
                if (space_left < addrlen)
                        return -ENOMEM;
+               if (addrlen > sizeof(temp) || addrlen < 0)
+                       return -EFAULT;
                if (copy_to_user(to, &temp, addrlen))
                        return -EFAULT;
                to += addrlen;
index 2e9ada10fd846c10bc6281c30f29cf1fed02ca3c..40f425d02808cc6fb19cc8d09ed699b4569221ba 100644 (file)
@@ -321,7 +321,7 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
                                loff_t *ppos)
 {
        struct net *net = current->nsproxy->net_ns;
-       struct ctl_table tbl;
+       ctl_table_no_const tbl;
        bool changed = false;
        char *none = "none";
        char tmp[8];
@@ -369,7 +369,7 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
        struct net *net = current->nsproxy->net_ns;
        unsigned int min = *(unsigned int *) ctl->extra1;
        unsigned int max = *(unsigned int *) ctl->extra2;
-       struct ctl_table tbl;
+       ctl_table_no_const tbl;
        int ret, new_value;
 
        memset(&tbl, 0, sizeof(struct ctl_table));
@@ -398,7 +398,7 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
        struct net *net = current->nsproxy->net_ns;
        unsigned int min = *(unsigned int *) ctl->extra1;
        unsigned int max = *(unsigned int *) ctl->extra2;
-       struct ctl_table tbl;
+       ctl_table_no_const tbl;
        int ret, new_value;
 
        memset(&tbl, 0, sizeof(struct ctl_table));
@@ -436,7 +436,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
                             loff_t *ppos)
 {
        struct net *net = current->nsproxy->net_ns;
-       struct ctl_table tbl;
+       ctl_table_no_const tbl;
        int new_value, ret;
 
        memset(&tbl, 0, sizeof(struct ctl_table));
@@ -463,7 +463,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
 
 int sctp_sysctl_net_register(struct net *net)
 {
-       struct ctl_table *table;
+       ctl_table_no_const *table;
        int i;
 
        table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
index 418795caa8979fd0eb5d19e4d1fe3376fc0e9b20..c33fa46c623083844f5a1f7f87fc176c54cc99bf 100644 (file)
@@ -89,6 +89,7 @@
 #include <linux/magic.h>
 #include <linux/slab.h>
 #include <linux/xattr.h>
+#include <linux/in.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include <linux/atalk.h>
 #include <net/busy_poll.h>
 #include <linux/errqueue.h>
+#include <linux/grsock.h>
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
 unsigned int sysctl_net_busy_read __read_mostly;
@@ -162,7 +164,7 @@ static const struct file_operations socket_file_ops = {
  */
 
 static DEFINE_SPINLOCK(net_family_lock);
-static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
+const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
 
 /*
  *     Statistics counters of the socket lists
@@ -328,7 +330,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
                &sockfs_dentry_operations, SOCKFS_MAGIC);
 }
 
-static struct vfsmount *sock_mnt __read_mostly;
+struct vfsmount *sock_mnt __read_mostly;
 
 static struct file_system_type sock_fs_type = {
        .name =         "sockfs",
@@ -1235,6 +1237,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
                return -EAFNOSUPPORT;
        if (type < 0 || type >= SOCK_MAX)
                return -EINVAL;
+       if (protocol < 0)
+               return -EINVAL;
 
        /* Compatibility.
 
@@ -1255,6 +1259,20 @@ int __sock_create(struct net *net, int family, int type, int protocol,
        if (err)
                return err;
 
+       if(!kern && !gr_search_socket(family, type, protocol)) {
+               if (rcu_access_pointer(net_families[family]) == NULL)
+                       return -EAFNOSUPPORT;
+               else
+                       return -EACCES;
+       }
+
+       if (!kern && gr_handle_sock_all(family, type, protocol)) {
+               if (rcu_access_pointer(net_families[family]) == NULL)
+                       return -EAFNOSUPPORT;
+               else
+                       return -EACCES;
+       }
+
        /*
         *      Allocate the socket and allow the family to set things up. if
         *      the protocol is 0, the family is instructed to select an appropriate
@@ -1506,6 +1524,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
        if (sock) {
                err = move_addr_to_kernel(umyaddr, addrlen, &address);
                if (err >= 0) {
+                       if (gr_handle_sock_server((struct sockaddr *)&address)) {
+                               err = -EACCES;
+                               goto error;
+                       }
+                       err = gr_search_bind(sock, (struct sockaddr_in *)&address);
+                       if (err)
+                               goto error;
+
                        err = security_socket_bind(sock,
                                                   (struct sockaddr *)&address,
                                                   addrlen);
@@ -1514,6 +1540,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
                                                      (struct sockaddr *)
                                                      &address, addrlen);
                }
+error:
                fput_light(sock->file, fput_needed);
        }
        return err;
@@ -1537,10 +1564,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
                if ((unsigned int)backlog > somaxconn)
                        backlog = somaxconn;
 
+               if (gr_handle_sock_server_other(sock->sk)) {
+                       err = -EPERM;
+                       goto error;
+               }
+
+               err = gr_search_listen(sock);
+               if (err)
+                       goto error;
+
                err = security_socket_listen(sock, backlog);
                if (!err)
                        err = sock->ops->listen(sock, backlog);
 
+error:
                fput_light(sock->file, fput_needed);
        }
        return err;
@@ -1584,6 +1621,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
        newsock->type = sock->type;
        newsock->ops = sock->ops;
 
+       if (gr_handle_sock_server_other(sock->sk)) {
+               err = -EPERM;
+               sock_release(newsock);
+               goto out_put;
+       }
+
+       err = gr_search_accept(sock);
+       if (err) {
+               sock_release(newsock);
+               goto out_put;
+       }
+
        /*
         * We don't need try_module_get here, as the listening socket (sock)
         * has the protocol module (sock->ops->owner) held.
@@ -1629,6 +1678,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
        fd_install(newfd, newfile);
        err = newfd;
 
+       gr_attach_curr_ip(newsock->sk);
+
 out_put:
        fput_light(sock->file, fput_needed);
 out:
@@ -1661,6 +1712,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
                int, addrlen)
 {
        struct socket *sock;
+       struct sockaddr *sck;
        struct sockaddr_storage address;
        int err, fput_needed;
 
@@ -1671,6 +1723,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
        if (err < 0)
                goto out_put;
 
+       sck = (struct sockaddr *)&address;
+
+       if (gr_handle_sock_client(sck)) {
+               err = -EACCES;
+               goto out_put;
+       }
+
+       err = gr_search_connect(sock, (struct sockaddr_in *)sck);
+       if (err)
+               goto out_put;
+
        err =
            security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
        if (err)
@@ -1752,6 +1815,8 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
  *     the protocol.
  */
 
+asmlinkage long sys_sendto(int, void __user *, size_t, unsigned, struct sockaddr __user *, int);
+
 SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
                unsigned int, flags, struct sockaddr __user *, addr,
                int, addr_len)
@@ -1765,6 +1830,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
 
        if (len > INT_MAX)
                len = INT_MAX;
+       if (unlikely(!access_ok(VERIFY_READ, buff, len)))
+               return -EFAULT;
        sock = sockfd_lookup_light(fd, &err, &fput_needed);
        if (!sock)
                goto out;
@@ -1817,12 +1884,14 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
        struct socket *sock;
        struct iovec iov;
        struct msghdr msg;
-       struct sockaddr_storage address;
+       struct sockaddr_storage address = { };
        int err, err2;
        int fput_needed;
 
        if (size > INT_MAX)
                size = INT_MAX;
+       if (unlikely(!access_ok(VERIFY_WRITE, ubuf, size)))
+               return -EFAULT;
        sock = sockfd_lookup_light(fd, &err, &fput_needed);
        if (!sock)
                goto out;
@@ -2065,7 +2134,7 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
                 * checking falls down on this.
                 */
                if (copy_from_user(ctl_buf,
-                                  (void __user __force *)msg_sys->msg_control,
+                                  (void __force_user *)msg_sys->msg_control,
                                   ctl_len))
                        goto out_freectl;
                msg_sys->msg_control = ctl_buf;
@@ -2216,7 +2285,7 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
        ssize_t err;
 
        /* kernel mode address */
-       struct sockaddr_storage addr;
+       struct sockaddr_storage addr = { };
 
        /* user mode address pointers */
        struct sockaddr __user *uaddr;
@@ -2862,7 +2931,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
        ifr = compat_alloc_user_space(buf_size);
        rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
 
-       if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
+       if (copy_in_user(ifr->ifr_name, ifr32->ifr_name, IFNAMSIZ))
                return -EFAULT;
 
        if (put_user(convert_in ? rxnfc : compat_ptr(data),
@@ -2973,7 +3042,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
                old_fs = get_fs();
                set_fs(KERNEL_DS);
                err = dev_ioctl(net, cmd,
-                               (struct ifreq __user __force *) &kifr);
+                               (struct ifreq __force_user *) &kifr);
                set_fs(old_fs);
 
                return err;
@@ -3066,7 +3135,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
 
        old_fs = get_fs();
        set_fs(KERNEL_DS);
-       err = dev_ioctl(net, cmd, (void  __user __force *)&ifr);
+       err = dev_ioctl(net, cmd, (void  __force_user *)&ifr);
        set_fs(old_fs);
 
        if (cmd == SIOCGIFMAP && !err) {
@@ -3150,7 +3219,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
                ret |= get_user(rtdev, &(ur4->rt_dev));
                if (rtdev) {
                        ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
-                       r4.rt_dev = (char __user __force *)devname;
+                       r4.rt_dev = (char __force_user *)devname;
                        devname[15] = 0;
                } else
                        r4.rt_dev = NULL;
@@ -3377,8 +3446,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
        int __user *uoptlen;
        int err;
 
-       uoptval = (char __user __force *) optval;
-       uoptlen = (int __user __force *) optlen;
+       uoptval = (char __force_user *) optval;
+       uoptlen = (int __force_user *) optlen;
 
        set_fs(KERNEL_DS);
        if (level == SOL_SOCKET)
@@ -3398,7 +3467,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
        char __user *uoptval;
        int err;
 
-       uoptval = (char __user __force *) optval;
+       uoptval = (char __force_user *) optval;
 
        set_fs(KERNEL_DS);
        if (level == SOL_SOCKET)
index 224a82f24d3c75e60c702bd89215b7934ded1ea8..7a42b5168b5dc796bee79d1834595c6df1f49b58 100644 (file)
@@ -1140,7 +1140,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
                                uint64_t *handle)
 {
        struct rsc rsci, *rscp = NULL;
-       static atomic64_t ctxhctr;
+       static atomic64_unchecked_t ctxhctr = ATOMIC64_INIT(0);
        long long ctxh;
        struct gss_api_mech *gm = NULL;
        time_t expiry;
@@ -1151,7 +1151,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
        status = -ENOMEM;
        /* the handle needs to be just a unique id,
         * use a static counter */
-       ctxh = atomic64_inc_return(&ctxhctr);
+       ctxh = atomic64_inc_return_unchecked(&ctxhctr);
 
        /* make a copy for the caller */
        *handle = ctxh;
index 5199bb1a017e47b1b7503caf8075191dac1ade57..567a54ce9a02484ae90ef78b11c993c91500e43c 100644 (file)
@@ -1595,7 +1595,7 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
        struct sunrpc_net *sn;
 
        sn = net_generic(net, sunrpc_net_id);
-       cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc);
+       cd->u.procfs.proc_ent = proc_mkdir_restrict(cd->name, sn->proc_net_rpc);
        if (cd->u.procfs.proc_ent == NULL)
                goto out_nomem;
        cd->u.procfs.channel_ent = NULL;
index 3f5d4d48f0cbd28d407ce71f239bf2538c98bd0b..554f997d75a91033da70a65238900476193222a1 100644 (file)
@@ -1438,7 +1438,9 @@ call_start(struct rpc_task *task)
                        (RPC_IS_ASYNC(task) ? "async" : "sync"));
 
        /* Increment call count */
-       task->tk_msg.rpc_proc->p_count++;
+       pax_open_kernel();
+       (*(unsigned int *)&task->tk_msg.rpc_proc->p_count)++;
+       pax_close_kernel();
        clnt->cl_stats->rpccnt++;
        task->tk_action = call_reserve;
 }
index d20f2329eea3f4e05f5ff679368ed82a281ff991..ec9cd2894bdfffe5117ef9b3719d993e368ba00b 100644 (file)
@@ -261,9 +261,9 @@ static int rpc_wait_bit_killable(struct wait_bit_key *key)
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
 static void rpc_task_set_debuginfo(struct rpc_task *task)
 {
-       static atomic_t rpc_pid;
+       static atomic_unchecked_t rpc_pid;
 
-       task->tk_pid = atomic_inc_return(&rpc_pid);
+       task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
 }
 #else
 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
index 9711a155bc50071995402518d1ddd56a78b24a2a..fb0eb4d5b1f104cd8bc95aed7a2efed63ad470da 100644 (file)
@@ -278,7 +278,7 @@ int rpc_proc_init(struct net *net)
 
        dprintk("RPC:       registering /proc/net/rpc\n");
        sn = net_generic(net, sunrpc_net_id);
-       sn->proc_net_rpc = proc_mkdir("rpc", net->proc_net);
+       sn->proc_net_rpc = proc_mkdir_restrict("rpc", net->proc_net);
        if (sn->proc_net_rpc == NULL)
                return -ENOMEM;
 
index 91eaef1844c8f09ba77d350786a346919df05725..08434d4cecf0e1e8c760db6ab125b224ce7fd5a1 100644 (file)
@@ -1169,7 +1169,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
        svc_putnl(resv, RPC_SUCCESS);
 
        /* Bump per-procedure stats counter */
-       procp->pc_count++;
+       pax_open_kernel();
+       (*(unsigned int *)&procp->pc_count)++;
+       pax_close_kernel();
 
        /* Initialize storage for argp and resp */
        memset(rqstp->rq_argp, 0, procp->pc_argsize);
index 621ca7b4a1552e393f332fda279be25d7f082152..59421dd30d138aff6439d3e1f075492d60846784 100644 (file)
@@ -414,7 +414,7 @@ struct unix_gid {
        struct group_info       *gi;
 };
 
-static int unix_gid_hash(kuid_t uid)
+static int __intentional_overflow(-1) unix_gid_hash(kuid_t uid)
 {
        return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS);
 }
@@ -470,7 +470,7 @@ static void unix_gid_request(struct cache_detail *cd,
        (*bpp)[-1] = '\n';
 }
 
-static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid);
+static struct unix_gid * __intentional_overflow(-1) unix_gid_lookup(struct cache_detail *cd, kuid_t uid);
 
 static int unix_gid_parse(struct cache_detail *cd,
                        char *mesg, int mlen)
index c1b6270262c297f8af8c902c40eec32aa6eae215..05089c12679e768a92ab5bc82ba0d33ee6914388 100644 (file)
@@ -62,15 +62,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
 static unsigned int min_max_inline = 4096;
 static unsigned int max_max_inline = 65536;
 
-atomic_t rdma_stat_recv;
-atomic_t rdma_stat_read;
-atomic_t rdma_stat_write;
-atomic_t rdma_stat_sq_starve;
-atomic_t rdma_stat_rq_starve;
-atomic_t rdma_stat_rq_poll;
-atomic_t rdma_stat_rq_prod;
-atomic_t rdma_stat_sq_poll;
-atomic_t rdma_stat_sq_prod;
+atomic_unchecked_t rdma_stat_recv;
+atomic_unchecked_t rdma_stat_read;
+atomic_unchecked_t rdma_stat_write;
+atomic_unchecked_t rdma_stat_sq_starve;
+atomic_unchecked_t rdma_stat_rq_starve;
+atomic_unchecked_t rdma_stat_rq_poll;
+atomic_unchecked_t rdma_stat_rq_prod;
+atomic_unchecked_t rdma_stat_sq_poll;
+atomic_unchecked_t rdma_stat_sq_prod;
 
 /* Temporary NFS request map and context caches */
 struct kmem_cache *svc_rdma_map_cachep;
@@ -110,7 +110,7 @@ static int read_reset_stat(struct ctl_table *table, int write,
                len -= *ppos;
                if (len > *lenp)
                        len = *lenp;
-               if (len && copy_to_user(buffer, str_buf, len))
+               if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
                        return -EFAULT;
                *lenp = len;
                *ppos += len;
@@ -151,63 +151,63 @@ static struct ctl_table svcrdma_parm_table[] = {
        {
                .procname       = "rdma_stat_read",
                .data           = &rdma_stat_read,
-               .maxlen         = sizeof(atomic_t),
+               .maxlen         = sizeof(atomic_unchecked_t),
                .mode           = 0644,
                .proc_handler   = read_reset_stat,
        },
        {
                .procname       = "rdma_stat_recv",
                .data           = &rdma_stat_recv,
-               .maxlen         = sizeof(atomic_t),
+               .maxlen         = sizeof(atomic_unchecked_t),
                .mode           = 0644,
                .proc_handler   = read_reset_stat,
        },
        {
                .procname       = "rdma_stat_write",
                .data           = &rdma_stat_write,
-               .maxlen         = sizeof(atomic_t),
+               .maxlen         = sizeof(atomic_unchecked_t),
                .mode           = 0644,
                .proc_handler   = read_reset_stat,
        },
        {
                .procname       = "rdma_stat_sq_starve",
                .data           = &rdma_stat_sq_starve,
-               .maxlen         = sizeof(atomic_t),
+               .maxlen         = sizeof(atomic_unchecked_t),
                .mode           = 0644,
                .proc_handler   = read_reset_stat,
        },
        {
                .procname       = "rdma_stat_rq_starve",
                .data           = &rdma_stat_rq_starve,
-               .maxlen         = sizeof(atomic_t),
+               .maxlen         = sizeof(atomic_unchecked_t),
                .mode           = 0644,
                .proc_handler   = read_reset_stat,
        },
        {
                .procname       = "rdma_stat_rq_poll",
                .data           = &rdma_stat_rq_poll,
-               .maxlen         = sizeof(atomic_t),
+               .maxlen         = sizeof(atomic_unchecked_t),
                .mode           = 0644,
                .proc_handler   = read_reset_stat,
        },
        {
                .procname       = "rdma_stat_rq_prod",
                .data           = &rdma_stat_rq_prod,
-               .maxlen         = sizeof(atomic_t),
+               .maxlen         = sizeof(atomic_unchecked_t),
                .mode           = 0644,
                .proc_handler   = read_reset_stat,
        },
        {
                .procname       = "rdma_stat_sq_poll",
                .data           = &rdma_stat_sq_poll,
-               .maxlen         = sizeof(atomic_t),
+               .maxlen         = sizeof(atomic_unchecked_t),
                .mode           = 0644,
                .proc_handler   = read_reset_stat,
        },
        {
                .procname       = "rdma_stat_sq_prod",
                .data           = &rdma_stat_sq_prod,
-               .maxlen         = sizeof(atomic_t),
+               .maxlen         = sizeof(atomic_unchecked_t),
                .mode           = 0644,
                .proc_handler   = read_reset_stat,
        },
index e0110270d650cfe51ed47f4efaaad24c7de5b6f7..9d3c4e1f6aa9f1b27e797ee3c32e1a1c912025eb 100644 (file)
@@ -220,7 +220,7 @@ static int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
        *page_no = pg_no;
        *page_offset = pg_off;
        ret = read;
-       atomic_inc(&rdma_stat_read);
+       atomic_inc_unchecked(&rdma_stat_read);
        return ret;
  err:
        svc_rdma_unmap_dma(ctxt);
@@ -356,7 +356,7 @@ static int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
        *page_no = pg_no;
        *page_offset = pg_off;
        ret = read;
-       atomic_inc(&rdma_stat_read);
+       atomic_inc_unchecked(&rdma_stat_read);
        return ret;
  err:
        svc_rdma_unmap_dma(ctxt);
@@ -540,7 +540,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
                                  dto_q);
                list_del_init(&ctxt->dto_q);
        } else {
-               atomic_inc(&rdma_stat_rq_starve);
+               atomic_inc_unchecked(&rdma_stat_rq_starve);
                clear_bit(XPT_DATA, &xprt->xpt_flags);
                ctxt = NULL;
        }
@@ -559,7 +559,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
        dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
                ctxt, rdma_xprt, rqstp, ctxt->wc_status);
        BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
-       atomic_inc(&rdma_stat_recv);
+       atomic_inc_unchecked(&rdma_stat_recv);
 
        /* Build up the XDR from the receive buffers. */
        rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
index 9f1b50689c0f06e593652b6decde76f9a134ce28..2e0b32173be719bdc2511496735985e8d7a7945a 100644 (file)
@@ -208,7 +208,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
        write_wr.wr.rdma.remote_addr = to;
 
        /* Post It */
-       atomic_inc(&rdma_stat_write);
+       atomic_inc_unchecked(&rdma_stat_write);
        if (svc_rdma_send(xprt, &write_wr))
                goto err;
        return write_len - bc;
index 4e618808bc98d2960f16f4d195b2194e947374d0..1f0d96336f5cc724c0010a1177c4d7de4b64d70d 100644 (file)
@@ -295,7 +295,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
                return;
 
        ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
-       atomic_inc(&rdma_stat_rq_poll);
+       atomic_inc_unchecked(&rdma_stat_rq_poll);
 
        while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
                ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
@@ -317,7 +317,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
        }
 
        if (ctxt)
-               atomic_inc(&rdma_stat_rq_prod);
+               atomic_inc_unchecked(&rdma_stat_rq_prod);
 
        set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
        /*
@@ -392,7 +392,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
                return;
 
        ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
-       atomic_inc(&rdma_stat_sq_poll);
+       atomic_inc_unchecked(&rdma_stat_sq_poll);
        while ((ret = ib_poll_cq(cq, ARRAY_SIZE(wc_a), wc_a)) > 0) {
                int i;
 
@@ -420,7 +420,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
        }
 
        if (ctxt)
-               atomic_inc(&rdma_stat_sq_prod);
+               atomic_inc_unchecked(&rdma_stat_sq_prod);
 }
 
 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
@@ -1264,7 +1264,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
                spin_lock_bh(&xprt->sc_lock);
                if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
                        spin_unlock_bh(&xprt->sc_lock);
-                       atomic_inc(&rdma_stat_sq_starve);
+                       atomic_inc_unchecked(&rdma_stat_sq_starve);
 
                        /* See if we can opportunistically reap SQ WR to make room */
                        sq_cq_reap(xprt);
index e7000be321b0148469264524ed6fce75c3952955..e3b0ba726df5d7d524f2ae9b4e13831ed2608a8b 100644 (file)
@@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_header *head,
        kgid_t root_gid = make_kgid(net->user_ns, 0);
 
        /* Allow network administrator to have same access as root. */
-       if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
+       if (ns_capable_nolog(net->user_ns, CAP_NET_ADMIN) ||
            uid_eq(root_uid, current_euid())) {
                int mode = (table->mode >> 6) & 7;
                return (mode << 6) | (mode << 3) | mode;
index 0344206b984f7f168a742726e8ad853940ff9cec..bff8abf0d33f62264a8beeb48886621d35222521 100644 (file)
@@ -96,7 +96,7 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
        struct tipc_subscriber *subscriber = sub->subscriber;
        struct kvec msg_sect;
 
-       msg_sect.iov_base = (void *)&sub->evt;
+       msg_sect.iov_base = &sub->evt;
        msg_sect.iov_len = sizeof(struct tipc_event);
        sub->evt.event = htohl(event, sub->swap);
        sub->evt.found_lower = htohl(found_lower, sub->swap);
index 8e1b10274b02702345abba0b4b458d8319f2f841..900d70fcfc26a7129ce232d74b4a0095197f4788 100644 (file)
@@ -791,6 +791,12 @@ static struct sock *unix_find_other(struct net *net,
                err = -ECONNREFUSED;
                if (!S_ISSOCK(inode->i_mode))
                        goto put_fail;
+
+               if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
+                       err = -EACCES;
+                       goto put_fail;
+               }
+
                u = unix_find_socket_byinode(inode);
                if (!u)
                        goto put_fail;
@@ -811,6 +817,13 @@ static struct sock *unix_find_other(struct net *net,
                if (u) {
                        struct dentry *dentry;
                        dentry = unix_sk(u)->path.dentry;
+
+                       if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
+                               err = -EPERM;
+                               sock_put(u);
+                               goto fail;
+                       }
+
                        if (dentry)
                                touch_atime(&unix_sk(u)->path);
                } else
@@ -844,12 +857,18 @@ static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
         */
        err = security_path_mknod(&path, dentry, mode, 0);
        if (!err) {
+               if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
+                       err = -EACCES;
+                       goto out;
+               }
                err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
                if (!err) {
                        res->mnt = mntget(path.mnt);
                        res->dentry = dget(dentry);
+                       gr_handle_create(dentry, path.mnt);
                }
        }
+out:
        done_path_create(&path, dentry);
        return err;
 }
@@ -2248,11 +2267,14 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
        writable = unix_writable(sk);
        other = unix_peer_get(sk);
        if (other) {
-               if (unix_peer(other) != sk) {
+               unix_state_lock(other);
+               if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) != sk) {
+                       unix_state_unlock(other);
                        sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
                        if (unix_recvq_full(other))
                                writable = 0;
-               }
+               } else
+                       unix_state_unlock(other);
                sock_put(other);
        }
 
@@ -2349,9 +2371,13 @@ static int unix_seq_show(struct seq_file *seq, void *v)
                seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
                         "Inode Path\n");
        else {
-               struct sock *s = v;
+               struct sock *s = v, *peer;
                struct unix_sock *u = unix_sk(s);
                unix_state_lock(s);
+               peer = unix_peer(s);
+               unix_state_unlock(s);
+
+               unix_state_double_lock(s, peer);
 
                seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
                        s,
@@ -2378,8 +2404,10 @@ static int unix_seq_show(struct seq_file *seq, void *v)
                        }
                        for ( ; i < len; i++)
                                seq_putc(seq, u->addr->name->sun_path[i]);
-               }
-               unix_state_unlock(s);
+               } else if (peer)
+                       seq_printf(seq, " P%lu", sock_i_ino(peer));
+
+               unix_state_double_unlock(s, peer);
                seq_putc(seq, '\n');
        }
 
index b3d515021b74314f6859867f0a3d749450b2cf5d..ff3a8378bdcfc6db0846c404c530ac829567f911 100644 (file)
@@ -28,7 +28,7 @@ static struct ctl_table unix_table[] = {
 
 int __net_init unix_sysctl_register(struct net *net)
 {
-       struct ctl_table *table;
+       ctl_table_no_const *table;
 
        table = kmemdup(unix_table, sizeof(unix_table), GFP_KERNEL);
        if (table == NULL)
index 9b7f207f2bee08770675f603a2ac20926937f06c..2596621533bda285d4c80aa9e6a2f154e786ec6d 100644 (file)
@@ -662,19 +662,19 @@ static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk)
 
 /* Socket control packet based operations. */
 struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops = {
-       vmci_transport_notify_pkt_socket_init,
-       vmci_transport_notify_pkt_socket_destruct,
-       vmci_transport_notify_pkt_poll_in,
-       vmci_transport_notify_pkt_poll_out,
-       vmci_transport_notify_pkt_handle_pkt,
-       vmci_transport_notify_pkt_recv_init,
-       vmci_transport_notify_pkt_recv_pre_block,
-       vmci_transport_notify_pkt_recv_pre_dequeue,
-       vmci_transport_notify_pkt_recv_post_dequeue,
-       vmci_transport_notify_pkt_send_init,
-       vmci_transport_notify_pkt_send_pre_block,
-       vmci_transport_notify_pkt_send_pre_enqueue,
-       vmci_transport_notify_pkt_send_post_enqueue,
-       vmci_transport_notify_pkt_process_request,
-       vmci_transport_notify_pkt_process_negotiate,
+       .socket_init = vmci_transport_notify_pkt_socket_init,
+       .socket_destruct = vmci_transport_notify_pkt_socket_destruct,
+       .poll_in = vmci_transport_notify_pkt_poll_in,
+       .poll_out = vmci_transport_notify_pkt_poll_out,
+       .handle_notify_pkt = vmci_transport_notify_pkt_handle_pkt,
+       .recv_init = vmci_transport_notify_pkt_recv_init,
+       .recv_pre_block = vmci_transport_notify_pkt_recv_pre_block,
+       .recv_pre_dequeue = vmci_transport_notify_pkt_recv_pre_dequeue,
+       .recv_post_dequeue = vmci_transport_notify_pkt_recv_post_dequeue,
+       .send_init = vmci_transport_notify_pkt_send_init,
+       .send_pre_block = vmci_transport_notify_pkt_send_pre_block,
+       .send_pre_enqueue = vmci_transport_notify_pkt_send_pre_enqueue,
+       .send_post_enqueue = vmci_transport_notify_pkt_send_post_enqueue,
+       .process_request = vmci_transport_notify_pkt_process_request,
+       .process_negotiate = vmci_transport_notify_pkt_process_negotiate,
 };
index dc9c7929a2f959f533e198c6e0c178571f2abd9d..3089de06ccc82aa25af6b4404455ff18e1f6637b 100644 (file)
@@ -420,19 +420,19 @@ vmci_transport_notify_pkt_send_pre_enqueue(
 
 /* Socket always on control packet based operations. */
 struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops = {
-       vmci_transport_notify_pkt_socket_init,
-       vmci_transport_notify_pkt_socket_destruct,
-       vmci_transport_notify_pkt_poll_in,
-       vmci_transport_notify_pkt_poll_out,
-       vmci_transport_notify_pkt_handle_pkt,
-       vmci_transport_notify_pkt_recv_init,
-       vmci_transport_notify_pkt_recv_pre_block,
-       vmci_transport_notify_pkt_recv_pre_dequeue,
-       vmci_transport_notify_pkt_recv_post_dequeue,
-       vmci_transport_notify_pkt_send_init,
-       vmci_transport_notify_pkt_send_pre_block,
-       vmci_transport_notify_pkt_send_pre_enqueue,
-       vmci_transport_notify_pkt_send_post_enqueue,
-       vmci_transport_notify_pkt_process_request,
-       vmci_transport_notify_pkt_process_negotiate,
+       .socket_init = vmci_transport_notify_pkt_socket_init,
+       .socket_destruct = vmci_transport_notify_pkt_socket_destruct,
+       .poll_in = vmci_transport_notify_pkt_poll_in,
+       .poll_out = vmci_transport_notify_pkt_poll_out,
+       .handle_notify_pkt = vmci_transport_notify_pkt_handle_pkt,
+       .recv_init = vmci_transport_notify_pkt_recv_init,
+       .recv_pre_block = vmci_transport_notify_pkt_recv_pre_block,
+       .recv_pre_dequeue = vmci_transport_notify_pkt_recv_pre_dequeue,
+       .recv_post_dequeue = vmci_transport_notify_pkt_recv_post_dequeue,
+       .send_init = vmci_transport_notify_pkt_send_init,
+       .send_pre_block = vmci_transport_notify_pkt_send_pre_block,
+       .send_pre_enqueue = vmci_transport_notify_pkt_send_pre_enqueue,
+       .send_post_enqueue = vmci_transport_notify_pkt_send_post_enqueue,
+       .process_request = vmci_transport_notify_pkt_process_request,
+       .process_negotiate = vmci_transport_notify_pkt_process_negotiate,
 };
index c8717c1d082e702f9b071c480e873b408b400daf..08539f5ec2e42841e6b21716fd3c7a1a6f16e77c 100644 (file)
@@ -748,8 +748,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
                 */
 
                /* Support for very large requests */
-               if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
-                   (user_length > descr->max_tokens)) {
+               if (user_length > descr->max_tokens) {
                        /* Allow userspace to GET more than max so
                         * we can support any size GET requests.
                         * There is still a limit : -ENOMEM.
@@ -788,22 +787,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
                }
        }
 
-       if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
-               /*
-                * If this is a GET, but not NOMAX, it means that the extra
-                * data is not bounded by userspace, but by max_tokens. Thus
-                * set the length to max_tokens. This matches the extra data
-                * allocation.
-                * The driver should fill it with the number of tokens it
-                * provided, and it may check iwp->length rather than having
-                * knowledge of max_tokens. If the driver doesn't change the
-                * iwp->length, this ioctl just copies back max_token tokens
-                * filled with zeroes. Hopefully the driver isn't claiming
-                * them to be valid data.
-                */
-               iwp->length = descr->max_tokens;
-       }
-
        err = handler(dev, info, (union iwreq_data *) iwp, extra);
 
        iwp->length += essid_compat;
index 43239527a2058007242bd32ae4bf1891f6622252..a06dfe143c675039b57cdc5a83164228730d3e5e 100644 (file)
@@ -70,7 +70,7 @@ static struct ctl_table x25_table[] = {
                .mode =         0644,
                .proc_handler = proc_dointvec,
        },
-       { 0, },
+       { },
 };
 
 void __init x25_register_sysctl(void)
index 0917f047f2cfbf7e2ba86c453187b40db0e0146b..f4e3d8c795cc13f6c78c5a1c19cd37c86f654c86 100644 (file)
@@ -209,7 +209,7 @@ static const struct file_operations x25_seq_forward_fops = {
 
 int __init x25_proc_init(void)
 {
-       if (!proc_mkdir("x25", init_net.proc_net))
+       if (!proc_mkdir_restrict("x25", init_net.proc_net))
                return -ENOMEM;
 
        if (!proc_create("x25/route", S_IRUGO, init_net.proc_net,
index cee479bc655c4f317edb4e90cbcb06ec3b424b81..0ea3567d2083ae15a779e4216c9c35b1e8f1317b 100644 (file)
@@ -331,7 +331,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
 {
        policy->walk.dead = 1;
 
-       atomic_inc(&policy->genid);
+       atomic_inc_unchecked(&policy->genid);
 
        if (del_timer(&policy->polq.hold_timer))
                xfrm_pol_put(policy);
@@ -781,7 +781,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
        else
                hlist_add_head(&policy->bydst, chain);
        __xfrm_policy_link(policy, dir);
-       atomic_inc(&net->xfrm.flow_cache_genid);
+       atomic_inc_unchecked(&net->xfrm.flow_cache_genid);
 
        /* After previous checking, family can either be AF_INET or AF_INET6 */
        if (policy->family == AF_INET)
@@ -1877,7 +1877,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 
        xdst->num_pols = num_pols;
        memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
-       xdst->policy_genid = atomic_read(&pols[0]->genid);
+       xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
 
        return xdst;
 }
@@ -2690,10 +2690,11 @@ void xfrm_garbage_collect(struct net *net)
 }
 EXPORT_SYMBOL(xfrm_garbage_collect);
 
-static void xfrm_garbage_collect_deferred(struct net *net)
+void xfrm_garbage_collect_deferred(struct net *net)
 {
        flow_cache_flush_deferred(net);
 }
+EXPORT_SYMBOL(xfrm_garbage_collect_deferred);
 
 static void xfrm_init_pmtu(struct dst_entry *dst)
 {
@@ -2743,7 +2744,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
                if (xdst->xfrm_genid != dst->xfrm->genid)
                        return 0;
                if (xdst->num_pols > 0 &&
-                   xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
+                   xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
                        return 0;
 
                mtu = dst_mtu(dst->child);
@@ -2831,8 +2832,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                        dst_ops->link_failure = xfrm_link_failure;
                if (likely(dst_ops->neigh_lookup == NULL))
                        dst_ops->neigh_lookup = xfrm_neigh_lookup;
-               if (likely(afinfo->garbage_collect == NULL))
-                       afinfo->garbage_collect = xfrm_garbage_collect_deferred;
                rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
        }
        spin_unlock(&xfrm_policy_afinfo_lock);
@@ -2886,7 +2885,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
                dst_ops->check = NULL;
                dst_ops->negative_advice = NULL;
                dst_ops->link_failure = NULL;
-               afinfo->garbage_collect = NULL;
        }
        return err;
 }
@@ -3283,7 +3281,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
                               sizeof(pol->xfrm_vec[i].saddr));
                        pol->xfrm_vec[i].encap_family = mp->new_family;
                        /* flush bundles */
-                       atomic_inc(&pol->genid);
+                       atomic_inc_unchecked(&pol->genid);
                }
        }
 
index de971b6d38c58310f744988bb970ccb5438ef39b..b843409e30a72d17d5044be7afa1370dbbf4d3a5 100644 (file)
@@ -166,12 +166,14 @@ int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
 
        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;
-       typemap = afinfo->type_map;
+       typemap = (const struct xfrm_type **)afinfo->type_map;
        spin_lock_bh(&xfrm_type_lock);
 
-       if (likely(typemap[type->proto] == NULL))
+       if (likely(typemap[type->proto] == NULL)) {
+               pax_open_kernel();
                typemap[type->proto] = type;
-       else
+               pax_close_kernel();
+       } else
                err = -EEXIST;
        spin_unlock_bh(&xfrm_type_lock);
        xfrm_state_put_afinfo(afinfo);
@@ -187,13 +189,16 @@ int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
 
        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;
-       typemap = afinfo->type_map;
+       typemap = (const struct xfrm_type **)afinfo->type_map;
        spin_lock_bh(&xfrm_type_lock);
 
        if (unlikely(typemap[type->proto] != type))
                err = -ENOENT;
-       else
+       else {
+               pax_open_kernel();
                typemap[type->proto] = NULL;
+               pax_close_kernel();
+       }
        spin_unlock_bh(&xfrm_type_lock);
        xfrm_state_put_afinfo(afinfo);
        return err;
@@ -203,7 +208,6 @@ EXPORT_SYMBOL(xfrm_unregister_type);
 static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
 {
        struct xfrm_state_afinfo *afinfo;
-       const struct xfrm_type **typemap;
        const struct xfrm_type *type;
        int modload_attempted = 0;
 
@@ -211,9 +215,8 @@ retry:
        afinfo = xfrm_state_get_afinfo(family);
        if (unlikely(afinfo == NULL))
                return NULL;
-       typemap = afinfo->type_map;
 
-       type = typemap[proto];
+       type = afinfo->type_map[proto];
        if (unlikely(type && !try_module_get(type->owner)))
                type = NULL;
        if (!type && !modload_attempted) {
@@ -247,7 +250,7 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
                return -EAFNOSUPPORT;
 
        err = -EEXIST;
-       modemap = afinfo->mode_map;
+       modemap = (struct xfrm_mode **)afinfo->mode_map;
        spin_lock_bh(&xfrm_mode_lock);
        if (modemap[mode->encap])
                goto out;
@@ -256,8 +259,10 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
        if (!try_module_get(afinfo->owner))
                goto out;
 
-       mode->afinfo = afinfo;
+       pax_open_kernel();
+       *(const void **)&mode->afinfo = afinfo;
        modemap[mode->encap] = mode;
+       pax_close_kernel();
        err = 0;
 
 out:
@@ -281,10 +286,12 @@ int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
                return -EAFNOSUPPORT;
 
        err = -ENOENT;
-       modemap = afinfo->mode_map;
+       modemap = (struct xfrm_mode **)afinfo->mode_map;
        spin_lock_bh(&xfrm_mode_lock);
        if (likely(modemap[mode->encap] == mode)) {
+               pax_open_kernel();
                modemap[mode->encap] = NULL;
+               pax_close_kernel();
                module_put(mode->afinfo->owner);
                err = 0;
        }
@@ -1505,10 +1512,10 @@ EXPORT_SYMBOL(xfrm_find_acq_byseq);
 u32 xfrm_get_acqseq(void)
 {
        u32 res;
-       static atomic_t acqseq;
+       static atomic_unchecked_t acqseq;
 
        do {
-               res = atomic_inc_return(&acqseq);
+               res = atomic_inc_return_unchecked(&acqseq);
        } while (!res);
 
        return res;
index 05a6e3d9c258c0815e870db6660fda8e4802d31f..6716ec93cc51387a797d0ed957ce44168349da8b 100644 (file)
@@ -42,7 +42,7 @@ static struct ctl_table xfrm_table[] = {
 
 int __net_init xfrm_sysctl_init(struct net *net)
 {
-       struct ctl_table *table;
+       ctl_table_no_const *table;
 
        __xfrm_sysctl_init(net);
 
index edd2794569db96a052579b3700b30ac9335510a4..73c7db9663b3154596616fdafeb79a424d8c040d 100644 (file)
@@ -144,7 +144,7 @@ cc-ifversion = $(shell [ $(call cc-version, $(CC)) $(1) $(2) ] && echo $(3))
 # cc-ldoption
 # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
 cc-ldoption = $(call try-run,\
-       $(CC) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
+       $(CC) $(1) -Wl,-r -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
 
 # ld-option
 # Usage: LDFLAGS += $(call ld-option, -X)
index 649ce68440331cdd5c5446ac76e01a487d2bcf6a..f6bc05cba82bd5030c0b07d48e8f8ffff14d5e79 100644 (file)
@@ -60,7 +60,7 @@ endif
 endif
 
 # Do not include host rules unless needed
-ifneq ($(hostprogs-y)$(hostprogs-m),)
+ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m)$(hostcxxlibs-y)$(hostcxxlibs-m),)
 include scripts/Makefile.host
 endif
 
index 627f8cbbedb88ca29667bbf1f88eb2004d5ee461..dbe0bcac48b08630a2658a4366f528f239c286f6 100644 (file)
@@ -38,7 +38,8 @@ subdir-ymn    := $(addprefix $(obj)/,$(subdir-ymn))
 __clean-files  := $(extra-y) $(extra-m) $(extra-)       \
                   $(always) $(targets) $(clean-files)   \
                   $(host-progs)                         \
-                  $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
+                  $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
+                  $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
 
 __clean-files   := $(filter-out $(no-clean-files), $(__clean-files))
 
index 133edfae5b8a8d7a66f59b9c1ef3130cb63eb928..c9aa07fd65f114a853117e88b59c10fa941c23b7 100644 (file)
 # Will compile qconf as a C++ program, and menu as a C program.
 # They are linked as C++ code to the executable qconf
 
+# hostprogs-y := conf
+# conf-objs  := conf.o libkconfig.so
+# libkconfig-objs := expr.o type.o
+# Will create a shared library named libkconfig.so that consists of
+# expr.o and type.o (they are both compiled as C code and the object files
+# are made as position independent code).
+# conf.c is compiled as a C program, and conf.o is linked together with
+# libkconfig.so as the executable conf.
+# Note: Shared libraries consisting of C++ files are not supported
+
 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
+__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
+__hostcxxlibs := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m))
 
 # C code
 # Executables compiled from a single .c file
@@ -42,6 +54,19 @@ host-cxxmulti        := $(foreach m,$(__hostprogs),$(if $($(m)-cxxobjs),$(m)))
 # C++ Object (.o) files compiled from .cc files
 host-cxxobjs   := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
 
+# Shared libaries (only .c supported)
+# Shared libraries (.so) - all .so files referenced in "xxx-objs"
+host-cshlib    := $(sort $(filter %.so, $(host-cobjs)))
+host-cshlib    += $(sort $(filter %.so, $(__hostlibs)))
+host-cxxshlib  := $(sort $(filter %.so, $(__hostcxxlibs)))
+# Remove .so files from "xxx-objs"
+host-cobjs     := $(filter-out %.so,$(host-cobjs))
+host-cxxobjs   := $(filter-out %.so,$(host-cxxobjs))
+
+# Object (.o) files used by the shared libaries
+host-cshobjs   := $(sort $(foreach m,$(host-cshlib),$($(m:.so=-objs))))
+host-cxxshobjs := $(sort $(foreach m,$(host-cxxshlib),$($(m:.so=-objs))))
+
 # output directory for programs/.o files
 # hostprogs-y := tools/build may have been specified.
 # Retrieve also directory of .o files from prog-objs or prog-cxxobjs notation
@@ -56,6 +81,10 @@ host-cmulti  := $(addprefix $(obj)/,$(host-cmulti))
 host-cobjs     := $(addprefix $(obj)/,$(host-cobjs))
 host-cxxmulti  := $(addprefix $(obj)/,$(host-cxxmulti))
 host-cxxobjs   := $(addprefix $(obj)/,$(host-cxxobjs))
+host-cshlib    := $(addprefix $(obj)/,$(host-cshlib))
+host-cxxshlib  := $(addprefix $(obj)/,$(host-cxxshlib))
+host-cshobjs   := $(addprefix $(obj)/,$(host-cshobjs))
+host-cxxshobjs := $(addprefix $(obj)/,$(host-cxxshobjs))
 host-objdirs    := $(addprefix $(obj)/,$(host-objdirs))
 
 obj-dirs += $(host-objdirs)
@@ -124,5 +153,37 @@ quiet_cmd_host-cxxobjs     = HOSTCXX $@
 $(host-cxxobjs): $(obj)/%.o: $(src)/%.cc FORCE
        $(call if_changed_dep,host-cxxobjs)
 
+# Compile .c file, create position independent .o file
+# host-cshobjs -> .o
+quiet_cmd_host-cshobjs = HOSTCC  -fPIC $@
+      cmd_host-cshobjs = $(HOSTCC) $(hostc_flags) -fPIC -c -o $@ $<
+$(host-cshobjs): $(obj)/%.o: $(src)/%.c FORCE
+       $(call if_changed_dep,host-cshobjs)
+
+# Compile .c file, create position independent .o file
+# host-cxxshobjs -> .o
+quiet_cmd_host-cxxshobjs       = HOSTCXX -fPIC $@
+      cmd_host-cxxshobjs       = $(HOSTCXX) $(hostcxx_flags) -fPIC -c -o $@ $<
+$(host-cxxshobjs): $(obj)/%.o: $(src)/%.c FORCE
+       $(call if_changed_dep,host-cxxshobjs)
+
+# Link a shared library, based on position independent .o files
+# *.o -> .so shared library (host-cshlib)
+quiet_cmd_host-cshlib  = HOSTLLD -shared $@
+      cmd_host-cshlib  = $(HOSTCC) $(HOSTLDFLAGS) -shared -o $@ \
+                         $(addprefix $(obj)/,$($(@F:.so=-objs))) \
+                         $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
+$(host-cshlib): $(obj)/%: $(host-cshobjs) FORCE
+       $(call if_changed,host-cshlib)
+
+# Link a shared library, based on position independent .o files
+# *.o -> .so shared library (host-cxxshlib)
+quiet_cmd_host-cxxshlib        = HOSTLLD -shared $@
+      cmd_host-cxxshlib        = $(HOSTCXX) $(HOSTLDFLAGS) -shared -o $@ \
+                         $(addprefix $(obj)/,$($(@F:.so=-objs))) \
+                         $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
+$(host-cxxshlib): $(obj)/%: $(host-cxxshobjs) FORCE
+       $(call if_changed,host-cxxshlib)
+
 targets += $(host-csingle)  $(host-cmulti) $(host-cobjs)\
-          $(host-cxxmulti) $(host-cxxobjs)
+          $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) $(host-cxxshlib) $(host-cxxshobjs)
index b30406860b7397881e4e6ffeedcda1f5b2959cc0..462d24e648ad7f5c1a4faf571ae4bc3348418d83 100644 (file)
@@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
 /*
  * Lookup a value in the configuration string.
  */
-static int is_defined_config(const char *name, int len, unsigned int hash)
+static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
 {
        struct item *aux;
 
@@ -211,10 +211,10 @@ static void clear_config(void)
 /*
  * Record the use of a CONFIG_* word.
  */
-static void use_config(const char *m, int slen)
+static void use_config(const char *m, unsigned int slen)
 {
        unsigned int hash = strhash(m, slen);
-       int c, i;
+       unsigned int c, i;
 
        if (is_defined_config(m, slen, hash))
            return;
@@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
 
 static void parse_config_file(const char *map, size_t len)
 {
-       const int *end = (const int *) (map + len);
+       const unsigned int *end = (const unsigned int *) (map + len);
        /* start at +1, so that p can never be < map */
-       const int *m   = (const int *) map + 1;
+       const unsigned int *m   = (const unsigned int *) map + 1;
        const char *p, *q;
 
        for (; m < end; m++) {
@@ -435,7 +435,7 @@ static void print_deps(void)
 static void traps(void)
 {
        static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
-       int *p = (int *)test;
+       unsigned int *p = (unsigned int *)test;
 
        if (*p != INT_CONF) {
                fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianness? %#x\n",
index fdebd66f8fc16c2db0189480318c5f8599aebdf0..a349e33a53ad5448fc00a02b1fb24c66a1dd3922 100755 (executable)
@@ -32,6 +32,7 @@ do
        FILE="$(basename "$i")"
        sed -r \
                -e 's/([ \t(])(__user|__force|__iomem)[ \t]/\1/g' \
+               -e 's/__intentional_overflow\([- \t,0-9]*\)//g' \
                -e 's/__attribute_const__([ \t]|$)/\1/g' \
                -e 's@^#include <linux/compiler.h>@@' \
                -e 's/(^|[^a-zA-Z0-9])__packed([^a-zA-Z0-9_]|$)/\1__attribute__((packed))\2/g' \
index 86a4fe75f453735936e3b218f885dcd887216659..99e91f9005a65c49fc9738a924c4176b16d867a6 100755 (executable)
@@ -166,7 +166,7 @@ else
 fi;
 
 # final build of init/
-${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
+${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}" GCC_PLUGINS_AFLAGS="${GCC_PLUGINS_AFLAGS}"
 
 kallsymso=""
 kallsyms_vmlinux=""
index e614ef689eee1f16dc19daf1c00be4460a184f63..d9d2b01971c7a5ac1690331da4514caceffe636f 100644 (file)
@@ -142,7 +142,7 @@ static void device_id_check(const char *modname, const char *device_id,
                            unsigned long size, unsigned long id_size,
                            void *symval)
 {
-       int i;
+       unsigned int i;
 
        if (size % id_size || size < id_size) {
                fatal("%s: sizeof(struct %s_device_id)=%lu is not a modulo "
@@ -171,7 +171,7 @@ static void device_id_check(const char *modname, const char *device_id,
 /* USB is special because the bcdDevice can be matched against a numeric range */
 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipNinN" */
 static void do_usb_entry(void *symval,
-                        unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
+                        unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
                         unsigned char range_lo, unsigned char range_hi,
                         unsigned char max, struct module *mod)
 {
@@ -281,7 +281,7 @@ static void do_usb_entry_multi(void *symval, struct module *mod)
 {
        unsigned int devlo, devhi;
        unsigned char chi, clo, max;
-       int ndigits;
+       unsigned int ndigits;
 
        DEF_FIELD(symval, usb_device_id, match_flags);
        DEF_FIELD(symval, usb_device_id, idVendor);
@@ -534,7 +534,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
        for (i = 0; i < count; i++) {
                DEF_FIELD_ADDR(symval + i*id_size, pnp_device_id, id);
                char acpi_id[sizeof(*id)];
-               int j;
+               unsigned int j;
 
                buf_printf(&mod->dev_table_buf,
                           "MODULE_ALIAS(\"pnp:d%s*\");\n", *id);
@@ -563,7 +563,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
 
                for (j = 0; j < PNP_MAX_DEVICES; j++) {
                        const char *id = (char *)(*devs)[j].id;
-                       int i2, j2;
+                       unsigned int i2, j2;
                        int dup = 0;
 
                        if (!id[0])
@@ -589,7 +589,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
                        /* add an individual alias for every device entry */
                        if (!dup) {
                                char acpi_id[PNP_ID_LEN];
-                               int k;
+                               unsigned int k;
 
                                buf_printf(&mod->dev_table_buf,
                                           "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
@@ -939,7 +939,7 @@ static void dmi_ascii_filter(char *d, const char *s)
 static int do_dmi_entry(const char *filename, void *symval,
                        char *alias)
 {
-       int i, j;
+       unsigned int i, j;
        DEF_FIELD_ADDR(symval, dmi_system_id, matches);
        sprintf(alias, "dmi*");
 
index d439856f81760b7e12497e2167d641120ccb4a39..10c1eacf774930915fd1b4ad239578b1f091863a 100644 (file)
@@ -921,6 +921,7 @@ enum mismatch {
        ANY_INIT_TO_ANY_EXIT,
        ANY_EXIT_TO_ANY_INIT,
        EXPORT_TO_INIT_EXIT,
+       DATA_TO_TEXT
 };
 
 struct sectioncheck {
@@ -1007,6 +1008,12 @@ static const struct sectioncheck sectioncheck[] = {
        .tosec   = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
        .mismatch = EXPORT_TO_INIT_EXIT,
        .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
+},
+/* Do not reference code from writable data */
+{
+       .fromsec = { DATA_SECTIONS, NULL },
+       .tosec   = { TEXT_SECTIONS, NULL },
+       .mismatch = DATA_TO_TEXT
 }
 };
 
@@ -1127,10 +1134,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
                        continue;
                if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
                        continue;
-               if (sym->st_value == addr)
-                       return sym;
                /* Find a symbol nearby - addr are maybe negative */
                d = sym->st_value - addr;
+               if (d == 0)
+                       return sym;
                if (d < 0)
                        d = addr - sym->st_value;
                if (d < distance) {
@@ -1408,6 +1415,14 @@ static void report_sec_mismatch(const char *modname,
                tosym, prl_to, prl_to, tosym);
                free(prl_to);
                break;
+       case DATA_TO_TEXT:
+#if 0
+               fprintf(stderr,
+               "The %s %s:%s references\n"
+               "the %s %s:%s%s\n",
+               from, fromsec, fromsym, to, tosec, tosym, to_p);
+#endif
+               break;
        }
        fprintf(stderr, "\n");
 }
@@ -1659,7 +1674,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
 static void check_sec_ref(struct module *mod, const char *modname,
                          struct elf_info *elf)
 {
-       int i;
+       unsigned int i;
        Elf_Shdr *sechdrs = elf->sechdrs;
 
        /* Walk through all sections */
@@ -1790,7 +1805,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
        va_end(ap);
 }
 
-void buf_write(struct buffer *buf, const char *s, int len)
+void buf_write(struct buffer *buf, const char *s, unsigned int len)
 {
        if (buf->size - buf->pos < len) {
                buf->size += len + SZ;
@@ -2009,7 +2024,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
        if (fstat(fileno(file), &st) < 0)
                goto close_write;
 
-       if (st.st_size != b->pos)
+       if (st.st_size != (off_t)b->pos)
                goto close_write;
 
        tmp = NOFAIL(malloc(b->pos));
index 168b43dc0a59b6be4edea0fc5768fb2d63cfa440..77914bff63bcf095b998d39a0932bc2bcb2961ce 100644 (file)
@@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
 
 struct buffer {
        char *p;
-       int pos;
-       int size;
+       unsigned int pos;
+       unsigned int size;
 };
 
 void __attribute__((format(printf, 2, 3)))
 buf_printf(struct buffer *buf, const char *fmt, ...);
 
 void
-buf_write(struct buffer *buf, const char *s, int len);
+buf_write(struct buffer *buf, const char *s, unsigned int len);
 
 struct module {
        struct module *next;
index 944418da9fe3369efd596e44fd53ffaffc0f80bd..15291e4e3493c1494c7a04fd842f4d0937d32a12 100644 (file)
@@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
                goto out;
        }
 
-       if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
+       if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
                warn("writing sum in %s failed: %s\n",
                        filename, strerror(errno));
                goto out;
index 0865b3e752be8f99b61c927d5cff2a398dd6f8dd..7235dd40ddce2944e5579307eb38e72cbfe554f0 100644 (file)
@@ -6,6 +6,10 @@
 SECTIONS {
        /DISCARD/ : { *(.discard) }
 
+       .rodata : {
+               *(.rodata) *(.rodata.*)
+               *(.data..read_only)
+       }
        __ksymtab               : { *(SORT(___ksymtab+*)) }
        __ksymtab_gpl           : { *(SORT(___ksymtab_gpl+*)) }
        __ksymtab_unused        : { *(SORT(___ksymtab_unused+*)) }
index 99ca6e76eb0a532ffafd0918dca8fa217c88f571..3a1a1a13ced5571791c449b5abf8cac5064e5710 100644 (file)
@@ -46,7 +46,7 @@ rpm-pkg rpm: FORCE
        ln -sf $(srctree) $(KERNELPATH)
        $(CONFIG_SHELL) $(MKSPEC) >$(objtree)/kernel.spec
        $(CONFIG_SHELL) $(srctree)/scripts/setlocalversion --save-scmversion
-       tar -cz $(RCS_TAR_IGNORE) -f $(KERNELPATH).tar.gz $(TAR_CONTENT)
+       tar --owner=root --group=root -cz $(RCS_TAR_IGNORE) -f $(KERNELPATH).tar.gz $(TAR_CONTENT)
        rm $(KERNELPATH)
        rm -f $(objtree)/.scmversion
        $(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version
index 59726243c2ebab1a9263019a5ff8eb759b2fdda7..4028795f9b1fe19630a09c0550c1b26e36fc2882 100755 (executable)
@@ -293,6 +293,7 @@ fi
 (cd $srctree; find arch/$SRCARCH -name module.lds -o -name Kbuild.platforms -o -name Platform) >> "$objtree/debian/hdrsrcfiles"
 (cd $srctree; find $(find arch/$SRCARCH -name include -o -name scripts -type d) -type f) >> "$objtree/debian/hdrsrcfiles"
 (cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f) >> "$objtree/debian/hdrobjfiles"
+(cd $objtree; find tools/gcc -name \*.so >> "$objtree/debian/hdrobjfiles")
 destdir=$kernel_headers_dir/usr/src/linux-headers-$version
 mkdir -p "$destdir"
 (cd $srctree; tar -c -f - -T -) < "$objtree/debian/hdrsrcfiles" | (cd $destdir; tar -xf -)
index d9ab94b17de0bc119a6fbf3958886d1735085fdf..eb7f04cf54bff78d0be35b0eeac2a739ad4582bc 100755 (executable)
@@ -122,30 +122,41 @@ echo 'rm -f $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE/{build,source}"
 echo "mkdir -p "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE"
 echo "EXCLUDES=\"$RCS_TAR_IGNORE --exclude .tmp_versions --exclude=*vmlinux* --exclude=*.o --exclude=*.ko --exclude=*.cmd --exclude=Documentation --exclude=firmware --exclude .config.old --exclude .missing-syscalls.d\""
 echo "tar "'$EXCLUDES'" -cf- . | (cd "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE;tar xvf -)"
-echo 'cd $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE"
-echo "ln -sf /usr/src/kernels/$KERNELRELEASE build"
-echo "ln -sf /usr/src/kernels/$KERNELRELEASE source"
 fi
 
 echo ""
 echo "%clean"
 echo 'rm -rf $RPM_BUILD_ROOT'
 echo ""
+echo "%pre"
+echo 'chmod -f 0500 /boot'
+echo 'if [ -d /lib/modules ]; then'
+echo 'chmod -f 0500 /lib/modules'
+echo 'fi'
+echo 'if [ -d /lib32/modules ]; then'
+echo 'chmod -f 0500 /lib32/modules'
+echo 'fi'
+echo 'if [ -d /lib64/modules ]; then'
+echo 'chmod -f 0500 /lib64/modules'
+echo 'fi'
+echo ""
+echo "%post devel"
+echo "ln -sf /usr/src/kernels/$KERNELRELEASE /lib/modules/$KERNELRELEASE/build"
+echo "ln -sf /usr/src/kernels/$KERNELRELEASE /lib/modules/$KERNELRELEASE/source"
+echo ""
 echo "%post"
-echo "if [ -x /sbin/installkernel -a -r /boot/vmlinuz-$KERNELRELEASE -a -r /boot/System.map-$KERNELRELEASE ]; then"
-echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm"
-echo "cp /boot/System.map-$KERNELRELEASE /boot/System.map-$KERNELRELEASE-rpm"
-echo "rm -f /boot/vmlinuz-$KERNELRELEASE /boot/System.map-$KERNELRELEASE"
-echo "/sbin/installkernel $KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
-echo "rm -f /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
+echo "if [ -x /sbin/dracut ]; then"
+echo '/sbin/new-kernel-pkg --dracut --mkinitrd --depmod --install --make-default '"$KERNELRELEASE"' || exit $?'
+echo "else"
+echo '/sbin/new-kernel-pkg --mkinitrd --depmod --install --make-default '"$KERNELRELEASE"' || exit $?'
 echo "fi"
 echo ""
 echo "%files"
-echo '%defattr (-, root, root)'
+echo '%defattr (400, root, root, 500)'
 echo "%dir /lib/modules"
-echo "/lib/modules/$KERNELRELEASE"
 echo "%exclude /lib/modules/$KERNELRELEASE/build"
 echo "%exclude /lib/modules/$KERNELRELEASE/source"
+echo "/lib/modules/$KERNELRELEASE"
 echo "/lib/firmware/$KERNELRELEASE"
 echo "/boot/*"
 echo ""
@@ -155,9 +166,8 @@ echo "/usr/include"
 echo ""
 if ! $PREBUILT; then
 echo "%files devel"
-echo '%defattr (-, root, root)'
+echo '%defattr (400, root, root, 500)'
+echo "%dir /lib/modules/$KERNELRELEASE"
 echo "/usr/src/kernels/$KERNELRELEASE"
-echo "/lib/modules/$KERNELRELEASE/build"
-echo "/lib/modules/$KERNELRELEASE/source"
 echo ""
 fi
index 4718d7895f0b451e4a529a4e1fcce6a85ec5a4c5..9220d58992d2da81b3e9a77a6f0ada00402e1632 100644 (file)
@@ -244,14 +244,14 @@ static void write_header(void)
     fprintf(out, " *  Linux logo %s\n", logoname);
     fputs(" */\n\n", out);
     fputs("#include <linux/linux_logo.h>\n\n", out);
-    fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
+    fprintf(out, "static unsigned char %s_data[] = {\n",
            logoname);
 }
 
 static void write_footer(void)
 {
     fputs("\n};\n\n", out);
-    fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
+    fprintf(out, "const struct linux_logo %s = {\n", logoname);
     fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
     fprintf(out, "\t.width\t\t= %d,\n", logo_width);
     fprintf(out, "\t.height\t\t= %d,\n", logo_height);
@@ -381,7 +381,7 @@ static void write_logo_clut224(void)
     fputs("\n};\n\n", out);
 
     /* write logo clut */
-    fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
+    fprintf(out, "static unsigned char %s_clut[] = {\n",
            logoname);
     write_hex_cnt = 0;
     for (i = 0; i < logo_clutsize; i++) {
index ba8700428e21dd1612d8c6ae4ddde0e0e964433e..3f4852cdc85cb2c83d9b2e2904cb61e2f2e978f6 100644 (file)
@@ -108,9 +108,9 @@ do_func(Elf_Ehdr *ehdr, char const *const fname, table_sort_t custom_sort)
        const char *secstrtab;
        const char *strtab;
        char *extab_image;
-       int extab_index = 0;
-       int i;
-       int idx;
+       unsigned int extab_index = 0;
+       unsigned int i;
+       unsigned int idx;
        unsigned int num_sections;
        unsigned int secindex_strings;
 
index cdb491d845035e59bff19a5cde7f1ca84c28c17f..8d32bfc2a959dcf541f934d0da19eb31efc872ff 100755 (executable)
@@ -26,7 +26,7 @@ else
 fi
 
 # ignore userspace tools
-ignore="$ignore ( -path ${tree}tools ) -prune -o"
+ignore="$ignore ( -path \"${tree}tools/[^g]*\" ) -prune -o"
 
 # Find all available archs
 find_all_archs()
index beb86b500adffd406e65b26d0cf9abeab3891703..653742f85748488034564a6bd7d8e4f17fd81f32 100644 (file)
@@ -4,6 +4,975 @@
 
 menu "Security options"
 
+menu "Grsecurity"
+
+       config ARCH_TRACK_EXEC_LIMIT
+       bool
+
+       config PAX_KERNEXEC_PLUGIN
+       bool
+
+       config PAX_PER_CPU_PGD
+       bool
+
+       config TASK_SIZE_MAX_SHIFT
+       int
+       depends on X86_64
+       default 47 if !PAX_PER_CPU_PGD
+       default 42 if PAX_PER_CPU_PGD
+
+       config PAX_ENABLE_PAE
+       bool
+       default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
+       
+       config PAX_USERCOPY_SLABS
+       bool
+
+config GRKERNSEC
+       bool "Grsecurity"
+       select CRYPTO
+       select CRYPTO_SHA256
+       select PROC_FS
+       select STOP_MACHINE
+       select TTY
+       select DEBUG_KERNEL
+       select DEBUG_LIST
+       help
+         If you say Y here, you will be able to configure many features
+         that will enhance the security of your system.  It is highly
+         recommended that you say Y here and read through the help
+         for each option so that you fully understand the features and
+         can evaluate their usefulness for your machine.
+
+choice
+       prompt "Configuration Method"
+       depends on GRKERNSEC
+       default GRKERNSEC_CONFIG_CUSTOM
+       help
+
+config GRKERNSEC_CONFIG_AUTO
+       bool "Automatic"
+       help
+         If you choose this configuration method, you'll be able to answer a small
+         number of simple questions about how you plan to use this kernel.
+         The settings of grsecurity and PaX will be automatically configured for
+         the highest commonly-used settings within the provided constraints.
+
+         If you require additional configuration, custom changes can still be made
+         from the "custom configuration" menu.
+
+config GRKERNSEC_CONFIG_CUSTOM
+       bool "Custom"
+       help
+         If you choose this configuration method, you'll be able to configure all
+         grsecurity and PaX settings manually.  Via this method, no options are
+         automatically enabled.
+
+endchoice
+
+choice
+       prompt "Usage Type"
+       depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
+       default GRKERNSEC_CONFIG_SERVER
+       help
+
+config GRKERNSEC_CONFIG_SERVER
+       bool "Server"
+       help
+         Choose this option if you plan to use this kernel on a server.
+
+config GRKERNSEC_CONFIG_DESKTOP
+       bool "Desktop"
+       help
+         Choose this option if you plan to use this kernel on a desktop.
+
+endchoice
+
+choice
+       prompt "Virtualization Type"
+       depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO)
+       default GRKERNSEC_CONFIG_VIRT_NONE
+       help
+
+config GRKERNSEC_CONFIG_VIRT_NONE
+       bool "None"
+       help
+         Choose this option if this kernel will be run on bare metal.
+
+config GRKERNSEC_CONFIG_VIRT_GUEST
+       bool "Guest"
+       help
+         Choose this option if this kernel will be run as a VM guest.
+
+config GRKERNSEC_CONFIG_VIRT_HOST
+       bool "Host"
+       help
+         Choose this option if this kernel will be run as a VM host.
+
+endchoice
+
+choice
+       prompt "Virtualization Hardware"
+       depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
+       help
+
+config GRKERNSEC_CONFIG_VIRT_EPT
+       bool "EPT/RVI Processor Support"
+       depends on X86
+       help
+         Choose this option if your CPU supports the EPT or RVI features of 2nd-gen
+         hardware virtualization.  This allows for additional kernel hardening protections
+         to operate without additional performance impact.
+
+         To see if your Intel processor supports EPT, see:
+         http://ark.intel.com/Products/VirtualizationTechnology
+         (Most Core i3/5/7 support EPT)
+
+         To see if your AMD processor supports RVI, see:
+         http://support.amd.com/us/kbarticles/Pages/GPU120AMDRVICPUsHyperVWin8.aspx
+
+config GRKERNSEC_CONFIG_VIRT_SOFT
+       bool "First-gen/No Hardware Virtualization"
+       help
+         Choose this option if you use an Atom/Pentium/Core 2 processor that either doesn't
+         support hardware virtualization or doesn't support the EPT/RVI extensions.
+
+endchoice
+
+choice
+       prompt "Virtualization Software"
+       depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
+       help
+
+config GRKERNSEC_CONFIG_VIRT_XEN
+       bool "Xen"
+       help
+         Choose this option if this kernel is running as a Xen guest or host.
+
+config GRKERNSEC_CONFIG_VIRT_VMWARE
+       bool "VMWare"
+       help
+         Choose this option if this kernel is running as a VMWare guest or host.
+
+config GRKERNSEC_CONFIG_VIRT_KVM
+       bool "KVM"
+       help
+         Choose this option if this kernel is running as a KVM guest or host.
+
+config GRKERNSEC_CONFIG_VIRT_VIRTUALBOX
+       bool "VirtualBox"
+       help
+         Choose this option if this kernel is running as a VirtualBox guest or host.
+
+config GRKERNSEC_CONFIG_VIRT_HYPERV
+       bool "Hyper-V"
+       help
+         Choose this option if this kernel is running as a Hyper-V guest.
+
+endchoice
+
+choice
+       prompt "Required Priorities"
+       depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
+       default GRKERNSEC_CONFIG_PRIORITY_PERF
+       help
+
+config GRKERNSEC_CONFIG_PRIORITY_PERF
+       bool "Performance"
+       help
+         Choose this option if performance is of highest priority for this deployment
+         of grsecurity.  Features like UDEREF on a 64bit kernel, kernel stack clearing,
+         clearing of structures intended for userland, and freed memory sanitizing will
+         be disabled.
+
+config GRKERNSEC_CONFIG_PRIORITY_SECURITY
+       bool "Security"
+       help
+         Choose this option if security is of highest priority for this deployment of
+         grsecurity.  UDEREF, kernel stack clearing, clearing of structures intended
+         for userland, and freed memory sanitizing will be enabled for this kernel.
+         In a worst-case scenario, these features can introduce a 20% performance hit
+         (UDEREF on x64 contributing half of this hit).
+
+endchoice
+
+menu "Default Special Groups"
+depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
+
+config GRKERNSEC_PROC_GID
+       int "GID exempted from /proc restrictions"
+       default 1001
+       help
+         Setting this GID determines which group will be exempted from
+         grsecurity's /proc restrictions, allowing users of the specified
+         group  to view network statistics and the existence of other users'
+         processes on the system.  This GID may also be chosen at boot time
+         via "grsec_proc_gid=" on the kernel commandline.
+
+config GRKERNSEC_TPE_UNTRUSTED_GID
+        int "GID for TPE-untrusted users"
+        depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
+        default 1005
+        help
+         Setting this GID determines which group untrusted users should
+         be added to.  These users will be placed under grsecurity's Trusted Path
+         Execution mechanism, preventing them from executing their own binaries.
+         The users will only be able to execute binaries in directories owned and
+         writable only by the root user.  If the sysctl option is enabled, a sysctl
+         option with name "tpe_gid" is created.
+
+config GRKERNSEC_TPE_TRUSTED_GID
+        int "GID for TPE-trusted users"
+        depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
+        default 1005
+        help
+          Setting this GID determines what group TPE restrictions will be
+          *disabled* for.  If the sysctl option is enabled, a sysctl option
+          with name "tpe_gid" is created.
+
+config GRKERNSEC_SYMLINKOWN_GID
+        int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
+        depends on GRKERNSEC_CONFIG_SERVER
+        default 1006
+        help
+          Setting this GID determines what group kernel-enforced
+          SymlinksIfOwnerMatch will be enabled for.  If the sysctl option
+          is enabled, a sysctl option with name "symlinkown_gid" is created.
+
+
+endmenu
+
+menu "Customize Configuration"
+depends on GRKERNSEC
+
+menu "PaX"
+
+config PAX
+       bool "Enable various PaX features"
+       default y if GRKERNSEC_CONFIG_AUTO
+       depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
+       help
+         This allows you to enable various PaX features.  PaX adds
+         intrusion prevention mechanisms to the kernel that reduce
+         the risks posed by exploitable memory corruption bugs.
+
+menu "PaX Control"
+       depends on PAX
+
+config PAX_SOFTMODE
+       bool 'Support soft mode'
+       help
+         Enabling this option will allow you to run PaX in soft mode, that
+         is, PaX features will not be enforced by default, only on executables
+         marked explicitly.  You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
+         support as they are the only way to mark executables for soft mode use.
+
+         Soft mode can be activated by using the "pax_softmode=1" kernel command
+         line option on boot.  Furthermore you can control various PaX features
+         at runtime via the entries in /proc/sys/kernel/pax.
+
+config PAX_EI_PAX
+       bool 'Use legacy ELF header marking'
+       default y if GRKERNSEC_CONFIG_AUTO
+       help
+         Enabling this option will allow you to control PaX features on
+         a per executable basis via the 'chpax' utility available at
+         http://pax.grsecurity.net/.  The control flags will be read from
+         an otherwise reserved part of the ELF header.  This marking has
+         numerous drawbacks (no support for soft-mode, toolchain does not
+         know about the non-standard use of the ELF header) therefore it
+         has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
+         support.
+
+         Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
+         support as well, they will override the legacy EI_PAX marks.
+
+         If you enable none of the marking options then all applications
+         will run with PaX enabled on them by default.
+
+config PAX_PT_PAX_FLAGS
+       bool 'Use ELF program header marking'
+       default y if GRKERNSEC_CONFIG_AUTO
+       help
+         Enabling this option will allow you to control PaX features on
+         a per executable basis via the 'paxctl' utility available at
+         http://pax.grsecurity.net/.  The control flags will be read from
+         a PaX specific ELF program header (PT_PAX_FLAGS).  This marking
+         has the benefits of supporting both soft mode and being fully
+         integrated into the toolchain (the binutils patch is available
+         from http://pax.grsecurity.net).
+
+         Note that if you enable the legacy EI_PAX marking support as well,
+         the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
+
+         If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
+         must make sure that the marks are the same if a binary has both marks.
+
+         If you enable none of the marking options then all applications
+         will run with PaX enabled on them by default.
+
+config PAX_XATTR_PAX_FLAGS
+       bool 'Use filesystem extended attributes marking'
+       default y if GRKERNSEC_CONFIG_AUTO
+       select CIFS_XATTR if CIFS
+       select F2FS_FS_XATTR if F2FS_FS
+       select EXT2_FS_XATTR if EXT2_FS
+       select EXT3_FS_XATTR if EXT3_FS
+       select JFFS2_FS_XATTR if JFFS2_FS
+       select REISERFS_FS_XATTR if REISERFS_FS
+       select SQUASHFS_XATTR if SQUASHFS
+       select TMPFS_XATTR if TMPFS
+       help
+         Enabling this option will allow you to control PaX features on
+         a per executable basis via the 'setfattr' utility.  The control
+         flags will be read from the user.pax.flags extended attribute of
+         the file.  This marking has the benefit of supporting binary-only
+         applications that self-check themselves (e.g., skype) and would
+         not tolerate chpax/paxctl changes.  The main drawback is that
+         extended attributes are not supported by some filesystems (e.g.,
+         isofs, udf, vfat) so copying files through such filesystems will
+         lose the extended attributes and these PaX markings.
+
+         Note that if you enable the legacy EI_PAX marking support as well,
+         the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
+
+         If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
+         must make sure that the marks are the same if a binary has both marks.
+
+         If you enable none of the marking options then all applications
+         will run with PaX enabled on them by default.
+
+choice
+       prompt 'MAC system integration'
+       default PAX_HAVE_ACL_FLAGS
+       help
+         Mandatory Access Control systems have the option of controlling
+         PaX flags on a per executable basis, choose the method supported
+         by your particular system.
+
+         - "none": if your MAC system does not interact with PaX,
+         - "direct": if your MAC system defines pax_set_initial_flags() itself,
+         - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
+
+         NOTE: this option is for developers/integrators only.
+
+       config PAX_NO_ACL_FLAGS
+               bool 'none'
+
+       config PAX_HAVE_ACL_FLAGS
+               bool 'direct'
+
+       config PAX_HOOK_ACL_FLAGS
+               bool 'hook'
+endchoice
+
+endmenu
+
+menu "Non-executable pages"
+       depends on PAX
+
+config PAX_NOEXEC
+       bool "Enforce non-executable pages"
+       default y if GRKERNSEC_CONFIG_AUTO
+       depends on ALPHA || (ARM && (CPU_V6 || CPU_V6K || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
+       help
+         By design some architectures do not allow for protecting memory
+         pages against execution or even if they do, Linux does not make
+         use of this feature.  In practice this means that if a page is
+         readable (such as the stack or heap) it is also executable.
+
+         There is a well known exploit technique that makes use of this
+         fact and a common programming mistake where an attacker can
+         introduce code of his choice somewhere in the attacked program's
+         memory (typically the stack or the heap) and then execute it.
+
+         If the attacked program was running with different (typically
+         higher) privileges than that of the attacker, then he can elevate
+         his own privilege level (e.g. get a root shell, write to files for
+         which he does not have write access to, etc).
+
+         Enabling this option will let you choose from various features
+         that prevent the injection and execution of 'foreign' code in
+         a program.
+
+         This will also break programs that rely on the old behaviour and
+         expect that dynamically allocated memory via the malloc() family
+         of functions is executable (which it is not).  Notable examples
+         are the XFree86 4.x server, the java runtime and wine.
+
+config PAX_PAGEEXEC
+       bool "Paging based non-executable pages"
+       default y if GRKERNSEC_CONFIG_AUTO
+       depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
+       select ARCH_TRACK_EXEC_LIMIT if X86_32
+       help
+         This implementation is based on the paging feature of the CPU.
+         On i386 without hardware non-executable bit support there is a
+         variable but usually low performance impact, however on Intel's
+         P4 core based CPUs it is very high so you should not enable this
+         for kernels meant to be used on such CPUs.
+
+         On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
+         with hardware non-executable bit support there is no performance
+         impact, on ppc the impact is negligible.
+
+         Note that several architectures require various emulations due to
+         badly designed userland ABIs, this will cause a performance impact
+         but will disappear as soon as userland is fixed. For example, ppc
+         userland MUST have been built with secure-plt by a recent toolchain.
+
+config PAX_SEGMEXEC
+       bool "Segmentation based non-executable pages"
+       default y if GRKERNSEC_CONFIG_AUTO
+       depends on PAX_NOEXEC && X86_32
+       help
+         This implementation is based on the segmentation feature of the
+         CPU and has a very small performance impact, however applications
+         will be limited to a 1.5 GB address space instead of the normal
+         3 GB.
+
+config PAX_EMUTRAMP
+       bool "Emulate trampolines"
+       default y if PARISC || GRKERNSEC_CONFIG_AUTO
+       depends on (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
+       help
+         There are some programs and libraries that for one reason or
+         another attempt to execute special small code snippets from
+         non-executable memory pages.  Most notable examples are the
+         signal handler return code generated by the kernel itself and
+         the GCC trampolines.
+
+         If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
+         such programs will no longer work under your kernel.
+
+         As a remedy you can say Y here and use the 'chpax' or 'paxctl'
+         utilities to enable trampoline emulation for the affected programs
+         yet still have the protection provided by the non-executable pages.
+
+         On parisc you MUST enable this option and EMUSIGRT as well, otherwise
+         your system will not even boot.
+
+         Alternatively you can say N here and use the 'chpax' or 'paxctl'
+         utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
+         for the affected files.
+
+         NOTE: enabling this feature *may* open up a loophole in the
+         protection provided by non-executable pages that an attacker
+         could abuse.  Therefore the best solution is to not have any
+         files on your system that would require this option.  This can
+         be achieved by not using libc5 (which relies on the kernel
+         signal handler return code) and not using or rewriting programs
+         that make use of the nested function implementation of GCC.
+         Skilled users can just fix GCC itself so that it implements
+         nested function calls in a way that does not interfere with PaX.
+
+config PAX_EMUSIGRT
+       bool "Automatically emulate sigreturn trampolines"
+       depends on PAX_EMUTRAMP && PARISC
+       default y
+       help
+         Enabling this option will have the kernel automatically detect
+         and emulate signal return trampolines executing on the stack
+         that would otherwise lead to task termination.
+
+         This solution is intended as a temporary one for users with
+         legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
+         Modula-3 runtime, etc) or executables linked to such, basically
+         everything that does not specify its own SA_RESTORER function in
+         normal executable memory like glibc 2.1+ does.
+
+         On parisc you MUST enable this option, otherwise your system will
+         not even boot.
+
+         NOTE: this feature cannot be disabled on a per executable basis
+         and since it *does* open up a loophole in the protection provided
+         by non-executable pages, the best solution is to not have any
+         files on your system that would require this option.
+
+config PAX_MPROTECT
+       bool "Restrict mprotect()"
+       default y if GRKERNSEC_CONFIG_AUTO
+       depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
+       help
+         Enabling this option will prevent programs from
+          - changing the executable status of memory pages that were
+            not originally created as executable,
+          - making read-only executable pages writable again,
+          - creating executable pages from anonymous memory,
+          - making read-only-after-relocations (RELRO) data pages writable again.
+
+         You should say Y here to complete the protection provided by
+         the enforcement of non-executable pages.
+
+         NOTE: you can use the 'chpax' or 'paxctl' utilities to control
+         this feature on a per file basis.
+
+config PAX_MPROTECT_COMPAT
+       bool "Use legacy/compat protection demoting (read help)"
+       default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
+       depends on PAX_MPROTECT
+       help
+         The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
+         by sending the proper error code to the application.  For some broken 
+         userland, this can cause problems with Python or other applications.  The
+         current implementation however allows for applications like clamav to
+         detect if JIT compilation/execution is allowed and to fall back gracefully
+         to an interpreter-based mode if it does not.  While we encourage everyone
+         to use the current implementation as-is and push upstream to fix broken
+         userland (note that the RWX logging option can assist with this), in some
+         environments this may not be possible.  Having to disable MPROTECT
+         completely on certain binaries reduces the security benefit of PaX,
+         so this option is provided for those environments to revert to the old
+         behavior.
+
+config PAX_ELFRELOCS
+       bool "Allow ELF text relocations (read help)"
+       depends on PAX_MPROTECT
+       default n
+       help
+         Non-executable pages and mprotect() restrictions are effective
+         in preventing the introduction of new executable code into an
+         attacked task's address space.  There remain only two venues
+         for this kind of attack: if the attacker can execute already
+         existing code in the attacked task then he can either have it
+         create and mmap() a file containing his code or have it mmap()
+         an already existing ELF library that does not have position
+         independent code in it and use mprotect() on it to make it
+         writable and copy his code there.  While protecting against
+         the former approach is beyond PaX, the latter can be prevented
+         by having only PIC ELF libraries on one's system (which do not
+         need to relocate their code).  If you are sure this is your case,
+         as is the case with all modern Linux distributions, then leave
+         this option disabled.  You should say 'n' here.
+
+config PAX_ETEXECRELOCS
+       bool "Allow ELF ET_EXEC text relocations"
+       depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
+       select PAX_ELFRELOCS
+       default y
+       help
+         On some architectures there are incorrectly created applications
+         that require text relocations and would not work without enabling
+         this option.  If you are an alpha, ia64 or parisc user, you should
+         enable this option and disable it once you have made sure that
+         none of your applications need it.
+
+config PAX_EMUPLT
+       bool "Automatically emulate ELF PLT"
+       depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
+       default y
+       help
+         Enabling this option will have the kernel automatically detect
+         and emulate the Procedure Linkage Table entries in ELF files.
+         On some architectures such entries are in writable memory, and
+         become non-executable leading to task termination.  Therefore
+         it is mandatory that you enable this option on alpha, parisc,
+         sparc and sparc64, otherwise your system would not even boot.
+
+         NOTE: this feature *does* open up a loophole in the protection
+         provided by the non-executable pages, therefore the proper
+         solution is to modify the toolchain to produce a PLT that does
+         not need to be writable.
+
+config PAX_DLRESOLVE
+       bool 'Emulate old glibc resolver stub'
+       depends on PAX_EMUPLT && SPARC
+       default n
+       help
+         This option is needed if userland has an old glibc (before 2.4)
+         that puts a 'save' instruction into the runtime generated resolver
+         stub that needs special emulation.
+
+config PAX_KERNEXEC
+       bool "Enforce non-executable kernel pages"
+       default y if GRKERNSEC_CONFIG_AUTO && (!X86 || GRKERNSEC_CONFIG_VIRT_NONE || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_GUEST) || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_KVM))
+       depends on (X86 || (ARM && (CPU_V6 || CPU_V6K || CPU_V7) && !(ARM_LPAE && MODULES))) && !XEN
+       select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
+       select PAX_KERNEXEC_PLUGIN if X86_64
+       select ARM_KERNMEM_PERMS if ARM
+       help
+         This is the kernel land equivalent of PAGEEXEC and MPROTECT,
+         that is, enabling this option will make it harder to inject
+         and execute 'foreign' code in kernel memory itself.
+
+         Note that on amd64, CONFIG_EFI enabled with "efi=old_map" on
+         the kernel command-line will result in an RWX physical map.
+
+         Likewise, the EFI runtime services are necessarily mapped as
+         RWX.  If CONFIG_EFI is enabled on an EFI-capable system, it
+         is recommended that you boot with "noefi" on the kernel
+         command-line if possible to eliminate the mapping.
+
+choice
+       prompt "Return Address Instrumentation Method"
+       default PAX_KERNEXEC_PLUGIN_METHOD_BTS
+       depends on PAX_KERNEXEC_PLUGIN
+       help
+         Select the method used to instrument function return addresses.
+         Note that binary modules cannot be instrumented by this approach.
+
+         Note that the implementation requires a gcc with plugin support,
+         i.e., gcc 4.5 or newer.  You may need to install the supporting
+         headers explicitly in addition to the normal gcc package.
+
+       config PAX_KERNEXEC_PLUGIN_METHOD_BTS
+               bool "bts"
+               help
+                 This method is compatible with binary only modules but has
+                 a higher runtime overhead.
+
+       config PAX_KERNEXEC_PLUGIN_METHOD_OR
+               bool "or"
+               depends on !PARAVIRT
+               help
+                 This method is incompatible with binary only modules but has
+                 a lower runtime overhead.
+endchoice
+
+config PAX_KERNEXEC_PLUGIN_METHOD
+       string
+       default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
+       default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
+       default ""
+
+config PAX_KERNEXEC_MODULE_TEXT
+       int "Minimum amount of memory reserved for module code"
+       default "8" if (!GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_SERVER)
+       default "12" if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
+       depends on PAX_KERNEXEC && X86_32
+       help
+         Due to implementation details the kernel must reserve a fixed
+         amount of memory for runtime allocated code (such as modules)
+         at compile time that cannot be changed at runtime.  Here you
+         can specify the minimum amount in MB that will be reserved.
+         Due to the same implementation details this size will always
+         be rounded up to the next 2/4 MB boundary (depends on PAE) so
+         the actually available memory for runtime allocated code will
+         usually be more than this minimum.
+
+         The default of 8 or 12 MB should be enough for most users but if you have
+         an excessive number of modules (e.g., most distribution configs
+         compile many drivers as modules) or use huge modules such as
+         nvidia's kernel driver, you will need to adjust this amount.
+         A good rule of thumb is to look at your currently loaded kernel
+         modules and add up their sizes.
+
+endmenu
+
+menu "Address Space Layout Randomization"
+       depends on PAX
+
+config PAX_ASLR
+       bool "Address Space Layout Randomization"
+       default y if GRKERNSEC_CONFIG_AUTO
+       help
+         Many if not most exploit techniques rely on the knowledge of
+         certain addresses in the attacked program.  The following options
+         will allow the kernel to apply a certain amount of randomization
+         to specific parts of the program thereby forcing an attacker to
+         guess them in most cases.  Any failed guess will most likely crash
+         the attacked program which allows the kernel to detect such attempts
+         and react on them.  PaX itself provides no reaction mechanisms,
+         instead it is strongly encouraged that you make use of grsecurity's
+         (http://www.grsecurity.net/) built-in crash detection features or
+         develop one yourself.
+
+         By saying Y here you can choose to randomize the following areas:
+          - top of the task's kernel stack
+          - top of the task's userland stack
+          - base address for mmap() requests that do not specify one
+            (this includes all libraries)
+          - base address of the main executable
+
+         It is strongly recommended to say Y here as address space layout
+         randomization has negligible impact on performance yet it provides
+         a very effective protection.
+
+         NOTE: you can use the 'chpax' or 'paxctl' utilities to control
+         this feature on a per file basis.
+
+config PAX_RANDKSTACK
+       bool "Randomize kernel stack base"
+       default y if GRKERNSEC_CONFIG_AUTO && !(GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_VIRTUALBOX)
+       depends on X86_TSC && X86
+       help
+         By saying Y here the kernel will randomize every task's kernel
+         stack on every system call.  This will not only force an attacker
+         to guess it but also prevent him from making use of possible
+         leaked information about it.
+
+         Since the kernel stack is a rather scarce resource, randomization
+         may cause unexpected stack overflows, therefore you should very
+         carefully test your system.  Note that once enabled in the kernel
+         configuration, this feature cannot be disabled on a per file basis.
+
+config PAX_RANDUSTACK
+       bool
+
+config PAX_RANDMMAP
+       bool "Randomize user stack and mmap() bases"
+       default y if GRKERNSEC_CONFIG_AUTO
+       depends on PAX_ASLR
+       select PAX_RANDUSTACK
+       help
+         By saying Y here the kernel will randomize every task's userland
+         stack and use a randomized base address for mmap() requests that
+         do not specify one themselves.
+
+         The stack randomization is done in two steps where the second
+         one may apply a big amount of shift to the top of the stack and
+         cause problems for programs that want to use lots of memory (more
+         than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
+
+         As a result of mmap randomization all dynamically loaded libraries
+         will appear at random addresses and therefore be harder to exploit
+         by a technique where an attacker attempts to execute library code
+         for his purposes (e.g. spawn a shell from an exploited program that
+         is running at an elevated privilege level).
+
+         Furthermore, if a program is relinked as a dynamic ELF file, its
+         base address will be randomized as well, completing the full
+         randomization of the address space layout.  Attacking such programs
+         becomes a guess game.  You can find an example of doing this at
+         http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
+         http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
+
+         NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
+         feature on a per file basis.
+
+endmenu
+
+menu "Miscellaneous hardening features"
+
+config PAX_MEMORY_SANITIZE
+       bool "Sanitize all freed memory"
+       default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
+       help
+         By saying Y here the kernel will erase memory pages and slab objects
+         as soon as they are freed.  This in turn reduces the lifetime of data
+         stored in them, making it less likely that sensitive information such
+         as passwords, cryptographic secrets, etc stay in memory for too long.
+
+         This is especially useful for programs whose runtime is short, long
+         lived processes and the kernel itself benefit from this as long as
+         they ensure timely freeing of memory that may hold sensitive
+         information.
+
+         A nice side effect of the sanitization of slab objects is the
+         reduction of possible info leaks caused by padding bytes within the
+         leaky structures.  Use-after-free bugs for structures containing
+         pointers can also be detected as dereferencing the sanitized pointer
+         will generate an access violation.
+
+         The tradeoff is performance impact, on a single CPU system kernel
+         compilation sees a 3% slowdown, other systems and workloads may vary
+         and you are advised to test this feature on your expected workload
+         before deploying it.
+
+         The slab sanitization feature excludes a few slab caches per default
+         for performance reasons.  To extend the feature to cover those as
+         well, pass "pax_sanitize_slab=full" as kernel command line parameter.
+
+         To reduce the performance penalty by sanitizing pages only, albeit
+         limiting the effectiveness of this feature at the same time, slab
+         sanitization can be disabled with the kernel command line parameter
+         "pax_sanitize_slab=off".
+
+         Note that this feature does not protect data stored in live pages,
+         e.g., process memory swapped to disk may stay there for a long time.
+
+config PAX_MEMORY_STACKLEAK
+       bool "Sanitize kernel stack"
+       default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
+       depends on X86
+       help
+         By saying Y here the kernel will erase the kernel stack before it
+         returns from a system call.  This in turn reduces the information
+         that a kernel stack leak bug can reveal.
+
+         Note that such a bug can still leak information that was put on
+         the stack by the current system call (the one eventually triggering
+         the bug) but traces of earlier system calls on the kernel stack
+         cannot leak anymore.
+
+         The tradeoff is performance impact: on a single CPU system kernel
+         compilation sees a 1% slowdown, other systems and workloads may vary
+         and you are advised to test this feature on your expected workload
+         before deploying it.
+
+         Note that the full feature requires a gcc with plugin support,
+         i.e., gcc 4.5 or newer.  You may need to install the supporting
+         headers explicitly in addition to the normal gcc package.  Using
+         older gcc versions means that functions with large enough stack
+         frames may leave uninitialized memory behind that may be exposed
+         to a later syscall leaking the stack.
+
+config PAX_MEMORY_STRUCTLEAK
+       bool "Forcibly initialize local variables copied to userland"
+       default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
+       help
+         By saying Y here the kernel will zero initialize some local
+         variables that are going to be copied to userland.  This in
+         turn prevents unintended information leakage from the kernel
+         stack should later code forget to explicitly set all parts of
+         the copied variable.
+
+         The tradeoff is less performance impact than PAX_MEMORY_STACKLEAK
+         at a much smaller coverage.
+
+         Note that the implementation requires a gcc with plugin support,
+         i.e., gcc 4.5 or newer.  You may need to install the supporting
+         headers explicitly in addition to the normal gcc package.
+
+config PAX_MEMORY_UDEREF
+       bool "Prevent invalid userland pointer dereference"
+       default y if GRKERNSEC_CONFIG_AUTO && !(X86_64 && GRKERNSEC_CONFIG_PRIORITY_PERF) && (!X86 || GRKERNSEC_CONFIG_VIRT_NONE || GRKERNSEC_CONFIG_VIRT_EPT)
+       depends on (X86 || (ARM && (CPU_V6 || CPU_V6K || CPU_V7) && !ARM_LPAE)) && !UML_X86 && !XEN
+       select PAX_PER_CPU_PGD if X86_64
+       help
+         By saying Y here the kernel will be prevented from dereferencing
+         userland pointers in contexts where the kernel expects only kernel
+         pointers.  This is both a useful runtime debugging feature and a
+         security measure that prevents exploiting a class of kernel bugs.
+
+         The tradeoff is that some virtualization solutions may experience
+         a huge slowdown and therefore you should not enable this feature
+         for kernels meant to run in such environments.  Whether a given VM
+         solution is affected or not is best determined by simply trying it
+         out, the performance impact will be obvious right on boot as this
+         mechanism engages from very early on.  A good rule of thumb is that
+         VMs running on CPUs without hardware virtualization support (i.e.,
+         the majority of IA-32 CPUs) will likely experience the slowdown.
+
+         On X86_64 the kernel will make use of PCID support when available
+         (Intel's Westmere, Sandy Bridge, etc) for better security (default)
+         or performance impact.  Pass pax_weakuderef on the kernel command
+         line to choose the latter.
+
+config PAX_REFCOUNT
+       bool "Prevent various kernel object reference counter overflows"
+       default y if GRKERNSEC_CONFIG_AUTO
+       depends on GRKERNSEC && ((ARM && (CPU_V6 || CPU_V6K || CPU_V7)) || MIPS || PPC || SPARC64 || X86)
+       help
+         By saying Y here the kernel will detect and prevent overflowing
+         various (but not all) kinds of object reference counters.  Such
+         overflows can normally occur due to bugs only and are often, if
+         not always, exploitable.
+
+         The tradeoff is that data structures protected by an overflowed
+         refcount will never be freed and therefore will leak memory.  Note
+         that this leak also happens even without this protection but in
+         that case the overflow can eventually trigger the freeing of the
+         data structure while it is still being used elsewhere, resulting
+         in the exploitable situation that this feature prevents.
+
+         Since this has a negligible performance impact, you should enable
+         this feature.
+
+config PAX_CONSTIFY_PLUGIN
+       bool "Automatically constify eligible structures"
+       default y
+       depends on !UML && PAX_KERNEXEC
+       help
+         By saying Y here the compiler will automatically constify a class
+         of types that contain only function pointers.  This reduces the
+         kernel's attack surface and also produces a better memory layout.
+
+         Note that the implementation requires a gcc with plugin support,
+         i.e., gcc 4.5 or newer.  You may need to install the supporting
+         headers explicitly in addition to the normal gcc package.
+         Note that if some code really has to modify constified variables
+         then the source code will have to be patched to allow it.  Examples
+         can be found in PaX itself (the no_const attribute) and for some
+         out-of-tree modules at http://www.grsecurity.net/~paxguy1/ .
+
+config PAX_USERCOPY
+       bool "Harden heap object copies between kernel and userland"
+       default y if GRKERNSEC_CONFIG_AUTO
+       depends on ARM || IA64 || PPC || SPARC || X86
+       depends on GRKERNSEC && (SLAB || SLUB || SLOB)
+       select PAX_USERCOPY_SLABS
+       help
+         By saying Y here the kernel will enforce the size of heap objects
+         when they are copied in either direction between the kernel and
+         userland, even if only a part of the heap object is copied.
+
+         Specifically, this checking prevents information leaking from the
+         kernel heap during kernel to userland copies (if the kernel heap
+         object is otherwise fully initialized) and prevents kernel heap
+         overflows during userland to kernel copies.
+
+         Note that the current implementation provides the strictest bounds
+         checks for the SLUB allocator.
+
+         Enabling this option also enables per-slab cache protection against
+         data in a given cache being copied into/out of via userland
+         accessors.  Though the whitelist of regions will be reduced over
+         time, it notably protects important data structures like task structs.
+
+         If frame pointers are enabled on x86, this option will also restrict
+         copies into and out of the kernel stack to local variables within a
+         single frame.
+
+         Since this has a negligible performance impact, you should enable
+         this feature.
+
+config PAX_USERCOPY_DEBUG
+       bool
+       depends on X86 && PAX_USERCOPY
+       default n
+
+config PAX_SIZE_OVERFLOW
+       bool "Prevent various integer overflows in function size parameters"
+       default y if GRKERNSEC_CONFIG_AUTO
+       depends on X86
+       help
+         By saying Y here the kernel recomputes expressions of function
+         arguments marked by a size_overflow attribute with double integer
+         precision (DImode/TImode for 32/64 bit integer types).
+
+         The recomputed argument is checked against TYPE_MAX and an event
+         is logged on overflow and the triggering process is killed.
+
+         Homepage: http://www.grsecurity.net/~ephox/overflow_plugin/
+
+         Note that the implementation requires a gcc with plugin support,
+         i.e., gcc 4.5 or newer.  You may need to install the supporting
+         headers explicitly in addition to the normal gcc package.
+
+config PAX_LATENT_ENTROPY
+       bool "Generate some entropy during boot and runtime"
+       default y if GRKERNSEC_CONFIG_AUTO
+       help
+         By saying Y here the kernel will instrument some kernel code to
+         extract some entropy from both original and artificially created
+         program state.  This will help especially embedded systems where
+         there is little 'natural' source of entropy normally.  The cost
+         is some slowdown of the boot process and fork and irq processing.
+
+         When pax_extra_latent_entropy is passed on the kernel command line,
+         entropy will be extracted from up to the first 4GB of RAM while the
+         runtime memory allocator is being initialized.  This costs even more
+         slowdown of the boot process.
+
+         Note that the implementation requires a gcc with plugin support,
+         i.e., gcc 4.5 or newer.  You may need to install the supporting
+         headers explicitly in addition to the normal gcc package.
+
+         Note that entropy extracted this way is not cryptographically
+         secure!
+
+endmenu
+
+endmenu
+
+source grsecurity/Kconfig
+
+endmenu
+
+endmenu
+
 source security/keys/Kconfig
 
 config SECURITY_DMESG_RESTRICT
@@ -103,7 +1072,7 @@ config INTEL_TXT
 config LSM_MMAP_MIN_ADDR
        int "Low address space for LSM to protect from user allocation"
        depends on SECURITY && SECURITY_SELINUX
-       default 32768 if ARM || (ARM64 && COMPAT)
+       default 32768 if ALPHA || ARM || (ARM64 && COMPAT) || PARISC || SPARC32
        default 65536
        help
          This is the portion of low virtual memory which should be protected
index fdaa50cb1876e2ac5a1cae2d54d74794db5f2f65..2761dcbf61915babbd0c9d8c6866f5850399a0fd 100644 (file)
@@ -348,8 +348,8 @@ static inline bool xindex_is_subset(u32 link, u32 target)
 int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry,
                 struct path *new_dir, struct dentry *new_dentry)
 {
-       struct path link = { new_dir->mnt, new_dentry };
-       struct path target = { new_dir->mnt, old_dentry };
+       struct path link = { .mnt = new_dir->mnt, .dentry = new_dentry };
+       struct path target = { .mnt = new_dir->mnt, .dentry = old_dentry };
        struct path_cond cond = {
                old_dentry->d_inode->i_uid,
                old_dentry->d_inode->i_mode
index 65ca451a764db1a38db4c8a68be1acd015f14d4f..ad6f22d476f9470e550048ab77e3b517b01e0696 100644 (file)
@@ -186,7 +186,7 @@ static int common_perm_dir_dentry(int op, struct path *dir,
                                  struct dentry *dentry, u32 mask,
                                  struct path_cond *cond)
 {
-       struct path path = { dir->mnt, dentry };
+       struct path path = { .mnt = dir->mnt, .dentry = dentry };
 
        return common_perm(op, &path, mask, cond);
 }
@@ -203,7 +203,7 @@ static int common_perm_dir_dentry(int op, struct path *dir,
 static int common_perm_mnt_dentry(int op, struct vfsmount *mnt,
                                  struct dentry *dentry, u32 mask)
 {
-       struct path path = { mnt, dentry };
+       struct path path = { .mnt = mnt, .dentry = dentry };
        struct path_cond cond = { dentry->d_inode->i_uid,
                                  dentry->d_inode->i_mode
        };
@@ -325,8 +325,8 @@ static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry,
 
        profile = aa_current_profile();
        if (!unconfined(profile)) {
-               struct path old_path = { old_dir->mnt, old_dentry };
-               struct path new_path = { new_dir->mnt, new_dentry };
+               struct path old_path = { .mnt = old_dir->mnt, .dentry = old_dentry };
+               struct path new_path = { .mnt = new_dir->mnt, .dentry = new_dentry };
                struct path_cond cond = { old_dentry->d_inode->i_uid,
                                          old_dentry->d_inode->i_mode
                };
@@ -615,7 +615,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
        return error;
 }
 
-static struct security_operations apparmor_ops = {
+static struct security_operations apparmor_ops __read_only = {
        .name =                         "apparmor",
 
        .ptrace_access_check =          apparmor_ptrace_access_check,
index 2915d8503054c0e41e5bc4de2103f0dad4026371..970753f214fa9fba5c081fbce36999ca7d615229 100644 (file)
@@ -427,6 +427,32 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
        return 0;
 }
 
+/* returns:
+       1 for suid privilege
+       2 for sgid privilege
+       3 for fscap privilege
+*/
+int is_privileged_binary(const struct dentry *dentry)
+{
+       struct cpu_vfs_cap_data capdata;
+       struct inode *inode = dentry->d_inode;
+
+       if (!inode || S_ISDIR(inode->i_mode))
+               return 0;
+
+       if (inode->i_mode & S_ISUID)
+               return 1;
+       if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
+               return 2;
+
+       if (!get_vfs_caps_from_disk(dentry, &capdata)) {
+               if (!cap_isclear(capdata.inheritable) || !cap_isclear(capdata.permitted))
+                       return 3;
+       }
+
+       return 0;
+}
+
 /*
  * Attempt to get the on-exec apply capability sets for an executable file from
  * its xattrs and, if present, apply them to the proposed credentials being
@@ -595,6 +621,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
        const struct cred *cred = current_cred();
        kuid_t root_uid = make_kuid(cred->user_ns, 0);
 
+       if (gr_acl_enable_at_secure())
+               return 1;
+
        if (!uid_eq(cred->uid, root_uid)) {
                if (bprm->cap_effective)
                        return 1;
index 8ee997dff13937521157d02c6aac27b25903b46f..24c174b5fa40d21c0a5f3dd97b9223b16481c83a 100644 (file)
@@ -116,8 +116,8 @@ int ima_init_template(void);
 extern spinlock_t ima_queue_lock;
 
 struct ima_h_table {
-       atomic_long_t len;      /* number of stored measurements in the list */
-       atomic_long_t violations;
+       atomic_long_unchecked_t len;    /* number of stored measurements in the list */
+       atomic_long_unchecked_t violations;
        struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
 };
 extern struct ima_h_table ima_htable;
index b8a27c5052d40811bcc6741a5b3e264cf2fed529..50a769fcdcf48364699811a9e430d27d3a1eb5e4 100644 (file)
@@ -137,7 +137,7 @@ void ima_add_violation(struct file *file, const unsigned char *filename,
        int result;
 
        /* can overflow, only indicator */
-       atomic_long_inc(&ima_htable.violations);
+       atomic_long_inc_unchecked(&ima_htable.violations);
 
        result = ima_alloc_init_template(NULL, file, filename,
                                         NULL, 0, &entry);
index 461215e5fd31d11be9f68c97fac1de09e057b368..9bb12eeb4c12baa5718f3d13de95b1f3785a2039 100644 (file)
 static int valid_policy = 1;
 #define TMPBUFLEN 12
 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
-                                    loff_t *ppos, atomic_long_t *val)
+                                    loff_t *ppos, atomic_long_unchecked_t *val)
 {
        char tmpbuf[TMPBUFLEN];
        ssize_t len;
 
-       len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
+       len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
        return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
 }
 
index 552705d5a78d0bbd518cb416e146846cf23df7e4..9920f4fbd160c5796911b1cf29c004db3ba28d15 100644 (file)
@@ -83,7 +83,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
        INIT_LIST_HEAD(&qe->later);
        list_add_tail_rcu(&qe->later, &ima_measurements);
 
-       atomic_long_inc(&ima_htable.len);
+       atomic_long_inc_unchecked(&ima_htable.len);
        key = ima_hash_key(entry->digest);
        hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
        return 0;
index 347896548ad3159a152186a4c1a27cdf92f1f4ad..ec7bb9ed137c2efb39685220a30d09ca91974804 100644 (file)
@@ -44,7 +44,7 @@ static long compat_keyctl_instantiate_key_iov(
        if (ret == 0)
                goto no_payload_free;
 
-       ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
+       ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
 err:
        if (iov != iovstack)
                kfree(iov);
index 200e37867336a3c2903437e97f591fbc302e15b7..cdc74b5192f0d52b84c7e636519a062373d79e7c 100644 (file)
@@ -244,7 +244,7 @@ extern long keyctl_instantiate_key_iov(key_serial_t,
 extern long keyctl_invalidate_key(key_serial_t);
 
 extern long keyctl_instantiate_key_common(key_serial_t,
-                                         const struct iovec *,
+                                         const struct iovec __user *,
                                          unsigned, size_t, key_serial_t);
 #ifdef CONFIG_PERSISTENT_KEYRINGS
 extern long keyctl_get_persistent(uid_t, key_serial_t);
index aee2ec5a18fcafc02e5e9088b9b55b9276bbcdd9..c27607130d8c103bddcae60b5a81ef9d05a6c57f 100644 (file)
@@ -283,7 +283,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
 
        atomic_set(&key->usage, 1);
        init_rwsem(&key->sem);
-       lockdep_set_class(&key->sem, &type->lock_class);
+       lockdep_set_class(&key->sem, (struct lock_class_key *)&type->lock_class);
        key->index_key.type = type;
        key->user = user;
        key->quotalen = quotalen;
@@ -1073,7 +1073,9 @@ int register_key_type(struct key_type *ktype)
        struct key_type *p;
        int ret;
 
-       memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));
+       pax_open_kernel();
+       memset((void *)&ktype->lock_class, 0, sizeof(ktype->lock_class));
+       pax_close_kernel();
 
        ret = -EEXIST;
        down_write(&key_types_sem);
@@ -1085,7 +1087,7 @@ int register_key_type(struct key_type *ktype)
        }
 
        /* store the type */
-       list_add(&ktype->link, &key_types_list);
+       pax_list_add((struct list_head *)&ktype->link, &key_types_list);
 
        pr_notice("Key type %s registered\n", ktype->name);
        ret = 0;
@@ -1107,7 +1109,7 @@ EXPORT_SYMBOL(register_key_type);
 void unregister_key_type(struct key_type *ktype)
 {
        down_write(&key_types_sem);
-       list_del_init(&ktype->link);
+       pax_list_del_init((struct list_head *)&ktype->link);
        downgrade_write(&key_types_sem);
        key_gc_keytype(ktype);
        pr_notice("Key type %s unregistered\n", ktype->name);
@@ -1125,10 +1127,10 @@ void __init key_init(void)
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
        /* add the special key types */
-       list_add_tail(&key_type_keyring.link, &key_types_list);
-       list_add_tail(&key_type_dead.link, &key_types_list);
-       list_add_tail(&key_type_user.link, &key_types_list);
-       list_add_tail(&key_type_logon.link, &key_types_list);
+       pax_list_add_tail((struct list_head *)&key_type_keyring.link, &key_types_list);
+       pax_list_add_tail((struct list_head *)&key_type_dead.link, &key_types_list);
+       pax_list_add_tail((struct list_head *)&key_type_user.link, &key_types_list);
+       pax_list_add_tail((struct list_head *)&key_type_logon.link, &key_types_list);
 
        /* record the root user tracking */
        rb_link_node(&root_key_user.node,
index 4743d71e4aa6dd12f2456a5f00496c1222775c6a..170a185ba42427269292e296f926b30c564aa0a4 100644 (file)
@@ -1000,7 +1000,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
 /*
  * Copy the iovec data from userspace
  */
-static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
+static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
                                 unsigned ioc)
 {
        for (; ioc > 0; ioc--) {
@@ -1022,7 +1022,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
  * If successful, 0 will be returned.
  */
 long keyctl_instantiate_key_common(key_serial_t id,
-                                  const struct iovec *payload_iov,
+                                  const struct iovec __user *payload_iov,
                                   unsigned ioc,
                                   size_t plen,
                                   key_serial_t ringid)
@@ -1117,7 +1117,7 @@ long keyctl_instantiate_key(key_serial_t id,
                        [0].iov_len  = plen
                };
 
-               return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
+               return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
        }
 
        return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
@@ -1150,7 +1150,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
        if (ret == 0)
                goto no_payload_free;
 
-       ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
+       ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
 err:
        if (iov != iovstack)
                kfree(iov);
index 0c7aea4dea54d8d299edc09b38c9a8f7b5c82be8..486ef6fa393b2cc9d8ceb8cb11f187f97730bd90 100644 (file)
@@ -414,6 +414,7 @@ link_check_failed:
 
 link_prealloc_failed:
        mutex_unlock(&user->cons_lock);
+       key_put(key);
        kleave(" = %d [prelink]", ret);
        return ret;
 
index f728728f193bdc0b1a3d578c0f1fa69b016dfabb..6457a0cfa0b075c93ba7c11211e73c4b1533a7d0 100644 (file)
@@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
  */
 static void update_mmap_min_addr(void)
 {
+#ifndef SPARC
 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
        if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
                mmap_min_addr = dac_mmap_min_addr;
@@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
 #else
        mmap_min_addr = dac_mmap_min_addr;
 #endif
+#endif
 }
 
 /*
index 18b35c63fc0c80cb3f0ce9dfc734f42c33efe98b..c9fbc92a46ec4af72a253bb63937ada2092ea7f6 100644 (file)
@@ -33,8 +33,8 @@
 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
        CONFIG_DEFAULT_SECURITY;
 
-static struct security_operations *security_ops;
-static struct security_operations default_security_ops = {
+struct security_operations *security_ops __read_only;
+struct security_operations default_security_ops __read_only = {
        .name   = "default",
 };
 
@@ -73,11 +73,6 @@ int __init security_init(void)
        return 0;
 }
 
-void reset_security_ops(void)
-{
-       security_ops = &default_security_ops;
-}
-
 /* Save user chosen LSM */
 static int __init choose_lsm(char *str)
 {
index a18f1fa6440bb707e6b693c383c09c15b5dcaa3f..c9b9fc43ffd6cbd44a80e9afa1c9e0ba57628fe3 100644 (file)
@@ -59,7 +59,7 @@ struct avc_node {
 struct avc_cache {
        struct hlist_head       slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
        spinlock_t              slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
-       atomic_t                lru_hint;       /* LRU hint for reclaim scan */
+       atomic_unchecked_t      lru_hint;       /* LRU hint for reclaim scan */
        atomic_t                active_nodes;
        u32                     latest_notif;   /* latest revocation notification */
 };
@@ -167,7 +167,7 @@ void __init avc_init(void)
                spin_lock_init(&avc_cache.slots_lock[i]);
        }
        atomic_set(&avc_cache.active_nodes, 0);
-       atomic_set(&avc_cache.lru_hint, 0);
+       atomic_set_unchecked(&avc_cache.lru_hint, 0);
 
        avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
                                             0, SLAB_PANIC, NULL);
@@ -242,7 +242,7 @@ static inline int avc_reclaim_node(void)
        spinlock_t *lock;
 
        for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
-               hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
+               hvalue = atomic_inc_return_unchecked(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
                head = &avc_cache.slots[hvalue];
                lock = &avc_cache.slots_lock[hvalue];
 
index 6da7532893a1973e660f5eabf525d5769c655fef..f0b1882523fe4391ca86294119ec3fdfcf368d38 100644 (file)
@@ -5807,7 +5807,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
 
 #endif
 
-static struct security_operations selinux_ops = {
+static struct security_operations selinux_ops __read_only = {
        .name =                         "selinux",
 
        .ptrace_access_check =          selinux_ptrace_access_check,
@@ -6148,6 +6149,9 @@ static void selinux_nf_ip_exit(void)
 #ifdef CONFIG_SECURITY_SELINUX_DISABLE
 static int selinux_disabled;
 
+extern struct security_operations *security_ops;
+extern struct security_operations default_security_ops;
+
 int selinux_disable(void)
 {
        if (ss_initialized) {
@@ -6165,7 +6169,9 @@ int selinux_disable(void)
        selinux_disabled = 1;
        selinux_enabled = 0;
 
-       reset_security_ops();
+       pax_open_kernel();
+       security_ops = &default_security_ops;
+       pax_close_kernel();
 
        /* Try to destroy the avc node cache */
        avc_disable();
index 1450f85b946da462e5ef3576a50d979d15574b8f..a91e0bc756d526be7133226849b10d0a09a79591 100644 (file)
@@ -48,7 +48,7 @@ static inline void selinux_xfrm_notify_policyload(void)
 
        rtnl_lock();
        for_each_net(net) {
-               atomic_inc(&net->xfrm.flow_cache_genid);
+               atomic_inc_unchecked(&net->xfrm.flow_cache_genid);
                rt_genid_bump_all(net);
        }
        rtnl_unlock();
index a7178773dde77d04b6d352a4f69a9f202afe2388..0747450d659a1c677f3d841ffcaa097e0e7e720e 100644 (file)
@@ -4082,7 +4082,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
        return 0;
 }
 
-struct security_operations smack_ops = {
+struct security_operations smack_ops __read_only = {
        .name =                         "smack",
 
        .ptrace_access_check =          smack_ptrace_access_check,
index 400390790745212764bd99c9178dceea031382ef..13a2b559e7ef5b2b9603981b12054d44134f7ba5 100644 (file)
@@ -692,7 +692,7 @@ int tomoyo_path_number_perm(const u8 type, struct path *path,
 {
        struct tomoyo_request_info r;
        struct tomoyo_obj_info obj = {
-               .path1 = *path,
+               .path1 = { .mnt = path->mnt, .dentry = path->dentry },
        };
        int error = -ENOMEM;
        struct tomoyo_path_info buf;
@@ -740,7 +740,7 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
        struct tomoyo_path_info buf;
        struct tomoyo_request_info r;
        struct tomoyo_obj_info obj = {
-               .path1 = *path,
+               .path1 = { .mnt = path->mnt, .dentry = path->dentry },
        };
        int idx;
 
@@ -786,7 +786,7 @@ int tomoyo_path_perm(const u8 operation, struct path *path, const char *target)
 {
        struct tomoyo_request_info r;
        struct tomoyo_obj_info obj = {
-               .path1 = *path,
+               .path1 = { .mnt = path->mnt, .dentry = path->dentry },
        };
        int error;
        struct tomoyo_path_info buf;
@@ -843,7 +843,7 @@ int tomoyo_mkdev_perm(const u8 operation, struct path *path,
 {
        struct tomoyo_request_info r;
        struct tomoyo_obj_info obj = {
-               .path1 = *path,
+               .path1 = { .mnt = path->mnt, .dentry = path->dentry },
        };
        int error = -ENOMEM;
        struct tomoyo_path_info buf;
@@ -890,8 +890,8 @@ int tomoyo_path2_perm(const u8 operation, struct path *path1,
        struct tomoyo_path_info buf2;
        struct tomoyo_request_info r;
        struct tomoyo_obj_info obj = {
-               .path1 = *path1,
-               .path2 = *path2,
+               .path1 = { .mnt = path1->mnt, .dentry = path1->dentry },
+               .path2 = { .mnt = path2->mnt, .dentry = path2->dentry }
        };
        int idx;
 
index 390c646013cb290df2fa61728b79fef56ab31019..f2f8db3f64ff9b22b65a1bfa267aa5e0b824c48c 100644 (file)
@@ -118,6 +118,10 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r,
                   type == tomoyo_mounts[TOMOYO_MOUNT_MOVE]) {
                need_dev = -1; /* dev_name is a directory */
        } else {
+               if (!capable(CAP_SYS_ADMIN)) {
+                       error = -EPERM;
+                       goto out;
+               }
                fstype = get_fs_type(type);
                if (!fstype) {
                        error = -ENODEV;
index f0b756e27fed6b143f823d6a7708a408ea630bd8..8aa497b687884abc28e2605a80b48cb59d229706 100644 (file)
@@ -146,7 +146,7 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
  */
 static int tomoyo_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
 {
-       struct path path = { mnt, dentry };
+       struct path path = { .mnt = mnt, .dentry = dentry };
        return tomoyo_path_perm(TOMOYO_TYPE_GETATTR, &path, NULL);
 }
 
@@ -172,7 +172,7 @@ static int tomoyo_path_truncate(struct path *path)
  */
 static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry)
 {
-       struct path path = { parent->mnt, dentry };
+       struct path path = { .mnt = parent->mnt, .dentry = dentry };
        return tomoyo_path_perm(TOMOYO_TYPE_UNLINK, &path, NULL);
 }
 
@@ -188,7 +188,7 @@ static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry)
 static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry,
                             umode_t mode)
 {
-       struct path path = { parent->mnt, dentry };
+       struct path path = { .mnt = parent->mnt, .dentry = dentry };
        return tomoyo_path_number_perm(TOMOYO_TYPE_MKDIR, &path,
                                       mode & S_IALLUGO);
 }
@@ -203,7 +203,7 @@ static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry,
  */
 static int tomoyo_path_rmdir(struct path *parent, struct dentry *dentry)
 {
-       struct path path = { parent->mnt, dentry };
+       struct path path = { .mnt = parent->mnt, .dentry = dentry };
        return tomoyo_path_perm(TOMOYO_TYPE_RMDIR, &path, NULL);
 }
 
@@ -219,7 +219,7 @@ static int tomoyo_path_rmdir(struct path *parent, struct dentry *dentry)
 static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry,
                               const char *old_name)
 {
-       struct path path = { parent->mnt, dentry };
+       struct path path = { .mnt = parent->mnt, .dentry = dentry };
        return tomoyo_path_perm(TOMOYO_TYPE_SYMLINK, &path, old_name);
 }
 
@@ -236,7 +236,7 @@ static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry,
 static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry,
                             umode_t mode, unsigned int dev)
 {
-       struct path path = { parent->mnt, dentry };
+       struct path path = { .mnt = parent->mnt, .dentry = dentry };
        int type = TOMOYO_TYPE_CREATE;
        const unsigned int perm = mode & S_IALLUGO;
 
@@ -275,8 +275,8 @@ static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry,
 static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir,
                            struct dentry *new_dentry)
 {
-       struct path path1 = { new_dir->mnt, old_dentry };
-       struct path path2 = { new_dir->mnt, new_dentry };
+       struct path path1 = { .mnt = new_dir->mnt, .dentry = old_dentry };
+       struct path path2 = { .mnt = new_dir->mnt, .dentry = new_dentry };
        return tomoyo_path2_perm(TOMOYO_TYPE_LINK, &path1, &path2);
 }
 
@@ -295,8 +295,8 @@ static int tomoyo_path_rename(struct path *old_parent,
                              struct path *new_parent,
                              struct dentry *new_dentry)
 {
-       struct path path1 = { old_parent->mnt, old_dentry };
-       struct path path2 = { new_parent->mnt, new_dentry };
+       struct path path1 = { .mnt = old_parent->mnt, .dentry = old_dentry };
+       struct path path2 = { .mnt = new_parent->mnt, .dentry = new_dentry };
        return tomoyo_path2_perm(TOMOYO_TYPE_RENAME, &path1, &path2);
 }
 
@@ -424,7 +424,7 @@ static int tomoyo_sb_mount(const char *dev_name, struct path *path,
  */
 static int tomoyo_sb_umount(struct vfsmount *mnt, int flags)
 {
-       struct path path = { mnt, mnt->mnt_root };
+       struct path path = { .mnt = mnt, .dentry = mnt->mnt_root };
        return tomoyo_path_perm(TOMOYO_TYPE_UMOUNT, &path, NULL);
 }
 
@@ -503,7 +503,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
  * tomoyo_security_ops is a "struct security_operations" which is used for
  * registering TOMOYO.
  */
-static struct security_operations tomoyo_security_ops = {
+static struct security_operations tomoyo_security_ops __read_only = {
        .name                = "tomoyo",
        .cred_alloc_blank    = tomoyo_cred_alloc_blank,
        .cred_prepare        = tomoyo_cred_prepare,
index 20ef5143c0c06bbedbfaf4114bf08c4579d75011..4182bed55cab7f5c3964716d3a099c61af163d41 100644 (file)
@@ -1,6 +1,6 @@
 config SECURITY_YAMA
        bool "Yama support"
-       depends on SECURITY
+       depends on SECURITY && !GRKERNSEC
        select SECURITYFS
        select SECURITY_PATH
        default n
index 13c88fbcf0371cc32340791e335eeb0b4758f875..f8c115e4da716c654f98c9e6e07899d41964b256 100644 (file)
@@ -365,7 +365,7 @@ int yama_ptrace_traceme(struct task_struct *parent)
 }
 
 #ifndef CONFIG_SECURITY_YAMA_STACKED
-static struct security_operations yama_ops = {
+static struct security_operations yama_ops __read_only = {
        .name =                 "yama",
 
        .ptrace_access_check =  yama_ptrace_access_check,
@@ -376,28 +376,24 @@ static struct security_operations yama_ops = {
 #endif
 
 #ifdef CONFIG_SYSCTL
+static int zero __read_only;
+static int max_scope __read_only = YAMA_SCOPE_NO_ATTACH;
+
 static int yama_dointvec_minmax(struct ctl_table *table, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       int rc;
+       ctl_table_no_const yama_table;
 
        if (write && !capable(CAP_SYS_PTRACE))
                return -EPERM;
 
-       rc = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-       if (rc)
-               return rc;
-
+       yama_table = *table;
        /* Lock the max value if it ever gets set. */
-       if (write && *(int *)table->data == *(int *)table->extra2)
-               table->extra1 = table->extra2;
-
-       return rc;
+       if (ptrace_scope == max_scope)
+               yama_table.extra1 = &max_scope;
+       return proc_dointvec_minmax(&yama_table, write, buffer, lenp, ppos);
 }
 
-static int zero;
-static int max_scope = YAMA_SCOPE_NO_ATTACH;
-
 struct ctl_path yama_sysctl_path[] = {
        { .procname = "kernel", },
        { .procname = "yama", },
index 23c371ecfb6b3841a11d4b375c6e53f0c3c83324..da7c25ee2fea84e95ca9ebe0a44ded84e2d6d802 100644 (file)
@@ -54,7 +54,7 @@ struct onyx {
                                spdif_locked:1,
                                analog_locked:1,
                                original_mute:2;
-       int                     open_count;
+       local_t                 open_count;
        struct codec_info       *codec_info;
 
        /* mutex serializes concurrent access to the device
@@ -747,7 +747,7 @@ static int onyx_open(struct codec_info_item *cii,
        struct onyx *onyx = cii->codec_data;
 
        mutex_lock(&onyx->mutex);
-       onyx->open_count++;
+       local_inc(&onyx->open_count);
        mutex_unlock(&onyx->mutex);
 
        return 0;
@@ -759,8 +759,7 @@ static int onyx_close(struct codec_info_item *cii,
        struct onyx *onyx = cii->codec_data;
 
        mutex_lock(&onyx->mutex);
-       onyx->open_count--;
-       if (!onyx->open_count)
+       if (local_dec_and_test(&onyx->open_count))
                onyx->spdif_locked = onyx->analog_locked = 0;
        mutex_unlock(&onyx->mutex);
 
index ffd20254ff76113b90cef058e79ce5ecc6d63600..df062c972ad34c407e843732a2e6ee994c0de4de 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/i2c.h>
 #include <asm/pmac_low_i2c.h>
 #include <asm/prom.h>
+#include <asm/local.h>
 
 /* PCM3052 register definitions */
 
index ada69d7a8d70a077c18abfa2aa7bf26ee8125b27..5f653863c7bd3e455eb668c76e2e12d630f79532 100644 (file)
@@ -1190,10 +1190,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
                if (in_kernel) {
                        mm_segment_t fs;
                        fs = snd_enter_user();
-                       ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
+                       ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
                        snd_leave_user(fs);
                } else {
-                       ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
+                       ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
                }
                if (ret != -EPIPE && ret != -ESTRPIPE)
                        break;
@@ -1233,10 +1233,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
                if (in_kernel) {
                        mm_segment_t fs;
                        fs = snd_enter_user();
-                       ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
+                       ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
                        snd_leave_user(fs);
                } else {
-                       ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
+                       ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
                }
                if (ret == -EPIPE) {
                        if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
@@ -1332,7 +1332,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
                struct snd_pcm_plugin_channel *channels;
                size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
                if (!in_kernel) {
-                       if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
+                       if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
                                return -EFAULT;
                        buf = runtime->oss.buffer;
                }
@@ -1402,7 +1402,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
                        }
                } else {
                        tmp = snd_pcm_oss_write2(substream,
-                                                (const char __force *)buf,
+                                                (const char __force_kernel *)buf,
                                                 runtime->oss.period_bytes, 0);
                        if (tmp <= 0)
                                goto err;
@@ -1428,7 +1428,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
        struct snd_pcm_runtime *runtime = substream->runtime;
        snd_pcm_sframes_t frames, frames1;
 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
-       char __user *final_dst = (char __force __user *)buf;
+       char __user *final_dst = (char __force_user *)buf;
        if (runtime->oss.plugin_first) {
                struct snd_pcm_plugin_channel *channels;
                size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
@@ -1490,7 +1490,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
                        xfer += tmp;
                        runtime->oss.buffer_used -= tmp;
                } else {
-                       tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
+                       tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
                                                runtime->oss.period_bytes, 0);
                        if (tmp <= 0)
                                goto err;
@@ -1659,7 +1659,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
                                                                   size1);
                                        size1 /= runtime->channels; /* frames */
                                        fs = snd_enter_user();
-                                       snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
+                                       snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
                                        snd_leave_user(fs);
                                }
                        } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
index 2d957ba635578758172c9d66addc181a3ce438a9..fda022cdecf6cf9a6b5dbe768426d6d1aa32c5c8 100644 (file)
@@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
        int err;
 
        fs = snd_enter_user();
-       err = snd_pcm_delay(substream, &delay);
+       err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
        snd_leave_user(fs);
        if (err < 0)
                return err;
index 64d9863d656526e9365fdefc3a6811c7d4525a6b..969034b3eb64b867488c9a9796b87da1fb3c740f 100644 (file)
@@ -2956,11 +2956,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
        switch (substream->stream) {
        case SNDRV_PCM_STREAM_PLAYBACK:
                result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
-                                                (void __user *)arg);
+                                                (void __force_user *)arg);
                break;
        case SNDRV_PCM_STREAM_CAPTURE:
                result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
-                                               (void __user *)arg);
+                                               (void __force_user *)arg);
                break;
        default:
                result = -EINVAL;
index 16d42679e43fc29e380612274e9f597f9899dc91..fe8b49bce01d5a1e03dfe1b24b6a24d63340ae68 100644 (file)
@@ -69,8 +69,8 @@ static int __init alsa_seq_oss_init(void)
 {
        int rc;
        static struct snd_seq_dev_ops ops = {
-               snd_seq_oss_synth_register,
-               snd_seq_oss_synth_unregister,
+               .init_device = snd_seq_oss_synth_register,
+               .free_device = snd_seq_oss_synth_unregister,
        };
 
        snd_seq_autoload_lock();
index 0631bdadd12bd6ca884bf68578ac8fd7917a3f6b..d0dcd497d81efed839be30967bb3a95ea0d2950d 100644 (file)
@@ -65,7 +65,7 @@ struct ops_list {
        int argsize;            /* argument size */
 
        /* operators */
-       struct snd_seq_dev_ops ops;
+       struct snd_seq_dev_ops *ops;
 
        /* registered devices */
        struct list_head dev_list;      /* list of devices */
@@ -371,7 +371,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
 
        mutex_lock(&ops->reg_mutex);
        /* copy driver operators */
-       ops->ops = *entry;
+       ops->ops = entry;
        ops->driver |= DRIVER_LOADED;
        ops->argsize = argsize;
 
@@ -500,7 +500,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
                           dev->name, ops->id, ops->argsize, dev->argsize);
                return -EINVAL;
        }
-       if (ops->ops.init_device(dev) >= 0) {
+       if (ops->ops->init_device(dev) >= 0) {
                dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
                ops->num_init_devices++;
        } else {
@@ -527,7 +527,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
                           dev->name, ops->id, ops->argsize, dev->argsize);
                return -EINVAL;
        }
-       if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
+       if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
                dev->status = SNDRV_SEQ_DEVICE_FREE;
                dev->driver_data = NULL;
                ops->num_init_devices--;
index a1fd77af60593e26de9cf4c560e8a3b80412d797..69cd65999c7b16156f9f7afa51f274ee50827dea 100644 (file)
@@ -462,8 +462,8 @@ snd_seq_midisynth_unregister_port(struct snd_seq_device *dev)
 static int __init alsa_seq_midi_init(void)
 {
        static struct snd_seq_dev_ops ops = {
-               snd_seq_midisynth_register_port,
-               snd_seq_midisynth_unregister_port,
+               .init_device = snd_seq_midisynth_register_port,
+               .free_device = snd_seq_midisynth_unregister_port,
        };
        memset(&synths, 0, sizeof(synths));
        snd_seq_autoload_lock();
index f1333060bf1cc71bb391673017a5bd84a6e8123a..3ec0dab5f68c9aaa4f190cac0df9e54b8b7de74d 100644 (file)
@@ -86,7 +86,7 @@ static void snd_request_other(int minor)
        case SNDRV_MINOR_TIMER:         str = "snd-timer";      break;
        default:                        return;
        }
-       request_module(str);
+       request_module("%s", str);
 }
 
 #endif /* modular kernel */
index 2a008a9ccf85c48f743b7eb7ffd7cece8d76758c..a1efb3f64d033309e84e69ce73aec4aea615422b 100644 (file)
@@ -29,6 +29,7 @@
 #include <sound/initval.h>
 #include <sound/rawmidi.h>
 #include <sound/control.h>
+#include <asm/local.h>
 
 #define CARD_NAME "Miditerminal 4140"
 #define DRIVER_NAME "MTS64"
@@ -67,7 +68,7 @@ struct mts64 {
        struct pardevice *pardev;
        int pardev_claimed;
 
-       int open_count;
+       local_t open_count;
        int current_midi_output_port;
        int current_midi_input_port;
        u8 mode[MTS64_NUM_INPUT_PORTS];
@@ -687,7 +688,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
 {
        struct mts64 *mts = substream->rmidi->private_data;
 
-       if (mts->open_count == 0) {
+       if (local_read(&mts->open_count) == 0) {
                /* We don't need a spinlock here, because this is just called 
                   if the device has not been opened before. 
                   So there aren't any IRQs from the device */
@@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
 
                msleep(50);
        }
-       ++(mts->open_count);
+       local_inc(&mts->open_count);
 
        return 0;
 }
@@ -705,8 +706,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
        struct mts64 *mts = substream->rmidi->private_data;
        unsigned long flags;
 
-       --(mts->open_count);
-       if (mts->open_count == 0) {
+       if (local_dec_return(&mts->open_count) == 0) {
                /* We need the spinlock_irqsave here because we can still
                   have IRQs at this point */
                spin_lock_irqsave(&mts->lock, flags);
@@ -715,8 +715,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
 
                msleep(500);
 
-       } else if (mts->open_count < 0)
-               mts->open_count = 0;
+       } else if (local_read(&mts->open_count) < 0)
+               local_set(&mts->open_count, 0);
 
        return 0;
 }
index 68399538e435ffbbd324175258bde0f3949827a2..7a0f4b9fe74ca3d72a65a253f25d4209dcc103de 100644 (file)
@@ -281,8 +281,8 @@ static int __init alsa_opl3_seq_init(void)
 {
        static struct snd_seq_dev_ops ops =
        {
-               snd_opl3_seq_new_device,
-               snd_opl3_seq_delete_device
+               .init_device = snd_opl3_seq_new_device,
+               .free_device = snd_opl3_seq_delete_device
        };
 
        return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_OPL3, &ops,
index b953fb4aa298031a294a7b673a38f27037438f92..1999c01529073688118cbad906ff95d8b8c9c596 100644 (file)
@@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
 MODULE_DESCRIPTION("OPL4 driver");
 MODULE_LICENSE("GPL");
 
-static void inline snd_opl4_wait(struct snd_opl4 *opl4)
+static inline void snd_opl4_wait(struct snd_opl4 *opl4)
 {
        int timeout = 10;
        while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
index 99197699c55a63f50bd33f45d454a40f4dd5cf70..d7de36ce883387f871a6eca44323493b50d2146f 100644 (file)
@@ -198,8 +198,8 @@ static int snd_opl4_seq_delete_device(struct snd_seq_device *dev)
 static int __init alsa_opl4_synth_init(void)
 {
        static struct snd_seq_dev_ops ops = {
-               snd_opl4_seq_new_device,
-               snd_opl4_seq_delete_device
+               .init_device = snd_opl4_seq_new_device,
+               .free_device = snd_opl4_seq_delete_device
        };
 
        return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_OPL4, &ops,
index 464385a480e491e2c325ceb01f56d9cade82f4fd..46ab3f67c3369e37817c985a36dcfd7c8425b9f8 100644 (file)
@@ -48,6 +48,7 @@
 #include <sound/initval.h>
 #include <sound/rawmidi.h>
 #include <sound/control.h>
+#include <asm/local.h>
 
 #define CARD_NAME "Portman 2x4"
 #define DRIVER_NAME "portman"
@@ -85,7 +86,7 @@ struct portman {
        struct pardevice *pardev;
        int pardev_claimed;
 
-       int open_count;
+       local_t open_count;
        int mode[PORTMAN_NUM_INPUT_PORTS];
        struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
 };
index 911341b2a897b7a4bcb794371806234fc67c2df7..bd18b63cbaaf9fad8ea0f696a2271ce9b2f42378 100644 (file)
@@ -573,7 +573,7 @@ static void update_pcm_pointers(struct amdtp_stream *s,
        ptr = s->pcm_buffer_pointer + frames;
        if (ptr >= pcm->runtime->buffer_size)
                ptr -= pcm->runtime->buffer_size;
-       ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
+       ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
 
        s->pcm_period_pointer += frames;
        if (s->pcm_period_pointer >= pcm->runtime->period_size) {
@@ -1013,7 +1013,7 @@ EXPORT_SYMBOL(amdtp_stream_pcm_pointer);
  */
 void amdtp_stream_update(struct amdtp_stream *s)
 {
-       ACCESS_ONCE(s->source_node_id_field) =
+       ACCESS_ONCE_RW(s->source_node_id_field) =
                (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
 }
 EXPORT_SYMBOL(amdtp_stream_update);
index 8a03a91e728b0f9bc4feb23a4bdd9f22fdf5952d..aaacc0c98611c8406ef420ddcd36e7a3c74ed3f5 100644 (file)
@@ -231,7 +231,7 @@ static inline bool amdtp_stream_pcm_running(struct amdtp_stream *s)
 static inline void amdtp_stream_pcm_trigger(struct amdtp_stream *s,
                                            struct snd_pcm_substream *pcm)
 {
-       ACCESS_ONCE(s->pcm) = pcm;
+       ACCESS_ONCE_RW(s->pcm) = pcm;
 }
 
 /**
@@ -249,7 +249,7 @@ static inline void amdtp_stream_midi_trigger(struct amdtp_stream *s,
                                             struct snd_rawmidi_substream *midi)
 {
        if (port < s->midi_ports)
-               ACCESS_ONCE(s->midi[port]) = midi;
+               ACCESS_ONCE_RW(s->midi[port]) = midi;
 }
 
 static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc)
index 48d6dca471c6bc5713cd7a6ee7bc466c008a1997..a0266c23325d05b32c210257b27d68eb647f109e 100644 (file)
@@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
        ptr += count;
        if (ptr >= runtime->buffer_size)
                ptr -= runtime->buffer_size;
-       ACCESS_ONCE(isight->buffer_pointer) = ptr;
+       ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
 
        isight->period_counter += count;
        if (isight->period_counter >= runtime->period_size) {
@@ -293,7 +293,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
        if (err < 0)
                return err;
 
-       ACCESS_ONCE(isight->pcm_active) = true;
+       ACCESS_ONCE_RW(isight->pcm_active) = true;
 
        return 0;
 }
@@ -331,7 +331,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
 {
        struct isight *isight = substream->private_data;
 
-       ACCESS_ONCE(isight->pcm_active) = false;
+       ACCESS_ONCE_RW(isight->pcm_active) = false;
 
        mutex_lock(&isight->mutex);
        isight_stop_streaming(isight);
@@ -424,10 +424,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
 
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
-               ACCESS_ONCE(isight->pcm_running) = true;
+               ACCESS_ONCE_RW(isight->pcm_running) = true;
                break;
        case SNDRV_PCM_TRIGGER_STOP:
-               ACCESS_ONCE(isight->pcm_running) = false;
+               ACCESS_ONCE_RW(isight->pcm_running) = false;
                break;
        default:
                return -EINVAL;
index 2dba848a781fd19f1fb2ca8c25bf2ff56fb0eabd..c682aef94c7fdba27899e906fb56372c7345d7ba 100644 (file)
@@ -74,7 +74,7 @@ static void scs_output_trigger(struct snd_rawmidi_substream *stream, int up)
 {
        struct scs *scs = stream->rmidi->private_data;
 
-       ACCESS_ONCE(scs->output) = up ? stream : NULL;
+       ACCESS_ONCE_RW(scs->output) = up ? stream : NULL;
        if (up) {
                scs->output_idle = false;
                tasklet_schedule(&scs->tasklet);
@@ -257,7 +257,7 @@ static void scs_input_trigger(struct snd_rawmidi_substream *stream, int up)
 {
        struct scs *scs = stream->rmidi->private_data;
 
-       ACCESS_ONCE(scs->input) = up ? stream : NULL;
+       ACCESS_ONCE_RW(scs->input) = up ? stream : NULL;
 }
 
 static void scs_input_escaped_byte(struct snd_rawmidi_substream *stream,
@@ -473,8 +473,8 @@ static void scs_remove(struct fw_unit *unit)
 
        snd_card_disconnect(scs->card);
 
-       ACCESS_ONCE(scs->output) = NULL;
-       ACCESS_ONCE(scs->input) = NULL;
+       ACCESS_ONCE_RW(scs->output) = NULL;
+       ACCESS_ONCE_RW(scs->input) = NULL;
 
        wait_event(scs->idle_wait, scs->output_idle);
 
index 95b39beb61c19b8d10f38b5e65e86b1fb7acdf9b..42f142c2df501755fb47c30af12d673d474a4079 100644 (file)
@@ -119,8 +119,8 @@ static int __init alsa_emu8000_init(void)
 {
        
        static struct snd_seq_dev_ops ops = {
-               snd_emu8000_new_device,
-               snd_emu8000_delete_device,
+               .init_device = snd_emu8000_new_device,
+               .free_device = snd_emu8000_delete_device,
        };
        return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_EMU8000, &ops,
                                              sizeof(struct snd_emu8000*));
index 048439a16000ef08a8ead209285240224ac2683b..3be9f6f853a9b8b8288aa1b638d70be8e5a9dfdf 100644 (file)
@@ -904,7 +904,7 @@ sb16_copy_from_user(int dev,
                buf16 = (signed short *)(localbuf + localoffs);
                while (c)
                {
-                       locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
+                       locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
                        if (copy_from_user(lbuf8,
                                           userbuf+useroffs + p,
                                           locallen))
index a33e8ce8085bdf2954338727c0404d735dce7b00..2262303feb62d945ae23f5d4599e6eef7ef2f487 100644 (file)
@@ -2623,7 +2623,6 @@ static int __init cs4297a_init(void)
 {
        struct cs4297a_state *s;
        u32 pwr, id;
-       mm_segment_t fs;
        int rval;
        u64 cfg;
        int mdio_val;
@@ -2709,22 +2708,23 @@ static int __init cs4297a_init(void)
         if (!rval) {
                char *sb1250_duart_present;
 
+#if 0
+                mm_segment_t fs;
                 fs = get_fs();
                 set_fs(KERNEL_DS);
-#if 0
                 val = SOUND_MASK_LINE;
                 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
                 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
                         val = initvol[i].vol;
                         mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
                 }
+                set_fs(fs);
 //                cs4297a_write_ac97(s, 0x18, 0x0808);
 #else
                 //                cs4297a_write_ac97(s, 0x5e, 0x180);
                 cs4297a_write_ac97(s, 0x02, 0x0808);
                 cs4297a_write_ac97(s, 0x18, 0x0808);
 #endif
-                set_fs(fs);
 
                 list_add(&s->list, &cs4297a_devs);
 
index 4c41c903a840b5a6d3f802970b6ced5246b54462..37f36318ad975137e23c24eb14e6db9134e9251c 100644 (file)
@@ -108,8 +108,8 @@ static int __init alsa_emu10k1_synth_init(void)
 {
        
        static struct snd_seq_dev_ops ops = {
-               snd_emu10k1_synth_new_device,
-               snd_emu10k1_synth_delete_device,
+               .init_device = snd_emu10k1_synth_new_device,
+               .free_device = snd_emu10k1_synth_delete_device,
        };
        return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_EMU10K1_SYNTH, &ops,
                                              sizeof(struct snd_emu10k1_synth_arg));
index 2fe86d2e1b09dfc6f5d8f095a3fc3ad4792680f5..8f1cd087165570514ae6005994389185f9781869 100644 (file)
@@ -2865,7 +2865,7 @@ static int get_kctl_0dB_offset(struct hda_codec *codec,
                /* FIXME: set_fs() hack for obtaining user-space TLV data */
                mm_segment_t fs = get_fs();
                set_fs(get_ds());
-               if (!kctl->tlv.c(kctl, 0, sizeof(_tlv), _tlv))
+               if (!kctl->tlv.c(kctl, 0, sizeof(_tlv), (unsigned int __force_user *)_tlv))
                        tlv = _tlv;
                set_fs(fs);
        } else if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_READ)
index 4631a23489151f8631e878e4a98fe31b51ab066b..001ae57f797ca65aa6ce0ea4e3bf5cd738a2388d 100644 (file)
@@ -358,7 +358,7 @@ struct snd_ymfpci {
        spinlock_t reg_lock;
        spinlock_t voice_lock;
        wait_queue_head_t interrupt_sleep;
-       atomic_t interrupt_sleep_count;
+       atomic_unchecked_t interrupt_sleep_count;
        struct snd_info_entry *proc_entry;
        const struct firmware *dsp_microcode;
        const struct firmware *controller_microcode;
index 81c916a5eb96a09ba4f4cb8cec1f75339f9f2dca..516f0bf6d3dee72ef30fe6155d53db29db3a73e7 100644 (file)
@@ -204,8 +204,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
                if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
                        break;
        }
-       if (atomic_read(&chip->interrupt_sleep_count)) {
-               atomic_set(&chip->interrupt_sleep_count, 0);
+       if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
+               atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
                wake_up(&chip->interrupt_sleep);
        }
       __end:
@@ -789,7 +789,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
                        continue;
                init_waitqueue_entry(&wait, current);
                add_wait_queue(&chip->interrupt_sleep, &wait);
-               atomic_inc(&chip->interrupt_sleep_count);
+               atomic_inc_unchecked(&chip->interrupt_sleep_count);
                schedule_timeout_uninterruptible(msecs_to_jiffies(50));
                remove_wait_queue(&chip->interrupt_sleep, &wait);
        }
@@ -827,8 +827,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
                snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
                spin_unlock(&chip->reg_lock);
 
-               if (atomic_read(&chip->interrupt_sleep_count)) {
-                       atomic_set(&chip->interrupt_sleep_count, 0);
+               if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
+                       atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
                        wake_up(&chip->interrupt_sleep);
                }
        }
@@ -2423,7 +2423,7 @@ int snd_ymfpci_create(struct snd_card *card,
        spin_lock_init(&chip->reg_lock);
        spin_lock_init(&chip->voice_lock);
        init_waitqueue_head(&chip->interrupt_sleep);
-       atomic_set(&chip->interrupt_sleep_count, 0);
+       atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
        chip->card = card;
        chip->pci = pci;
        chip->irq = -1;
index 08d7259bbaabad727709281234905011e990f07d..03362249eb58be50f70084988d810f90929d051d 100644 (file)
@@ -271,8 +271,10 @@ int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
        if (ret)
                return ret;
 
-       ops->warm_reset = snd_soc_ac97_warm_reset;
-       ops->reset = snd_soc_ac97_reset;
+       pax_open_kernel();
+       *(void **)&ops->warm_reset = snd_soc_ac97_warm_reset;
+       *(void **)&ops->reset = snd_soc_ac97_reset;
+       pax_close_kernel();
 
        snd_ac97_rst_cfg = cfg;
        return 0;
index 7778b8e19782e24a3ca56a5aa5dff9e00bf33e4d..3d619fc053092f34df9c1f73db91d01cdba9be0c 100644 (file)
@@ -33,13 +33,13 @@ static int snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *inf
  * MIDI emulation operators
  */
 static struct snd_midi_op emux_ops = {
-       snd_emux_note_on,
-       snd_emux_note_off,
-       snd_emux_key_press,
-       snd_emux_terminate_note,
-       snd_emux_control,
-       snd_emux_nrpn,
-       snd_emux_sysex,
+       .note_on = snd_emux_note_on,
+       .note_off = snd_emux_note_off,
+       .key_press = snd_emux_key_press,
+       .note_terminate = snd_emux_terminate_note,
+       .control = snd_emux_control,
+       .nrpn = snd_emux_nrpn,
+       .sysex = snd_emux_sysex,
 };
 
 
index 88461f09cc860b0d17ab87c3d4a629ee44214af7..6fb70a0c3f6b05780e6a00c22ad5a67ef9dcfe5c 100644 (file)
 # define unlikely(x)           __builtin_expect(!!(x), 0)
 #endif
 
+#ifndef __size_overflow
+# define __size_overflow(...)
+#endif
+
+#ifndef __intentional_overflow
+# define __intentional_overflow(...)
+#endif
+
 #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
 #endif /* _TOOLS_LINUX_COMPILER_H */
index 36c08b1f4afbc9379007c3b49e79fb5f87a3f1c9..87c72d1e6988bf36d78d8b376cf3e8ee97d608a7 100644 (file)
@@ -21,7 +21,7 @@ LIB_OBJS += $(OUTPUT)fd/array.o
 
 LIBFILE = libapikfs.a
 
-CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) -fPIC
+CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) -fPIC
 EXTLIBS = -lelf -lpthread -lrt -lm
 ALL_CFLAGS = $(CFLAGS) $(BASIC_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
 ALL_LDFLAGS = $(LDFLAGS)
index 6789d788d4947d31f890ca56660d8ef968901b82..4afd019e3a96bd3907e1b68c009377e4aff7ac0d 100644 (file)
@@ -5,4 +5,7 @@
 
 #define altinstruction_entry #
 
+       .macro pax_force_retaddr rip=0, reload=0
+       .endm
+
 #endif
index 0a578fe18653b8b282af9ee101da0a558da222ba..b81f62d2ed84218eef197fe304227f282f70a2b8 100644 (file)
@@ -13,7 +13,7 @@ static inline void __chk_user_ptr(const volatile void *p, size_t size)
 ({                                                             \
        typeof(ptr) __pu_ptr = (ptr);                           \
        __chk_user_ptr(__pu_ptr, sizeof(*__pu_ptr));            \
-       ACCESS_ONCE(*(__pu_ptr)) = x;                           \
+       ACCESS_ONCE_RW(*(__pu_ptr)) = x;                        \
        0;                                                      \
 })
 
index 1cc6e2e199827093093e6c48eab72c6531136a13..85d2e86a4b79f58931b342fe44e08e6d2769a52b 100644 (file)
@@ -78,12 +78,17 @@ LIST_HEAD(vm_list);
 
 static cpumask_var_t cpus_hardware_enabled;
 static int kvm_usage_count = 0;
-static atomic_t hardware_enable_failed;
+static atomic_unchecked_t hardware_enable_failed;
 
 struct kmem_cache *kvm_vcpu_cache;
 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
 
-static __read_mostly struct preempt_ops kvm_preempt_ops;
+static void kvm_sched_in(struct preempt_notifier *pn, int cpu);
+static void kvm_sched_out(struct preempt_notifier *pn, struct task_struct *next);
+static struct preempt_ops kvm_preempt_ops = {
+       .sched_in = kvm_sched_in,
+       .sched_out = kvm_sched_out,
+};
 
 struct dentry *kvm_debugfs_dir;
 
@@ -785,7 +790,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
        /* We can read the guest memory with __xxx_user() later on. */
        if ((mem->slot < KVM_USER_MEM_SLOTS) &&
            ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
-            !access_ok(VERIFY_WRITE,
+            !access_ok_noprefault(VERIFY_WRITE,
                        (void __user *)(unsigned long)mem->userspace_addr,
                        mem->memory_size)))
                goto out;
@@ -1684,9 +1689,17 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
 
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
 {
-       const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
+       int r;
+       unsigned long addr;
 
-       return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
+       addr = gfn_to_hva(kvm, gfn);
+       if (kvm_is_error_hva(addr))
+               return -EFAULT;
+       r = __clear_user((void __user *)addr + offset, len);
+       if (r)
+               return -EFAULT;
+       mark_page_dirty(kvm, gfn);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
 
@@ -1931,7 +1944,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
        return 0;
 }
 
-static struct file_operations kvm_vcpu_fops = {
+static file_operations_no_const kvm_vcpu_fops __read_only = {
        .release        = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
 #ifdef CONFIG_COMPAT
@@ -2647,7 +2660,7 @@ out:
 }
 #endif
 
-static struct file_operations kvm_vm_fops = {
+static file_operations_no_const kvm_vm_fops __read_only = {
        .release        = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
 #ifdef CONFIG_COMPAT
@@ -2718,7 +2731,7 @@ out:
        return r;
 }
 
-static struct file_operations kvm_chardev_ops = {
+static file_operations_no_const kvm_chardev_ops __read_only = {
        .unlocked_ioctl = kvm_dev_ioctl,
        .compat_ioctl   = kvm_dev_ioctl,
        .llseek         = noop_llseek,
@@ -2744,7 +2757,7 @@ static void hardware_enable_nolock(void *junk)
 
        if (r) {
                cpumask_clear_cpu(cpu, cpus_hardware_enabled);
-               atomic_inc(&hardware_enable_failed);
+               atomic_inc_unchecked(&hardware_enable_failed);
                printk(KERN_INFO "kvm: enabling virtualization on "
                                 "CPU%d failed\n", cpu);
        }
@@ -2800,10 +2813,10 @@ static int hardware_enable_all(void)
 
        kvm_usage_count++;
        if (kvm_usage_count == 1) {
-               atomic_set(&hardware_enable_failed, 0);
+               atomic_set_unchecked(&hardware_enable_failed, 0);
                on_each_cpu(hardware_enable_nolock, NULL, 1);
 
-               if (atomic_read(&hardware_enable_failed)) {
+               if (atomic_read_unchecked(&hardware_enable_failed)) {
                        hardware_disable_all_nolock();
                        r = -EBUSY;
                }
@@ -3210,7 +3223,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
        kvm_arch_vcpu_put(vcpu);
 }
 
-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
                  struct module *module)
 {
        int r;
@@ -3257,7 +3270,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
        if (!vcpu_align)
                vcpu_align = __alignof__(struct kvm_vcpu);
        kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
-                                          0, NULL);
+                                          SLAB_USERCOPY, NULL);
        if (!kvm_vcpu_cache) {
                r = -ENOMEM;
                goto out_free_3;
@@ -3267,9 +3280,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
        if (r)
                goto out_free;
 
+       pax_open_kernel();
        kvm_chardev_ops.owner = module;
        kvm_vm_fops.owner = module;
        kvm_vcpu_fops.owner = module;
+       pax_close_kernel();
 
        r = misc_register(&kvm_dev);
        if (r) {
@@ -3279,9 +3294,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 
        register_syscore_ops(&kvm_syscore_ops);
 
-       kvm_preempt_ops.sched_in = kvm_sched_in;
-       kvm_preempt_ops.sched_out = kvm_sched_out;
-
        r = kvm_init_debug();
        if (r) {
                printk(KERN_ERR "kvm: create debugfs files failed\n");