+zconf.lex.c
zoffset.h
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
-index 74b6c6d..eac0e77 100644
+index d2b1c40..3e90a74 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
A typical pattern in a Kbuild file looks like this:
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index 6726139..c825c0a 100644
+index cd03a0f..b8d72be 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1223,6 +1223,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
hashdist= [KNL,NUMA] Large hashes allocated during boot
are distributed across NUMA nodes. Defaults on
for 64-bit NUMA, off otherwise.
-@@ -2333,6 +2340,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+@@ -2341,6 +2348,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
noexec=on: enable non-executable mappings (default)
noexec=off: disable non-executable mappings
nosmap [X86]
Disable SMAP (Supervisor Mode Access Prevention)
even if it is supported by processor.
-@@ -2631,6 +2642,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+@@ -2639,6 +2650,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
the specified number of seconds. This is to be used if
your oopses keep scrolling off the screen.
A toggle value indicating if modules are allowed to be loaded
diff --git a/Makefile b/Makefile
-index e3cdec4..56ae73d 100644
+index b8591e5..1d9e8c0 100644
--- a/Makefile
+++ b/Makefile
@@ -299,7 +299,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
endif
ifdef CONFIG_DEBUG_INFO_DWARF4
KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
-@@ -883,7 +953,7 @@ export mod_sign_cmd
+@@ -884,7 +954,7 @@ export mod_sign_cmd
ifeq ($(KBUILD_EXTMOD),)
vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
-@@ -933,6 +1003,8 @@ endif
+@@ -934,6 +1004,8 @@ endif
# The actual objects are generated when descending,
# make sure no implicit rule kicks in
$(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
# Handle descending into subdirectories listed in $(vmlinux-dirs)
-@@ -942,7 +1014,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
+@@ -943,7 +1015,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
# Error messages still appears in the original language
PHONY += $(vmlinux-dirs)
$(Q)$(MAKE) $(build)=$@
define filechk_kernel.release
-@@ -985,10 +1057,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
+@@ -986,10 +1058,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
archprepare: archheaders archscripts prepare1 scripts_basic
prepare: prepare0
# Generate some files
-@@ -1096,6 +1171,8 @@ all: modules
+@@ -1097,6 +1172,8 @@ all: modules
# using awk while concatenating to the final file.
PHONY += modules
modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
$(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
@$(kecho) ' Building modules, stage 2.';
-@@ -1111,7 +1188,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
+@@ -1112,7 +1189,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
# Target to prepare building external modules
PHONY += modules_prepare
# Target to install modules
PHONY += modules_install
-@@ -1177,7 +1254,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
+@@ -1178,7 +1255,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
signing_key.priv signing_key.x509 x509.genkey \
extra_certificates signing_key.x509.keyid \
# clean - Delete most, but leave enough to build external modules
#
-@@ -1216,7 +1296,7 @@ distclean: mrproper
+@@ -1217,7 +1297,7 @@ distclean: mrproper
@find $(srctree) $(RCS_FIND_IGNORE) \
\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
-type f -print | xargs rm -f
-@@ -1382,6 +1462,8 @@ PHONY += $(module-dirs) modules
+@@ -1383,6 +1463,8 @@ PHONY += $(module-dirs) modules
$(module-dirs): crmodverdir $(objtree)/Module.symvers
$(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
modules: $(module-dirs)
@$(kecho) ' Building modules, stage 2.';
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
-@@ -1522,17 +1604,21 @@ else
+@@ -1523,17 +1605,21 @@ else
target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
endif
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
%.symtypes: %.c prepare scripts FORCE
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-@@ -1544,11 +1630,15 @@ endif
+@@ -1545,11 +1631,15 @@ endif
$(build)=$(build-dir)
# Make sure the latest headers are built for Documentation
Documentation/: headers_install
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
+diff --git a/arch/arm/Makefile b/arch/arm/Makefile
+index 985227c..8acc029 100644
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -304,6 +304,9 @@ INSTALL_TARGETS = zinstall uinstall install
+
+ PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+
++bootpImage uImage: zImage
++zImage: Image
++
+ $(BOOT_TARGETS): vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
+
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index e22c119..abe7041 100644
--- a/arch/arm/include/asm/atomic.h
- return page;
-}
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
-index cca5b87..68f0f73 100644
+index f11d825..bbe686f 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -76,7 +76,7 @@ enum ipi_msg_type {
#include <asm/smp_scu.h>
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
-index 3b56722..33ac281 100644
+index 6833df4..3e059b2 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -330,7 +330,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
struct omap_device *omap_device_alloc(struct platform_device *pdev,
struct omap_hwmod **ohs, int oh_cnt);
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
-index 752969f..a34b446 100644
+index 5286e77..fdd234c 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -199,10 +199,10 @@ struct omap_hwmod_soc_ops {
extern void ux500_cpu_die(unsigned int cpu);
diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
-index 52d768f..5f93180 100644
+index f66816c..228b951 100644
--- a/arch/arm/mach-zynq/platsmp.c
+++ b/arch/arm/mach-zynq/platsmp.c
@@ -24,6 +24,7 @@
}
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
-index e0e2358..a4ee460 100644
+index e0e2358..96c6791 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -20,6 +20,7 @@
#include "bpf_jit_32.h"
-@@ -72,34 +73,58 @@ struct jit_ctx {
+@@ -72,7 +73,11 @@ struct jit_ctx {
#endif
};
int bpf_jit_enable __read_mostly;
+#endif
--static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
-+static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
-+ unsigned int size)
-+{
-+ void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
-+
-+ if (!ptr)
-+ return -EFAULT;
-+ memcpy(ret, ptr, size);
-+ return 0;
-+}
-+
-+static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
+ static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
{
- u8 ret;
- int err;
-
-- err = skb_copy_bits(skb, offset, &ret, 1);
-+ if (offset < 0)
-+ err = call_neg_helper(skb, offset, &ret, 1);
-+ else
-+ err = skb_copy_bits(skb, offset, &ret, 1);
-
- return (u64)err << 32 | ret;
- }
-
--static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
-+static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
- {
- u16 ret;
- int err;
-
-- err = skb_copy_bits(skb, offset, &ret, 2);
-+ if (offset < 0)
-+ err = call_neg_helper(skb, offset, &ret, 2);
-+ else
-+ err = skb_copy_bits(skb, offset, &ret, 2);
-
- return (u64)err << 32 | ntohs(ret);
- }
-
--static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
-+static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
- {
- u32 ret;
- int err;
-
-- err = skb_copy_bits(skb, offset, &ret, 4);
-+ if (offset < 0)
-+ err = call_neg_helper(skb, offset, &ret, 4);
-+ else
-+ err = skb_copy_bits(skb, offset, &ret, 4);
-
- return (u64)err << 32 | ntohl(ret);
- }
-@@ -179,8 +204,10 @@ static void jit_fill_hole(void *area, unsigned int size)
+@@ -179,8 +184,10 @@ static void jit_fill_hole(void *area, unsigned int size)
{
u32 *ptr;
/* We are guaranteed to have aligned memory. */
}
static void build_prologue(struct jit_ctx *ctx)
-@@ -536,9 +563,6 @@ static int build_body(struct jit_ctx *ctx)
- case BPF_LD | BPF_B | BPF_ABS:
- load_order = 0;
- load:
-- /* the interpreter will deal with the negative K */
-- if ((int)k < 0)
-- return -ENOTSUPP;
- emit_mov_i(r_off, k, ctx);
- load_common:
- ctx->seen |= SEEN_DATA | SEEN_CALL;
-@@ -547,12 +571,24 @@ load_common:
+@@ -547,7 +554,7 @@ load_common:
emit(ARM_SUB_I(r_scratch, r_skb_hl,
1 << load_order), ctx);
emit(ARM_CMP_R(r_scratch, r_off), ctx);
} else {
emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
condt = ARM_COND_HI;
- }
-
-+ /*
-+ * test for negative offset, only if we are
-+ * currently scheduled to take the fast
-+ * path. this will update the flags so that
-+ * the slowpath instructions are ignored if the
-+ * offset is negative.
-+ *
-+ * for load_order == 0 the HI condition will
-+ * make loads at offset 0 take the slow path too.
-+ */
-+ _emit(condt, ARM_CMP_I(r_off, 0), ctx);
-+
- _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
- ctx);
-
-@@ -860,9 +896,11 @@ b_epilogue:
+@@ -860,9 +867,11 @@ b_epilogue:
off = offsetof(struct sk_buff, vlan_tci);
emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
#define SMP_CACHE_BYTES L1_CACHE_BYTES
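The bpf_jit_32.c hunks above drop the JIT's old refusal of negative offsets (the removed -ENOTSUPP check) and instead route them through bpf_internal_load_pointer_neg_helper() at run time, both in the C load helpers and, via the extra CMP against zero, in the emitted fast path. A minimal sketch of the helper-side pattern, assuming kernel context (skb_copy_bits() and bpf_internal_load_pointer_neg_helper() are the real kernel functions used above; load_one_byte() is a hypothetical consolidation of the jit_get_skb_* helpers):

    /*
     * Sketch only: the error is returned in the high 32 bits and the
     * payload in the low 32, so the JITed caller can test both with a
     * single shifted compare, exactly as the helpers above do.
     */
    static u64 load_one_byte(struct sk_buff *skb, int offset)
    {
    	u8 val = 0;
    	int err;

    	if (offset < 0) {
    		/* negative offsets address the ancillary/net space */
    		void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, 1);

    		err = ptr ? 0 : -EFAULT;
    		if (ptr)
    			val = *(u8 *)ptr;
    	} else {
    		err = skb_copy_bits(skb, offset, &val, 1);
    	}

    	return (u64)err << 32 | val;	/* err in high word, data in low */
    }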
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
-index f501665..b107753 100644
+index a3b1ffe..7d61ca6 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
-@@ -2585,6 +2585,7 @@ source "kernel/Kconfig.preempt"
+@@ -2586,6 +2586,7 @@ source "kernel/Kconfig.preempt"
config KEXEC
bool "Kexec system call"
/*
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
-index 819af9d..439839d 100644
+index 70f6e7f..11f4ada 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -20,6 +20,9 @@
}
/* Arrange for an interrupt in a short while */
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
-index d2d1c19..3e21d8d 100644
+index 5f5f44e..cf10625 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
-@@ -689,7 +689,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
+@@ -696,7 +696,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
siginfo_t info;
prev_state = exception_enter();
instruction set this CPU supports. This could be done in user space,
but it's not easy, and we've already done it here. */
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
-index 3a08eae..08fef28 100644
+index 3edbb9f..08fef28 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
-@@ -72,7 +77,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
-
- static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
- {
-- if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
-+ if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
- /*
- * This is the permanent pmd attached to the pgd;
- * cannot free it.
-@@ -81,6 +86,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
- */
- mm_inc_nr_pmds(mm);
- return;
-+ }
- free_pages((unsigned long)pmd, PMD_ORDER);
- }
-
-@@ -96,6 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+@@ -97,6 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
#endif
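The pmd_free() hunk above is not cosmetic: the if body had grown to two statements (the mm_inc_nr_pmds() accounting and the early return) while braces were never added, so the return executed unconditionally and the free_pages() call below it became unreachable, leaking every pmd. A reduced illustration of the bug class, with hypothetical names (release_buggy/release_fixed, struct pmd_like):

    #include <stdlib.h>

    struct pmd_like { int attached; };
    static int permanent_count;

    static void release_buggy(struct pmd_like *p)
    {
    	if (p->attached)
    		/* looks guarded, but only this first statement is */
    		permanent_count++;
    		return;			/* unconditional: runs for EVERY pmd */
    	free(p);			/* dead code, every pmd leaks */
    }

    static void release_fixed(struct pmd_like *p)
    {
    	if (p->attached) {
    		permanent_count++;
    		return;			/* only the attached case bails out */
    	}
    	free(p);
    }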
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
-index 0a18375..d613939 100644
+index f93c4a4..cfd5663 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
-@@ -213,6 +213,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
+@@ -231,6 +231,17 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
#define PAGE_COPY PAGE_EXECREAD
#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
mm->mmap_base = mm->mmap_legacy_base;
mm->get_unmapped_area = arch_get_unmapped_area;
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
-index 47ee620..1107387 100644
+index 7f67c4c..d85c11d 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
-@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
+@@ -722,9 +722,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
down_read(¤t->mm->mmap_sem);
vma = find_vma(current->mm,regs->iaoq[0]);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
-index d3a831a..3a33123 100644
+index da50e0c..5ff6307 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
-@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+@@ -1009,7 +1009,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
/* Save user registers on the stack */
frame = &rt_sf->uc.uc_mcontext;
addr = frame;
if (r_type == R_390_GOTPC)
rc = apply_rela_bits(loc, val, 1, 32, 0);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
-index dc5edc2..7d34ae3 100644
+index 8f587d8..0642516b 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -200,27 +200,3 @@ unsigned long get_wchan(struct task_struct *p)
ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
BACKOFF_SETUP(%o2)
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
-index 1d649a9..fbc5bfc 100644
+index 8069ce1..c2e23c4 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 226d569..d420edc 100644
+index 226d569..297bf74 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -32,7 +32,7 @@ config X86
default 0x40000000 if VMSPLIT_1G
default 0xC0000000
depends on X86_32
-@@ -1717,6 +1721,7 @@ source kernel/Kconfig.hz
+@@ -1286,7 +1290,6 @@ config X86_PAE
+
+ config ARCH_PHYS_ADDR_T_64BIT
+ def_bool y
+- depends on X86_64 || X86_PAE
+
+ config ARCH_DMA_ADDR_T_64BIT
+ def_bool y
+@@ -1717,6 +1720,7 @@ source kernel/Kconfig.hz
config KEXEC
bool "kexec system call"
---help---
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
-@@ -1899,7 +1904,9 @@ config X86_NEED_RELOCS
+@@ -1899,7 +1903,9 @@ config X86_NEED_RELOCS
config PHYSICAL_ALIGN
hex "Alignment value to which kernel should be aligned"
range 0x2000 0x1000000 if X86_32
range 0x200000 0x1000000 if X86_64
---help---
-@@ -1982,6 +1989,7 @@ config COMPAT_VDSO
+@@ -1982,6 +1988,7 @@ config COMPAT_VDSO
def_bool n
prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
depends on X86_32 || IA32_EMULATION
---help---
Certain buggy versions of glibc will crash if they are
presented with a 32-bit vDSO that is not mapped at the address
-@@ -2046,6 +2054,22 @@ config CMDLINE_OVERRIDE
+@@ -2046,6 +2053,22 @@ config CMDLINE_OVERRIDE
This is used to work around broken boot loaders. This should
be set to 'N' under normal conditions.
struct compat_timespec {
compat_time_t tv_sec;
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
-index 3d6606f..5e22255 100644
+index 3d6606f..300641d 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -214,7 +214,8 @@
#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
t_warn:
- warn_pre_alternatives();
-+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
++ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID && bit != X86_FEATURE_PCIDUDEREF)
+ warn_pre_alternatives();
return false;
#endif
atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */
} mm_context_t;
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
-index 883f6b93..5184058 100644
+index e997f70..5d819f7 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -42,6 +42,20 @@ void destroy_context(struct mm_struct *mm);
#endif /* __ASSEMBLY__ */
#include <asm-generic/memory_model.h>
+diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h
+index 904f528..b4d0d24 100644
+--- a/arch/x86/include/asm/page_32.h
++++ b/arch/x86/include/asm/page_32.h
+@@ -7,11 +7,17 @@
+
+ #define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET)
+ #ifdef CONFIG_DEBUG_VIRTUAL
+-extern unsigned long __phys_addr(unsigned long);
++extern unsigned long __intentional_overflow(-1) __phys_addr(unsigned long);
+ #else
+-#define __phys_addr(x) __phys_addr_nodebug(x)
++static inline unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
++{
++ return __phys_addr_nodebug(x);
++}
+ #endif
+-#define __phys_addr_symbol(x) __phys_addr(x)
++static inline unsigned long __intentional_overflow(-1) __phys_addr_symbol(unsigned long x)
++{
++ return __phys_addr(x);
++}
+ #define __phys_reloc_hide(x) RELOC_HIDE((x), 0)
+
+ #ifdef CONFIG_FLATMEM
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
-index b3bebf9..13ac22e 100644
+index b3bebf9..cb419e7 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -7,9 +7,9 @@
{
unsigned long y = x - __START_KERNEL_map;
-@@ -20,8 +20,8 @@ static inline unsigned long __phys_addr_nodebug(unsigned long x)
+@@ -20,12 +20,14 @@ static inline unsigned long __phys_addr_nodebug(unsigned long x)
}
#ifdef CONFIG_DEBUG_VIRTUAL
+extern unsigned long __intentional_overflow(-1) __phys_addr_symbol(unsigned long);
#else
#define __phys_addr(x) __phys_addr_nodebug(x)
- #define __phys_addr_symbol(x) \
+-#define __phys_addr_symbol(x) \
+- ((unsigned long)(x) - __START_KERNEL_map + phys_base)
++static inline unsigned long __intentional_overflow(-1) __phys_addr_symbol(unsigned long x)
++{
++ return x - __START_KERNEL_map + phys_base;
++}
+ #endif
+
+ #define __phys_reloc_hide(x) (x)
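Both the page_32.h and page_64.h hunks above perform the same mechanical transformation: __intentional_overflow() is a declaration attribute (supplied by PaX's size-overflow GCC plugin) and has nowhere to hang on a function-like macro, so each macro body is rewrapped as a static inline function with an identical expansion. The general shape, sketched with a hypothetical __no_overflow_check attribute and an illustrative PAGE_OFFSET value:

    /* stand-ins so the sketch is self-contained */
    #define __no_overflow_check		/* plugin attribute when active, empty otherwise */
    #define PAGE_OFFSET 0xC0000000UL	/* illustrative value only */

    /* before: an attribute cannot be attached to this */
    /* #define phys_of(x) ((x) - PAGE_OFFSET) */

    /* after: same expansion for every caller, now annotatable */
    static inline unsigned long __no_overflow_check phys_of(unsigned long x)
    {
    	return x - PAGE_OFFSET;		/* wraparound here is intentional */
    }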
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 8957810..f34efb4 100644
--- a/arch/x86/include/asm/paravirt.h
void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
-index 751bf4b..a1278b5 100644
+index 751bf4b..3cc39f1 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
-@@ -112,7 +112,7 @@ do { \
+@@ -79,12 +79,12 @@ do { \
+ #else /* CONFIG_X86_32 */
+
+ /* frame pointer must be last for get_wchan */
+-#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
++#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
++#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
+
+ #define __EXTRA_CLOBBER \
+ , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
+- "r12", "r13", "r14", "r15", "flags"
++ "r12", "r13", "r14", "r15"
+
+ #ifdef CONFIG_CC_STACKPROTECTOR
+ #define __switch_canary \
+@@ -100,11 +100,7 @@ do { \
+ #define __switch_canary_iparam
+ #endif /* CC_STACKPROTECTOR */
+
+-/*
+- * There is no need to save or restore flags, because flags are always
+- * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL
+- * has no effect.
+- */
++/* Save restore flags to clear handle leaking NT */
+ #define switch_to(prev, next, last) \
+ asm volatile(SAVE_CONTEXT \
+ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
+@@ -112,7 +108,7 @@ do { \
"call __switch_to\n\t" \
"movq "__percpu_arg([current_task])",%%rsi\n\t" \
__switch_canary \
"movq %%rax,%%rdi\n\t" \
"testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
"jnz ret_from_fork\n\t" \
-@@ -123,7 +123,7 @@ do { \
+@@ -123,7 +119,7 @@ do { \
[threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
[ti_flags] "i" (offsetof(struct thread_info, flags)), \
[_tif_fork] "i" (_TIF_FORK), \
bogus_magic:
jmp bogus_magic
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
-index aef6531..2044b66 100644
+index aef6531..d7ca83a 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -248,7 +248,9 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
insns += noplen;
len -= noplen;
}
-@@ -276,6 +278,11 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
+@@ -276,6 +278,13 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
if (a->replacementlen != 5)
return;
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+ if (orig_insn < (u8 *)_text || (u8 *)_einittext <= orig_insn)
+ orig_insn = ktva_ktla(orig_insn);
++ else
++ orig_insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+#endif
+
o_dspl = *(s32 *)(insnbuf + 1);
/* next_rip of the replacement JMP */
-@@ -362,7 +369,23 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
+@@ -346,6 +355,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
+ {
+ struct alt_instr *a;
+ u8 *instr, *replacement;
++ u8 *vinstr, *vreplacement;
+ u8 insnbuf[MAX_PATCH_LEN];
+
+ DPRINTK("alt table %p -> %p", start, end);
+@@ -361,46 +371,71 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
+ for (a = start; a < end; a++) {
int insnbuf_sz = 0;
- instr = (u8 *)&a->instr_offset + a->instr_offset;
+- instr = (u8 *)&a->instr_offset + a->instr_offset;
+- replacement = (u8 *)&a->repl_offset + a->repl_offset;
++ vinstr = instr = (u8 *)&a->instr_offset + a->instr_offset;
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ if ((u8 *)_text <= instr && instr < (u8 *)_einittext) {
++ if ((u8 *)_text - (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR) <= instr &&
++ instr < (u8 *)_einittext - (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR)) {
+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
-+ instr = ktla_ktva(instr);
++ vinstr = ktla_ktva(instr);
++ } else if ((u8 *)_text <= instr && instr < (u8 *)_einittext) {
++ vinstr = ktla_ktva(instr);
++ } else {
++ instr = ktva_ktla(instr);
+ }
+#endif
+
- replacement = (u8 *)&a->repl_offset + a->repl_offset;
++ vreplacement = replacement = (u8 *)&a->repl_offset + a->repl_offset;
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ if ((u8 *)_text <= replacement && replacement < (u8 *)_einittext) {
++ if ((u8 *)_text - (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR) <= replacement &&
++ replacement < (u8 *)_einittext - (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR)) {
+ replacement += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
-+ replacement = ktla_ktva(replacement);
-+ }
++ vreplacement = ktla_ktva(replacement);
++ } else if ((u8 *)_text <= replacement && replacement < (u8 *)_einittext) {
++ vreplacement = ktla_ktva(replacement);
++ } else
++ replacement = ktva_ktla(replacement);
+#endif
+
BUG_ON(a->instrlen > sizeof(insnbuf));
BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
if (!boot_cpu_has(a->cpuid)) {
-@@ -402,6 +425,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
+ if (a->padlen > 1)
+- optimize_nops(a, instr);
++ optimize_nops(a, vinstr);
+
+ continue;
}
- DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
-+ instr = ktva_ktla(instr);
-+#endif
-+
+- DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
++ DPRINTK("feat: %d*32+%d, old: (%p/%p, len: %d), repl: (%p, len: %d), pad: %d",
+ a->cpuid >> 5,
+ a->cpuid & 0x1f,
+- instr, a->instrlen,
+- replacement, a->replacementlen, a->padlen);
++ instr, vinstr, a->instrlen,
++ vreplacement, a->replacementlen, a->padlen);
+
+- DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
+- DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);
++ DUMP_BYTES(vinstr, a->instrlen, "%p: old_insn: ", vinstr);
++ DUMP_BYTES(vreplacement, a->replacementlen, "%p: rpl_insn: ", vreplacement);
+
+- memcpy(insnbuf, replacement, a->replacementlen);
++ memcpy(insnbuf, vreplacement, a->replacementlen);
+ insnbuf_sz = a->replacementlen;
+
+ /* 0xe8 is a relative jump; fix the offset. */
+ if (*insnbuf == 0xe8 && a->replacementlen == 5) {
+- *(s32 *)(insnbuf + 1) += replacement - instr;
++ *(s32 *)(insnbuf + 1) += vreplacement - vinstr;
+ DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
+ *(s32 *)(insnbuf + 1),
+- (unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
++ (unsigned long)vinstr + *(s32 *)(insnbuf + 1) + 5);
+ }
+
+- if (a->replacementlen && is_jmp(replacement[0]))
+- recompute_jump(a, instr, replacement, insnbuf);
++ if (a->replacementlen && is_jmp(vreplacement[0]))
++ recompute_jump(a, instr, vreplacement, insnbuf);
+
+ if (a->instrlen > a->replacementlen) {
+ add_nops(insnbuf + a->replacementlen,
+ a->instrlen - a->replacementlen);
+ insnbuf_sz += a->instrlen - a->replacementlen;
+ }
+- DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);
++ DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", vinstr);
+
text_poke_early(instr, insnbuf, insnbuf_sz);
}
- }
-@@ -416,10 +444,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
+@@ -416,10 +451,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
for (poff = start; poff < end; poff++) {
u8 *ptr = (u8 *)poff + *poff;
text_poke(ptr, ((unsigned char []){0xf0}), 1);
}
mutex_unlock(&text_mutex);
-@@ -434,10 +468,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
+@@ -434,10 +475,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
for (poff = start; poff < end; poff++) {
u8 *ptr = (u8 *)poff + *poff;
text_poke(ptr, ((unsigned char []){0x3E}), 1);
}
mutex_unlock(&text_mutex);
-@@ -574,7 +614,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
+@@ -574,7 +621,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
BUG_ON(p->len > MAX_PATCH_LEN);
/* prep the buffer with the original instructions */
used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
(unsigned long)p->instr, p->len);
-@@ -621,7 +661,7 @@ void __init alternative_instructions(void)
+@@ -621,7 +668,7 @@ void __init alternative_instructions(void)
if (!uniproc_patched || num_possible_cpus() == 1)
free_init_pages("SMP alternatives",
(unsigned long)__smp_locks,
#endif
apply_paravirt(__parainstructions, __parainstructions_end);
-@@ -641,13 +681,17 @@ void __init alternative_instructions(void)
+@@ -641,13 +688,17 @@ void __init alternative_instructions(void)
* instructions. And on the local CPU you need to be protected again NMI or MCE
* handlers seeing an inconsistent instruction while you patch.
*/
local_irq_restore(flags);
/* Could also do a CLFLUSH here to speed up CPU recovery; but
that causes hangs on some VIA CPUs. */
-@@ -669,36 +713,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
+@@ -669,36 +720,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
*/
void *text_poke(void *addr, const void *opcode, size_t len)
{
return addr;
}
-@@ -752,7 +782,7 @@ int poke_int3_handler(struct pt_regs *regs)
+@@ -752,7 +789,7 @@ int poke_int3_handler(struct pt_regs *regs)
*/
void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
bp_int3_handler = handler;
bp_int3_addr = (u8 *)addr + sizeof(int3);
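Most of the apply_alternatives() churn above is bookkeeping for KERNEXEC's split view of kernel text: instr/replacement keep the addresses that text_poke_early() writes through, while the new vinstr/vreplacement hold the addresses the CPU will execute, translated with ktla_ktva()/ktva_ktla(), and all displacement math now uses the executable view. The displacement fixup itself is the standard rel32 adjustment: a copied 0xe8 CALL encodes target - (insn + 5), so moving the instruction means shifting the displacement by the distance moved. A standalone sketch (fix_rel32_call() is a hypothetical name):

    #include <stdint.h>
    #include <string.h>

    /*
     * A 5-byte 0xe8 CALL copied from src but destined to run at dst
     * must have its displacement adjusted by (src - dst) to keep
     * reaching the same target; this mirrors the
     * `+= vreplacement - vinstr` fixup above.
     */
    static void fix_rel32_call(uint8_t insn[5], const uint8_t *src, const uint8_t *dst)
    {
    	int32_t disp;

    	if (insn[0] != 0xe8)	/* only relative CALL encodes this way */
    		return;
    	memcpy(&disp, insn + 1, sizeof(disp));
    	/* src + 5 + disp == dst + 5 + disp'  =>  disp' = disp + (src - dst) */
    	disp += (int32_t)(src - dst);
    	memcpy(insn + 1, &disp, sizeof(disp));
    }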
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
-index dcb5285..cc79e9d 100644
+index cde732c..6365ac2 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -171,7 +171,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
if (c->x86_model == 3 && c->x86_mask == 0)
size = 64;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index a62cf04..56afd65 100644
+index a62cf04..a55415c 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -91,60 +91,6 @@ static const struct cpu_dev default_cpu = {
static int __init x86_xsave_setup(char *s)
{
if (strlen(s))
-@@ -306,6 +252,62 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+@@ -306,6 +252,109 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
}
}
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#ifdef CONFIG_X86_64
++static bool uderef_enabled __read_only = true;
++unsigned long pax_user_shadow_base __read_only;
++EXPORT_SYMBOL(pax_user_shadow_base);
++extern char pax_enter_kernel_user[];
++extern char pax_exit_kernel_user[];
++
++static int __init setup_pax_weakuderef(char *str)
++{
++ if (uderef_enabled)
++ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
++ return 1;
++}
++__setup("pax_weakuderef", setup_pax_weakuderef);
++#endif
++
++static int __init setup_pax_nouderef(char *str)
++{
++#ifdef CONFIG_X86_32
++ unsigned int cpu;
++ struct desc_struct *gdt;
++
++ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
++ gdt = get_cpu_gdt_table(cpu);
++ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
++ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
++ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
++ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
++ }
++ loadsegment(ds, __KERNEL_DS);
++ loadsegment(es, __KERNEL_DS);
++ loadsegment(ss, __KERNEL_DS);
++#else
++ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
++ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
++ clone_pgd_mask = ~(pgdval_t)0UL;
++ pax_user_shadow_base = 0UL;
++ setup_clear_cpu_cap(X86_FEATURE_PCIDUDEREF);
++ uderef_enabled = false;
++#endif
++
++ return 0;
++}
++early_param("pax_nouderef", setup_pax_nouderef);
++#endif
++
+#ifdef CONFIG_X86_64
+static __init int setup_disable_pcid(char *arg)
+{
+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ if (clone_pgd_mask != ~(pgdval_t)0UL)
++ if (uderef_enabled)
+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
+#endif
+
+ printk("PAX: INVPCID detected\n");
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ if (clone_pgd_mask == ~(pgdval_t)0UL) {
++ if (!uderef_enabled) {
+ printk("PAX: UDEREF disabled\n");
+ return;
+ }
/*
* Some CPU features depend on higher CPUID levels, which may not always
* be available due to CPUID level capping or broken virtualization
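The common.c additions above hook two boot-time switches through the standard kernel mechanisms: __setup() for pax_weakuderef (handlers return 1 when the option is consumed) and early_param() for pax_nouderef (handlers return 0 on success and run during early command-line parsing, before alternatives and most CPU setup). They also replace the old implicit `clone_pgd_mask == ~(pgdval_t)0UL` test with an explicit uderef_enabled flag. A minimal early_param() sketch, assuming kernel context and a hypothetical feature flag:

    /* hypothetical my_feature flag; kernel context assumed */
    static bool my_feature_enabled __read_mostly = true;

    static int __init setup_my_feature_off(char *str)
    {
    	my_feature_enabled = false;
    	return 0;	/* 0 = parsed successfully */
    }
    early_param("my_feature_off", setup_my_feature_off);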
-@@ -406,7 +408,7 @@ void switch_to_new_gdt(int cpu)
+@@ -406,7 +455,7 @@ void switch_to_new_gdt(int cpu)
{
struct desc_ptr gdt_descr;
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
/* Reload the per-cpu base */
-@@ -935,6 +937,20 @@ static void identify_cpu(struct cpuinfo_x86 *c)
+@@ -935,6 +984,20 @@ static void identify_cpu(struct cpuinfo_x86 *c)
setup_smep(c);
setup_smap(c);
/*
* The vendor-specific functions might have changed features.
* Now we do "generic changes."
-@@ -1009,7 +1025,7 @@ void enable_sep_cpu(void)
+@@ -1009,7 +1072,7 @@ void enable_sep_cpu(void)
int cpu;
cpu = get_cpu();
if (!boot_cpu_has(X86_FEATURE_SEP))
goto out;
-@@ -1155,14 +1171,16 @@ static __init int setup_disablecpuid(char *arg)
+@@ -1155,14 +1218,16 @@ static __init int setup_disablecpuid(char *arg)
}
__setup("clearcpuid=", setup_disablecpuid);
DEFINE_PER_CPU_FIRST(union irq_stack_union,
irq_stack_union) __aligned(PAGE_SIZE) __visible;
-@@ -1367,7 +1385,7 @@ void cpu_init(void)
+@@ -1367,7 +1432,7 @@ void cpu_init(void)
*/
load_ucode_ap();
oist = &per_cpu(orig_ist, cpu);
#ifdef CONFIG_NUMA
-@@ -1399,7 +1417,6 @@ void cpu_init(void)
+@@ -1399,7 +1464,6 @@ void cpu_init(void)
wrmsrl(MSR_KERNEL_GS_BASE, 0);
barrier();
x2apic_setup();
/*
-@@ -1451,7 +1468,7 @@ void cpu_init(void)
+@@ -1451,7 +1515,7 @@ void cpu_init(void)
{
int cpu = smp_processor_id();
struct task_struct *curr = current;
__bts_event_stop(event);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
-index e4d1b8b..2c6ffa0 100644
+index cb77b11..8867302 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
-@@ -1352,7 +1352,9 @@ static int __init intel_cqm_init(void)
+@@ -1360,7 +1360,9 @@ static int __init intel_cqm_init(void)
goto out;
}
#endif
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index 02c2eff..a13739f 100644
+index 4bd6c19..a0eba01 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -46,6 +46,8 @@
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+ call pax_exit_kernel
+#endif
-+
+ .endm
+
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
++ ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
+ btr $2,%ebx
+ jnc 111f
+ GET_CR3_INTO_RDI
/*
* The iretq could re-enable interrupts:
*/
-@@ -793,8 +1232,6 @@ retint_kernel:
- restore_c_regs_and_iret:
- RESTORE_C_REGS
- REMOVE_PT_GPREGS_FROM_STACK 8
--
--irq_return:
- INTERRUPT_RETURN
-
- ENTRY(native_iret)
-@@ -824,15 +1261,15 @@ native_irq_return_ldt:
+@@ -822,15 +1261,15 @@ native_irq_return_ldt:
SWAPGS
movq PER_CPU_VAR(espfix_waddr),%rdi
movq %rax,(0*8)(%rdi) /* RAX */
movq %rax,(4*8)(%rdi)
andl $0xffff0000,%eax
popq_cfi %rdi
-@@ -875,7 +1312,7 @@ retint_signal:
+@@ -873,7 +1312,7 @@ retint_signal:
jmp retint_with_reschedule
CFI_ENDPROC
/*
* APIC interrupts.
-@@ -889,7 +1326,7 @@ ENTRY(\sym)
+@@ -887,7 +1326,7 @@ ENTRY(\sym)
interrupt \do_sym
jmp ret_from_intr
CFI_ENDPROC
.endm
#ifdef CONFIG_TRACING
-@@ -962,7 +1399,7 @@ apicinterrupt IRQ_WORK_VECTOR \
+@@ -960,7 +1399,7 @@ apicinterrupt IRQ_WORK_VECTOR \
/*
* Exception entry points.
*/
.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
-@@ -1018,6 +1455,12 @@ ENTRY(\sym)
+@@ -1016,6 +1455,12 @@ ENTRY(\sym)
.endif
.if \shift_ist != -1
subq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
.endif
-@@ -1065,7 +1508,7 @@ ENTRY(\sym)
+@@ -1063,7 +1508,7 @@ ENTRY(\sym)
.endif
CFI_ENDPROC
.endm
#ifdef CONFIG_TRACING
-@@ -1106,9 +1549,10 @@ gs_change:
+@@ -1104,9 +1549,10 @@ gs_change:
2: mfence /* workaround */
SWAPGS
popfq_cfi
_ASM_EXTABLE(gs_change,bad_gs)
.section .fixup,"ax"
-@@ -1136,9 +1580,10 @@ ENTRY(do_softirq_own_stack)
+@@ -1134,9 +1580,10 @@ ENTRY(do_softirq_own_stack)
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
decl PER_CPU_VAR(irq_count)
#ifdef CONFIG_XEN
idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
-@@ -1179,7 +1624,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
+@@ -1177,7 +1624,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
#endif
jmp error_exit
CFI_ENDPROC
/*
* Hypervisor uses this for application faults while it executes.
-@@ -1240,7 +1685,7 @@ ENTRY(xen_failsafe_callback)
+@@ -1238,7 +1685,7 @@ ENTRY(xen_failsafe_callback)
SAVE_EXTRA_REGS
jmp error_exit
CFI_ENDPROC
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
xen_hvm_callback_vector xen_evtchn_do_upcall
-@@ -1286,9 +1731,39 @@ ENTRY(paranoid_entry)
+@@ -1284,9 +1731,39 @@ ENTRY(paranoid_entry)
js 1f /* negative -> in kernel */
SWAPGS
xorl %ebx,%ebx
/*
* "Paranoid" exit path from exception stack. This is invoked
-@@ -1305,20 +1780,27 @@ ENTRY(paranoid_exit)
+@@ -1303,20 +1780,27 @@ ENTRY(paranoid_exit)
DEFAULT_FRAME
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF_DEBUG
/*
* Save all registers in pt_regs, and switch gs if needed.
-@@ -1330,12 +1812,23 @@ ENTRY(error_entry)
+@@ -1328,12 +1812,23 @@ ENTRY(error_entry)
SAVE_C_REGS 8
SAVE_EXTRA_REGS 8
xorl %ebx,%ebx
ret
/*
-@@ -1370,7 +1863,7 @@ error_bad_iret:
+@@ -1368,7 +1863,7 @@ error_bad_iret:
decl %ebx /* Return to usergs */
jmp error_sti
CFI_ENDPROC
/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
-@@ -1381,7 +1874,7 @@ ENTRY(error_exit)
+@@ -1379,7 +1874,7 @@ ENTRY(error_exit)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
GET_THREAD_INFO(%rcx)
jne retint_kernel
LOCKDEP_SYS_EXIT_IRQ
movl TI_flags(%rcx),%edx
-@@ -1390,7 +1883,7 @@ ENTRY(error_exit)
+@@ -1388,7 +1883,7 @@ ENTRY(error_exit)
jnz retint_careful
jmp retint_swapgs
CFI_ENDPROC
/* Runs on exception stack */
ENTRY(nmi)
-@@ -1413,11 +1906,12 @@ ENTRY(nmi)
- * If the variable is not set and the stack is not the NMI
- * stack then:
- * o Set the special variable on the stack
-- * o Copy the interrupt frame into a "saved" location on the stack
-- * o Copy the interrupt frame into a "copy" location on the stack
-+ * o Copy the interrupt frame into an "outermost" location on the
-+ * stack
-+ * o Copy the interrupt frame into an "iret" location on the stack
- * o Continue processing the NMI
- * If the variable is set or the previous stack is the NMI stack:
-- * o Modify the "copy" location to jump to the repeate_nmi
-+ * o Modify the "iret" location to jump to the repeat_nmi
- * o return back to the first NMI
- *
- * Now on exit of the first NMI, we first clear the stack variable
-@@ -1426,32 +1920,185 @@ ENTRY(nmi)
- * a nested NMI that updated the copy interrupt stack frame, a
- * jump will be made to the repeat_nmi code that will handle the second
- * NMI.
-+ *
-+ * However, espfix prevents us from directly returning to userspace
-+ * with a single IRET instruction. Similarly, IRET to user mode
-+ * can fault. We therefore handle NMIs from user space like
-+ * other IST entries.
- */
-
- /* Use %rdx as our temp variable throughout */
- pushq_cfi %rdx
- CFI_REL_OFFSET rdx, 0
+@@ -1473,6 +1968,12 @@ ENTRY(nmi)
+ pushq %r14 /* pt_regs->r14 */
+ pushq %r15 /* pt_regs->r15 */
-+ testb $3, CS-RIP+8(%rsp)
-+ jz .Lnmi_from_kernel
-+
-+ /*
-+ * NMI from user mode. We need to run on the thread stack, but we
-+ * can't go through the normal entry paths: NMIs are masked, and
-+ * we don't want to enable interrupts, because then we'll end
-+ * up in an awkward situation in which IRQs are on but NMIs
-+ * are off.
-+ */
-+
-+ SWAPGS
-+ cld
-+ movq %rsp, %rdx
-+ movq PER_CPU_VAR(kernel_stack), %rsp
-+ pushq 5*8(%rdx) /* pt_regs->ss */
-+ pushq 4*8(%rdx) /* pt_regs->rsp */
-+ pushq 3*8(%rdx) /* pt_regs->flags */
-+ pushq 2*8(%rdx) /* pt_regs->cs */
-+ pushq 1*8(%rdx) /* pt_regs->rip */
-+ pushq $-1 /* pt_regs->orig_ax */
-+ pushq %rdi /* pt_regs->di */
-+ pushq %rsi /* pt_regs->si */
-+ pushq (%rdx) /* pt_regs->dx */
-+ pushq %rcx /* pt_regs->cx */
-+ pushq %rax /* pt_regs->ax */
-+ pushq %r8 /* pt_regs->r8 */
-+ pushq %r9 /* pt_regs->r9 */
-+ pushq %r10 /* pt_regs->r10 */
-+ pushq %r11 /* pt_regs->r11 */
-+ pushq %rbx /* pt_regs->rbx */
-+ pushq %rbp /* pt_regs->rbp */
-+ pushq %r12 /* pt_regs->r12 */
-+ pushq %r13 /* pt_regs->r13 */
-+ pushq %r14 /* pt_regs->r14 */
-+ pushq %r15 /* pt_regs->r15 */
-+
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+ xorl %ebx,%ebx
+#endif
+ pax_enter_kernel_nmi
+
/*
-- * If %cs was not the kernel segment, then the NMI triggered in user
-- * space, which means it is definitely not nested.
-+ * At this point we no longer need to worry about stack damage
-+ * due to nesting -- we're on the normal thread stack and we're
-+ * done with the NMI stack.
- */
-- cmpl $__KERNEL_CS, 16(%rsp)
-- jne first_nmi
-+
-+ movq %rsp, %rdi
-+ movq $-1, %rsi
-+ call do_nmi
-+
+ * At this point we no longer need to worry about stack damage
+ * due to nesting -- we're on the normal thread stack and we're
+@@ -1482,12 +1983,19 @@ ENTRY(nmi)
+ movq $-1, %rsi
+ call do_nmi
+
+ pax_exit_kernel_nmi
+
-+ /*
-+ * Return back to user mode. We must *not* do the normal exit
-+ * work, because we don't want to enable interrupts. Fortunately,
-+ * do_nmi doesn't modify pt_regs.
-+ */
-+ SWAPGS
-+
-+ /*
-+ * Open-code the entire return process for compatibility with varying
-+ * register layouts across different kernel versions.
-+ */
-+
-+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+ movq RBX(%rsp), %rbx /* pt_regs->rbx*/
-+#endif
-+
-+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ movq R12(%rsp), %r12 /* pt_regs->r12*/
-+#endif
-+
-+ addq $6*8, %rsp /* skip bx, bp, and r12-r15 */
-+ popq %r11 /* pt_regs->r11 */
-+ popq %r10 /* pt_regs->r10 */
-+ popq %r9 /* pt_regs->r9 */
-+ popq %r8 /* pt_regs->r8 */
-+ popq %rax /* pt_regs->ax */
-+ popq %rcx /* pt_regs->cx */
-+ popq %rdx /* pt_regs->dx */
-+ popq %rsi /* pt_regs->si */
-+ popq %rdi /* pt_regs->di */
-+ addq $8, %rsp /* skip orig_ax */
-+ INTERRUPT_RETURN
-+
-+.Lnmi_from_kernel:
-+ /*
-+ * Here's what our stack frame will look like:
-+ * +---------------------------------------------------------+
-+ * | original SS |
-+ * | original Return RSP |
-+ * | original RFLAGS |
-+ * | original CS |
-+ * | original RIP |
-+ * +---------------------------------------------------------+
-+ * | temp storage for rdx |
-+ * +---------------------------------------------------------+
-+ * | "NMI executing" variable |
-+ * +---------------------------------------------------------+
-+ * | iret SS } Copied from "outermost" frame |
-+ * | iret Return RSP } on each loop iteration; overwritten |
-+ * | iret RFLAGS } by a nested NMI to force another |
-+ * | iret CS } iteration if needed. |
-+ * | iret RIP } |
-+ * +---------------------------------------------------------+
-+ * | outermost SS } initialized in first_nmi; |
-+ * | outermost Return RSP } will not be changed before |
-+ * | outermost RFLAGS } NMI processing is done. |
-+ * | outermost CS } Copied to "iret" frame on each |
-+ * | outermost RIP } iteration. |
-+ * +---------------------------------------------------------+
-+ * | pt_regs |
-+ * +---------------------------------------------------------+
-+ *
-+ * The "original" frame is used by hardware. Before re-enabling
-+ * NMIs, we need to be done with it, and we need to leave enough
-+ * space for the asm code here.
-+ *
-+ * We return by executing IRET while RSP points to the "iret" frame.
-+ * That will either return for real or it will loop back into NMI
-+ * processing.
-+ *
-+ * The "outermost" frame is copied to the "iret" frame on each
-+ * iteration of the loop, so each iteration starts with the "iret"
-+ * frame pointing to the final return target.
-+ */
-+
-+ /*
-+ * If we interrupted kernel code between repeat_nmi and
-+ * end_repeat_nmi, then we are a nested NMI. We must not
-+ * modify the "iret" frame because it's being written by
-+ * the outer NMI. That's okay: the outer NMI handler is
-+ * about to call do_nmi anyway, so we can just
-+ * resume the outer NMI.
-+ */
-+
-+ movq $repeat_nmi, %rdx
-+ cmpq 8(%rsp), %rdx
-+ ja 1f
-+ movq $end_repeat_nmi, %rdx
-+ cmpq 8(%rsp), %rdx
-+ ja nested_nmi_out
-+1:
-
- /*
-- * Check the special variable on the stack to see if NMIs are
-- * executing.
-+ * Now check "NMI executing". If it's set, then we're nested.
-+ * This will not detect if we interrupted an outer NMI just
-+ * before IRET.
- */
- cmpl $1, -8(%rsp)
- je nested_nmi
-
/*
-- * Now test if the previous stack was an NMI stack.
-- * We need the double check. We check the NMI stack to satisfy the
-- * race when the first NMI clears the variable before returning.
-- * We check the variable because the first NMI could be in a
-- * breakpoint routine using a breakpoint stack.
-+ * Now test if the previous stack was an NMI stack. This covers
-+ * the case where we interrupt an outer NMI after it clears
-+ * "NMI executing" but before IRET. We need to be careful, though:
-+ * there is one case in which RSP could point to the NMI stack
-+ * despite there being no NMI active: naughty userspace controls
-+ * RSP at the very beginning of the SYSCALL targets. We can
-+ * pull a fast one on naughty userspace, though: we program
-+ * SYSCALL to mask DF, so userspace cannot cause DF to be set
-+ * if it controls the kernel's RSP. We set DF before we clear
-+ * "NMI executing".
+ * Return back to user mode. We must *not* do the normal exit
+ * work, because we don't want to enable interrupts. Fortunately,
+ * do_nmi doesn't modify pt_regs.
*/
- lea 6*8(%rsp), %rdx
- /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
-@@ -1462,27 +2109,22 @@ ENTRY(nmi)
- cmpq %rdx, 4*8(%rsp)
- /* If it is below the NMI stack, it is a normal NMI */
- jb first_nmi
-- /* Ah, it is within the NMI stack, treat it as nested */
+ SWAPGS
+
-+ /* Ah, it is within the NMI stack. */
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ movq_cfi_restore RBX, rbx
++#endif
+
-+ testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
-+ jz first_nmi /* RSP was user controlled. */
-
- CFI_REMEMBER_STATE
+ jmp restore_c_regs_and_iret
-+ /* This is a nested NMI. */
-+
- nested_nmi:
- /*
-- * Do nothing if we interrupted the fixup in repeat_nmi.
-- * It's about to repeat the NMI handler, so we are fine
-- * with ignoring this one.
-+ * Modify the "iret" frame to point to repeat_nmi, forcing another
-+ * iteration of NMI handling.
+ .Lnmi_from_kernel:
+@@ -1595,8 +2103,7 @@ nested_nmi:
+ * Modify the "iret" frame to point to repeat_nmi, forcing another
+ * iteration of NMI handling.
*/
-- movq $repeat_nmi, %rdx
-- cmpq 8(%rsp), %rdx
-- ja 1f
-- movq $end_repeat_nmi, %rdx
-- cmpq 8(%rsp), %rdx
-- ja nested_nmi_out
--
--1:
-- /* Set up the interrupted NMIs stack to jump to repeat_nmi */
- leaq -1*8(%rsp), %rdx
- movq %rdx, %rsp
+ subq $8, %rsp
CFI_ADJUST_CFA_OFFSET 1*8
leaq -10*8(%rsp), %rdx
pushq_cfi $__KERNEL_DS
-@@ -1499,60 +2141,24 @@ nested_nmi_out:
- popq_cfi %rdx
+@@ -1614,6 +2121,7 @@ nested_nmi_out:
CFI_RESTORE rdx
-- /* No need to check faults here */
-+ /* We are returning to kernel mode, so this cannot result in a fault. */
+ /* We are returning to kernel mode, so this cannot result in a fault. */
+# pax_force_retaddr_bts
INTERRUPT_RETURN
CFI_RESTORE_STATE
- first_nmi:
-- /*
-- * Because nested NMIs will use the pushed location that we
-- * stored in rdx, we must keep that space available.
-- * Here's what our stack frame will look like:
-- * +-------------------------+
-- * | original SS |
-- * | original Return RSP |
-- * | original RFLAGS |
-- * | original CS |
-- * | original RIP |
-- * +-------------------------+
-- * | temp storage for rdx |
-- * +-------------------------+
-- * | NMI executing variable |
-- * +-------------------------+
-- * | copied SS |
-- * | copied Return RSP |
-- * | copied RFLAGS |
-- * | copied CS |
-- * | copied RIP |
-- * +-------------------------+
-- * | Saved SS |
-- * | Saved Return RSP |
-- * | Saved RFLAGS |
-- * | Saved CS |
-- * | Saved RIP |
-- * +-------------------------+
-- * | pt_regs |
-- * +-------------------------+
-- *
-- * The saved stack frame is used to fix up the copied stack frame
-- * that a nested NMI may change to make the interrupted NMI iret jump
-- * to the repeat_nmi. The original stack frame and the temp storage
-- * is also used by nested NMIs and can not be trusted on exit.
-- */
-- /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
-+ /* Restore rdx. */
- movq (%rsp), %rdx
- CFI_RESTORE rdx
-
- /* Set the NMI executing variable on the stack. */
- pushq_cfi $1
-
-- /*
-- * Leave room for the "copied" frame
-- */
-+ /* Leave room for the "iret" frame */
- subq $(5*8), %rsp
- CFI_ADJUST_CFA_OFFSET 5*8
-
-- /* Copy the stack frame to the Saved frame */
-+ /* Copy the "original" frame to the "outermost" frame */
- .rept 5
- pushq_cfi 11*8(%rsp)
- .endr
-@@ -1560,6 +2166,7 @@ first_nmi:
-
- /* Everything up to here is safe from nested NMIs */
-
-+repeat_nmi:
- /*
- * If there was a nested NMI, the first NMI's iret will return
- * here. But NMIs are still enabled and we can take another
-@@ -1568,16 +2175,21 @@ first_nmi:
- * it will just return, as we are about to repeat an NMI anyway.
- * This makes it safe to copy to the stack frame that a nested
- * NMI will update.
-- */
--repeat_nmi:
-- /*
-- * Update the stack variable to say we are still in NMI (the update
-- * is benign for the non-repeat case, where 1 was pushed just above
-- * to this very stack slot).
-+ *
-+ * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
-+ * we're repeating an NMI, gsbase has the same value that it had on
-+ * the first iteration. paranoid_entry will load the kernel
-+ * gsbase if needed before we call do_nmi.
-+ *
-+ * Set "NMI executing" in case we came back here via IRET.
- */
- movq $1, 10*8(%rsp)
-
-- /* Make another copy, this one may be modified by nested NMIs */
-+ /*
-+ * Copy the "outermost" frame to the "iret" frame. NMIs that nest
-+ * here must not modify the "iret" frame while we're writing to
-+ * it or it will end up containing garbage.
-+ */
- addq $(10*8), %rsp
- CFI_ADJUST_CFA_OFFSET -10*8
- .rept 5
-@@ -1588,66 +2200,65 @@ repeat_nmi:
- end_repeat_nmi:
-
- /*
-- * Everything below this point can be preempted by a nested
-- * NMI if the first NMI took an exception and reset our iret stack
-- * so that we repeat another NMI.
-+ * Everything below this point can be preempted by a nested NMI.
-+ * If this happens, then the inner NMI will change the "iret"
-+ * frame to point back to repeat_nmi.
- */
- pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
+@@ -1679,13 +2187,13 @@ end_repeat_nmi:
ALLOC_PT_GPREGS_ON_STACK
/*
+ call paranoid_entry_nmi
DEFAULT_FRAME 0
-- /*
-- * Save off the CR2 register. If we take a page fault in the NMI then
-- * it could corrupt the CR2 value. If the NMI preempts a page fault
-- * handler before it was able to read the CR2 register, and then the
-- * NMI itself takes a page fault, the page fault that was preempted
-- * will read the information from the NMI page fault and not the
-- * origin fault. Save it off and restore it if it changes.
-- * Use the r12 callee-saved register.
-- */
-- movq %cr2, %r12
--
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
- movq %rsp,%rdi
+@@ -1693,7 +2201,9 @@ end_repeat_nmi:
movq $-1,%rsi
call do_nmi
-- /* Did the NMI take a page fault? Restore cr2 if it did */
-- movq %cr2, %rcx
-- cmpq %rcx, %r12
-- je 1f
-- movq %r12, %cr2
--1:
--
- testl %ebx,%ebx /* swapgs needed? */
++ pax_exit_kernel_nmi
++
+ testl $1,%ebx /* swapgs needed? */
jnz nmi_restore
nmi_swapgs:
SWAPGS_UNSAFE_STACK
- nmi_restore:
-+ pax_exit_kernel_nmi
- RESTORE_EXTRA_REGS
- RESTORE_C_REGS
-- /* Pop the extra iret frame at once */
-+
+@@ -1704,6 +2214,8 @@ nmi_restore:
+ /* Point RSP at the "iret" frame. */
REMOVE_PT_GPREGS_FROM_STACK 6*8
-- /* Clear the NMI executing stack variable */
-- movq $0, 5*8(%rsp)
-- jmp irq_return
+ pax_force_retaddr_bts
+
-+ /*
-+ * Clear "NMI executing". Set DF first so that we can easily
-+ * distinguish the remaining code between here and IRET from
-+ * the SYSCALL entry and exit paths. On a native kernel, we
-+ * could just inspect RIP, but, on paravirt kernels,
-+ * INTERRUPT_RETURN can translate into a jump into a
-+ * hypercall page.
-+ */
-+ std
-+ movq $0, 5*8(%rsp) /* clear "NMI executing" */
-+
-+ /*
-+ * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
-+ * stack in a single instruction. We are returning to kernel
-+ * mode, so this cannot result in a fault.
-+ */
-+ INTERRUPT_RETURN
+ /*
+ * Clear "NMI executing". Set DF first so that we can easily
+ * distinguish the remaining code between here and IRET from
+@@ -1722,12 +2234,12 @@ nmi_restore:
+ */
+ INTERRUPT_RETURN
CFI_ENDPROC
-END(nmi)
+ENDPROC(nmi)
/* ALLOC_TRAMP flags lets us know we created it */
ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
-index 5a46681..1ef7ffa 100644
+index f129a9a..af8f6da 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -68,12 +68,12 @@ again:
}
pmd = (physaddr & PMD_MASK) + early_pmd_flags;
pmd_p[pmd_index(address)] = pmd;
-@@ -177,7 +177,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
- */
- load_ucode_bsp();
+@@ -163,8 +163,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
+
+ clear_bss();
- clear_page(init_level4_pgt);
- /* set init_level4_pgt kernel high mapping*/
- init_level4_pgt[511] = early_level4_pgt[511];
+-
+ kasan_early_init();
+ for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 7e429c9..7244a52 100644
--- a/arch/x86/kernel/head_32.S
+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
+ .endr
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
-index df7e780..e97a497 100644
+index 7e5da2c..761adf1 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -20,6 +20,8 @@
NEXT_PAGE(level2_kernel_pgt)
/*
* 512 MB kernel mapping. We spend a full page on this pagetable
-@@ -494,23 +557,61 @@ NEXT_PAGE(level2_kernel_pgt)
+@@ -494,31 +557,69 @@ NEXT_PAGE(level2_kernel_pgt)
KERNEL_IMAGE_SIZE/PMD_SIZE)
NEXT_PAGE(level2_fixmap_pgt)
ENTRY(phys_base)
/* This must match the first entry in level2_kernel_pgt */
-@@ -534,8 +635,8 @@ NEXT_PAGE(kasan_zero_pud)
-
+ .quad 0x0000000000000000
#include "../../x86/xen/xen-head.S"
-
panic("low stack detected by irq handler - check messages\n");
#endif
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
-index 26d5a55..a01160a 100644
+index 26d5a55..bf8b49b 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
-@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
+@@ -31,6 +31,8 @@ static void bug_at(unsigned char *ip, int line)
+ * Something went wrong. Crash the box, as something could be
+ * corrupting the kernel.
+ */
++ ip = ktla_ktva(ip);
++ pr_warning("Unexpected op at %pS [%p] %s:%d\n", ip, ip, __FILE__, line);
+ pr_warning("Unexpected op at %pS [%p] (%02x %02x %02x %02x %02x) %s:%d\n",
+ ip, ip, ip[0], ip[1], ip[2], ip[3], ip[4], __FILE__, line);
+ BUG();
+@@ -51,7 +53,7 @@ static void __jump_label_transform(struct jump_entry *entry,
* Jump label is enabled for the first time.
* So we expect a default_nop...
*/
!= 0))
bug_at((void *)entry->code, __LINE__);
} else {
-@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
+@@ -59,7 +61,7 @@ static void __jump_label_transform(struct jump_entry *entry,
* ...otherwise expect an ideal_nop. Otherwise
* something went horribly wrong.
*/
!= 0))
bug_at((void *)entry->code, __LINE__);
}
-@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
+@@ -75,13 +77,13 @@ static void __jump_label_transform(struct jump_entry *entry,
* are converting the default nop to the ideal nop.
*/
if (init) {
};
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
-index c3e985d..f690edd 100644
+index d05bd2e..f690edd 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
break;
}
}
-@@ -408,15 +409,15 @@ static void default_do_nmi(struct pt_regs *regs)
- NOKPROBE_SYMBOL(default_do_nmi);
-
- /*
-- * NMIs can hit breakpoints which will cause it to lose its
-- * NMI context with the CPU when the breakpoint does an iret.
-- */
--#ifdef CONFIG_X86_32
--/*
-- * For i386, NMIs use the same stack as the kernel, and we can
-- * add a workaround to the iret problem in C (preventing nested
-- * NMIs if an NMI takes a trap). Simply have 3 states the NMI
-- * can be in:
-+ * NMIs can page fault or hit breakpoints which will cause it to lose
-+ * its NMI context with the CPU when the breakpoint or page fault does an IRET.
-+ *
-+ * As a result, NMIs can nest if NMIs get unmasked due an IRET during
-+ * NMI processing. On x86_64, the asm glue protects us from nested NMIs
-+ * if the outer NMI came from kernel mode, but we can still nest if the
-+ * outer NMI came from user mode.
-+ *
-+ * To handle these nested NMIs, we have three states:
- *
- * 1) not running
- * 2) executing
-@@ -430,15 +431,14 @@ NOKPROBE_SYMBOL(default_do_nmi);
- * (Note, the latch is binary, thus multiple NMIs triggering,
- * when one is running, are ignored. Only one NMI is restarted.)
- *
-- * If an NMI hits a breakpoint that executes an iret, another
-- * NMI can preempt it. We do not want to allow this new NMI
-- * to run, but we want to execute it when the first one finishes.
-- * We set the state to "latched", and the exit of the first NMI will
-- * perform a dec_return, if the result is zero (NOT_RUNNING), then
-- * it will simply exit the NMI handler. If not, the dec_return
-- * would have set the state to NMI_EXECUTING (what we want it to
-- * be when we are running). In this case, we simply jump back
-- * to rerun the NMI handler again, and restart the 'latched' NMI.
-+ * If an NMI executes an iret, another NMI can preempt it. We do not
-+ * want to allow this new NMI to run, but we want to execute it when the
-+ * first one finishes. We set the state to "latched", and the exit of
-+ * the first NMI will perform a dec_return, if the result is zero
-+ * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
-+ * dec_return would have set the state to NMI_EXECUTING (what we want it
-+ * to be when we are running). In this case, we simply jump back to
-+ * rerun the NMI handler again, and restart the 'latched' NMI.
- *
- * No trap (breakpoint or page fault) should be hit before nmi_restart,
- * thus there is no race between the first check of state for NOT_RUNNING
-@@ -461,49 +461,47 @@ enum nmi_states {
- static DEFINE_PER_CPU(enum nmi_states, nmi_state);
- static DEFINE_PER_CPU(unsigned long, nmi_cr2);
-
--#define nmi_nesting_preprocess(regs) \
-- do { \
-- if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { \
-- this_cpu_write(nmi_state, NMI_LATCHED); \
-- return; \
-- } \
-- this_cpu_write(nmi_state, NMI_EXECUTING); \
-- this_cpu_write(nmi_cr2, read_cr2()); \
-- } while (0); \
-- nmi_restart:
--
--#define nmi_nesting_postprocess() \
-- do { \
-- if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) \
-- write_cr2(this_cpu_read(nmi_cr2)); \
-- if (this_cpu_dec_return(nmi_state)) \
-- goto nmi_restart; \
-- } while (0)
--#else /* x86_64 */
-+#ifdef CONFIG_X86_64
- /*
-- * In x86_64 things are a bit more difficult. This has the same problem
-- * where an NMI hitting a breakpoint that calls iret will remove the
-- * NMI context, allowing a nested NMI to enter. What makes this more
-- * difficult is that both NMIs and breakpoints have their own stack.
-- * When a new NMI or breakpoint is executed, the stack is set to a fixed
-- * point. If an NMI is nested, it will have its stack set at that same
-- * fixed address that the first NMI had, and will start corrupting the
-- * stack. This is handled in entry_64.S, but the same problem exists with
-- * the breakpoint stack.
-+ * In x86_64, we need to handle breakpoint -> NMI -> breakpoint. Without
-+ * some care, the inner breakpoint will clobber the outer breakpoint's
-+ * stack.
- *
-- * If a breakpoint is being processed, and the debug stack is being used,
-- * if an NMI comes in and also hits a breakpoint, the stack pointer
-- * will be set to the same fixed address as the breakpoint that was
-- * interrupted, causing that stack to be corrupted. To handle this case,
-- * check if the stack that was interrupted is the debug stack, and if
-- * so, change the IDT so that new breakpoints will use the current stack
-- * and not switch to the fixed address. On return of the NMI, switch back
-- * to the original IDT.
-+ * If a breakpoint is being processed, and the debug stack is being
-+ * used, if an NMI comes in and also hits a breakpoint, the stack
-+ * pointer will be set to the same fixed address as the breakpoint that
-+ * was interrupted, causing that stack to be corrupted. To handle this
-+ * case, check if the stack that was interrupted is the debug stack, and
-+ * if so, change the IDT so that new breakpoints will use the current
-+ * stack and not switch to the fixed address. On return of the NMI,
-+ * switch back to the original IDT.
- */
- static DEFINE_PER_CPU(int, update_debug_stack);
-+#endif
-
--static inline void nmi_nesting_preprocess(struct pt_regs *regs)
-+dotraplinkage notrace void
-+do_nmi(struct pt_regs *regs, long error_code)
+@@ -481,6 +482,17 @@ static DEFINE_PER_CPU(int, update_debug_stack);
+ dotraplinkage notrace void
+ do_nmi(struct pt_regs *regs, long error_code)
{
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+ }
+#endif
+
-+ if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
-+ this_cpu_write(nmi_state, NMI_LATCHED);
-+ return;
-+ }
-+ this_cpu_write(nmi_state, NMI_EXECUTING);
-+ this_cpu_write(nmi_cr2, read_cr2());
-+nmi_restart:
-+
-+#ifdef CONFIG_X86_64
- /*
- * If we interrupted a breakpoint, it is possible that
- * the nmi handler will have breakpoints too. We need to
-@@ -514,22 +512,8 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs)
- debug_stack_set_zero();
- this_cpu_write(update_debug_stack, 1);
- }
--}
--
--static inline void nmi_nesting_postprocess(void)
--{
-- if (unlikely(this_cpu_read(update_debug_stack))) {
-- debug_stack_reset();
-- this_cpu_write(update_debug_stack, 0);
-- }
--}
- #endif
-
--dotraplinkage notrace void
--do_nmi(struct pt_regs *regs, long error_code)
--{
-- nmi_nesting_preprocess(regs);
--
- nmi_enter();
-
- inc_irq_stat(__nmi_count);
-@@ -539,8 +523,17 @@ do_nmi(struct pt_regs *regs, long error_code)
-
- nmi_exit();
-
-- /* On i386, may loop back to preprocess */
-- nmi_nesting_postprocess();
-+#ifdef CONFIG_X86_64
-+ if (unlikely(this_cpu_read(update_debug_stack))) {
-+ debug_stack_reset();
-+ this_cpu_write(update_debug_stack, 0);
-+ }
-+#endif
-+
-+ if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
-+ write_cr2(this_cpu_read(nmi_cr2));
-+ if (this_cpu_dec_return(nmi_state))
-+ goto nmi_restart;
- }
- NOKPROBE_SYMBOL(do_nmi);
-
+ if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
+ this_cpu_write(nmi_state, NMI_LATCHED);
+ return;
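
The nmi.c changes above replace the nmi_nesting_preprocess()/postprocess() macro pair with the three-state protocol open-coded in do_nmi(): a nested NMI merely latches, the saved CR2 is restored on the way out, and a dec_return decides whether to loop back to nmi_restart. A standalone sketch of that state machine, with an atomic standing in for the per-CPU variable (names illustrative):

#include <stdio.h>
#include <stdatomic.h>

enum nmi_states { NMI_NOT_RUNNING = 0, NMI_EXECUTING, NMI_LATCHED };

static _Atomic int nmi_state;             /* stand-in for the per-CPU state */

static void handle_one_nmi(void) { puts("handling NMI"); }

/* Sketch of the do_nmi() entry/exit protocol from the hunks above. */
static void do_nmi_sketch(void)
{
    if (atomic_load(&nmi_state) != NMI_NOT_RUNNING) {
        /* Nested NMI: just latch it; the outer invocation reruns it. */
        atomic_store(&nmi_state, NMI_LATCHED);
        return;
    }
    atomic_store(&nmi_state, NMI_EXECUTING);
nmi_restart:
    handle_one_nmi();
    /* dec_return: EXECUTING(1) -> 0 exits; LATCHED(2) -> 1 reruns. */
    if (atomic_fetch_sub(&nmi_state, 1) - 1 != NMI_NOT_RUNNING)
        goto nmi_restart;
}

int main(void)
{
    do_nmi_sketch();                       /* plain NMI: runs once */
    atomic_store(&nmi_state, NMI_EXECUTING);
    do_nmi_sketch();                       /* "nested" arrival: only latches */
    atomic_store(&nmi_state, NMI_NOT_RUNNING);
    return 0;
}
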
diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
index 6d9582e..f746287 100644
--- a/arch/x86/kernel/nmi_selftest.c
dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
-index 6e338e3..82f946e 100644
+index 9717437..44bc9aa 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -38,7 +38,8 @@
{
local_irq_disable();
/*
-@@ -531,16 +536,43 @@ static int __init idle_setup(char *str)
+@@ -533,16 +538,43 @@ static int __init idle_setup(char *str)
}
early_param("idle", idle_setup);
identity_mapped:
/* set return address to 0 if not preserving context */
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
-index d74ac33..d9efe04 100644
+index d74ac33..6d14941 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -111,6 +111,7 @@
u64 size = __pa_symbol(_end) - start;
/*
-@@ -860,8 +863,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
+@@ -860,8 +863,8 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
void __init setup_arch(char **cmdline_p)
{
-+#ifdef CONFIG_X86_32
-+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
-+#else
- memblock_reserve(__pa_symbol(_text),
- (unsigned long)__bss_stop - (unsigned long)_text);
-+#endif
+- memblock_reserve(__pa_symbol(_text),
+- (unsigned long)__bss_stop - (unsigned long)_text);
++ memblock_reserve(__pa_symbol(ktla_ktva((unsigned long)_text)),
++ (unsigned long)__bss_stop - ktla_ktva((unsigned long)_text));
early_reserve_initrd();
-@@ -959,16 +966,16 @@ void __init setup_arch(char **cmdline_p)
+@@ -959,16 +962,16 @@ void __init setup_arch(char **cmdline_p)
if (!boot_params.hdr.root_flags)
root_mountflags &= ~MS_RDONLY;
* Up to this point, the boot CPU has been using .init.data
* area. Reload any changed state for the boot CPU.
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
-index 1ea14fd..b16147f 100644
+index e0fd5f47..b551e66 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
-@@ -183,7 +183,7 @@ static unsigned long align_sigframe(unsigned long sp)
+@@ -189,7 +189,7 @@ static unsigned long align_sigframe(unsigned long sp)
* Align the stack pointer according to the i386 ABI,
* i.e. so that on function entry ((sp + 4) & 15) == 0.
*/
#else /* !CONFIG_X86_32 */
sp = round_down(sp, 16) - 8;
#endif
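
The align_sigframe() context above states the invariants: on i386 the frame must satisfy ((sp + 4) & 15) == 0 at handler entry, while on x86_64 the pointer is rounded down to 16 bytes and biased by 8 for the return-address slot. A self-contained check of both computations (the expressions mirror the comments above; this is an illustration, not the kernel's function):

#include <stdio.h>
#include <assert.h>

/* i386: after alignment, ((sp + 4) & 15) == 0 must hold on entry. */
static unsigned long align_sigframe_32(unsigned long sp)
{
    return ((sp + 4) & ~15UL) - 4;
}

/* x86_64: 16-byte alignment, minus 8 for the pushed return address. */
static unsigned long align_sigframe_64(unsigned long sp)
{
    return (sp & ~15UL) - 8;
}

int main(void)
{
    unsigned long sp = 0xbfff123fUL;
    assert(((align_sigframe_32(sp) + 4) & 15) == 0);
    assert((align_sigframe_64(sp) + 8) % 16 == 0);
    printf("32-bit: %#lx  64-bit: %#lx\n",
           align_sigframe_32(sp), align_sigframe_64(sp));
    return 0;
}
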
-@@ -291,10 +291,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
+@@ -297,10 +297,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
}
if (current->mm->context.vdso)
if (ksig->ka.sa.sa_flags & SA_RESTORER)
restorer = ksig->ka.sa.sa_restorer;
-@@ -308,7 +307,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
+@@ -314,7 +313,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
* reasons and because gdb uses it as a signature to notice
* signal handler stack frames.
*/
if (err)
return -EFAULT;
-@@ -355,8 +354,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+@@ -361,8 +360,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
save_altstack_ex(&frame->uc.uc_stack, regs->sp);
/* Set up to return from userspace. */
if (ksig->ka.sa.sa_flags & SA_RESTORER)
restorer = ksig->ka.sa.sa_restorer;
put_user_ex(restorer, &frame->pretcode);
-@@ -368,7 +369,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+@@ -374,7 +375,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
* reasons and because gdb uses it as a signature to notice
* signal handler stack frames.
*/
} put_user_catch(err);
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
-@@ -598,7 +599,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
+@@ -594,7 +595,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
{
int usig = ksig->sig;
sigset_t *set = sigmask_to_save();
/* Set up the stack frame */
if (is_ia32_frame()) {
-@@ -609,7 +615,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
+@@ -605,7 +611,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
} else if (is_x32_frame()) {
return x32_setup_rt_frame(ksig, cset, regs);
} else {
#define APIC_LVT_NUM 6
/* 14 is the version for Xeon and Pentium 8.4.8*/
-diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
-index 9d28383..c4ea87e 100644
---- a/arch/x86/kvm/lapic.h
-+++ b/arch/x86/kvm/lapic.h
-@@ -150,7 +150,7 @@ static inline bool kvm_apic_vid_enabled(struct kvm *kvm)
-
- static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
- {
-- return vcpu->arch.apic->pending_events;
-+ return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events;
- }
-
- bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6e6d115..43fecbf 100644
--- a/arch/x86/kvm/paging_tmpl.h
return (void *)vaddr;
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
-index 70e7444..75b9a13 100644
+index 70e7444..e9904fd 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
-@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+@@ -56,12 +56,10 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
unsigned long i;
for (i = 0; i < nr_pages; ++i)
+ !PageReserved(pfn_to_page(start_pfn + i))))
return 1;
- WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
-@@ -288,7 +288,7 @@ EXPORT_SYMBOL(ioremap_prot);
+- WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
+-
+ return 0;
+ }
+
+@@ -91,7 +89,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+ pgprot_t prot;
+ int retval;
+ void __iomem *ret_addr;
+- int ram_region;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+@@ -114,23 +111,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+ /*
+ * Don't allow anybody to remap normal RAM that we're using..
+ */
+- /* First check if whole region can be identified as RAM or not */
+- ram_region = region_is_ram(phys_addr, size);
+- if (ram_region > 0) {
+- WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
+- (unsigned long int)phys_addr,
+- (unsigned long int)last_addr);
++ pfn = phys_addr >> PAGE_SHIFT;
++ last_pfn = last_addr >> PAGE_SHIFT;
++ if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
++ __ioremap_check_ram) == 1) {
++ WARN_ONCE(1, "ioremap on RAM at 0x%llx - 0x%llx\n",
++ phys_addr, last_addr);
+ return NULL;
+ }
+
+- /* If could not be identified(-1), check page by page */
+- if (ram_region < 0) {
+- pfn = phys_addr >> PAGE_SHIFT;
+- last_pfn = last_addr >> PAGE_SHIFT;
+- if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+- __ioremap_check_ram) == 1)
+- return NULL;
+- }
+ /*
+ * Mappings have to be page-aligned
+ */
+@@ -288,7 +277,7 @@ EXPORT_SYMBOL(ioremap_prot);
*
* Caller must ensure there is only one unmapping for the same pointer.
*/
{
struct vm_struct *p, *o;
-@@ -351,32 +351,36 @@ int arch_ioremap_pmd_supported(void)
+@@ -351,32 +340,36 @@ int arch_ioremap_pmd_supported(void)
*/
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
-@@ -412,8 +416,7 @@ void __init early_ioremap_init(void)
+@@ -412,8 +405,7 @@ void __init early_ioremap_init(void)
early_ioremap_setup();
pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
pte = kmemcheck_pte_lookup(address);
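
The __ioremap_caller() rework above drops the two-step region_is_ram() probe and always walks the pfn range page by page, refusing the mapping as soon as one non-reserved RAM page is found. The core of that check as a standalone loop (the RAM predicate is an illustrative stand-in for what walk_system_ram_range() reports):

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-in: says whether a pfn is ordinary usable RAM. */
static bool pfn_is_ram(unsigned long pfn) { return pfn >= 0x100 && pfn < 0x200; }

/* Mirrors __ioremap_check_ram(): return 1 if any page in the range is RAM. */
static int check_ram(unsigned long start_pfn, unsigned long nr_pages)
{
    for (unsigned long i = 0; i < nr_pages; ++i)
        if (pfn_is_ram(start_pfn + i))
            return 1;
    return 0;
}

int main(void)
{
    unsigned long phys = 0x0ff000UL, size = 0x3000UL;
    unsigned long pfn = phys >> 12, last_pfn = (phys + size - 1) >> 12;

    if (check_ram(pfn, last_pfn - pfn + 1))
        puts("refusing ioremap: range overlaps RAM");
    return 0;
}
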
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
-index 9d518d6..8a091f5 100644
+index 844b06d..f363c86 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
return TASK_UNMAPPED_BASE + rnd;
}
-@@ -113,16 +126,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+@@ -113,18 +126,29 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
{
unsigned long random_factor = 0UL;
+#endif
+
}
+
+ const char *arch_vma_name(struct vm_area_struct *vma)
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 0057a7a..95c7edd 100644
--- a/arch/x86/mm/mmio-mod.c
}
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
-index 3250f23..7a97ba2 100644
+index 90b924a..4197ac2 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -45,7 +45,11 @@ void leave_mm(int cpu)
.callback = fix_broken_hp_bios_irq9,
.ident = "HP Pavilion N5400 Series Laptop",
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
-index 9b83b90..4112152 100644
+index 9b83b90..2c256c5 100644
--- a/arch/x86/pci/pcbios.c
+++ b/arch/x86/pci/pcbios.c
@@ -79,7 +79,7 @@ union bios32 {
unsigned long address;
unsigned short segment;
-} bios32_indirect __initdata = { 0, __KERNEL_CS };
-+} bios32_indirect __initconst = { 0, __PCIBIOS_CS };
++} bios32_indirect __initdata = { 0, __PCIBIOS_CS };
/*
* Returns the entry point for the given service, NULL on error
{
int cpu = smp_processor_id();
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
-index e88fda8..76ce7ce 100644
+index 4841453..d59a203 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -9,6 +9,7 @@ config XEN
select XEN_HAVE_PVMMU
depends on X86_64 || (X86_32 && X86_PAE)
- depends on X86_TSC
+ depends on X86_LOCAL_APIC && X86_TSC
+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
help
This is the Linux Xen port. Enabling this will allow the
kernel to boot in a paravirtualized environment under the
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
-index 46957ea..ef7b714 100644
+index a671e83..a9dc1d9 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -125,8 +125,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);
-@@ -544,8 +542,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
+@@ -584,8 +582,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
{
unsigned long va = dtr->address;
unsigned int size = dtr->size + 1;
int f;
/*
-@@ -593,8 +590,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
+@@ -633,8 +630,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
{
unsigned long va = dtr->address;
unsigned int size = dtr->size + 1;
int f;
/*
-@@ -602,7 +598,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
+@@ -642,7 +638,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
* 8-byte entries, or 16 4k pages..
*/
BUG_ON(va & ~PAGE_MASK);
for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
-@@ -1223,30 +1219,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
+@@ -1263,30 +1259,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
#endif
};
{
if (pm_power_off)
pm_power_off();
-@@ -1399,8 +1395,11 @@ static void __ref xen_setup_gdt(int cpu)
+@@ -1439,8 +1435,11 @@ static void __ref xen_setup_gdt(int cpu)
pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
pv_cpu_ops.load_gdt = xen_load_gdt_boot;
pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
pv_cpu_ops.load_gdt = xen_load_gdt;
-@@ -1515,7 +1514,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
+@@ -1555,7 +1554,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
/* Work out if we support NX */
/* Get mfn list */
xen_build_dynamic_phys_to_machine();
-@@ -1543,13 +1552,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
+@@ -1583,13 +1592,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
machine_ops = xen_machine_ops;
mov %rsi,xen_start_info
mov $init_thread_union+THREAD_SIZE,%rsp
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
-index 9e195c6..523ed36 100644
+index bef30cb..f1a0d68 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -16,8 +16,6 @@ void xen_syscall_target(void);
#define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
#define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
diff --git a/block/bio.c b/block/bio.c
-index f66a4ea..73ddf55 100644
+index 4441522..dedbafc 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1172,7 +1172,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
if (do_copy)
bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
else
-diff --git a/block/blk-mq.c b/block/blk-mq.c
-index 594eea0..2dc1fd6 100644
---- a/block/blk-mq.c
-+++ b/block/blk-mq.c
-@@ -1968,7 +1968,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
- goto err_hctxs;
-
- setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
-- blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30000);
-+ blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
-
- q->nr_queues = nr_cpu_ids;
- q->nr_hw_queues = set->nr_hw_queues;
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 53b1737..08177d2e 100644
--- a/block/blk-softirq.c
unsigned long timeout_msec)
{
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
-index 577849c..920847c 100644
+index e0064d1..e53c75e 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -102,7 +102,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
struct ata_force_param {
const char *name;
-@@ -4801,7 +4801,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
+@@ -4800,7 +4800,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
struct ata_port *ap;
unsigned int tag;
ap = qc->ap;
qc->flags = 0;
-@@ -4818,7 +4818,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
+@@ -4817,7 +4817,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
struct ata_port *ap;
struct ata_link *link;
WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
ap = qc->ap;
link = qc->dev->link;
-@@ -5925,6 +5925,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
+@@ -5924,6 +5924,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
return;
spin_lock(&lock);
for (cur = ops->inherits; cur; cur = cur->inherits) {
void **inherit = (void **)cur;
-@@ -5938,8 +5939,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
+@@ -5937,8 +5938,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
if (IS_ERR(*pp))
*pp = NULL;
spin_unlock(&lock);
}
-@@ -6135,7 +6137,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
+@@ -6134,7 +6136,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
/* give ports names and add SCSI hosts */
for (i = 0; i < host->n_ports; i++) {
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
-index 3131adc..93e7aa0 100644
+index 0d7f0da..bc20aa6 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
-@@ -4209,7 +4209,7 @@ int ata_sas_port_init(struct ata_port *ap)
+@@ -4193,7 +4193,7 @@ int ata_sas_port_init(struct ata_port *ap)
if (rc)
return rc;
}
EXPORT_SYMBOL_GPL(ata_sas_port_init);
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
-index a998a17..8de4bf4 100644
+index f840ca1..edd6ef3 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -53,7 +53,7 @@ enum {
if (ti.nwa_v) {
pd->nwa = be32_to_cpu(ti.next_writable);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
-index ec6c5c6..820ee2abc 100644
+index 010ce0b..7c0049e 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -64,7 +64,7 @@
{
struct hpet_timer __iomem *timer;
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
-index a43048b..14724d5 100644
+index 3c1a123..a33c99f 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -790,7 +790,7 @@ static const struct i8k_config_data i8k_config_data[] = {
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
-index c45d274..0f469f7 100644
+index 6f9d27f..14385d1 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -134,10 +134,10 @@ struct pstate_funcs {
static int hwp_active;
struct perf_limits {
-@@ -721,18 +721,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
+@@ -722,18 +722,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
cpu->pstate.current_pstate = pstate;
intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}
-@@ -1056,15 +1056,15 @@ static unsigned int force_load;
+@@ -1057,15 +1057,15 @@ static unsigned int force_load;
static int intel_pstate_msrs_not_valid(void)
{
{
pid_params.sample_rate_ms = policy->sample_rate_ms;
pid_params.p_gain_pct = policy->p_gain_pct;
-@@ -1076,12 +1076,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
+@@ -1077,12 +1077,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
err = pci_request_regions(pdev, name);
if (err)
-diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
-index 4630709..0a70e46 100644
---- a/drivers/crypto/omap-des.c
-+++ b/drivers/crypto/omap-des.c
-@@ -536,9 +536,6 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
- dmaengine_terminate_all(dd->dma_lch_in);
- dmaengine_terminate_all(dd->dma_lch_out);
-
-- dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
-- dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
--
- return err;
- }
-
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index ca1b362..01cae6a 100644
--- a/drivers/devfreq/devfreq.c
container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
-index 4fd9961..52d60ce 100644
+index d425374..1da1716 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
EXPORT_SYMBOL_GPL(cper_next_record_id);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
-index e14363d..c3d5d84 100644
+index 63226e9..302716e 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
-@@ -159,14 +159,16 @@ static struct attribute_group efi_subsys_attr_group = {
+@@ -164,14 +164,16 @@ static struct attribute_group efi_subsys_attr_group = {
};
static struct efivars generic_efivars;
dev->driver->context_dtor(dev, ctx->handle);
drm_legacy_ctxbitmap_free(dev, ctx->handle);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
-index 3007b44..420b4a3 100644
+index 800a025..c88f1a4 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
-@@ -4176,7 +4176,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
+@@ -4179,7 +4179,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
goto done;
}
else
type = types[map->type];
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
-index aa8bbb4..0f62630 100644
+index 9cfcd0a..7142a7f 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
-@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
+@@ -459,7 +459,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
request = compat_alloc_user_space(nbytes);
if (!access_ok(VERIFY_WRITE, request, nbytes))
return -EFAULT;
if (__put_user(count, &request->count)
|| __put_user(list, &request->list))
-@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
+@@ -520,7 +520,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
request = compat_alloc_user_space(nbytes);
if (!access_ok(VERIFY_WRITE, request, nbytes))
return -EFAULT;
if (__put_user(count, &request->count)
|| __put_user(list, &request->list))
-@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
+@@ -1075,7 +1075,7 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
return 0;
}
[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
-@@ -1062,7 +1062,6 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
+@@ -1122,7 +1122,6 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
int ret;
/* Assume that ioctls without an explicit compat routine will just
-@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+@@ -1132,10 +1131,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr >= ARRAY_SIZE(drm_compat_ioctls))
return drm_ioctl(filp, cmd, arg);
invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
if (USES_FULL_PPGTT(dev))
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
-index 176de63..b50b66a 100644
+index 23aa04c..1d25960 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -62,7 +62,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
- drm_ioctl_compat_t *fn = NULL;
int ret;
- if (nr < DRM_COMMAND_BASE)
+ if (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END)
return drm_compat_ioctl(filp, cmd, arg);
- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
ret = drm_ioctl(filp, cmd, arg);
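
The i915_ioc32.c change adds the missing upper-bound test before the command number is used to index the driver's compat table; with only the lower bound checked, a crafted nr could read past the array. A minimal sketch of the guarded dispatch (table size and constants are illustrative):

#include <stdio.h>

#define CMD_BASE 0x40
#define CMD_END  0xA0

typedef long (*ioctl_fn)(unsigned int nr);
static long generic_ioctl(unsigned int nr) { (void)nr; return 0; }
static ioctl_fn compat_table[CMD_END - CMD_BASE]; /* sparse, may hold NULLs */

static long compat_dispatch(unsigned int nr)
{
    /* Reject anything outside the driver's range BEFORE indexing. */
    if (nr < CMD_BASE || nr >= CMD_END)
        return generic_ioctl(nr);
    ioctl_fn fn = compat_table[nr - CMD_BASE];
    return fn ? fn(nr) : generic_ioctl(nr);
}

int main(void)
{
    printf("%ld\n", compat_dispatch(0xFF)); /* out of range: falls back */
    return 0;
}
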
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index d0f3cbc..f3ab4cc 100644
+index 57c8878..8ef38a7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -13604,13 +13604,13 @@ struct intel_quirk {
+@@ -13617,13 +13617,13 @@ struct intel_quirk {
int subsystem_vendor;
int subsystem_device;
void (*hook)(struct drm_device *dev);
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
-@@ -13618,18 +13618,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+@@ -13631,18 +13631,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
return 1;
}
#define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
-index 8904933..9624b38 100644
+index cd6dae0..f25eb48 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
-@@ -941,7 +941,8 @@ static struct drm_driver
+@@ -943,7 +943,8 @@ static struct drm_driver
driver_stub = {
.driver_features =
DRIVER_USE_AGP |
static const struct vga_switcheroo_client_ops
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
-index 9782364..89bd954 100644
+index f33251d..22f6cb1 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
wait_queue_head_t display_event;
wait_queue_head_t cursor_event;
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
-index b110883..dd06418 100644
+index 7354a4c..f37d7f9 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
-@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
+@@ -183,7 +183,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
/* TODO copy slow path code from i915 */
fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
{
struct qxl_drawable *draw = fb_cmd;
-@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
+@@ -203,7 +203,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
struct drm_qxl_reloc reloc;
if (copy_from_user(&reloc,
sizeof(reloc))) {
ret = -EFAULT;
goto out_free_bos;
-@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
+@@ -296,10 +296,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
if (regcomp
(&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
-index a7fdfa4..04a3964 100644
+index 604c44d..6eb6c4b 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1247,7 +1247,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
-diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
-index 5ecfaf2..c87c4b1 100644
---- a/drivers/iommu/intel-iommu.c
-+++ b/drivers/iommu/intel-iommu.c
-@@ -1756,8 +1756,9 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
-
- static void domain_exit(struct dmar_domain *domain)
- {
-+ struct dmar_drhd_unit *drhd;
-+ struct intel_iommu *iommu;
- struct page *freelist = NULL;
-- int i;
-
- /* Domain 0 is reserved, so dont process it */
- if (!domain)
-@@ -1777,8 +1778,10 @@ static void domain_exit(struct dmar_domain *domain)
-
- /* clear attached or cached domains */
- rcu_read_lock();
-- for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
-- iommu_detach_domain(domain, g_iommus[i]);
-+ for_each_active_iommu(iommu, drhd)
-+ if (domain_type_is_vm(domain) ||
-+ test_bit(iommu->seq_id, domain->iommu_bmp))
-+ iommu_detach_domain(domain, iommu);
- rcu_read_unlock();
-
- dma_free_pagelist(freelist);
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 4e46021..f0a24fef 100644
--- a/drivers/iommu/io-pgtable-arm.c
cl->fn = fn;
cl->wq = wq;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
-index 135a090..f7872f6 100644
+index c90118e..226d9e5 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
-@@ -1927,7 +1927,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
+@@ -1936,7 +1936,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
chunk_kb ? "KB" : "B");
if (bitmap->storage.file) {
seq_printf(seq, ", file: ");
return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
-index f478a4c..4b8e5ef 100644
+index 419bdd4..e5eb76d 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -382,7 +382,7 @@ do_sync_free:
"start=%llu, len=%llu, dev_size=%llu",
dm_device_name(ti->table->md), bdevname(bdev, b),
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
-index 79f6941..b33b4e0 100644
+index cde1d67..4c88a5ce 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
pmd->bl_info.value_type.inc = data_block_inc;
pmd->bl_info.value_type.dec = data_block_dec;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
-index 2caf492..0c0dcac 100644
+index 697f34f..8301900 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -191,9 +191,9 @@ struct mapped_device {
struct list_head uevent_list;
spinlock_t uevent_lock; /* Protect access to uevent_list */
-@@ -2298,8 +2298,8 @@ static struct mapped_device *alloc_dev(int minor)
+@@ -2287,8 +2287,8 @@ static struct mapped_device *alloc_dev(int minor)
spin_lock_init(&md->deferred_lock);
atomic_set(&md->holders, 1);
atomic_set(&md->open_count, 0);
INIT_LIST_HEAD(&md->uevent_list);
INIT_LIST_HEAD(&md->table_devices);
spin_lock_init(&md->uevent_lock);
-@@ -2466,7 +2466,7 @@ static void event_callback(void *context)
+@@ -2455,7 +2455,7 @@ static void event_callback(void *context)
dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
wake_up(&md->eventq);
}
-@@ -3465,18 +3465,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+@@ -3454,18 +3454,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
diff --git a/drivers/md/md.c b/drivers/md/md.c
-index 4dbed4a..bed2a6a 100644
+index e462151..8ac9655 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -197,10 +197,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
INIT_LIST_HEAD(&rdev->same_set);
init_waitqueue_head(&rdev->blocked_wait);
-@@ -7232,7 +7232,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
+@@ -5746,16 +5746,16 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
+
+ err = 0;
+ spin_lock(&mddev->lock);
+- /* bitmap disabled, zero the first byte and copy out */
+- if (!mddev->bitmap_info.file)
+- file->pathname[0] = '\0';
+- else if ((ptr = d_path(&mddev->bitmap_info.file->f_path,
+- file->pathname, sizeof(file->pathname))),
+- IS_ERR(ptr))
+- err = PTR_ERR(ptr);
+- else
+- memmove(file->pathname, ptr,
+- sizeof(file->pathname)-(ptr-file->pathname));
++ /* bitmap enabled */
++ if (mddev->bitmap_info.file) {
++ ptr = d_path(&mddev->bitmap_info.file->f_path,
++ file->pathname, sizeof(file->pathname));
++ if (IS_ERR(ptr))
++ err = PTR_ERR(ptr);
++ else
++ memmove(file->pathname, ptr,
++ sizeof(file->pathname)-(ptr-file->pathname));
++ }
+ spin_unlock(&mddev->lock);
+
+ if (err == 0 &&
+@@ -7237,7 +7237,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
spin_unlock(&pers_lock);
seq_printf(seq, "\n");
return 0;
}
if (v == (void*)2) {
-@@ -7335,7 +7335,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
+@@ -7340,7 +7340,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
return error;
seq = file->private_data;
return error;
}
-@@ -7352,7 +7352,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
+@@ -7357,7 +7357,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
/* always allow read */
mask = POLLIN | POLLRDNORM;
mask |= POLLERR | POLLPRI;
return mask;
}
-@@ -7448,7 +7448,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
+@@ -7453,7 +7453,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
(int)part_stat_read(&disk->part0, sectors[1]) -
struct md_personality
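
The get_bitmap_file() rework above leans on the d_path() contract: the path is built at the tail of the caller-supplied buffer and the return value points into that buffer (or is an ERR_PTR), hence the memmove() to slide it to the front. A userspace sketch of the same contract, with a stand-in for d_path():

#include <stdio.h>
#include <string.h>

/* Stand-in with d_path()-like behaviour: builds the string at the END
 * of the buffer and returns a pointer to it inside the buffer. */
static char *d_path_sketch(const char *name, char *buf, size_t len)
{
    size_t n = strlen(name) + 1;
    if (n > len)
        return NULL;                      /* kernel would return ERR_PTR */
    char *p = buf + len - n;
    memcpy(p, name, n);
    return p;
}

int main(void)
{
    char pathname[64];
    char *ptr = d_path_sketch("/var/bitmap", pathname, sizeof(pathname));
    if (ptr) {
        /* Slide the result to the start, as the hunk above does. */
        memmove(pathname, ptr, sizeof(pathname) - (ptr - pathname));
        printf("%s\n", pathname);
    }
    return 0;
}
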
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
-index e8a9042..35bd145 100644
+index 5309129..7fb096e 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
-@@ -683,7 +683,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+@@ -691,7 +691,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
* Flick into a mode where all blocks get allocated in the new area.
*/
smm->begin = old_len;
/*
* Extend.
-@@ -714,7 +714,7 @@ out:
+@@ -728,7 +728,7 @@ out:
/*
* Switch back to normal behaviour.
*/
/*----------------------------------------------------------------*/
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
-index 9157a29..0d462f0 100644
+index 5ce3cd5c..f147017 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
-@@ -1934,7 +1934,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
+@@ -1936,7 +1936,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
if (r1_sync_page_io(rdev, sect, s,
bio->bi_io_vec[idx].bv_page,
READ) != 0)
}
sectors -= s;
sect += s;
-@@ -2167,7 +2167,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
+@@ -2169,7 +2169,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
!test_bit(Faulty, &rdev->flags)) {
if (r1_sync_page_io(rdev, sect, s,
conf->tmppage, READ)) {
/*
* Timer function to enforce the timelimit on the partition disengage.
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
-index 60f7141..ba97c1a 100644
+index 31d2627..d1b80a5 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
-@@ -577,7 +577,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
+@@ -579,7 +579,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
if (idata->ic.postsleep_min_us)
usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
mmc->max_busy_timeout = 0;
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
-index 9df2b68..6d5ed1a 100644
+index d0abdffb..bb1f8d7 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2004,7 +2004,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
Say Y here if you want to support for Freescale FlexCAN.
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
-index e9b1810..5c2f3f9 100644
+index aede704..b516b4d 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
-@@ -964,7 +964,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
+@@ -961,7 +961,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
return -EOPNOTSUPP;
}
.maxtype = IFLA_CAN_MAX,
.policy = can_policy,
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
-index 0ce868d..e5dc8bd 100644
+index 674f367..ec3a31f 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
-@@ -166,7 +166,7 @@ static void vcan_setup(struct net_device *dev)
+@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
dev->destructor = free_netdev;
}
#include "ftmac100.h"
+diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+index c754b20..c9da1b5 100644
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+@@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
+
+ static inline bool fm10k_page_is_reserved(struct page *page)
+ {
+- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
++ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ }
+
+ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index a92b772..250fe69 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
smp_mb(); /* Force the above update. */
}
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index a0a9b1f..3fe93e7 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -6584,7 +6584,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
+
+ static inline bool igb_page_is_reserved(struct page *page)
+ {
+- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
++ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ }
+
+ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 5be12a0..463ff47 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -1829,7 +1829,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
+
+ static inline bool ixgbe_page_is_reserved(struct page *page)
+ {
+- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
++ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ }
+
+ /**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index e5ba040..d47531c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
smp_mb();
/* need lock to prevent incorrect read while modifying cyclecounter */
+diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+index e71cdde..1d7b00b 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+@@ -765,7 +765,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
+
+ static inline bool ixgbevf_page_is_reserved(struct page *page)
+ {
+- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
++ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ }
+
+ /**
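
The four *_page_is_reserved() hunks above switch from poking page->pfmemalloc directly to the page_is_pfmemalloc() accessor; the underlying word in struct page is overloaded, so only the accessor knows which encoding really means an emergency-reserve page. Sketch of the accessor pattern (the layout and the -1 encoding are illustrative, not a claim about struct page):

#include <stdbool.h>
#include <stdio.h>

struct page_s {
    /* One word reused for several purposes, as in struct page. */
    unsigned long index;
};

/* Accessor: only one specific encoding means "pfmemalloc". */
static inline bool page_is_pfmemalloc_sketch(const struct page_s *page)
{
    return page->index == (unsigned long)-1;
}

int main(void)
{
    struct page_s normal = { .index = 42 };  /* ordinary pagecache index */
    struct page_s emerg  = { .index = (unsigned long)-1 };
    printf("normal=%d emerg=%d\n", page_is_pfmemalloc_sketch(&normal),
           page_is_pfmemalloc_sketch(&emerg));
    return 0;
}
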
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 74d0389..086ac03 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
__vxge_hw_mempool_create(vpath->hldev,
fifo->config->memblock_size,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
-index 33669c2..a29c75e 100644
+index 33669c2..674c39a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+@@ -1415,7 +1415,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
+ if (fw->size & 0xF) {
+ addr = dest + size;
+ for (i = 0; i < (fw->size & 0xF); i++)
+- data[i] = temp[size + i];
++ data[i] = ((u8 *)temp)[size + i];
+ for (; i < 16; i++)
+ data[i] = 0;
+ ret = qlcnic_ms_mem_write128(adapter, addr,
@@ -2324,7 +2324,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
} else if (ret == QLC_83XX_DEFAULT_OPMODE) {
/* we will have to manufacture ethernet headers, prepare template */
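
The qlcnic_83xx_copy_fw_file() fix above is about pointer stride: indexing scales by the pointee size, so if temp points at 32-bit words, temp[size + i] lands at byte offset 4*(size + i) instead of the intended size + i. Casting to a byte pointer first restores byte indexing, as this standalone demonstration shows (printed values assume little-endian):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t words[8];
    for (int i = 0; i < 8; i++)
        words[i] = 0x03020100u + 0x04040404u * i;

    uint32_t *temp = words;
    size_t size = 4;                      /* byte offset we intend */

    /* Wrong: scales by sizeof(uint32_t), lands at byte offset 16. */
    printf("temp[size]         -> %#x\n", temp[size]);
    /* Right: cast first, so indexing is in bytes (offset 4). */
    printf("((u8 *)temp)[size] -> %#x\n", ((uint8_t *)temp)[size]);
    return 0;
}
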
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
-index 63c7810..4ad33aa 100644
+index 63c7810..a694d2b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
#define VIRTNET_DRIVER_VERSION "1.0.0"
+@@ -1756,9 +1756,9 @@ static int virtnet_probe(struct virtio_device *vdev)
+ /* Do we support "hardware" checksums? */
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
+ /* This opens up the world of extra features. */
+- dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
++ dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG;
+ if (csum)
+- dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
++ dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG;
+
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
+ dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 61c0840..92e7f7e 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
struct ath_nf_limits {
s16 max;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
-index b0badef..3e3464c 100644
+index d5f2fbf..0740c8b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
-@@ -2573,16 +2573,18 @@ void ath9k_fill_chanctx_ops(void)
+@@ -2575,16 +2575,18 @@ void ath9k_fill_chanctx_ops(void)
if (!ath9k_is_chanctx_enabled())
return;
memset(buf, 0, sizeof(buf));
buf_size = min(count, sizeof(buf) - 1);
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
-index dc17909..989c9fb 100644
+index 699a480..1801fc3 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
-@@ -1919,7 +1919,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
+@@ -1935,7 +1935,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
char buf[8];
u32 reset_flag;
memset(buf, 0, sizeof(buf));
-@@ -1940,7 +1940,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
+@@ -1956,7 +1956,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
{
struct iwl_trans *trans = file->private_data;
char buf[8];
struct board_type {
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
-index 1b3a094..068e683 100644
+index 30f9ef0..a1e29ac 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -101,12 +101,12 @@ struct fc_exch_mgr {
} stats;
};
-@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
+@@ -809,7 +809,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
/* allocate memory for exchange */
ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
if (!ep) {
goto out;
}
memset(ep, 0, sizeof(*ep));
-@@ -874,7 +874,7 @@ out:
+@@ -872,7 +872,7 @@ out:
return ep;
err:
spin_unlock_bh(&pool->lock);
mempool_free(ep, mp->ep_pool);
return NULL;
}
-@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
+@@ -1021,7 +1021,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
xid = ntohs(fh->fh_ox_id); /* we originated exch */
ep = fc_exch_find(mp, xid);
if (!ep) {
reject = FC_RJT_OX_ID;
goto out;
}
-@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
+@@ -1051,7 +1051,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
ep = fc_exch_find(mp, xid);
if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
if (ep) {
reject = FC_RJT_RX_ID;
goto rel;
}
-@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
+@@ -1062,7 +1062,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
}
xid = ep->xid; /* get our XID */
} else if (!ep) {
reject = FC_RJT_RX_ID; /* XID not found */
goto out;
}
-@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
+@@ -1080,7 +1080,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
} else {
sp = &ep->seq;
if (sp->id != fh->fh_seq_id) {
if (f_ctl & FC_FC_END_SEQ) {
/*
* Update sequence_id based on incoming last
-@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+@@ -1531,22 +1531,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
if (!ep) {
goto rel;
}
sof = fr_sof(fp);
-@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+@@ -1555,7 +1555,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
sp->ssb_stat |= SSB_ST_RESP;
sp->id = fh->fh_seq_id;
} else if (sp->id != fh->fh_seq_id) {
goto rel;
}
-@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+@@ -1618,9 +1618,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
if (!sp)
if (drv->done)
good_bytes = drv->done(cmd);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
-index b1a2631..5bcd9c8 100644
+index 448ebda..9bd345f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1597,7 +1597,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
/* check if the device is still usable */
if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
-index 1ac38e7..6acc656 100644
+index 9ad4116..4e736fc 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -788,7 +788,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
transport_setup_device(&rport->dev);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
-index 7f9d65f..e856438 100644
+index 11ea52b..7968d4d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -111,7 +111,7 @@ static int sd_resume(struct device *);
return blk_trace_startstop(sdp->device->request_queue, 1);
case BLKTRACESTOP:
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
-index 8bd54a6..dd037a5 100644
+index 8bd54a6..58fa0d6 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -80,7 +80,7 @@ static DEFINE_MUTEX(sr_mutex);
static int sr_runtime_suspend(struct device *dev);
static struct dev_pm_ops sr_pm_ops = {
-@@ -312,11 +312,11 @@ do_tur:
+@@ -312,13 +312,13 @@ do_tur:
* It will be notified on the end of a SCSI read / write, and will take one
* of several actions based on success or failure.
*/
int result = SCpnt->result;
- int this_count = scsi_bufflen(SCpnt);
- int good_bytes = (result == 0 ? this_count : 0);
+- int block_sectors = 0;
+- long error_sector;
+ unsigned int this_count = scsi_bufflen(SCpnt);
+ unsigned int good_bytes = (result == 0 ? this_count : 0);
- int block_sectors = 0;
- long error_sector;
++ unsigned int block_sectors = 0;
++ sector_t error_sector;
struct scsi_cd *cd = scsi_cd(SCpnt->request->rq_disk);
-diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
-index 9a1c342..525ab4c 100644
---- a/drivers/scsi/st.c
-+++ b/drivers/scsi/st.c
-@@ -1274,9 +1274,9 @@ static int st_open(struct inode *inode, struct file *filp)
- spin_lock(&st_use_lock);
- STp->in_use = 0;
- spin_unlock(&st_use_lock);
-- scsi_tape_put(STp);
- if (resumed)
- scsi_autopm_put_device(STp->device);
-+ scsi_tape_put(STp);
- return retval;
- }
+ #ifdef DEBUG
+@@ -351,9 +351,12 @@ static int sr_done(struct scsi_cmnd *SCpnt)
+ if (cd->device->sector_size == 2048)
+ error_sector <<= 2;
+ error_sector &= ~(block_sectors - 1);
+- good_bytes = (error_sector -
+- blk_rq_pos(SCpnt->request)) << 9;
+- if (good_bytes < 0 || good_bytes >= this_count)
++ if (error_sector >= blk_rq_pos(SCpnt->request)) {
++ good_bytes = (error_sector -
++ blk_rq_pos(SCpnt->request)) << 9;
++ if (good_bytes >= this_count)
++ good_bytes = 0;
++ } else
+ good_bytes = 0;
+ /*
+ * The SCSI specification allows for the value
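
The sr_done() retyping above is paired with an explicit error_sector >= blk_rq_pos() test because the subtraction now happens in unsigned arithmetic: the old signed code could catch a negative good_bytes after the fact, but an unsigned wrap would produce a huge bogus value, so the comparison must come before the subtraction. Compact demonstration of the guarded computation:

#include <stdio.h>

static unsigned int good_bytes_for(unsigned long long error_sector,
                                   unsigned long long req_pos,
                                   unsigned int this_count)
{
    unsigned int good_bytes = 0;
    if (error_sector >= req_pos) {        /* compare BEFORE subtracting */
        good_bytes = (unsigned int)((error_sector - req_pos) << 9);
        if (good_bytes >= this_count)
            good_bytes = 0;
    }
    return good_bytes;
}

int main(void)
{
    /* Error before the request start: unsigned subtraction would wrap. */
    printf("%u\n", good_bytes_for(100, 200, 4096));  /* 0, not huge */
    printf("%u\n", good_bytes_for(204, 200, 4096));  /* 4 sectors = 2048 */
    return 0;
}
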
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index c0d660f..24a5854 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
dlci->modem_rx = 0;
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
-index 396344c..875c1d6 100644
+index 16ed0b6..7d944b4 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -116,7 +116,7 @@ struct n_tty_data {
size_t line_start;
/* protected by output lock */
-@@ -2572,6 +2572,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
+@@ -2582,6 +2582,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
{
*ops = tty_ldisc_N_TTY;
ops->owner = NULL;
if (cfg->uart_flags & UPF_CONS_FLOW) {
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
-index 0b7bb12..ebe191a 100644
+index ec54044..fc93d3f 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1376,7 +1376,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
return;
/*
-@@ -1510,7 +1510,7 @@ static void uart_hangup(struct tty_struct *tty)
+@@ -1511,7 +1511,7 @@ static void uart_hangup(struct tty_struct *tty)
uart_flush_buffer(tty);
uart_shutdown(tty, state);
spin_lock_irqsave(&port->lock, flags);
clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
spin_unlock_irqrestore(&port->lock, flags);
tty_port_tty_set(port, NULL);
-@@ -1597,7 +1597,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
+@@ -1598,7 +1598,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
pr_debug("uart_open(%d) called\n", line);
spin_lock_irq(&port->lock);
if (!retval)
port->flags |= ASYNC_NORMAL_ACTIVE;
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
-index 843f2cd..7d530a6 100644
+index 9ffdfcf..fd81170 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
-@@ -1086,7 +1086,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
+@@ -1069,7 +1069,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
file->f_version = event_count;
return POLLIN | POLLRDNORM;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
-index 4b0448c..fc84bec 100644
+index 986abde..80e8279 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
dev->rawdescriptors[i] + (*ppos - pos),
min(len, alloclen))) {
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
-index 45a915c..09f9735 100644
+index 1c1385e..18400ff 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
-@@ -1551,7 +1551,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
+@@ -1554,7 +1554,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
*/
usb_get_urb(urb);
atomic_inc(&urb->use_count);
usbmon_urb_submit(&hcd->self, urb);
/* NOTE requirements on root-hub callers (usbfs and the hub
-@@ -1578,7 +1578,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
+@@ -1581,7 +1581,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
urb->hcpriv = NULL;
INIT_LIST_HEAD(&urb->urb_list);
atomic_dec(&urb->use_count);
wake_up(&usb_kill_urb_queue);
usb_put_urb(urb);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index 3b71516..1f26579 100644
+index 1e9a8c9..b4248b7 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -26,6 +26,7 @@
#include <asm/uaccess.h>
#include <asm/byteorder.h>
-@@ -4665,6 +4666,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+@@ -4649,6 +4650,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
goto done;
return;
}
+:1095D00080000A8080000A00800009808000090065
+:00000001FF
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
-index 703342e..2b96b597 100644
+index 53f1e8a..2b96b597 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
-@@ -540,8 +540,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
- unlock_new_inode(inode);
- return inode;
- error:
-- unlock_new_inode(inode);
-- iput(inode);
-+ iget_failed(inode);
- return ERR_PTR(retval);
-
- }
-@@ -1312,7 +1311,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+@@ -1311,7 +1311,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
void
v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
{
p9_debug(P9_DEBUG_VFS, " %pd %s\n",
dentry, IS_ERR(s) ? "<error>" : s);
-diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
-index 9861c7c..4d3ecfb 100644
---- a/fs/9p/vfs_inode_dotl.c
-+++ b/fs/9p/vfs_inode_dotl.c
-@@ -149,8 +149,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
- unlock_new_inode(inode);
- return inode;
- error:
-- unlock_new_inode(inode);
-- iput(inode);
-+ iget_failed(inode);
- return ERR_PTR(retval);
-
- }
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 2d0cbbd..a6d6149 100644
--- a/fs/Kconfig.binfmt
return 0;
while (nr) {
diff --git a/fs/dcache.c b/fs/dcache.c
-index 50bb3c2..d874b57 100644
+index 5d03eb0..d874b57 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -545,7 +545,7 @@ static void __dentry_kill(struct dentry *dentry)
return NULL;
if (likely(spin_trylock(&parent->d_lock)))
return parent;
-@@ -642,7 +642,7 @@ static inline bool fast_dput(struct dentry *dentry)
-
- /*
- * If we have a d_op->d_delete() operation, we sould not
-- * let the dentry count go to zero, so use "put__or_lock".
-+ * let the dentry count go to zero, so use "put_or_lock".
- */
- if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
- return lockref_put_or_lock(&dentry->d_lockref);
@@ -660,8 +660,8 @@ static inline bool fast_dput(struct dentry *dentry)
*/
if (unlikely(ret < 0)) {
spin_unlock(&dentry->d_lock);
return 1;
}
-@@ -697,7 +697,7 @@ static inline bool fast_dput(struct dentry *dentry)
- */
- smp_rmb();
- d_flags = ACCESS_ONCE(dentry->d_flags);
-- d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST;
-+ d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
-
- /* Nothing to do? Dropping the reference was all we needed? */
- if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
@@ -716,7 +716,7 @@ static inline bool fast_dput(struct dentry *dentry)
* else could have killed it and marked it dead. Either way, we
* don't need to do anything else.
return 0;
}
-@@ -776,6 +776,9 @@ repeat:
- if (unlikely(d_unhashed(dentry)))
- goto kill_it;
-
-+ if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
-+ goto kill_it;
-+
- if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
- if (dentry->d_op->d_delete(dentry))
- goto kill_it;
-@@ -785,7 +788,7 @@ repeat:
+@@ -788,7 +788,7 @@ repeat:
dentry->d_flags |= DCACHE_REFERENCED;
dentry_lru_add(dentry);
spin_unlock(&dentry->d_lock);
return;
-@@ -800,7 +803,7 @@ EXPORT_SYMBOL(dput);
+@@ -803,7 +803,7 @@ EXPORT_SYMBOL(dput);
/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
}
static inline void __dget(struct dentry *dentry)
-@@ -841,8 +844,8 @@ repeat:
+@@ -844,8 +844,8 @@ repeat:
goto repeat;
}
rcu_read_unlock();
spin_unlock(&ret->d_lock);
return ret;
}
-@@ -920,9 +923,9 @@ restart:
+@@ -923,9 +923,9 @@ restart:
spin_lock(&inode->i_lock);
hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
spin_lock(&dentry->d_lock);
__dentry_kill(dentry);
dput(parent);
goto restart;
-@@ -957,7 +960,7 @@ static void shrink_dentry_list(struct list_head *list)
+@@ -960,7 +960,7 @@ static void shrink_dentry_list(struct list_head *list)
* We found an inuse dentry which was not removed from
* the LRU because of laziness during lookup. Do not free it.
*/
spin_unlock(&dentry->d_lock);
if (parent)
spin_unlock(&parent->d_lock);
-@@ -995,8 +998,8 @@ static void shrink_dentry_list(struct list_head *list)
+@@ -998,8 +998,8 @@ static void shrink_dentry_list(struct list_head *list)
dentry = parent;
while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
parent = lock_parent(dentry);
spin_unlock(&dentry->d_lock);
if (parent)
spin_unlock(&parent->d_lock);
-@@ -1036,7 +1039,7 @@ static enum lru_status dentry_lru_isolate(struct list_head *item,
+@@ -1039,7 +1039,7 @@ static enum lru_status dentry_lru_isolate(struct list_head *item,
* counts, just remove them from the LRU. Otherwise give them
* another pass through the LRU.
*/
d_lru_isolate(lru, dentry);
spin_unlock(&dentry->d_lock);
return LRU_REMOVED;
-@@ -1370,7 +1373,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
+@@ -1373,7 +1373,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
} else {
if (dentry->d_flags & DCACHE_LRU_LIST)
d_lru_del(dentry);
d_shrink_add(dentry, &data->dispose);
data->found++;
}
-@@ -1418,7 +1421,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
+@@ -1421,7 +1421,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
return D_WALK_CONTINUE;
/* root with refcount 1 is fine */
return D_WALK_CONTINUE;
printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
-@@ -1427,7 +1430,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
+@@ -1430,7 +1430,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
dentry->d_inode ?
dentry->d_inode->i_ino : 0UL,
dentry,
dentry->d_sb->s_type->name,
dentry->d_sb->s_id);
WARN_ON(1);
-@@ -1568,7 +1571,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+@@ -1571,7 +1571,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
if (name->len > DNAME_INLINE_LEN-1) {
size_t size = offsetof(struct external_name, name[1]);
if (!p) {
kmem_cache_free(dentry_cache, dentry);
return NULL;
-@@ -1591,7 +1594,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+@@ -1594,7 +1594,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
smp_wmb();
dentry->d_name.name = dname;
dentry->d_flags = 0;
spin_lock_init(&dentry->d_lock);
seqcount_init(&dentry->d_seq);
-@@ -1600,6 +1603,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+@@ -1603,6 +1603,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
dentry->d_sb = sb;
dentry->d_op = NULL;
dentry->d_fsdata = NULL;
INIT_HLIST_BL_NODE(&dentry->d_hash);
INIT_LIST_HEAD(&dentry->d_lru);
INIT_LIST_HEAD(&dentry->d_subdirs);
-@@ -2321,7 +2327,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
+@@ -2324,7 +2327,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
goto next;
}
found = dentry;
spin_unlock(&dentry->d_lock);
break;
-@@ -2389,7 +2395,7 @@ again:
+@@ -2392,7 +2395,7 @@ again:
spin_lock(&dentry->d_lock);
inode = dentry->d_inode;
isdir = S_ISDIR(inode->i_mode);
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
cpu_relax();
-@@ -3331,7 +3337,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
+@@ -3334,7 +3337,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
dentry->d_flags |= DCACHE_GENOCIDE;
}
}
return D_WALK_CONTINUE;
-@@ -3447,7 +3453,8 @@ void __init vfs_caches_init(unsigned long mempages)
+@@ -3450,7 +3453,8 @@ void __init vfs_caches_init(unsigned long mempages)
mempages -= reserve;
names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
/* locality groups */
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
-index 8d1e602..abf497b 100644
+index 4126048..3788867 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
err = ext4_handle_dirty_metadata(handle, NULL, bh);
if (unlikely(err))
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
-index ca9d4a2..4c52f42 100644
+index ca12affd..18b4b75 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
-@@ -1232,7 +1232,7 @@ static ext4_fsblk_t get_sb_block(void **data)
+@@ -1233,7 +1233,7 @@ static ext4_fsblk_t get_sb_block(void **data)
}
#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
#ifdef CONFIG_QUOTA
-@@ -2442,7 +2442,7 @@ struct ext4_attr {
+@@ -2443,7 +2443,7 @@ struct ext4_attr {
int offset;
int deprecated_val;
} u;
#define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
diff --git a/fs/namei.c b/fs/namei.c
-index fe30d3b..cf767ae 100644
+index fe30d3b..57656a7 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -336,17 +336,32 @@ int generic_permission(struct inode *inode, int mask)
{
return nd->saved_names[nd->depth];
}
+@@ -766,7 +773,7 @@ static inline int may_follow_link(struct path *link, struct nameidata *nd)
+ return 0;
+
+ /* Allowed if parent directory not sticky and world-writable. */
+- parent = nd->path.dentry->d_inode;
++ parent = nd->inode;
+ if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
+ return 0;
+
@@ -854,7 +861,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
{
struct dentry *dentry = link->dentry;
out:
return len;
diff --git a/fs/namespace.c b/fs/namespace.c
-index 02c6875..ac3626c 100644
+index fce3cc1..ac3626c 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
-@@ -1350,6 +1350,36 @@ enum umount_tree_flags {
- UMOUNT_PROPAGATE = 2,
- UMOUNT_CONNECTED = 4,
- };
-+
-+static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
-+{
-+ /* Leaving mounts connected is only valid for lazy umounts */
-+ if (how & UMOUNT_SYNC)
-+ return true;
-+
-+ /* A mount without a parent has nothing to be connected to */
-+ if (!mnt_has_parent(mnt))
-+ return true;
-+
-+ /* Because the reference counting rules change when mounts are
-+ * unmounted and connected, umounted mounts may not be
-+ * connected to mounted mounts.
-+ */
-+ if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
-+ return true;
-+
-+ /* Has it been requested that the mount remain connected? */
-+ if (how & UMOUNT_CONNECTED)
-+ return false;
-+
-+ /* Is the mount locked such that it needs to remain connected? */
-+ if (IS_MNT_LOCKED(mnt))
-+ return false;
-+
-+ /* By default disconnect the mount */
-+ return true;
-+}
-+
- /*
- * mount_lock must be held
- * namespace_sem must be held for write
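
The disconnect_mount() helper above replaces one dense boolean expression with an ordered ladder of early returns, one commented rule each. A minimal sketch of the same shape, using hypothetical stand-in fields rather than the real struct mount flags:

#include <stdbool.h>

struct mnt_state {
        bool sync_umount;      /* UMOUNT_SYNC requested */
        bool has_parent;       /* mnt_has_parent() */
        bool parent_umounted;  /* parent carries MNT_UMOUNT */
        bool locked;           /* IS_MNT_LOCKED() */
};

/* Evaluate the rules strictly in order; the default falls out last. */
static bool should_disconnect(const struct mnt_state *m, bool keep_connected)
{
        if (m->sync_umount)      /* staying connected is lazy-umount only */
                return true;
        if (!m->has_parent)      /* nothing to be connected to */
                return true;
        if (!m->parent_umounted) /* umounted mounts may not hang off mounted ones */
                return true;
        if (keep_connected)      /* caller asked for UMOUNT_CONNECTED */
                return false;
        if (m->locked)           /* locked mounts must remain connected */
                return false;
        return true;             /* disconnect by default */
}
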
-@@ -1387,10 +1417,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
- if (how & UMOUNT_SYNC)
- p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
-
-- disconnect = !(((how & UMOUNT_CONNECTED) &&
-- mnt_has_parent(p) &&
-- (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) ||
-- IS_MNT_LOCKED_AND_LAZY(p));
-+ disconnect = disconnect_mount(p, how);
-
- pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
- disconnect ? &unmounted : NULL);
-@@ -1478,6 +1505,9 @@ static int do_umount(struct mount *mnt, int flags)
+@@ -1505,6 +1505,9 @@ static int do_umount(struct mount *mnt, int flags)
if (!(sb->s_flags & MS_RDONLY))
retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
up_write(&sb->s_umount);
return retval;
}
-@@ -1500,6 +1530,9 @@ static int do_umount(struct mount *mnt, int flags)
+@@ -1527,6 +1530,9 @@ static int do_umount(struct mount *mnt, int flags)
}
unlock_mount_hash();
namespace_unlock();
return retval;
}
-@@ -1527,11 +1560,8 @@ void __detach_mounts(struct dentry *dentry)
- while (!hlist_empty(&mp->m_list)) {
- mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
- if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
-- struct mount *p, *tmp;
-- list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
-- hlist_add_head(&p->mnt_umount.s_list, &unmounted);
-- umount_mnt(p);
-- }
-+ hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
-+ umount_mnt(mnt);
- }
- else umount_tree(mnt, UMOUNT_CONNECTED);
- }
-@@ -1557,7 +1587,7 @@ static inline bool may_mount(void)
+@@ -1581,7 +1587,7 @@ static inline bool may_mount(void)
* unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
*/
{
struct path path;
struct mount *mnt;
-@@ -1602,7 +1632,7 @@ out:
+@@ -1626,7 +1632,7 @@ out:
/*
* The 2.0 compatible umount. No flags.
*/
{
return sys_umount(name, 0);
}
-@@ -2677,6 +2707,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
+@@ -2701,6 +2707,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
MS_STRICTATIME);
if (flags & MS_REMOUNT)
retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
data_page);
-@@ -2690,7 +2730,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
+@@ -2714,7 +2730,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
retval = do_new_mount(&path, type_page, flags, mnt_flags,
dev_name, data_page);
dput_out:
return retval;
}
-@@ -2708,7 +2751,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
+@@ -2732,7 +2751,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
* number incrementing at 10Ghz will take 12,427 years to wrap which
* is effectively never, so we can ignore the possibility.
*/
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
{
-@@ -2724,7 +2767,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
+@@ -2748,7 +2767,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
return ERR_PTR(ret);
}
new_ns->ns.ops = &mntns_operations;
atomic_set(&new_ns->count, 1);
new_ns->root = NULL;
INIT_LIST_HEAD(&new_ns->list);
-@@ -2734,7 +2777,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
+@@ -2758,7 +2777,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
return new_ns;
}
struct user_namespace *user_ns, struct fs_struct *new_fs)
{
struct mnt_namespace *new_ns;
-@@ -2855,8 +2898,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
+@@ -2879,8 +2898,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
}
EXPORT_SYMBOL(mount_subtree);
{
int ret;
char *kernel_type;
-@@ -2962,6 +3005,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
+@@ -2986,6 +3005,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
if (error)
goto out2;
get_fs_root(current->fs, &root);
old_mp = lock_mount(&old);
error = PTR_ERR(old_mp);
-@@ -3263,7 +3311,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
+@@ -3287,7 +3311,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
return -EPERM;
static struct callback_op callback_ops[];
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
-index f734562..3fd6c4e 100644
+index 5d25b9d..765fc0f 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
-@@ -1275,16 +1275,16 @@ static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fat
+@@ -1277,16 +1277,16 @@ static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fat
return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0;
}
}
EXPORT_SYMBOL_GPL(nfs_inc_attr_generation_counter);
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 9e6475b..7970138 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -296,6 +296,22 @@ extern struct rpc_procinfo nfs4_procedures[];
+
+ #ifdef CONFIG_NFS_V4_SECURITY_LABEL
+ extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags);
++static inline struct nfs4_label *
++nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
++{
++ if (!dst || !src)
++ return NULL;
++
++ if (src->len > NFS4_MAXLABELLEN)
++ return NULL;
++
++ dst->lfs = src->lfs;
++ dst->pi = src->pi;
++ dst->len = src->len;
++ memcpy(dst->label, src->label, src->len);
++
++ return dst;
++}
+ static inline void nfs4_label_free(struct nfs4_label *label)
+ {
+ if (label) {
+@@ -316,6 +332,11 @@ static inline void nfs4_label_free(void *label) {}
+ static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
+ {
+ }
++static inline struct nfs4_label *
++nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
++{
++ return NULL;
++}
+ #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
+
+ /* proc.c */
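
nfs4_label_copy() guards its memcpy() twice: both pointers must exist and the source length must fit the fixed bound before any bytes move, so a corrupt src->len can never overrun the destination. The same defensive-copy pattern in plain C, with MAXLEN and struct label as made-up stand-ins for NFS4_MAXLABELLEN and struct nfs4_label:

#include <stddef.h>
#include <string.h>

#define MAXLEN 4096 /* stand-in bound */

struct label {
        unsigned int lfs, pi, len;
        char data[MAXLEN];
};

/* Returns dst on success, NULL if either side is missing or the
 * length is out of bounds; only then is the payload copied. */
static struct label *label_copy(struct label *dst, const struct label *src)
{
        if (!dst || !src)
                return NULL;
        if (src->len > MAXLEN)
                return NULL;
        dst->lfs = src->lfs;
        dst->pi  = src->pi;
        dst->len = src->len;
        memcpy(dst->data, src->data, src->len);
        return dst;
}
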
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index d3f2051..dd338dc 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -916,6 +916,7 @@ struct nfs4_opendata {
+ struct nfs_open_confirmres c_res;
+ struct nfs4_string owner_name;
+ struct nfs4_string group_name;
++ struct nfs4_label *a_label;
+ struct nfs_fattr f_attr;
+ struct nfs4_label *f_label;
+ struct dentry *dir;
+@@ -1019,6 +1020,10 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
+ if (IS_ERR(p->f_label))
+ goto err_free_p;
+
++ p->a_label = nfs4_label_alloc(server, gfp_mask);
++ if (IS_ERR(p->a_label))
++ goto err_free_f;
++
+ alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
+ p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
+ if (IS_ERR(p->o_arg.seqid))
+@@ -1047,7 +1052,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
+ p->o_arg.server = server;
+ p->o_arg.bitmask = nfs4_bitmask(server, label);
+ p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
+- p->o_arg.label = label;
++ p->o_arg.label = nfs4_label_copy(p->a_label, label);
+ p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
+ switch (p->o_arg.claim) {
+ case NFS4_OPEN_CLAIM_NULL:
+@@ -1080,6 +1085,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
+ return p;
+
+ err_free_label:
++ nfs4_label_free(p->a_label);
++err_free_f:
+ nfs4_label_free(p->f_label);
+ err_free_p:
+ kfree(p);
+@@ -1099,6 +1106,7 @@ static void nfs4_opendata_free(struct kref *kref)
+ nfs4_put_open_state(p->state);
+ nfs4_put_state_owner(p->owner);
+
++ nfs4_label_free(p->a_label);
+ nfs4_label_free(p->f_label);
+
+ dput(p->dir);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 864e200..357c255 100644
--- a/fs/nfsd/nfs4proc.c
static struct nfsd4_operation nfsd4_ops[];
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
-index 158badf..f7132ea 100644
+index d4d8445..36ae1a1 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1703,7 +1703,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
goto out;
if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
-diff --git a/fs/pnode.h b/fs/pnode.h
-index 7114ce6..0fcdbe7 100644
---- a/fs/pnode.h
-+++ b/fs/pnode.h
-@@ -20,8 +20,6 @@
- #define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
- #define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
- #define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
--#define IS_MNT_LOCKED_AND_LAZY(m) \
-- (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED)
-
- #define CL_EXPIRE 0x01
- #define CL_SLAVE 0x02
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 84bb65b8..4270e47 100644
--- a/fs/posix_acl.c
return -EINVAL;
diff --git a/fs/seq_file.c b/fs/seq_file.c
-index 555f821..34684d7 100644
+index 555f821..02a990b 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -12,6 +12,8 @@
#include <asm/uaccess.h>
#include <asm/page.h>
-@@ -23,16 +25,7 @@ static void seq_set_overflow(struct seq_file *m)
-
- static void *seq_buf_alloc(unsigned long size)
- {
-- void *buf;
--
-- /*
-- * __GFP_NORETRY to avoid oom-killings with high-order allocations -
-- * it's better to fall back to vmalloc() than to kill things.
-- */
+@@ -29,9 +31,9 @@ static void *seq_buf_alloc(unsigned long size)
+ * __GFP_NORETRY to avoid oom-killings with high-order allocations -
+ * it's better to fall back to vmalloc() than to kill things.
+ */
- buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
-- if (!buf && size > PAGE_SIZE)
++ buf = kmalloc(size, GFP_KERNEL | GFP_USERCOPY | __GFP_NORETRY | __GFP_NOWARN);
+ if (!buf && size > PAGE_SIZE)
- buf = vmalloc(size);
-- return buf;
-+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
++ buf = vmalloc_usercopy(size);
+ return buf;
}
- /**
-@@ -65,6 +58,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
+@@ -65,6 +67,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
#ifdef CONFIG_USER_NS
p->user_ns = file->f_cred->user_ns;
#endif
/*
* Wrappers around seq_open(e.g. swaps_open) need to be
-@@ -87,6 +83,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
+@@ -87,6 +92,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
}
EXPORT_SYMBOL(seq_open);
static int traverse(struct seq_file *m, loff_t offset)
{
loff_t pos = 0, index;
-@@ -158,7 +164,7 @@ Eoverflow:
+@@ -158,7 +173,7 @@ Eoverflow:
ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
{
struct seq_file *m = file->private_data;
loff_t pos;
size_t n;
void *p;
-@@ -557,7 +563,7 @@ static void single_stop(struct seq_file *p, void *v)
+@@ -557,7 +572,7 @@ static void single_stop(struct seq_file *p, void *v)
int single_open(struct file *file, int (*show)(struct seq_file *, void *),
void *data)
{
int res = -ENOMEM;
if (op) {
-@@ -593,6 +599,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
+@@ -593,6 +608,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
}
EXPORT_SYMBOL(single_open_size);
+}
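
The seq_buf_alloc() hunk keeps upstream's two-stage strategy — try a physically contiguous kmalloc() cheaply, then fall back to vmalloc() — while routing both through the patch's usercopy-aware variants (GFP_USERCOPY, vmalloc_usercopy()). The underlying fallback idiom, stripped of the patch-specific flags:

#include <linux/slab.h>
#include <linux/vmalloc.h>

/* __GFP_NORETRY|__GFP_NOWARN make a failed high-order attempt cheap
 * and silent; anything larger than a page then falls back to
 * virtually contiguous memory instead of pressuring the OOM killer. */
static void *buf_alloc(unsigned long size)
{
        void *buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);

        if (!buf && size > PAGE_SIZE)
                buf = vmalloc(size);
        return buf;
}

The matching free side must check which allocator won, e.g. via is_vmalloc_addr() before choosing vfree() or kfree(); upstream wraps that in kvfree().
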
diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
new file mode 100644
-index 0000000..0e39d8c
+index 0000000..0e39d8c7
--- /dev/null
+++ b/grsecurity/grsec_mem.c
@@ -0,0 +1,48 @@
extern struct ipc_namespace init_ipc_ns;
extern atomic_t nr_ipc_ns;
diff --git a/include/linux/irq.h b/include/linux/irq.h
-index 62c6901..827f8f6 100644
+index 3532dca..03ffc0b 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -370,7 +370,8 @@ struct irq_chip {
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
diff --git a/include/linux/libata.h b/include/linux/libata.h
-index 28aeae4..320b3bf6 100644
+index e0e3378..38e206f 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
-@@ -988,7 +988,7 @@ struct ata_port_operations {
+@@ -991,7 +991,7 @@ struct ata_port_operations {
* fields must be pointers.
*/
const struct ata_port_operations *inherits;
static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
diff --git a/include/linux/mm.h b/include/linux/mm.h
-index 0755b9f..2960e96 100644
+index 0755b9f..bf8eab1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -135,6 +135,11 @@ extern unsigned int kobjsize(const void *objp);
struct mmu_gather;
struct inode;
-@@ -1131,8 +1137,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+@@ -1002,6 +1008,34 @@ static inline int page_mapped(struct page *page)
+ }
+
+ /*
++ * Return true only if the page has been allocated with
++ * ALLOC_NO_WATERMARKS and the low watermark was not
++ * met implying that the system is under some pressure.
++ */
++static inline bool page_is_pfmemalloc(struct page *page)
++{
++ /*
++ * Page index cannot be this large so this must be
++ * a pfmemalloc page.
++ */
++ return page->index == -1UL;
++}
++
++/*
++ * Only to be called by the page allocator on a freshly allocated
++ * page.
++ */
++static inline void set_page_pfmemalloc(struct page *page)
++{
++ page->index = -1UL;
++}
++
++static inline void clear_page_pfmemalloc(struct page *page)
++{
++ page->index = 0;
++}
++
++/*
+ * Different kinds of faults, as returned by handle_mm_fault().
+ * Used to decide whether a process gets delivered SIGBUS or
+ * just gets major/minor fault counters bumped up.
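
page_is_pfmemalloc() works because page->index and the removed page->pfmemalloc bool shared union storage: instead of a separate flag, the allocator now stores -1UL — a value no file offset can take — in index itself. The sentinel-in-shared-field idea in miniature, with struct fake_page as an obvious stand-in:

struct fake_page {
        unsigned long index; /* offset within mapping, or the -1UL sentinel */
};

static inline void set_pfmemalloc(struct fake_page *p)   { p->index = -1UL; }
static inline void clear_pfmemalloc(struct fake_page *p) { p->index = 0; }

/* No legitimate mapping offset is this large, so equality is unambiguous. */
static inline int is_pfmemalloc(const struct fake_page *p)
{
        return p->index == -1UL;
}
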
+@@ -1131,8 +1165,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
unsigned int flags, unsigned long *prot, resource_size_t *phys);
static inline void unmap_shared_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen)
-@@ -1172,9 +1178,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
+@@ -1172,9 +1206,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
}
#endif
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
-@@ -1219,34 +1225,6 @@ int clear_page_dirty_for_io(struct page *page);
+@@ -1219,34 +1253,6 @@ int clear_page_dirty_for_io(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
extern struct task_struct *task_of_stack(struct task_struct *task,
struct vm_area_struct *vma, bool in_group);
-@@ -1369,8 +1347,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
+@@ -1369,8 +1375,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
{
return 0;
}
#endif
#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
-@@ -1380,6 +1365,12 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
+@@ -1380,6 +1393,12 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
return 0;
}
static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
-@@ -1392,6 +1383,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
+@@ -1392,6 +1411,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
static inline void mm_nr_pmds_init(struct mm_struct *mm)
{
-@@ -1429,11 +1421,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
+@@ -1429,11 +1449,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
NULL: pud_offset(pgd, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
#if USE_SPLIT_PTE_PTLOCKS
-@@ -1810,12 +1814,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
+@@ -1810,12 +1842,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);
if (rlim < RLIM_INFINITY) {
if (((new - start) + (end_data - start_data)) > rlim)
return -ENOSPC;
-@@ -1840,7 +1855,7 @@ extern int install_special_mapping(struct mm_struct *mm,
+@@ -1840,7 +1883,7 @@ extern int install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long flags, struct page **pages);
extern unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
-@@ -1848,6 +1863,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1848,6 +1891,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
unsigned long pgoff, unsigned long *populate);
extern int do_munmap(struct mm_struct *, unsigned long, size_t);
#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
-@@ -1876,10 +1892,11 @@ struct vm_unmapped_area_info {
+@@ -1876,10 +1920,11 @@ struct vm_unmapped_area_info {
unsigned long high_limit;
unsigned long align_mask;
unsigned long align_offset;
/*
* Search for an unmapped address range.
-@@ -1891,7 +1908,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
+@@ -1891,7 +1936,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
* - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
*/
static inline unsigned long
{
if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
return unmapped_area_topdown(info);
-@@ -1953,6 +1970,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
+@@ -1953,6 +1998,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct **pprev);
/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
NULL if none. Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
-@@ -1982,10 +2003,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
+@@ -1982,10 +2031,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
}
#ifdef CONFIG_MMU
{
return __pgprot(0);
}
-@@ -2047,6 +2068,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
+@@ -2047,6 +2096,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
static inline void vm_stat_account(struct mm_struct *mm,
unsigned long flags, struct file *file, long pages)
{
mm->total_vm += pages;
}
#endif /* CONFIG_PROC_FS */
-@@ -2149,7 +2175,7 @@ extern int unpoison_memory(unsigned long pfn);
+@@ -2149,7 +2203,7 @@ extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern int soft_offline_page(struct page *page, int flags);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
-@@ -2200,5 +2226,11 @@ void __init setup_nr_node_ids(void);
+@@ -2200,5 +2254,11 @@ void __init setup_nr_node_ids(void);
static inline void setup_nr_node_ids(void) {}
#endif
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
-index 8d37e26..6a6f55b 100644
+index 8d37e26..29c54c9 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
-@@ -313,7 +313,9 @@ struct vm_area_struct {
+@@ -63,15 +63,6 @@ struct page {
+ union {
+ pgoff_t index; /* Our offset within mapping. */
+ void *freelist; /* sl[aou]b first free object */
+- bool pfmemalloc; /* If set by the page allocator,
+- * ALLOC_NO_WATERMARKS was set
+- * and the low watermark was not
+- * met implying that the system
+- * is under some pressure. The
+- * caller should try ensure
+- * this page is only used to
+- * free other pages.
+- */
+ };
+
+ union {
+@@ -313,7 +304,9 @@ struct vm_area_struct {
#ifdef CONFIG_NUMA
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
struct core_thread {
struct task_struct *task;
-@@ -466,7 +468,25 @@ struct mm_struct {
+@@ -466,7 +459,25 @@ struct mm_struct {
/* address of the bounds directory */
void __user *bd_addr;
#endif
static inline void disallow_signal(int sig)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
-index f15154a..72cf02c 100644
+index f15154a..17b985a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -776,7 +776,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
gfp_t priority)
{
return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
-@@ -1971,7 +1971,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
+@@ -1590,20 +1590,16 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ /*
+- * Propagate page->pfmemalloc to the skb if we can. The problem is
+- * that not all callers have unique ownership of the page. If
+- * pfmemalloc is set, we check the mapping as a mapping implies
+- * page->index is set (index and pfmemalloc share space).
+- * If it's a valid mapping, we cannot use page->pfmemalloc but we
+- * do not lose pfmemalloc information as the pages would not be
+- * allocated using __GFP_MEMALLOC.
++ * Propagate page pfmemalloc to the skb if we can. The problem is
++ * that not all callers have unique ownership of the page but rely
++ * on page_is_pfmemalloc doing the right thing(tm).
+ */
+ frag->page.p = page;
+ frag->page_offset = off;
+ skb_frag_size_set(frag, size);
+
+ page = compound_head(page);
+- if (page->pfmemalloc && !page->mapping)
++ if (page_is_pfmemalloc(page))
+ skb->pfmemalloc = true;
+ }
+
+@@ -1971,7 +1967,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
return skb->inner_transport_header - skb->inner_network_header;
}
{
return skb_network_header(skb) - skb->data;
}
-@@ -2031,7 +2031,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
+@@ -2031,7 +2027,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
* NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
*/
#ifndef NET_SKB_PAD
#endif
int ___pskb_trim(struct sk_buff *skb, unsigned int len);
-@@ -2673,9 +2673,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
+@@ -2250,7 +2246,7 @@ static inline struct page *dev_alloc_page(void)
+ static inline void skb_propagate_pfmemalloc(struct page *page,
+ struct sk_buff *skb)
+ {
+- if (page && page->pfmemalloc)
++ if (page_is_pfmemalloc(page))
+ skb->pfmemalloc = true;
+ }
+
+@@ -2673,9 +2669,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
unsigned int datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
struct msghdr *msg, int size)
{
return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
-@@ -3197,6 +3197,9 @@ static inline void nf_reset(struct sk_buff *skb)
+@@ -3197,6 +3193,9 @@ static inline void nf_reset(struct sk_buff *skb)
nf_bridge_put(skb->nf_bridge);
skb->nf_bridge = NULL;
#endif
#endif
#endif /* _LINUX_VGA_SWITCHEROO_H_ */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
-index 0ec5983..cc61051 100644
+index 0ec5983..d5888bb 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
-@@ -18,6 +18,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
+@@ -18,6 +18,14 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
#define VM_NO_GUARD 0x00000040 /* don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
+#define VM_KERNEXEC 0x00000100 /* allocate from executable kernel memory range */
+#endif
++
++#define VM_USERCOPY 0x00000200 /* allocation intended for copies to userland */
++
+
/* bits [20..32] reserved for arch specific ioremap internals */
/*
-@@ -86,6 +91,10 @@ extern void *vmap(struct page **pages, unsigned int count,
+@@ -67,6 +75,7 @@ static inline void vmalloc_init(void)
+ #endif
+
+ extern void *vmalloc(unsigned long size);
++extern void *vmalloc_usercopy(unsigned long size);
+ extern void *vzalloc(unsigned long size);
+ extern void *vmalloc_user(unsigned long size);
+ extern void *vmalloc_node(unsigned long size, int node);
+@@ -86,6 +95,10 @@ extern void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot);
extern void vunmap(const void *addr);
extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
unsigned long uaddr, void *kaddr,
unsigned long size);
-@@ -150,7 +159,7 @@ extern void free_vm_area(struct vm_struct *area);
+@@ -150,7 +163,7 @@ extern void free_vm_area(struct vm_struct *area);
/* for /dev/kmem */
extern long vread(char *buf, char *addr, unsigned long count);
/** inet_connection_sock - INET connection oriented sock
*
+diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
+index 8d17655..2f3246d 100644
+--- a/include/net/inet_frag.h
++++ b/include/net/inet_frag.h
+@@ -21,13 +21,11 @@ struct netns_frags {
+ * @INET_FRAG_FIRST_IN: first fragment has arrived
+ * @INET_FRAG_LAST_IN: final fragment has arrived
+ * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
+- * @INET_FRAG_EVICTED: frag queue is being evicted
+ */
+ enum {
+ INET_FRAG_FIRST_IN = BIT(0),
+ INET_FRAG_LAST_IN = BIT(1),
+ INET_FRAG_COMPLETE = BIT(2),
+- INET_FRAG_EVICTED = BIT(3)
+ };
+
+ /**
+@@ -45,6 +43,7 @@ enum {
+ * @flags: fragment queue flags
+ * @max_size: (ipv4 only) maximum received fragment size with IP_DF set
+ * @net: namespace that this frag belongs to
++ * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
+ */
+ struct inet_frag_queue {
+ spinlock_t lock;
+@@ -59,6 +58,7 @@ struct inet_frag_queue {
+ __u8 flags;
+ u16 max_size;
+ struct netns_frags *net;
++ struct hlist_node list_evictor;
+ };
+
+ #define INETFRAGS_HASHSZ 1024
+@@ -125,6 +125,11 @@ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f
+ inet_frag_destroy(q, f);
+ }
+
++static inline bool inet_frag_evicting(struct inet_frag_queue *q)
++{
++ return !hlist_unhashed(&q->list_evictor);
++}
++
+ /* Memory Tracking Functions. */
+
+ /* The default percpu_counter batch size is not big enough to scale to
+@@ -139,14 +144,14 @@ static inline int frag_mem_limit(struct netns_frags *nf)
+ return percpu_counter_read(&nf->mem);
+ }
+
+-static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
++static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
+ {
+- __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
++ __percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
+ }
+
+-static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
++static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
+ {
+- __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
++ __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
+ }
+
+ static inline void init_frag_mem_limit(struct netns_frags *nf)
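
Two things happen in the inet_frag.h hunk: the INET_FRAG_EVICTED flag bit disappears because list membership itself now answers the question (hlist_unhashed() on list_evictor), and the mem-limit helpers take the netns_frags directly so callers that already hold it skip the pointer chase through q->net. The membership-as-flag trick, sketched against the real hlist API:

#include <linux/list.h>

struct frag_queue {
        struct hlist_node list_evictor; /* INIT_HLIST_NODE()ed at alloc */
};

/* On the evictor list <=> being evicted; unlinking clears the "flag". */
static bool frag_evicting(const struct frag_queue *q)
{
        return !hlist_unhashed(&q->list_evictor);
}
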
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index d5332dd..10a5c3c 100644
--- a/include/net/inetpeer.h
TP_ARGS(irq, action, ret),
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
-index 551b673..9c680df 100644
+index a7e41fb..c115957 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -350,6 +350,7 @@ typedef struct drm_i915_irq_wait {
next_state = Reset;
return 0;
diff --git a/init/main.c b/init/main.c
-index 2a89545..eb9203f 100644
+index 2a89545..58711ee 100644
--- a/init/main.c
+++ b/init/main.c
@@ -97,6 +97,8 @@ extern void radix_tree_init(void);
/*
* Debug helper: via this flag we know that we are in 'early bootup code'
* where only the boot processor is running with IRQ disabled. This means
-@@ -158,6 +160,84 @@ static int __init set_reset_devices(char *str)
+@@ -158,6 +160,37 @@ static int __init set_reset_devices(char *str)
__setup("reset_devices", set_reset_devices);
+__setup("grsec_sysfs_restrict", setup_grsec_sysfs_restrict);
+#endif
+
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+unsigned long pax_user_shadow_base __read_only;
-+EXPORT_SYMBOL(pax_user_shadow_base);
-+extern char pax_enter_kernel_user[];
-+extern char pax_exit_kernel_user[];
-+#endif
-+
-+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+static int __init setup_pax_nouderef(char *str)
-+{
-+#ifdef CONFIG_X86_32
-+ unsigned int cpu;
-+ struct desc_struct *gdt;
-+
-+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
-+ gdt = get_cpu_gdt_table(cpu);
-+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
-+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
-+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
-+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
-+ }
-+ loadsegment(ds, __KERNEL_DS);
-+ loadsegment(es, __KERNEL_DS);
-+ loadsegment(ss, __KERNEL_DS);
-+#else
-+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
-+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
-+ clone_pgd_mask = ~(pgdval_t)0UL;
-+ pax_user_shadow_base = 0UL;
-+ setup_clear_cpu_cap(X86_FEATURE_PCIDUDEREF);
-+#endif
-+
-+ return 0;
-+}
-+early_param("pax_nouderef", setup_pax_nouderef);
-+
-+#ifdef CONFIG_X86_64
-+static int __init setup_pax_weakuderef(char *str)
-+{
-+ if (clone_pgd_mask != ~(pgdval_t)0UL)
-+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
-+ return 1;
-+}
-+__setup("pax_weakuderef", setup_pax_weakuderef);
-+#endif
-+#endif
-+
+#ifdef CONFIG_PAX_SOFTMODE
+int pax_softmode;
+
static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
static const char *panic_later, *panic_param;
-@@ -726,7 +806,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
+@@ -726,7 +759,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
struct blacklist_entry *entry;
char *fn_name;
if (!fn_name)
return false;
-@@ -778,7 +858,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
+@@ -778,7 +811,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
{
int count = preempt_count();
int ret;
if (initcall_blacklisted(fn))
return -EPERM;
-@@ -788,18 +868,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
+@@ -788,18 +821,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
else
ret = fn();
return ret;
}
-@@ -905,8 +984,8 @@ static int run_init_process(const char *init_filename)
+@@ -905,8 +937,8 @@ static int run_init_process(const char *init_filename)
{
argv_init[0] = init_filename;
return do_execve(getname_kernel(init_filename),
}
static int try_to_run_init_process(const char *init_filename)
-@@ -923,6 +1002,10 @@ static int try_to_run_init_process(const char *init_filename)
+@@ -923,6 +955,10 @@ static int try_to_run_init_process(const char *init_filename)
return ret;
}
static noinline void __init kernel_init_freeable(void);
static int __ref kernel_init(void *unused)
-@@ -947,6 +1030,11 @@ static int __ref kernel_init(void *unused)
+@@ -947,6 +983,11 @@ static int __ref kernel_init(void *unused)
ramdisk_execute_command, ret);
}
/*
* We try each of these until one succeeds.
*
-@@ -1002,7 +1090,7 @@ static noinline void __init kernel_init_freeable(void)
+@@ -1002,7 +1043,7 @@ static noinline void __init kernel_init_freeable(void)
do_basic_setup();
/* Open the /dev/console on the rootfs, this should never fail */
pr_err("Warning: unable to open an initial console.\n");
(void) sys_dup(0);
-@@ -1015,11 +1103,13 @@ static noinline void __init kernel_init_freeable(void)
+@@ -1015,11 +1056,13 @@ static noinline void __init kernel_init_freeable(void)
if (!ramdisk_execute_command)
ramdisk_execute_command = "/init";
mq_table.data = get_mq(table);
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
-index 3aaea7f..e8a13d6 100644
+index c3fc5c2..1f32fe2 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
-@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
+@@ -275,6 +275,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
info->attr.mq_msgsize);
if (u->mq_bytes + mq_bytes < u->mq_bytes ||
u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
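
The mq_bytes test above relies on a classic unsigned-overflow idiom: for unsigned types, a + b wraps exactly when the sum compares smaller than an operand, and unlike signed arithmetic the wrap itself is well defined. Isolated, with hypothetical names:

#include <stdbool.h>

static bool add_would_wrap(unsigned long a, unsigned long b)
{
        return a + b < a;
}

/* Charge "want" against "used" under "limit", rejecting wraparound
 * first so a huge request cannot sneak under the limit check. */
static bool charge_ok(unsigned long used, unsigned long want,
                      unsigned long limit)
{
        return !add_would_wrap(used, want) && used + want <= limit;
}
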
diff --git a/ipc/sem.c b/ipc/sem.c
-index d1a6edd..ef08b40 100644
+index c50aa57..07e9531 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
-@@ -1780,7 +1780,7 @@ static int get_queue_result(struct sem_queue *q)
+@@ -1790,7 +1790,7 @@ static int get_queue_result(struct sem_queue *q)
}
SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
{
int error = -EINVAL;
struct sem_array *sma;
-@@ -2015,7 +2015,7 @@ out_free:
+@@ -2025,7 +2025,7 @@ out_free:
}
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
#ifdef CONFIG_MODULE_UNLOAD
{
diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 0ceb386..ddaf008 100644
+index 9481749..5fbec5b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -172,8 +172,15 @@ static struct srcu_struct pmus_srcu;
list_for_each_entry(child, &event->child_list, child_list) {
total += perf_event_read(child);
-@@ -4268,10 +4275,10 @@ void perf_event_update_userpage(struct perf_event *event)
+@@ -4303,10 +4310,10 @@ void perf_event_update_userpage(struct perf_event *event)
userpg->offset -= local64_read(&event->hw.prev_count);
userpg->time_enabled = enabled +
arch_perf_update_userpage(event, userpg, now);
-@@ -4946,7 +4953,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
+@@ -4989,7 +4996,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
/* Data. */
sp = perf_user_stack_pointer(regs);
dyn_size = dump_size - rem;
perf_output_skip(handle, rem);
-@@ -5037,11 +5044,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
+@@ -5080,11 +5087,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
values[n++] = perf_event_count(event);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
values[n++] = enabled +
}
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(event);
-@@ -7533,7 +7540,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+@@ -7576,7 +7583,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
event->parent = parent_event;
event->ns = get_pid_ns(task_active_pid_ns(current));
event->state = PERF_EVENT_STATE_INACTIVE;
-@@ -7892,6 +7899,11 @@ SYSCALL_DEFINE5(perf_event_open,
+@@ -7935,6 +7942,11 @@ SYSCALL_DEFINE5(perf_event_open,
if (flags & ~PERF_FLAG_ALL)
return -EINVAL;
err = perf_copy_attr(attr_uptr, &attr);
if (err)
return err;
-@@ -8340,10 +8352,10 @@ static void sync_child_event(struct perf_event *child_event,
+@@ -8383,10 +8395,10 @@ static void sync_child_event(struct perf_event *child_event,
/*
* Add back the child's count to the parent's count:
*/
if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
return -EPERM;
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
-index 7e01f78..f5da19d 100644
+index 9e30231..75a6d97 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
if (pm_wakeup_pending()) {
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
-index c099b08..54bcfe8 100644
+index bff0169..c90815d 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -463,7 +463,7 @@ static int log_store(int facility, int level,
{
@@ -486,6 +486,11 @@ int check_syslog_permissions(int type, bool from_file)
if (from_file && type != SYSLOG_ACTION_OPEN)
- return 0;
+ goto ok;
+#ifdef CONFIG_GRKERNSEC_DMESG
+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
+ return -EPERM;
+#endif
+
if (syslog_action_restricted(type)) {
if (capable(CAP_SYSLOG))
- return 0;
+ goto ok;
diff --git a/kernel/profile.c b/kernel/profile.c
index a7bcd28..5b368fa 100644
--- a/kernel/profile.c
}
diff --git a/kernel/resource.c b/kernel/resource.c
-index 90552aa..8c02098 100644
+index 90552aa..ad13346 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -162,8 +162,18 @@ static const struct file_operations proc_iomem_operations = {
return 0;
}
__initcall(ioresources_init);
+@@ -504,13 +514,13 @@ int region_is_ram(resource_size_t start, unsigned long size)
+ {
+ struct resource *p;
+ resource_size_t end = start + size - 1;
+- int flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++ unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ const char *name = "System RAM";
+ int ret = -1;
+
+ read_lock(&resource_lock);
+ for (p = iomem_resource.child; p ; p = p->sibling) {
+- if (end < p->start)
++ if (p->end < start)
+ continue;
+
+ if (p->start <= start && end <= p->end) {
+@@ -521,7 +531,7 @@ int region_is_ram(resource_size_t start, unsigned long size)
+ ret = 1;
+ break;
+ }
+- if (p->end < start)
++ if (end < p->start)
+ break; /* not found */
+ }
+ read_unlock(&resource_lock);
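
The region_is_ram() fix widens flags to unsigned long (matching the resource flags field) and swaps two comparisons that had been transposed: the walk over the start-sorted resource list must skip entries that end before the query starts (p->end < start) and may only stop once an entry begins past the query's end (end < p->start). With the tests reversed, it skipped valid candidates and broke out early. The corrected walk over a start-sorted array, keeping the same 1/0/-1 result convention:

struct range { unsigned long start, end; }; /* inclusive bounds */

static int range_lookup(const struct range *r, int n,
                        unsigned long start, unsigned long end)
{
        for (int i = 0; i < n; i++) {
                if (r[i].end < start)
                        continue;  /* lies entirely before the query */
                if (end < r[i].start)
                        break;     /* sorted: later entries start even higher */
                if (r[i].start <= start && end <= r[i].end)
                        return 1;  /* query fully contained */
                return 0;          /* partial overlap */
        }
        return -1;                 /* no overlapping entry */
}
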
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index eae160d..c9aa22e 100644
--- a/kernel/sched/auto_group.c
static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
diff --git a/kernel/signal.c b/kernel/signal.c
-index d51c5dd..065c4c8 100644
+index 0206be7..6445784 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
return ret;
}
-@@ -2915,7 +2938,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
+@@ -2918,7 +2941,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
int error = -ESRCH;
rcu_read_lock();
if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
error = check_kill_permission(sig, info, p);
/*
-@@ -3244,8 +3275,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
+@@ -3247,8 +3278,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
}
seg = get_fs();
set_fs(KERNEL_DS);
if (!retval) {
if (old_rlim)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index c3eee4c..586e4a0 100644
+index c3eee4c..2e53ad1 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -94,7 +94,6 @@
if (copy_to_user(*buf, tmp, len))
return -EFAULT;
*size -= len;
+@@ -1988,7 +2058,7 @@ static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp,
+ int val = *valp;
+ if (val < 0) {
+ *negp = true;
+- *lvalp = (unsigned long)-val;
++ *lvalp = -(unsigned long)val;
+ } else {
+ *negp = false;
+ *lvalp = (unsigned long)val;
@@ -2128,6 +2198,44 @@ int proc_dointvec(struct ctl_table *table, int write,
NULL,NULL);
}
+ int val = *valp;
+ if (val < 0) {
+ *negp = true;
-+ *lvalp = (unsigned long)-val;
++ *lvalp = -(unsigned long)val;
+ } else {
+ *negp = false;
+ *lvalp = (unsigned long)val;
struct do_proc_dointvec_minmax_conv_param {
int *min;
-@@ -2203,6 +2309,32 @@ static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp,
- return 0;
- }
-
+@@ -2194,7 +2300,33 @@ static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp,
+ int val = *valp;
+ if (val < 0) {
+ *negp = true;
+- *lvalp = (unsigned long)-val;
++ *lvalp = -(unsigned long)val;
++ } else {
++ *negp = false;
++ *lvalp = (unsigned long)val;
++ }
++ }
++ return 0;
++}
++
+static int do_proc_dointvec_minmax_conv_secure(bool *negp, unsigned long *lvalp,
+ int *valp,
+ int write, void *data)
+ int val = *valp;
+ if (val < 0) {
+ *negp = true;
-+ *lvalp = (unsigned long)-val;
-+ } else {
-+ *negp = false;
-+ *lvalp = (unsigned long)val;
-+ }
-+ }
-+ return 0;
-+}
-+
- /**
- * proc_dointvec_minmax - read a vector of integers with min/max values
- * @table: the sysctl table
++ *lvalp = -(unsigned long)val;
+ } else {
+ *negp = false;
+ *lvalp = (unsigned long)val;
@@ -2230,6 +2362,17 @@ int proc_dointvec_minmax(struct ctl_table *table, int write,
do_proc_dointvec_minmax_conv, ¶m);
}
static void validate_coredump_safety(void)
{
#ifdef CONFIG_COREDUMP
+@@ -2429,7 +2572,7 @@ static int do_proc_dointvec_jiffies_conv(bool *negp, unsigned long *lvalp,
+ unsigned long lval;
+ if (val < 0) {
+ *negp = true;
+- lval = (unsigned long)-val;
++ lval = -(unsigned long)val;
+ } else {
+ *negp = false;
+ lval = (unsigned long)val;
+@@ -2452,7 +2595,7 @@ static int do_proc_dointvec_userhz_jiffies_conv(bool *negp, unsigned long *lvalp
+ unsigned long lval;
+ if (val < 0) {
+ *negp = true;
+- lval = (unsigned long)-val;
++ lval = -(unsigned long)val;
+ } else {
+ *negp = false;
+ lval = (unsigned long)val;
+@@ -2477,7 +2620,7 @@ static int do_proc_dointvec_ms_jiffies_conv(bool *negp, unsigned long *lvalp,
+ unsigned long lval;
+ if (val < 0) {
+ *negp = true;
+- lval = (unsigned long)-val;
++ lval = -(unsigned long)val;
+ } else {
+ *negp = false;
+ lval = (unsigned long)val;
@@ -2732,6 +2875,12 @@ int proc_dostring(struct ctl_table *table, int write,
return -ENOSYS;
}
ret = -EIO;
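
Every *lvalp = (unsigned long)-val rewritten above to -(unsigned long)val closes the same hole: for val == INT_MIN, negating the int first overflows signed arithmetic (undefined behaviour), while converting to unsigned long first makes the negation defined modular arithmetic that still yields the intended magnitude. Reduced to one hypothetical helper:

#include <limits.h>

static unsigned long magnitude(int val)
{
        if (val < 0)
                return -(unsigned long)val; /* defined even for INT_MIN */
        return (unsigned long)val;
}

/* magnitude(INT_MIN) == (unsigned long)INT_MAX + 1, with no UB on the way. */
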
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index 02bece4..f9b05af 100644
+index eb11011..43adc29 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
-@@ -2395,12 +2395,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
+@@ -2413,12 +2413,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
if (unlikely(ftrace_disabled))
return 0;
}
/*
-@@ -4789,8 +4794,10 @@ static int ftrace_process_locs(struct module *mod,
+@@ -4807,8 +4812,10 @@ static int ftrace_process_locs(struct module *mod,
if (!count)
return 0;
start_pg = ftrace_allocate_pages(count);
if (!start_pg)
-@@ -5659,7 +5666,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
+@@ -5675,7 +5682,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
if (t->ret_stack == NULL) {
atomic_set(&t->tracing_graph_pause, 0);
t->curr_ret_stack = -1;
/* Make sure the tasks see the -1 first: */
smp_wmb();
-@@ -5882,7 +5889,7 @@ static void
+@@ -5898,7 +5905,7 @@ static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
atomic_set(&t->tracing_graph_pause, 0);
/* do nothing if flag is already set */
if (!!(trace_flags & mask) == !!enabled)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
-index d261201..da10429 100644
+index 921691c..64e1da1 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
-@@ -1271,7 +1271,7 @@ extern const char *__stop___tracepoint_str[];
+@@ -1272,7 +1272,7 @@ extern const char *__stop___tracepoint_str[];
void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
static void __add_event_to_tracers(struct ftrace_event_call *call);
/* Add an additional event_call dynamically */
-diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
-index 7f2e97c..085a257 100644
---- a/kernel/trace/trace_events_filter.c
-+++ b/kernel/trace/trace_events_filter.c
-@@ -1056,6 +1056,9 @@ static void parse_init(struct filter_parse_state *ps,
-
- static char infix_next(struct filter_parse_state *ps)
- {
-+ if (!ps->infix.cnt)
-+ return 0;
-+
- ps->infix.cnt--;
-
- return ps->infix.string[ps->infix.tail++];
-@@ -1071,6 +1074,9 @@ static char infix_peek(struct filter_parse_state *ps)
-
- static void infix_advance(struct filter_parse_state *ps)
- {
-+ if (!ps->infix.cnt)
-+ return;
-+
- ps->infix.cnt--;
- ps->infix.tail++;
- }
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index a51e796..1f32ebd 100644
--- a/kernel/trace/trace_functions_graph.c
(val << avg->factor)) >> avg->weight :
(val << avg->factor);
diff --git a/lib/bitmap.c b/lib/bitmap.c
-index 64c0926f..9de1a1f 100644
+index 40162f8..b55c7c7 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -234,7 +234,7 @@ int __bitmap_subset(const unsigned long *bitmap1,
int c, old_c, totaldigits;
- const char __user __force *ubuf = (const char __user __force *)buf;
+ const char __user *ubuf = (const char __force_user *)buf;
- int exp_digit, in_range;
+ int at_start, in_range;
totaldigits = c = 0;
-@@ -600,7 +600,7 @@ int bitmap_parselist_user(const char __user *ubuf,
+@@ -601,7 +601,7 @@ int bitmap_parselist_user(const char __user *ubuf,
{
if (!access_ok(VERIFY_READ, ubuf, ulen))
return -EFAULT;
u32 high = divisor >> 32;
u64 quot;
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
-index ae4b65e..daf0230 100644
+index dace71f..13da37b 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
-@@ -979,7 +979,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
+@@ -982,7 +982,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
void dma_debug_add_bus(struct bus_type *bus)
{
if (dma_debug_disabled())
return;
-@@ -1161,7 +1161,7 @@ static void check_unmap(struct dma_debug_entry *ref)
+@@ -1164,7 +1164,7 @@ static void check_unmap(struct dma_debug_entry *ref)
static void check_for_stack(struct device *dev, void *addr)
{
set_page_address(page, (void *)vaddr);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
-index 271e443..c582971 100644
+index 8c4c1f9..7019e3d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
-@@ -2362,6 +2362,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
+@@ -2365,6 +2365,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
struct hstate *h = &default_hstate;
unsigned long tmp = h->max_huge_pages;
int ret;
-@@ -2369,9 +2370,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
+@@ -2372,9 +2373,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
if (!hugepages_supported())
return -ENOTSUPP;
if (ret)
goto out;
-@@ -2406,6 +2408,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
+@@ -2409,6 +2411,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
struct hstate *h = &default_hstate;
unsigned long tmp;
int ret;
if (!hugepages_supported())
return -ENOTSUPP;
-@@ -2415,9 +2418,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
+@@ -2418,9 +2421,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
if (write && hstate_is_gigantic(h))
return -EINVAL;
if (ret)
goto out;
-@@ -2907,6 +2911,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2910,6 +2914,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
i_mmap_unlock_write(mapping);
}
/*
* Hugetlb_cow() should be called with page lock of the original hugepage held.
* Called with hugetlb_instantiation_mutex held and pte_page locked so we
-@@ -3020,6 +3045,11 @@ retry_avoidcopy:
+@@ -3023,6 +3048,11 @@ retry_avoidcopy:
make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page);
hugepage_add_new_anon_rmap(new_page, vma, address);
/* Make the old page be freed below */
new_page = old_page;
}
-@@ -3181,6 +3211,10 @@ retry:
+@@ -3184,6 +3214,10 @@ retry:
&& (vma->vm_flags & VM_SHARED)));
set_huge_pte_at(mm, address, ptep, new_pte);
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
/* Optimization, do the COW without a second fault */
ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
-@@ -3248,6 +3282,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3251,6 +3285,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
struct address_space *mapping;
int need_wait_lock = 0;
address &= huge_page_mask(h);
ptep = huge_pte_offset(mm, address);
-@@ -3261,6 +3299,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3264,6 +3302,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
VM_FAULT_SET_HINDEX(hstate_index(h));
}
if (end == start)
return error;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
-index 501820c..9612bcf 100644
+index 9f48145..60a2ac1 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
freeit = 1;
if (PageHuge(page))
clear_page_hwpoison_huge_page(page);
-@@ -1616,11 +1616,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
+@@ -1617,11 +1617,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
if (PageHuge(page)) {
set_page_hwpoison_huge_page(hpage);
dequeue_hwpoisoned_huge_page(hpage);
}
}
return ret;
-@@ -1659,7 +1659,7 @@ static int __soft_offline_page(struct page *page, int flags)
+@@ -1660,7 +1660,7 @@ static int __soft_offline_page(struct page *page, int flags)
put_page(page);
pr_info("soft_offline: %#lx: invalidated\n", pfn);
SetPageHWPoison(page);
return 0;
}
-@@ -1708,7 +1708,7 @@ static int __soft_offline_page(struct page *page, int flags)
+@@ -1709,7 +1709,7 @@ static int __soft_offline_page(struct page *page, int flags)
if (!is_free_buddy_page(page))
pr_info("soft offline: %#lx: page leaked\n",
pfn);
}
} else {
pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
-@@ -1778,11 +1778,11 @@ int soft_offline_page(struct page *page, int flags)
+@@ -1779,11 +1779,11 @@ int soft_offline_page(struct page *page, int flags)
if (PageHuge(page)) {
set_page_hwpoison_huge_page(hpage);
if (!dequeue_hwpoisoned_huge_page(hpage))
}
unset_migratetype_isolate(page, MIGRATE_MOVABLE);
diff --git a/mm/memory.c b/mm/memory.c
-index 22e037e..347d230 100644
+index 2a9e098..37435af 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -414,6 +414,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
if (!pud)
return -ENOMEM;
do {
-@@ -2040,6 +2066,185 @@ static inline int wp_page_reuse(struct mm_struct *mm,
+@@ -2040,6 +2066,196 @@ static inline int wp_page_reuse(struct mm_struct *mm,
return VM_FAULT_WRITE;
}
+
+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+ entry = *pte;
-+ if (!pte_present(entry)) {
-+ if (!pte_none(entry)) {
-+ free_swap_and_cache(pte_to_swp_entry(entry));
-+ pte_clear_not_present_full(mm, address, pte, 0);
++ if (pte_none(entry))
++ ;
++ else if (!pte_present(entry)) {
++ swp_entry_t swapentry;
++
++ swapentry = pte_to_swp_entry(entry);
++ if (!non_swap_entry(swapentry))
++ dec_mm_counter_fast(mm, MM_SWAPENTS);
++ else if (is_migration_entry(swapentry)) {
++ if (PageAnon(migration_entry_to_page(swapentry)))
++ dec_mm_counter_fast(mm, MM_ANONPAGES);
++ else
++ dec_mm_counter_fast(mm, MM_FILEPAGES);
+ }
++ free_swap_and_cache(swapentry);
++ pte_clear_not_present_full(mm, address, pte, 0);
+ } else {
+ struct page *page;
+
/*
* Handle the case of a page which we actually need to copy to a new page.
*
-@@ -2093,6 +2298,12 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2093,6 +2309,12 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
if (old_page) {
if (!PageAnon(old_page)) {
dec_mm_counter_fast(mm, MM_FILEPAGES);
-@@ -2147,6 +2358,10 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2147,6 +2369,10 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
page_remove_rmap(old_page);
}
/* Free the old page.. */
new_page = old_page;
page_copied = 1;
-@@ -2578,6 +2793,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2578,6 +2804,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
swap_free(entry);
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
unlock_page(page);
if (page != swapcache) {
/*
-@@ -2601,6 +2821,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2601,6 +2832,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
unlock:
pte_unmap_unlock(page_table, ptl);
out:
-@@ -2620,40 +2845,6 @@ out_release:
+@@ -2620,40 +2856,6 @@ out_release:
}
/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
-@@ -2663,27 +2854,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2663,31 +2865,29 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned int flags)
{
struct mem_cgroup *memcg;
- pte_unmap(page_table);
-
+ /* File mapping without ->vm_ops ? */
+- if (vma->vm_flags & VM_SHARED)
++ if (vma->vm_flags & VM_SHARED) {
++ pte_unmap(page_table);
+ return VM_FAULT_SIGBUS;
++ }
+
- /* Check if we need to add a guard page to the stack */
- if (check_stack_guard_page(vma, address) < 0)
- return VM_FAULT_SIGSEGV;
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, address);
-@@ -2707,6 +2894,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2711,6 +2911,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (!pte_none(*page_table))
goto release;
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
mem_cgroup_commit_charge(page, memcg, false);
-@@ -2716,6 +2908,12 @@ setpte:
+@@ -2720,6 +2925,12 @@ setpte:
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
-@@ -2948,6 +3146,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2952,6 +3163,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return ret;
}
do_set_pte(vma, address, fault_page, pte, false, false);
unlock_page(fault_page);
unlock_out:
pte_unmap_unlock(pte, ptl);
-@@ -2999,7 +3202,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3003,7 +3219,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
}
goto uncharge_out;
}
mem_cgroup_commit_charge(new_page, memcg, false);
lru_cache_add_active_or_unevictable(new_page, vma);
pte_unmap_unlock(pte, ptl);
-@@ -3057,6 +3271,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3061,6 +3288,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return ret;
}
do_set_pte(vma, address, fault_page, pte, true, false);
pte_unmap_unlock(pte, ptl);
if (set_page_dirty(fault_page))
-@@ -3280,6 +3499,12 @@ static int handle_pte_fault(struct mm_struct *mm,
+@@ -3286,6 +3518,12 @@ static int handle_pte_fault(struct mm_struct *mm,
if (flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vma, address);
}
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
-@@ -3299,9 +3524,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3305,9 +3543,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
pmd_t *pmd;
pte_t *pte;
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
if (!pud)
-@@ -3436,6 +3693,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+@@ -3442,6 +3712,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
-@@ -3468,6 +3742,32 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+@@ -3474,6 +3761,32 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
#endif /* __PAGETABLE_PMD_FOLDED */
static int __follow_pte(struct mm_struct *mm, unsigned long address,
-@@ -3577,8 +3877,8 @@ out:
+@@ -3583,8 +3896,8 @@ out:
return ret;
}
{
resource_size_t phys_addr;
unsigned long prot = 0;
-@@ -3604,8 +3904,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
+@@ -3610,8 +3923,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
* Access another process' address space as given in mm. If non-NULL, use the
* given task for page fault accounting.
*/
{
struct vm_area_struct *vma;
void *old_buf = buf;
-@@ -3613,7 +3913,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -3619,7 +3932,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
while (len) {
void *maddr;
struct page *page = NULL;
-@@ -3674,8 +3974,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -3680,8 +3993,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
*
* The caller must hold a reference on @mm.
*/
{
return __access_remote_vm(NULL, mm, addr, buf, len, write);
}
-@@ -3685,11 +3985,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+@@ -3691,11 +4004,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
* Source/target buffer must be kernel space,
* Do not walk the page table directly, use get_user_pages
*/
unsigned long bg_thresh,
unsigned long dirty,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index ebffa0e..c61160a 100644
+index ebffa0e..a5ae7f7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -61,6 +61,7 @@
if (order && (gfp_flags & __GFP_COMP))
prep_compound_page(page, order);
-@@ -1649,6 +1689,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
+@@ -983,12 +1023,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
+ set_page_owner(page, order, gfp_flags);
+
+ /*
+- * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
++ * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
+ * allocate the page. The expectation is that the caller is taking
+ * steps that will free more memory. The caller should avoid the page
+ * being used for !PFMEMALLOC purposes.
+ */
+- page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
++ if (alloc_flags & ALLOC_NO_WATERMARKS)
++ set_page_pfmemalloc(page);
++ else
++ clear_page_pfmemalloc(page);
+
+ return 0;
+ }
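The set/clear/test helpers used in this hunk come from the upstream pfmemalloc rework that this patch tracks. A minimal sketch of them, assuming the mainline choice of encoding the flag in the otherwise-unused page->index field of page-allocator pages (naming beyond what the hunk shows is taken from that upstream fix, not from this patch):

static inline bool page_is_pfmemalloc(struct page *page)
{
	/* -1UL in page->index marks a pfmemalloc page */
	return page->index == -1UL;
}

static inline void set_page_pfmemalloc(struct page *page)
{
	page->index = -1UL;
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->index = 0;
}
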
+@@ -1649,6 +1692,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
zone->free_area[order].nr_free--;
rmv_page_order(page);
/* Set the pageblock if the isolated page is at least a pageblock */
if (order >= pageblock_order - 1) {
struct page *endpage = page + (1 << order) - 1;
-@@ -1660,7 +1702,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
+@@ -1660,7 +1705,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
}
}
return 1UL << order;
}
-@@ -1749,7 +1791,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
+@@ -1749,7 +1794,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
}
__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
!test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
-@@ -2068,7 +2110,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
+@@ -2068,7 +2113,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
do {
mod_zone_page_state(zone, NR_ALLOC_BATCH,
high_wmark_pages(zone) - low_wmark_pages(zone) -
clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
} while (zone++ != preferred_zone);
}
-@@ -5781,7 +5823,7 @@ static void __setup_per_zone_wmarks(void)
+@@ -5781,7 +5826,7 @@ static void __setup_per_zone_wmarks(void)
__mod_zone_page_state(zone, NR_ALLOC_BATCH,
high_wmark_pages(zone) - low_wmark_pages(zone) -
return -ENOMEM;
diff --git a/mm/slab.c b/mm/slab.c
-index 7eb38dd..a5172b1 100644
+index 7eb38dd..0451459 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -314,10 +314,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
slab_state = PARTIAL_NODE;
slab_early_init = 0;
+@@ -1602,7 +1606,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
+ }
+
+ /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
+- if (unlikely(page->pfmemalloc))
++ if (page_is_pfmemalloc(page))
+ pfmemalloc_active = true;
+
+ nr_pages = (1 << cachep->gfporder);
+@@ -1613,7 +1617,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
+ add_zone_page_state(page_zone(page),
+ NR_SLAB_UNRECLAIMABLE, nr_pages);
+ __SetPageSlab(page);
+- if (page->pfmemalloc)
++ if (page_is_pfmemalloc(page))
+ SetPageSlabPfmemalloc(page);
+
+ if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
@@ -2073,7 +2077,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
cachep = find_mergeable(size, align, flags, name, ctor);
#endif
}
-@@ -4210,13 +4237,69 @@ static const struct file_operations proc_slabstats_operations = {
+@@ -4210,13 +4237,80 @@ static const struct file_operations proc_slabstats_operations = {
static int __init slab_proc_init(void)
{
#ifdef CONFIG_DEBUG_SLAB_LEAK
+ if (!slab_is_available())
+ return false;
+
++ if (is_vmalloc_addr(ptr)
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++ && !object_starts_on_stack(ptr)
++#endif
++ ) {
++ struct vm_struct *vm = find_vm_area(ptr);
++ if (vm && (vm->flags & VM_USERCOPY))
++ return true;
++ return false;
++ }
++
+ if (!virt_addr_valid(ptr))
+ return false;
+
{
void *ret;
diff --git a/mm/slob.c b/mm/slob.c
-index 4765f65..fafa9d5 100644
+index 4765f65..f17284d 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
}
-@@ -491,34 +515,112 @@ void kfree(const void *block)
+@@ -491,34 +515,123 @@ void kfree(const void *block)
return;
kmemleak_free(block);
+ if (!slab_is_available())
+ return false;
+
++ if (is_vmalloc_addr(ptr)
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++ && !object_starts_on_stack(ptr)
++#endif
++ ) {
++ struct vm_struct *vm = find_vm_area(ptr);
++ if (vm && (vm->flags & VM_USERCOPY))
++ return true;
++ return false;
++ }
++
+ // PAX: TODO
+
+ return false;
}
EXPORT_SYMBOL(ksize);
-@@ -534,23 +636,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
+@@ -534,23 +647,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
if (b && c->ctor)
c->ctor(b);
-@@ -566,7 +678,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+@@ -566,7 +689,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
EXPORT_SYMBOL(kmem_cache_alloc);
#ifdef CONFIG_NUMA
{
return __do_kmalloc_node(size, gfp, node, _RET_IP_);
}
-@@ -579,12 +691,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
+@@ -579,12 +702,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif
}
static void kmem_rcu_free(struct rcu_head *head)
-@@ -592,22 +708,36 @@ static void kmem_rcu_free(struct rcu_head *head)
+@@ -592,22 +719,36 @@ static void kmem_rcu_free(struct rcu_head *head)
struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
EXPORT_SYMBOL(kmem_cache_free);
diff --git a/mm/slub.c b/mm/slub.c
-index 54c0876..31383a1 100644
+index 54c0876..61847f8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -198,7 +198,7 @@ struct track {
s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKTRACE
{
+@@ -1427,7 +1427,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+ inc_slabs_node(s, page_to_nid(page), page->objects);
+ page->slab_cache = s;
+ __SetPageSlab(page);
+- if (page->pfmemalloc)
++ if (page_is_pfmemalloc(page))
+ SetPageSlabPfmemalloc(page);
+
+ start = page_address(page);
@@ -2707,6 +2707,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
slab_free_hook(s, x);
{
struct kmem_cache *s;
void *ret;
-@@ -3388,6 +3399,59 @@ static size_t __ksize(const void *object)
+@@ -3388,6 +3399,70 @@ static size_t __ksize(const void *object)
return slab_ksize(page->slab_cache);
}
+ if (!slab_is_available())
+ return false;
+
++ if (is_vmalloc_addr(ptr)
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++ && !object_starts_on_stack(ptr)
++#endif
++ ) {
++ struct vm_struct *vm = find_vm_area(ptr);
++ if (vm && (vm->flags & VM_USERCOPY))
++ return true;
++ return false;
++ }
++
+ if (!virt_addr_valid(ptr))
+ return false;
+
size_t ksize(const void *object)
{
size_t size = __ksize(object);
-@@ -3408,6 +3472,7 @@ void kfree(const void *x)
+@@ -3408,6 +3483,7 @@ void kfree(const void *x)
if (unlikely(ZERO_OR_NULL_PTR(x)))
return;
page = virt_to_head_page(x);
if (unlikely(!PageSlab(page))) {
BUG_ON(!PageCompound(page));
-@@ -3724,7 +3789,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
+@@ -3724,7 +3800,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
/*
* Adjust the object sizes so that we clear
-@@ -3740,7 +3805,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
+@@ -3740,7 +3816,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
}
if (sysfs_slab_alias(s, name)) {
s = NULL;
}
}
-@@ -3857,7 +3922,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
+@@ -3857,7 +3933,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
}
#endif
static int count_inuse(struct page *page)
{
return page->inuse;
-@@ -4138,7 +4203,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
+@@ -4138,7 +4214,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
len += sprintf(buf + len, "%7ld ", l->count);
if (l->addr)
else
len += sprintf(buf + len, "<not-available>");
-@@ -4236,12 +4305,12 @@ static void __init resiliency_test(void)
+@@ -4236,12 +4316,12 @@ static void __init resiliency_test(void)
validate_slab_cache(kmalloc_caches[9]);
}
#else
enum slab_stat_type {
SL_ALL, /* All slabs */
SL_PARTIAL, /* Only partially allocated slabs */
-@@ -4478,13 +4547,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
+@@ -4478,13 +4558,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
{
if (!s->ctor)
return 0;
}
SLAB_ATTR_RO(aliases);
-@@ -4572,6 +4645,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
+@@ -4572,6 +4656,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
SLAB_ATTR_RO(cache_dma);
#endif
static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
-@@ -4627,7 +4716,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
+@@ -4627,7 +4727,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
* as well as cause other issues like converting a mergeable
* cache into an umergeable one.
*/
return -EINVAL;
s->flags &= ~SLAB_TRACE;
-@@ -4747,7 +4836,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
+@@ -4747,7 +4847,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
size_t length)
{
return -EINVAL;
s->flags &= ~SLAB_FAILSLAB;
-@@ -4914,6 +5003,12 @@ static struct attribute *slab_attrs[] = {
+@@ -4914,6 +5014,12 @@ static struct attribute *slab_attrs[] = {
#ifdef CONFIG_ZONE_DMA
&cache_dma_attr.attr,
#endif
#ifdef CONFIG_NUMA
&remote_node_defrag_ratio_attr.attr,
#endif
-@@ -5155,6 +5250,7 @@ static char *create_unique_id(struct kmem_cache *s)
+@@ -5155,6 +5261,7 @@ static char *create_unique_id(struct kmem_cache *s)
return name;
}
static int sysfs_slab_add(struct kmem_cache *s)
{
int err;
-@@ -5228,6 +5324,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
+@@ -5228,6 +5335,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
kobject_del(&s->kobj);
kobject_put(&s->kobj);
}
/*
* Need to buffer aliases during bootup until sysfs becomes
-@@ -5241,6 +5338,7 @@ struct saved_alias {
+@@ -5241,6 +5349,7 @@ struct saved_alias {
static struct saved_alias *alias_list;
static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
struct saved_alias *al;
-@@ -5263,6 +5361,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
+@@ -5263,6 +5372,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
alias_list = al;
return 0;
}
if (len > buflen)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
-index 2faaa29..9744185 100644
+index 2faaa29..37314a8 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -40,20 +40,65 @@ struct vfree_deferred {
area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
vm_flags, start, end, node, gfp_mask, caller);
if (!area)
-@@ -1838,10 +1979,9 @@ EXPORT_SYMBOL(vzalloc_node);
+@@ -1715,6 +1856,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
+ gfp_mask, prot, 0, node, caller);
+ }
+
++void *vmalloc_usercopy(unsigned long size)
++{
++ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
++ GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
++ VM_USERCOPY, NUMA_NO_NODE,
++ __builtin_return_address(0));
++}
++
+ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
+ {
+ return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
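For callers, vmalloc_usercopy() behaves like vmalloc() but tags the area with VM_USERCOPY, so the slab/slob/slub usercopy checks added earlier in this patch (which test vm->flags & VM_USERCOPY) will accept the buffer. A hedged usage sketch; example_copy_in is a hypothetical caller, not part of the patch:

static int example_copy_in(const void __user *ubuf, size_t len)
{
	/* hypothetical caller: buffer explicitly allowed for usercopy */
	void *buf = vmalloc_usercopy(len);

	if (!buf)
		return -ENOMEM;
	if (copy_from_user(buf, ubuf, len)) {
		vfree(buf);
		return -EFAULT;
	}
	/* ... operate on buf ... */
	vfree(buf);
	return 0;
}
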
+@@ -1838,10 +1987,9 @@ EXPORT_SYMBOL(vzalloc_node);
* For tight control over page level allocator and protection flags
* use __vmalloc() instead.
*/
NUMA_NO_NODE, __builtin_return_address(0));
}
-@@ -2148,6 +2288,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
+@@ -2148,6 +2296,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
{
struct vm_struct *area;
size = PAGE_ALIGN(size);
if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
-@@ -2630,7 +2772,11 @@ static int s_show(struct seq_file *m, void *p)
+@@ -2630,7 +2780,11 @@ static int s_show(struct seq_file *m, void *p)
v->addr, v->addr + v->size, v->size);
if (v->caller)
atomic_t batman_queue_left;
char num_ifaces;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
-index 56f9edb..0a13cd1 100644
+index e11a5cf..02864cd 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
-@@ -1241,7 +1241,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
+@@ -1253,7 +1253,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
uf.event_mask[1] = *((u32 *) f->event_mask + 1);
}
if (ip.proto == htons(ETH_P_IP)) {
if (timer_pending(&br->ip4_other_query.timer))
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
-index 4b5c236..0627070 100644
+index 4b5c236..f303683 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
-@@ -841,7 +841,7 @@ static struct rtnl_af_ops br_af_ops __read_mostly = {
+@@ -112,6 +112,8 @@ static inline size_t br_port_info_size(void)
+ + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */
+ + nla_total_size(1) /* IFLA_BRPORT_LEARNING */
+ + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */
++ + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */
++ + nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */
+ + 0;
+ }
+
+@@ -504,6 +506,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
+ [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
+ [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
+ [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
++ [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 },
++ [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
+ };
+
+ /* Change the state of the port and notify spanning tree */
+@@ -841,7 +845,7 @@ static struct rtnl_af_ops br_af_ops __read_mostly = {
.get_link_af_size = br_get_link_af_size,
};
.priv_size = sizeof(struct chnl_net),
.setup = ipcaif_net_setup,
diff --git a/net/can/af_can.c b/net/can/af_can.c
-index 689c818..6323851 100644
+index 62c635f..0b59618 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
-@@ -888,7 +888,7 @@ static const struct net_proto_family can_family_ops = {
+@@ -890,7 +890,7 @@ static const struct net_proto_family can_family_ops = {
};
/* notifier block for netdevice event */
};
diff --git a/net/can/bcm.c b/net/can/bcm.c
-index b523453..f96e639 100644
+index a1ba687..aafaec5 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
-@@ -1618,7 +1618,7 @@ static int __init bcm_module_init(void)
+@@ -1620,7 +1620,7 @@ static int __init bcm_module_init(void)
}
/* create /proc/net/can-bcm directory */
a0 = a[0];
a1 = a[1];
diff --git a/net/core/datagram.c b/net/core/datagram.c
-index b80fb91..d9f4ea5 100644
+index b80fb91..0364f4f 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -131,6 +131,35 @@ out_noerr:
goto out;
}
-+static int skb_set_peeked(struct sk_buff *skb)
++static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+
+ if (skb->peeked)
-+ return 0;
++ return skb;
+
+ /* We have to unshare an skb before modifying it. */
+ if (!skb_shared(skb))
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
-+ return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ skb->prev->next = nskb;
+ skb->next->prev = nskb;
+done:
+ skb->peeked = 1;
+
-+ return 0;
++ return skb;
+}
+
/**
int _off = *off;
last = (struct sk_buff *)queue;
-@@ -199,7 +228,11 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
+@@ -199,7 +228,12 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
_off -= skb->len;
continue;
}
- skb->peeked = 1;
+
-+ error = skb_set_peeked(skb);
-+ if (error)
++ skb = skb_set_peeked(skb);
++ error = PTR_ERR(skb);
++ if (IS_ERR(skb))
+ goto unlock_err;
+
atomic_inc(&skb->users);
} else
__skb_unlink(skb, queue);
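skb_set_peeked() now hands back the (possibly cloned) skb or an ERR_PTR() instead of an int, because unsharing can replace the skb the caller holds. Note the caller may compute PTR_ERR() before testing IS_ERR(): the value is only consumed on the error path. For reference, the <linux/err.h> idiom is roughly the following (simplified sketch; __must_check annotations omitted):

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;	/* encode -errno in the pointer value */
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline bool IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
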
-@@ -223,6 +256,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
+@@ -223,6 +257,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
return NULL;
no_packet:
*err = error;
return NULL;
-@@ -302,7 +337,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
+@@ -302,7 +338,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
}
kfree_skb(skb);
sk_mem_reclaim_partial(sk);
return err;
-@@ -622,7 +657,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
+@@ -622,7 +658,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
!skb->csum_complete_sw)
netdev_rx_csum_fault(skb->dev);
}
return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);
-@@ -642,11 +678,13 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb)
+@@ -642,11 +679,13 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb)
netdev_rx_csum_fault(skb->dev);
}
{
struct socket *sock;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index 41ec022..3cc0a1c 100644
+index 41ec022..89b1df7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
+@@ -340,7 +340,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
+
+ if (skb && frag_size) {
+ skb->head_frag = 1;
+- if (virt_to_head_page(data)->pfmemalloc)
++ if (page_is_pfmemalloc(virt_to_head_page(data)))
+ skb->pfmemalloc = 1;
+ }
+ return skb;
@@ -2139,7 +2139,7 @@ EXPORT_SYMBOL(__skb_checksum);
__wsum skb_checksum(const struct sk_buff *skb, int offset,
int len, __wsum csum)
.priv_size = sizeof(struct lowpan_dev_info),
.setup = lowpan_setup,
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
-index f46e4d1..30231f1 100644
+index f46e4d1..dcb7f86 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
+@@ -207,7 +207,7 @@ found:
+ } else {
+ fq->q.meat += skb->len;
+ }
+- add_frag_mem_limit(&fq->q, skb->truesize);
++ add_frag_mem_limit(fq->q.net, skb->truesize);
+
+ if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+ fq->q.meat == fq->q.len) {
+@@ -287,7 +287,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
+ clone->data_len = clone->len;
+ head->data_len -= clone->len;
+ head->len -= clone->len;
+- add_frag_mem_limit(&fq->q, clone->truesize);
++ add_frag_mem_limit(fq->q.net, clone->truesize);
+ }
+
+ WARN_ON(head == NULL);
+@@ -310,7 +310,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
+ }
+ fp = next;
+ }
+- sub_frag_mem_limit(&fq->q, sum_truesize);
++ sub_frag_mem_limit(fq->q.net, sum_truesize);
+
+ head->next = NULL;
+ head->dev = dev;
@@ -435,14 +435,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
return nh->nh_saddr;
}
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index 09b62e1..2871350 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -2457,7 +2457,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
+ key = l->key + 1;
+ iter->pos++;
+
+- if (pos-- <= 0)
++ if (--pos <= 0)
+ break;
+
+ l = NULL;
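The fib_trie change above is a classic off-by-one: the post-decrement tests the old value of pos, so the loop breaks one entry too late when resuming a route dump. A standalone demo of the difference (userspace C, purely illustrative):

#include <stdio.h>

int main(void)
{
	int i, p;

	for (i = 0, p = 2; ; i++)
		if (p-- <= 0)	/* tests the old value: 2, 1, 0 */
			break;
	printf("post-decrement stops at index %d\n", i);	/* 2 */

	for (i = 0, p = 2; ; i++)
		if (--p <= 0)	/* tests the new value: 1, 0 */
			break;
	printf("pre-decrement stops at index %d\n", i);	/* 1 */

	return 0;
}
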
+diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
+index 5e346a0..d0a7c03 100644
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -131,34 +131,22 @@ inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
+ unsigned int evicted = 0;
+ HLIST_HEAD(expired);
+
+-evict_again:
+ spin_lock(&hb->chain_lock);
+
+ hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
+ if (!inet_fragq_should_evict(fq))
+ continue;
+
+- if (!del_timer(&fq->timer)) {
+- /* q expiring right now thus increment its refcount so
+- * it won't be freed under us and wait until the timer
+- * has finished executing then destroy it
+- */
+- atomic_inc(&fq->refcnt);
+- spin_unlock(&hb->chain_lock);
+- del_timer_sync(&fq->timer);
+- inet_frag_put(fq, f);
+- goto evict_again;
+- }
++ if (!del_timer(&fq->timer))
++ continue;
+
+- fq->flags |= INET_FRAG_EVICTED;
+- hlist_del(&fq->list);
+- hlist_add_head(&fq->list, &expired);
++ hlist_add_head(&fq->list_evictor, &expired);
+ ++evicted;
+ }
+
+ spin_unlock(&hb->chain_lock);
+
+- hlist_for_each_entry_safe(fq, n, &expired, list)
++ hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
+ f->frag_expire((unsigned long) fq);
+
+ return evicted;
+@@ -240,19 +228,21 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
+ int i;
+
+ nf->low_thresh = 0;
+- local_bh_disable();
+
+ evict_again:
++ local_bh_disable();
+ seq = read_seqbegin(&f->rnd_seqlock);
+
+ for (i = 0; i < INETFRAGS_HASHSZ ; i++)
+ inet_evict_bucket(f, &f->hash[i]);
+
+- if (read_seqretry(&f->rnd_seqlock, seq))
++ local_bh_enable();
++ cond_resched();
++
++ if (read_seqretry(&f->rnd_seqlock, seq) ||
++ percpu_counter_sum(&nf->mem))
+ goto evict_again;
+
+- local_bh_enable();
+-
+ percpu_counter_destroy(&nf->mem);
+ }
+ EXPORT_SYMBOL(inet_frags_exit_net);
+@@ -284,8 +274,8 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
+ struct inet_frag_bucket *hb;
+
+ hb = get_frag_bucket_locked(fq, f);
+- if (!(fq->flags & INET_FRAG_EVICTED))
+- hlist_del(&fq->list);
++ hlist_del(&fq->list);
++ fq->flags |= INET_FRAG_COMPLETE;
+ spin_unlock(&hb->chain_lock);
+ }
+
+@@ -297,7 +287,6 @@ void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
+ if (!(fq->flags & INET_FRAG_COMPLETE)) {
+ fq_unlink(fq, f);
+ atomic_dec(&fq->refcnt);
+- fq->flags |= INET_FRAG_COMPLETE;
+ }
+ }
+ EXPORT_SYMBOL(inet_frag_kill);
+@@ -330,11 +319,12 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
+ fp = xp;
+ }
+ sum = sum_truesize + f->qsize;
+- sub_frag_mem_limit(q, sum);
+
+ if (f->destructor)
+ f->destructor(q);
+ kmem_cache_free(f->frags_cachep, q);
++
++ sub_frag_mem_limit(nf, sum);
+ }
+ EXPORT_SYMBOL(inet_frag_destroy);
+
+@@ -390,7 +380,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
+
+ q->net = nf;
+ f->constructor(q, arg);
+- add_frag_mem_limit(q, f->qsize);
++ add_frag_mem_limit(nf, f->qsize);
+
+ setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
+ spin_lock_init(&q->lock);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index c6fb80b..8705495 100644
--- a/net/ipv4/inet_hashtables.c
p->rate_tokens = 0;
/* 60*HZ is arbitrary, but chosen enough high so that the first
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
-index cc1da6d..64b1534 100644
+index cc1da6d..593fc73 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
+@@ -192,7 +192,7 @@ static void ip_expire(unsigned long arg)
+ ipq_kill(qp);
+ IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+
+- if (!(qp->q.flags & INET_FRAG_EVICTED)) {
++ if (!inet_frag_evicting(&qp->q)) {
+ struct sk_buff *head = qp->q.fragments;
+ const struct iphdr *iph;
+ int err;
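inet_frag_evicting() replaces the INET_FRAG_EVICTED flag removed by the inet_fragment.c hunks above; in the upstream rework it is roughly the following (sketch: a queue counts as evicting iff it sits on a bucket's evictor list):

static inline bool inet_frag_evicting(struct inet_frag_queue *q)
{
	return !hlist_unhashed(&q->list_evictor);
}
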
@@ -268,7 +268,7 @@ static int ip_frag_too_far(struct ipq *qp)
return 0;
qp->rid = end;
rc = qp->q.fragments && (end - start) > max;
+@@ -301,7 +301,7 @@ static int ip_frag_reinit(struct ipq *qp)
+ kfree_skb(fp);
+ fp = xp;
+ } while (fp);
+- sub_frag_mem_limit(&qp->q, sum_truesize);
++ sub_frag_mem_limit(qp->q.net, sum_truesize);
+
+ qp->q.flags = 0;
+ qp->q.len = 0;
+@@ -446,7 +446,7 @@ found:
+ qp->q.fragments = next;
+
+ qp->q.meat -= free_it->len;
+- sub_frag_mem_limit(&qp->q, free_it->truesize);
++ sub_frag_mem_limit(qp->q.net, free_it->truesize);
+ kfree_skb(free_it);
+ }
+ }
+@@ -470,7 +470,7 @@ found:
+ qp->q.stamp = skb->tstamp;
+ qp->q.meat += skb->len;
+ qp->ecn |= ecn;
+- add_frag_mem_limit(&qp->q, skb->truesize);
++ add_frag_mem_limit(qp->q.net, skb->truesize);
+ if (offset == 0)
+ qp->q.flags |= INET_FRAG_FIRST_IN;
+
+@@ -573,7 +573,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
+ head->len -= clone->len;
+ clone->csum = 0;
+ clone->ip_summed = head->ip_summed;
+- add_frag_mem_limit(&qp->q, clone->truesize);
++ add_frag_mem_limit(qp->q.net, clone->truesize);
+ }
+
+ skb_push(head, head->data - skb_network_header(head));
+@@ -601,7 +601,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
+ }
+ fp = next;
+ }
+- sub_frag_mem_limit(&qp->q, sum_truesize);
++ sub_frag_mem_limit(qp->q.net, sum_truesize);
+
+ head->next = NULL;
+ head->dev = dev;
@@ -750,12 +750,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
static int __net_init ip4_frags_ns_ctl_register(struct net *net)
if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
return 1;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
-index fc1c658..42a8d34 100644
+index fc1c658..4de4e33 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -89,6 +89,10 @@ int sysctl_tcp_tw_reuse __read_mostly;
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
__be32 daddr, __be32 saddr, const struct tcphdr *th);
+@@ -1348,7 +1352,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
+ req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
+ if (req) {
+ nsk = tcp_check_req(sk, skb, req, false);
+- if (!nsk)
++ if (!nsk || nsk == sk)
+ reqsk_put(req);
+ return nsk;
+ }
@@ -1427,6 +1431,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
msg.msg_controllen = len;
msg.msg_flags = flags;
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 96f153c..82fcad9 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -1650,6 +1650,7 @@ int ndisc_rcv(struct sk_buff *skb)
+ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
+ {
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
++ struct netdev_notifier_change_info *change_info;
+ struct net *net = dev_net(dev);
+ struct inet6_dev *idev;
+
+@@ -1664,6 +1665,11 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
+ ndisc_send_unsol_na(dev);
+ in6_dev_put(idev);
+ break;
++ case NETDEV_CHANGE:
++ change_info = ptr;
++ if (change_info->flags_changed & IFF_NOARP)
++ neigh_changeaddr(&nd_tbl, dev);
++ break;
+ case NETDEV_DOWN:
+ neigh_ifdown(&nd_tbl, dev);
+ fib6_run_gc(0, net, false);
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 62f5b0d..331fdb1 100644
--- a/net/ipv6/netfilter/ip6_tables.c
case IP6T_SO_GET_ENTRIES:
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
-index 6f187c8..34b367f 100644
+index 6f187c8..55e564f 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
err_alloc:
return -ENOMEM;
}
+@@ -348,7 +346,7 @@ found:
+ fq->ecn |= ecn;
+ if (payload_len > fq->q.max_size)
+ fq->q.max_size = payload_len;
+- add_frag_mem_limit(&fq->q, skb->truesize);
++ add_frag_mem_limit(fq->q.net, skb->truesize);
+
+ /* The first fragment.
+ * nhoffset is obtained from the first fragment, of course.
+@@ -430,7 +428,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
+ clone->ip_summed = head->ip_summed;
+
+ NFCT_FRAG6_CB(clone)->orig = NULL;
+- add_frag_mem_limit(&fq->q, clone->truesize);
++ add_frag_mem_limit(fq->q.net, clone->truesize);
+ }
+
+ /* We have to remove fragment header from datagram and to relocate
+@@ -454,7 +452,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
+ head->csum = csum_add(head->csum, fp->csum);
+ head->truesize += fp->truesize;
+ }
+- sub_frag_mem_limit(&fq->q, head->truesize);
++ sub_frag_mem_limit(fq->q.net, head->truesize);
+
+ head->ignore_df = 1;
+ head->next = NULL;
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 263a516..692f738 100644
--- a/net/ipv6/ping.c
return 0;
default:
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
-index 8ffa2c8..5968612 100644
+index 8ffa2c8..0db5dad 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
+@@ -144,7 +144,7 @@ void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
+
+ IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+
+- if (fq->q.flags & INET_FRAG_EVICTED)
++ if (inet_frag_evicting(&fq->q))
+ goto out_rcu_unlock;
+
+ IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
+@@ -330,7 +330,7 @@ found:
+ fq->q.stamp = skb->tstamp;
+ fq->q.meat += skb->len;
+ fq->ecn |= ecn;
+- add_frag_mem_limit(&fq->q, skb->truesize);
++ add_frag_mem_limit(fq->q.net, skb->truesize);
+
+ /* The first fragment.
+ * nhoffset is obtained from the first fragment, of course.
+@@ -443,7 +443,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
+ head->len -= clone->len;
+ clone->csum = 0;
+ clone->ip_summed = head->ip_summed;
+- add_frag_mem_limit(&fq->q, clone->truesize);
++ add_frag_mem_limit(fq->q.net, clone->truesize);
+ }
+
+ /* We have to remove fragment header from datagram and to relocate
+@@ -481,7 +481,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
+ }
+ fp = next;
+ }
+- sub_frag_mem_limit(&fq->q, sum_truesize);
++ sub_frag_mem_limit(fq->q.net, sum_truesize);
+
+ head->next = NULL;
+ head->dev = dev;
@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
struct ctl_table *ipv6_icmp_table;
int err;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
-index 3adffb3..a67e4d1 100644
+index 3adffb3..fe3cc78 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
+@@ -946,7 +950,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
+ &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
+ if (req) {
+ nsk = tcp_check_req(sk, skb, req, false);
+- if (!nsk)
++ if (!nsk || nsk == sk)
+ reqsk_put(req);
+ return nsk;
+ }
@@ -1283,6 +1287,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
kfree_skb(skb);
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
-index f337a90..2a9a9db 100644
+index f337a90..ba0d2a0 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
-@@ -222,11 +222,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+@@ -185,7 +185,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+ return;
+
+ case IPPROTO_ICMPV6:
+- if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
++ if (!onlyproto && (nh + offset + 2 < skb->data ||
++ pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
+ u8 *icmp;
+
+ nh = skb_network_header(skb);
+@@ -199,7 +200,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+ #if IS_ENABLED(CONFIG_IPV6_MIP6)
+ case IPPROTO_MH:
+ offset += ipv6_optlen(exthdr);
+- if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
++ if (!onlyproto && (nh + offset + 3 < skb->data ||
++ pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
+ struct ip6_mh *mh;
+
+ nh = skb_network_header(skb);
+@@ -222,11 +224,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
}
}
return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
}
-@@ -338,19 +338,19 @@ static struct ctl_table xfrm6_policy_table[] = {
+@@ -338,19 +340,19 @@ static struct ctl_table xfrm6_policy_table[] = {
static int __net_init xfrm6_net_init(struct net *net)
{
if (!hdr)
goto err_reg;
-@@ -358,8 +358,7 @@ static int __net_init xfrm6_net_init(struct net *net)
+@@ -358,8 +360,7 @@ static int __net_init xfrm6_net_init(struct net *net)
return 0;
err_reg:
goto out;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
-index ff347a0..6ea4923 100644
+index f06d422..de37d95 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -582,7 +582,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
local->_oper_chandef = *chandef;
ieee80211_hw_config(local, 0);
}
-@@ -3428,7 +3428,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
+@@ -3429,7 +3429,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
else
local->probe_req_reg--;
break;
ieee80211_queue_work(&local->hw, &local->reconfig_filter);
-@@ -3563,8 +3563,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
+@@ -3564,8 +3564,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
if (chanctx_conf) {
*chandef = sdata->vif.bss_conf.chandef;
ret = 0;
}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
-index df3051d..359d2f4 100644
+index e86daed..9cbf3f4 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -175,7 +175,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
}
#endif
+diff --git a/net/rds/info.c b/net/rds/info.c
+index 9a6b4f6..140a44a 100644
+--- a/net/rds/info.c
++++ b/net/rds/info.c
+@@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
+
+ /* check for all kinds of wrapping and the like */
+ start = (unsigned long)optval;
+- if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) {
++ if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
+ ret = -EINVAL;
+ goto out;
+ }
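The rds_info_getsockopt() check is rewritten because the old test was type-promoted to unsigned long (PAGE_SIZE is unsigned long), where len + PAGE_SIZE - 1 can never wrap for a non-negative int len, so the comparison could not fire; the new form bounds len directly so the later page-count computation cannot overflow. A minimal standalone illustration of the pattern (illustrative constants, not the patch's code):

#include <limits.h>

#define PAGE_SIZE 4096UL	/* illustrative value */

/* "len + PAGE_SIZE - 1 < len" is computed in unsigned long and can
 * never be true for a non-negative int len; bound len directly
 * instead, rejecting any len whose page-rounded size exceeds INT_MAX. */
static int len_would_overflow(int len)
{
	return len < 0 || len > (int)(INT_MAX - PAGE_SIZE + 1);
}
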
diff --git a/net/rds/iw.h b/net/rds/iw.h
index cbe6674..dc9eb89 100644
--- a/net/rds/iw.h
if (bprm->cap_effective)
return 1;
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
-index 8ee997d..24c174b 100644
+index fc56d4d..ac97140 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -116,8 +116,8 @@ int ima_init_template(void);
result = ima_alloc_init_template(NULL, file, filename,
NULL, 0, &entry);
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
-index 461215e..9bb12ee 100644
+index 816d175..20079ae 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -28,12 +28,12 @@
lock = &avc_cache.slots_lock[hvalue];
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
-index 212070e..87aa172 100644
+index 7f8d7f1..3436ea3 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
-@@ -3288,7 +3288,8 @@ static int file_map_prot_check(struct file *file, unsigned long prot, int shared
- int rc = 0;
-
- if (default_noexec &&
-- (prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) {
-+ (prot & PROT_EXEC) && (!file || IS_PRIVATE(file_inode(file)) ||
-+ (!shared && (prot & PROT_WRITE)))) {
- /*
- * We are making executable an anonymous mapping or a
- * private file mapping that will also be writable.
-@@ -5841,7 +5842,8 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
+@@ -5842,7 +5842,8 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
#endif
.name = "selinux",
.binder_set_context_mgr = selinux_binder_set_context_mgr,
-@@ -6186,6 +6188,9 @@ static void selinux_nf_ip_exit(void)
+@@ -6187,6 +6188,9 @@ static void selinux_nf_ip_exit(void)
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
static int selinux_disabled;
int selinux_disable(void)
{
if (ss_initialized) {
-@@ -6203,7 +6208,9 @@ int selinux_disable(void)
+@@ -6204,7 +6208,9 @@ int selinux_disable(void)
selinux_disabled = 1;
selinux_enabled = 0;
if (err < 0)
return err;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
-index d126c03..5d84d1cf 100644
+index 75888dd..c940854 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3004,11 +3004,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
};
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
-index e061355..baed278 100644
+index bf20593..dec8a14 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -573,7 +573,7 @@ static void update_pcm_pointers(struct amdtp_stream *s,
s->pcm_period_pointer += frames;
if (s->pcm_period_pointer >= pcm->runtime->period_size) {
-@@ -1013,7 +1013,7 @@ EXPORT_SYMBOL(amdtp_stream_pcm_pointer);
+@@ -1014,7 +1014,7 @@ EXPORT_SYMBOL(amdtp_stream_pcm_pointer);
*/
void amdtp_stream_update(struct amdtp_stream *s)
{
}
EXPORT_SYMBOL(amdtp_stream_update);
diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
-index 8a03a91..aaacc0c 100644
+index 25c9055..e861b6a 100644
--- a/sound/firewire/amdtp.h
+++ b/sound/firewire/amdtp.h
-@@ -231,7 +231,7 @@ static inline bool amdtp_stream_pcm_running(struct amdtp_stream *s)
+@@ -233,7 +233,7 @@ static inline bool amdtp_stream_pcm_running(struct amdtp_stream *s)
static inline void amdtp_stream_pcm_trigger(struct amdtp_stream *s,
struct snd_pcm_substream *pcm)
{
}
/**
-@@ -249,7 +249,7 @@ static inline void amdtp_stream_midi_trigger(struct amdtp_stream *s,
+@@ -251,7 +251,7 @@ static inline void amdtp_stream_midi_trigger(struct amdtp_stream *s,
struct snd_rawmidi_substream *midi)
{
if (port < s->midi_ports)
list_add(&s->list, &cs4297a_devs);
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
-index 5645481..63e53a2 100644
+index 36e8f12..9571f49 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -1946,7 +1946,7 @@ static int get_kctl_0dB_offset(struct hda_codec *codec,
+}
diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
new file mode 100644
-index 0000000..c5de280
+index 0000000..b884a56
--- /dev/null
+++ b/tools/gcc/constify_plugin.c
-@@ -0,0 +1,568 @@
+@@ -0,0 +1,564 @@
+/*
+ * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
+ * Copyright 2011-2015 by PaX Team <pageexec@freemail.hu>
+
+static void check_global_variables(void *event_data, void *data)
+{
-+#if BUILDING_GCC_VERSION >= 4009
-+ varpool_node *node;
-+#else
-+ struct varpool_node *node;
-+#endif
++ varpool_node_ptr node;
+
+ FOR_EACH_VARIABLE(node) {
+ tree var = NODE_DECL(node);
+}
diff --git a/tools/gcc/gcc-common.h b/tools/gcc/gcc-common.h
new file mode 100644
-index 0000000..70924d4
+index 0000000..9cf3947
--- /dev/null
+++ b/tools/gcc/gcc-common.h
-@@ -0,0 +1,787 @@
+@@ -0,0 +1,789 @@
+#ifndef GCC_COMMON_H_INCLUDED
+#define GCC_COMMON_H_INCLUDED
+
+#define O_BINARY 0
+#endif
+
++typedef struct varpool_node *varpool_node_ptr;
++
+static inline bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
+{
+ tree fndecl;
+}
diff --git a/tools/gcc/randomize_layout_plugin.c b/tools/gcc/randomize_layout_plugin.c
new file mode 100644
-index 0000000..40dcfa9
+index 0000000..06a039c
--- /dev/null
+++ b/tools/gcc/randomize_layout_plugin.c
-@@ -0,0 +1,922 @@
+@@ -0,0 +1,930 @@
+/*
+ * Copyright 2014,2015 by Open Source Security, Inc., Brad Spengler <spender@grsecurity.net>
+ * and PaX Team <pageexec@freemail.hu>
+ unsigned long i;
+ tree list;
+ tree variant;
++ tree main_variant;
+ expanded_location xloc;
+
+ if (TYPE_FIELDS(type) == NULL_TREE)
+ TREE_CHAIN(newtree[i]) = newtree[i+1];
+ TREE_CHAIN(newtree[num_fields - 1]) = NULL_TREE;
+
-+ for (variant = TYPE_MAIN_VARIANT(type); variant; variant = TYPE_NEXT_VARIANT(variant)) {
++ main_variant = TYPE_MAIN_VARIANT(type);
++ for (variant = main_variant; variant; variant = TYPE_NEXT_VARIANT(variant)) {
+ TYPE_FIELDS(variant) = list;
+ TYPE_ATTRIBUTES(variant) = copy_list(TYPE_ATTRIBUTES(variant));
+ TYPE_ATTRIBUTES(variant) = tree_cons(get_identifier("randomize_performed"), NULL_TREE, TYPE_ATTRIBUTES(variant));
-+ // force a re-layout
-+ TYPE_SIZE(variant) = NULL_TREE;
-+ layout_type(variant);
+ }
+
++ /*
++ * force a re-layout of the main variant
++ * the TYPE_SIZE for all variants will be recomputed
++ * by finalize_type_size()
++ */
++ TYPE_SIZE(main_variant) = NULL_TREE;
++ layout_type(main_variant);
++ gcc_assert(TYPE_SIZE(main_variant) != NULL_TREE);
++
+ return 1;
+}
+
+}
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
new file mode 100644
-index 0000000..2e9138d
+index 0000000..ed2d97b
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
-@@ -0,0 +1,28379 @@
+@@ -0,0 +1,28385 @@
+intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL nohasharray
+iwl_set_tx_power_1 iwl_set_tx_power 0 1 &intel_fake_agp_alloc_by_type_1
+ocfs2_get_refcount_tree_3 ocfs2_get_refcount_tree 0 3 NULL
+generic_write_sync_8358 generic_write_sync 0 8358 NULL
+qlcnic_open_8359 qlcnic_open 0 8359 NULL
+isku_sysfs_write_talk_8360 isku_sysfs_write_talk 6-0-5 8360 NULL
++vmalloc_usercopy_8361 vmalloc_usercopy 1 8361 NULL
+ath6kl_lrssi_roam_write_8362 ath6kl_lrssi_roam_write 3-0 8362 NULL nohasharray
+batadv_sysfs_add_vlan_8362 batadv_sysfs_add_vlan 0 8362 &ath6kl_lrssi_roam_write_8362
+alloc_sleep_millisecs_store_8364 alloc_sleep_millisecs_store 0-4 8364 NULL
+sd_major_13294 sd_major 0-1 13294 NULL
+cx23885_start_audio_dma_13295 cx23885_start_audio_dma 0 13295 NULL
+map_offset_show_13301 map_offset_show 0 13301 NULL
-+set_ptk_13304 set_ptk 0 13304 NULL
++set_ptk_13304 set_ptk 0 13304 NULL nohasharray
++__jbd2_update_log_tail_13304 __jbd2_update_log_tail 0 13304 &set_ptk_13304
+store_13310 store 0 13310 NULL
+saa7134_s_ctrl_13313 saa7134_s_ctrl 0 13313 NULL
+read_file_phy_err_13318 read_file_phy_err 3-0 13318 NULL nohasharray
+__pci_request_selected_regions_30058 __pci_request_selected_regions 0 30058 NULL
+SyS_write_30059 SyS_write 3 30059 NULL
+general_stats_read_30067 general_stats_read 0 30067 NULL
++jbd2_log_do_checkpoint_30069 jbd2_log_do_checkpoint 0 30069 NULL
+capture_pcm_prepare_30072 capture_pcm_prepare 0 30072 NULL
+snd_seq_timer_open_30077 snd_seq_timer_open 0 30077 NULL
+m5602_write_sensor_30078 m5602_write_sensor 0 30078 NULL nohasharray
+start_transfer_30146 start_transfer 0 30146 NULL
+ext4_write_inline_data_end_30151 ext4_write_inline_data_end 0-4 30151 NULL
+hvfb_check_var_30153 hvfb_check_var 0 30153 NULL
++jbd2_write_superblock_30154 jbd2_write_superblock 0 30154 NULL
+elfcorehdr_read_30159 elfcorehdr_read 2 30159 NULL
+netlink_realloc_groups_30162 netlink_realloc_groups 0 30162 NULL nohasharray
+start_stop_khugepaged_30162 start_stop_khugepaged 0 30162 &netlink_realloc_groups_30162
+generic_file_fsync_62999 generic_file_fsync 0 62999 NULL
+asus_wmi_get_devstate_63000 asus_wmi_get_devstate 0 63000 NULL
+hash_max_show_63001 hash_max_show 0 63001 NULL
++jbd2_journal_update_sb_log_tail_63005 jbd2_journal_update_sb_log_tail 0 63005 NULL
+get_skb_63008 get_skb 2 63008 NULL
+__ext3_journal_stop_63017 __ext3_journal_stop 0 63017 NULL
+__sctp_connect_63020 __sctp_connect 0 63020 NULL
+qlcnic_alloc_sw_resources_64057 qlcnic_alloc_sw_resources 0 64057 NULL
+tps65912_reg_disable_64058 tps65912_reg_disable 0 64058 NULL
+alloc_codec_buffers_64063 alloc_codec_buffers 0 64063 NULL
-+start_hw_64065 start_hw 0 64065 NULL
++start_hw_64065 start_hw 0 64065 NULL nohasharray
++jbd2_cleanup_journal_tail_64065 jbd2_cleanup_journal_tail 0 64065 &start_hw_64065
+nfs4_lookup_root_64067 nfs4_lookup_root 0 64067 NULL
+__sock_create_64069 __sock_create 0 64069 NULL
+pcifront_bus_write_64071 pcifront_bus_write 0 64071 NULL