revert-x86-build-move-_etext-to-actual-end-of-.text.patch
revert-binder-fix-handling-of-misaligned-binder-object.patch
binder-fix-race-between-munmap-and-direct-reclaim.patch
+x86-ftrace-do-not-call-function-graph-from-dynamic-t.patch
+x86-ftrace-set-trampoline-pages-as-executable.patch
+x86-kprobes-set-instruction-page-as-executable.patch
--- /dev/null
+From d27b93ac7357a68f65c2d384322f01cd1dc2da53 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Sat, 8 Dec 2018 12:58:51 -0500
+Subject: x86/ftrace: Do not call function graph from dynamic trampolines
+
+[ Upstream commit d2a68c4effd821f0871d20368f76b609349c8a3b ]
+
+Since commit 79922b8009c07 ("ftrace: Optimize function graph to be
+called directly"), dynamic trampolines should not be calling the
+function graph tracer at the end. If they do, it could cause the function
+graph tracer to trace functions that it filtered out.
+
+Right now it does not cause a problem because there's a test to check if
+the function graph tracer is attached to the same function as the
+function tracer, which for now is true. But the function graph tracer is
+undergoing changes that can make this no longer true, which will cause
+the function graph tracer to trace other functions.
+
+ For example:
+
+ # cd /sys/kernel/tracing/
+ # echo do_IRQ > set_ftrace_filter
+ # mkdir instances/foo
+ # echo ip_rcv > instances/foo/set_ftrace_filter
+ # echo function_graph > current_tracer
+ # echo function > instances/foo/current_tracer
+
+Would cause the function graph tracer to trace both do_IRQ and ip_rcv,
+if the current tests change.
+
+As the current tests prevent this from being a problem, this code does
+not need to be backported. But it does make the code cleaner.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: x86@kernel.org
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/ftrace.c | 41 ++++++++++++++++++++-----------------
+ arch/x86/kernel/ftrace_64.S | 8 ++++----
+ 2 files changed, 26 insertions(+), 23 deletions(-)
+
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index 4d2a401c178b..38c798b1dbd2 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -752,18 +752,20 @@ union ftrace_op_code_union {
+ } __attribute__((packed));
+ };
+
++#define RET_SIZE 1
++
+ static unsigned long
+ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+ {
+- unsigned const char *jmp;
+ unsigned long start_offset;
+ unsigned long end_offset;
+ unsigned long op_offset;
+ unsigned long offset;
+ unsigned long size;
+- unsigned long ip;
++ unsigned long retq;
+ unsigned long *ptr;
+ void *trampoline;
++ void *ip;
+ /* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
+ unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
+ union ftrace_op_code_union op_ptr;
+@@ -783,27 +785,27 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+
+ /*
+ * Allocate enough size to store the ftrace_caller code,
+- * the jmp to ftrace_epilogue, as well as the address of
+- * the ftrace_ops this trampoline is used for.
++ * the retq, as well as the address of the ftrace_ops this
++ * trampoline is used for.
+ */
+- trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *));
++ trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
+ if (!trampoline)
+ return 0;
+
+- *tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *);
++ *tramp_size = size + RET_SIZE + sizeof(void *);
+
+ /* Copy ftrace_caller onto the trampoline memory */
+ ret = probe_kernel_read(trampoline, (void *)start_offset, size);
+- if (WARN_ON(ret < 0)) {
+- tramp_free(trampoline, *tramp_size);
+- return 0;
+- }
++ if (WARN_ON(ret < 0))
++ goto fail;
+
+- ip = (unsigned long)trampoline + size;
++ ip = trampoline + size;
+
+- /* The trampoline ends with a jmp to ftrace_epilogue */
+- jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_epilogue);
+- memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE);
++ /* The trampoline ends with ret(q) */
++ retq = (unsigned long)ftrace_stub;
++ ret = probe_kernel_read(ip, (void *)retq, RET_SIZE);
++ if (WARN_ON(ret < 0))
++ goto fail;
+
+ /*
+ * The address of the ftrace_ops that is used for this trampoline
+@@ -813,17 +815,15 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+ * the global function_trace_op variable.
+ */
+
+- ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE);
++ ptr = (unsigned long *)(trampoline + size + RET_SIZE);
+ *ptr = (unsigned long)ops;
+
+ op_offset -= start_offset;
+ memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);
+
+ /* Are we pointing to the reference? */
+- if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) {
+- tramp_free(trampoline, *tramp_size);
+- return 0;
+- }
++ if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
++ goto fail;
+
+ /* Load the contents of ptr into the callback parameter */
+ offset = (unsigned long)ptr;
+@@ -838,6 +838,9 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+ ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
+
+ return (unsigned long)trampoline;
++fail:
++ tramp_free(trampoline, *tramp_size);
++ return 0;
+ }
+
+ static unsigned long calc_trampoline_call_offset(bool save_regs)
+diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
+index 91b2cff4b79a..75f2b36b41a6 100644
+--- a/arch/x86/kernel/ftrace_64.S
++++ b/arch/x86/kernel/ftrace_64.S
+@@ -171,9 +171,6 @@ GLOBAL(ftrace_call)
+ restore_mcount_regs
+
+ /*
+- * The copied trampoline must call ftrace_epilogue as it
+- * still may need to call the function graph tracer.
+- *
+ * The code up to this label is copied into trampolines so
+ * think twice before adding any new code or changing the
+ * layout here.
+@@ -185,7 +182,10 @@ GLOBAL(ftrace_graph_call)
+ jmp ftrace_stub
+ #endif
+
+-/* This is weak to keep gas from relaxing the jumps */
++/*
++ * This is weak to keep gas from relaxing the jumps.
++ * It is also used to copy the retq for trampolines.
++ */
+ WEAK(ftrace_stub)
+ retq
+ ENDPROC(ftrace_caller)
+--
+2.20.1
+
--- /dev/null
+From a31ad79c62d444d6bb29f3db094d5f248904750f Mon Sep 17 00:00:00 2001
+From: Nadav Amit <namit@vmware.com>
+Date: Thu, 25 Apr 2019 17:11:29 -0700
+Subject: x86/ftrace: Set trampoline pages as executable
+
+[ Upstream commit 3c0dab44e22782359a0a706cbce72de99a22aa75 ]
+
+Since module_alloc() will soon no longer set the pages as executable,
+set ftrace trampoline pages as executable after they are allocated.
+
+For the time being, do not change ftrace to use the text_poke()
+interface. As a result, ftrace still breaks W^X.
+
+Signed-off-by: Nadav Amit <namit@vmware.com>
+Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Cc: <akpm@linux-foundation.org>
+Cc: <ard.biesheuvel@linaro.org>
+Cc: <deneen.t.dock@intel.com>
+Cc: <kernel-hardening@lists.openwall.com>
+Cc: <kristen@linux.intel.com>
+Cc: <linux_dti@icloud.com>
+Cc: <will.deacon@arm.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lkml.kernel.org/r/20190426001143.4983-10-namit@vmware.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/ftrace.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index 38c798b1dbd2..9f033dfd2766 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -761,6 +761,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+ unsigned long end_offset;
+ unsigned long op_offset;
+ unsigned long offset;
++ unsigned long npages;
+ unsigned long size;
+ unsigned long retq;
+ unsigned long *ptr;
+@@ -793,6 +794,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+ return 0;
+
+ *tramp_size = size + RET_SIZE + sizeof(void *);
++ npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);
+
+ /* Copy ftrace_caller onto the trampoline memory */
+ ret = probe_kernel_read(trampoline, (void *)start_offset, size);
+@@ -837,6 +839,12 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+ /* ALLOC_TRAMP flags lets us know we created it */
+ ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
+
++ /*
++ * Module allocation needs to be completed by making the page
++ * executable. The page is still writable, which is a security hazard,
++ * but anyhow ftrace breaks W^X completely.
++ */
++ set_memory_x((unsigned long)trampoline, npages);
+ return (unsigned long)trampoline;
+ fail:
+ tramp_free(trampoline, *tramp_size);
+--
+2.20.1
+
--- /dev/null
+From e4cb269c974c1fea2b95cfde5d0860c261a041d5 Mon Sep 17 00:00:00 2001
+From: Nadav Amit <namit@vmware.com>
+Date: Thu, 25 Apr 2019 17:11:30 -0700
+Subject: x86/kprobes: Set instruction page as executable
+
+[ Upstream commit 7298e24f904224fa79eb8fd7e0fbd78950ccf2db ]
+
+Set the page as executable after allocation. This patch is a
+preparatory step for a following patch that makes module-allocated
+pages non-executable.
+
+While at it, do some small cleanup of what appears to be unnecessary
+masking.
+
+Signed-off-by: Nadav Amit <namit@vmware.com>
+Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: <akpm@linux-foundation.org>
+Cc: <ard.biesheuvel@linaro.org>
+Cc: <deneen.t.dock@intel.com>
+Cc: <kernel-hardening@lists.openwall.com>
+Cc: <kristen@linux.intel.com>
+Cc: <linux_dti@icloud.com>
+Cc: <will.deacon@arm.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lkml.kernel.org/r/20190426001143.4983-11-namit@vmware.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/kprobes/core.c | 24 ++++++++++++++++++++----
+ 1 file changed, 20 insertions(+), 4 deletions(-)
+
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index 544bc2dfe408..e83a057564d1 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -431,8 +431,20 @@ void *alloc_insn_page(void)
+ void *page;
+
+ page = module_alloc(PAGE_SIZE);
+- if (page)
+- set_memory_ro((unsigned long)page & PAGE_MASK, 1);
++ if (!page)
++ return NULL;
++
++ /*
++ * First make the page read-only, and only then make it executable to
++ * prevent it from being W+X in between.
++ */
++ set_memory_ro((unsigned long)page, 1);
++
++ /*
++ * TODO: Once additional kernel code protection mechanisms are set, ensure
++ * that the page was not maliciously altered and it is still zeroed.
++ */
++ set_memory_x((unsigned long)page, 1);
+
+ return page;
+ }
+@@ -440,8 +452,12 @@ void *alloc_insn_page(void)
+ /* Recover page to RW mode before releasing it */
+ void free_insn_page(void *page)
+ {
+- set_memory_nx((unsigned long)page & PAGE_MASK, 1);
+- set_memory_rw((unsigned long)page & PAGE_MASK, 1);
++ /*
++ * First make the page non-executable, and only then make it writable to
++ * prevent it from being W+X in between.
++ */
++ set_memory_nx((unsigned long)page, 1);
++ set_memory_rw((unsigned long)page, 1);
+ module_memfree(page);
+ }
+
+--
+2.20.1
+