--- /dev/null
+From 24cecc37746393432d994c0dbc251fb9ac7c5d72 Mon Sep 17 00:00:00 2001
+From: Catalin Marinas <catalin.marinas@arm.com>
+Date: Mon, 6 Jan 2020 14:35:39 +0000
+Subject: arm64: Revert support for execute-only user mappings
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+commit 24cecc37746393432d994c0dbc251fb9ac7c5d72 upstream.
+
+The ARMv8 64-bit architecture supports execute-only user permissions by
+clearing the PTE_USER and PTE_UXN bits, practically making it a mostly
+privileged mapping from which userspace running at EL0 can still execute.
+
+The downside, however, is that the kernel at EL1 inadvertently reading
+such a mapping would not trip over the PAN (privileged access never)
+protection.
+
+Revert the relevant bits from commit cab15ce604e5 ("arm64: Introduce
+execute-only page access permissions") so that PROT_EXEC implies
+PROT_READ (and therefore PTE_USER) until the architecture gains proper
+support for execute-only user mappings.
+
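+As a minimal userspace sketch (not part of this patch, and assuming an
+arm64 kernel with this revert applied), a PROT_EXEC-only mapping is now
+also readable, since __P100/__S100 map to PAGE_READONLY_EXEC:
+
+	#include <stdio.h>
+	#include <sys/mman.h>
+
+	int main(void)
+	{
+		/* Request an execute-only anonymous mapping. */
+		unsigned char *p = mmap(NULL, 4096, PROT_EXEC,
+					MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+		if (p == MAP_FAILED)
+			return 1;
+		/*
+		 * PROT_EXEC now implies PROT_READ, so this read
+		 * succeeds instead of faulting.
+		 */
+		printf("first byte: %#x\n", p[0]);
+		munmap(p, 4096);
+		return 0;
+	}
+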
+Fixes: cab15ce604e5 ("arm64: Introduce execute-only page access permissions")
+Cc: <stable@vger.kernel.org> # 4.9.x-
+Acked-by: Will Deacon <will@kernel.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/pgtable-prot.h | 5 ++---
+ arch/arm64/include/asm/pgtable.h | 10 +++-------
+ arch/arm64/mm/fault.c | 2 +-
+ mm/mmap.c | 6 ------
+ 4 files changed, 6 insertions(+), 17 deletions(-)
+
+--- a/arch/arm64/include/asm/pgtable-prot.h
++++ b/arch/arm64/include/asm/pgtable-prot.h
+@@ -96,13 +96,12 @@
+ #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
+ #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
+ #define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
+-#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
+
+ #define __P000 PAGE_NONE
+ #define __P001 PAGE_READONLY
+ #define __P010 PAGE_READONLY
+ #define __P011 PAGE_READONLY
+-#define __P100 PAGE_EXECONLY
++#define __P100 PAGE_READONLY_EXEC
+ #define __P101 PAGE_READONLY_EXEC
+ #define __P110 PAGE_READONLY_EXEC
+ #define __P111 PAGE_READONLY_EXEC
+@@ -111,7 +110,7 @@
+ #define __S001 PAGE_READONLY
+ #define __S010 PAGE_SHARED
+ #define __S011 PAGE_SHARED
+-#define __S100 PAGE_EXECONLY
++#define __S100 PAGE_READONLY_EXEC
+ #define __S101 PAGE_READONLY_EXEC
+ #define __S110 PAGE_SHARED_EXEC
+ #define __S111 PAGE_SHARED_EXEC
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -105,12 +105,8 @@ extern unsigned long empty_zero_page[PAG
+ #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
+
+ #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
+-/*
+- * Execute-only user mappings do not have the PTE_USER bit set. All valid
+- * kernel mappings have the PTE_UXN bit set.
+- */
+ #define pte_valid_not_user(pte) \
+- ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
++ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
+ #define pte_valid_young(pte) \
+ ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
+ #define pte_valid_user(pte) \
+@@ -126,8 +122,8 @@ extern unsigned long empty_zero_page[PAG
+
+ /*
+ * p??_access_permitted() is true for valid user mappings (subject to the
+- * write permission check) other than user execute-only which do not have the
+- * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
++ * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
++ * set.
+ */
+ #define pte_access_permitted(pte, write) \
+ (pte_valid_user(pte) && (!(write) || pte_write(pte)))
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -428,7 +428,7 @@ static int __kprobes do_page_fault(unsig
+ struct mm_struct *mm;
+ struct siginfo si;
+ vm_fault_t fault, major = 0;
+- unsigned long vm_flags = VM_READ | VM_WRITE;
++ unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
+ unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+ if (notify_page_fault(regs, esr))
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -89,12 +89,6 @@ static void unmap_region(struct mm_struc
+ * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
+ * w: (no) no w: (no) no w: (copy) copy w: (no) no
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+- *
+- * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
+- * MAP_PRIVATE:
+- * r: (no) no
+- * w: (no) no
+- * x: (yes) yes
+ */
+ pgprot_t protection_map[16] __ro_after_init = {
+ __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
--- /dev/null
+From e31f7939c1c27faa5d0e3f14519eaf7c89e8a69d Mon Sep 17 00:00:00 2001
+From: Wen Yang <wenyang@linux.alibaba.com>
+Date: Fri, 3 Jan 2020 11:02:48 +0800
+Subject: ftrace: Avoid potential division by zero in function profiler
+
+From: Wen Yang <wenyang@linux.alibaba.com>
+
+commit e31f7939c1c27faa5d0e3f14519eaf7c89e8a69d upstream.
+
+The ftrace_profile->counter is unsigned long and do_div() truncates it
+to 32 bits, which means a non-zero counter can be truncated to a zero
+divisor, causing a division by zero.
+Fix this issue by using div64_ul() instead.
+
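+As a minimal userspace sketch (not part of this patch) of the
+truncation: do_div() takes a 32-bit divisor, so a 64-bit counter whose
+low 32 bits happen to be zero becomes a zero divisor even though the
+counter itself tests non-zero; div64_ul() performs a full 64-bit divide
+and avoids this:
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		uint64_t counter = 0x100000000ULL;	/* non-zero */
+		/* do_div() would see only the low 32 bits. */
+		uint32_t divisor = (uint32_t)counter;
+		printf("counter=%llu divisor=%u\n",
+		       (unsigned long long)counter, divisor);
+		return 0;
+	}
+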
+Link: http://lkml.kernel.org/r/20200103030248.14516-1-wenyang@linux.alibaba.com
+
+Cc: stable@vger.kernel.org
+Fixes: e330b3bcd8319 ("tracing: Show sample std dev in function profiling")
+Fixes: 34886c8bc590f ("tracing: add average time in function to function profiler")
+Signed-off-by: Wen Yang <wenyang@linux.alibaba.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/ftrace.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -554,8 +554,7 @@ static int function_stat_show(struct seq
+ }
+
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+- avg = rec->time;
+- do_div(avg, rec->counter);
++ avg = div64_ul(rec->time, rec->counter);
+ if (tracing_thresh && (avg < tracing_thresh))
+ goto out;
+ #endif
+@@ -581,7 +580,8 @@ static int function_stat_show(struct seq
+ * Divide only 1000 for ns^2 -> us^2 conversion.
+ * trace_print_graph_duration will divide 1000 again.
+ */
+- do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
++ stddev = div64_ul(stddev,
++ rec->counter * (rec->counter - 1) * 1000);
+ }
+
+ trace_seq_init(&s);