--- /dev/null
+From 40660f1fcee8d524a60b5101538e42b1f39f106d Mon Sep 17 00:00:00 2001
+From: Alexey Brodkin <abrodkin@synopsys.com>
+Date: Sun, 16 Sep 2018 23:47:57 +0300
+Subject: ARC: build: Don't set CROSS_COMPILE in arch's Makefile
+
+From: Alexey Brodkin <abrodkin@synopsys.com>
+
+commit 40660f1fcee8d524a60b5101538e42b1f39f106d upstream.
+
+There's not much sense in doing that because if the user or
+their build system didn't set CROSS_COMPILE we may still
+very well make an incorrect guess.
+
+But as it turned out, setting CROSS_COMPILE is not as harmless
+as one may think: with the recent changes that implemented automatic
+discovery of __host__ gcc features, unconditionally setting
+CROSS_COMPILE leads to failures on execution of "make xxx_defconfig"
+with an absent cross-compiler; for more info see [1].
+
+Setting CROSS_COMPILE also gets in the way if we only want to build
+.dtb's (again with an absent cross-compiler, which is not really
+needed for building .dtb's), see [2].
+
+Note, we had to change the LIBGCC assignment type from ":=" to "="
+so that it is resolved on its usage; otherwise, if it is resolved
+at declaration time with CROSS_COMPILE missing, we get these
+error messages from the host GCC:
+
+| gcc: error: unrecognized command line option -mmedium-calls
+| gcc: error: unrecognized command line option -mno-sdata
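+
+A minimal sketch of the difference between the two assignment types
+(illustrative, not from the kernel tree):
+
+| CC = $(CROSS_COMPILE)gcc
+|
+| # ":=" expands at parse time: $(CC) is invoked immediately,
+| # even for targets that never need libgcc.
+| #LIBGCC := $(shell $(CC) --print-libgcc-file-name)
+|
+| # "=" defers expansion: $(CC) is invoked only when $(LIBGCC)
+| # is actually referenced, e.g. in the link rule.
+| LIBGCC = $(shell $(CC) --print-libgcc-file-name)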
+
+[1] http://lists.infradead.org/pipermail/linux-snps-arc/2018-September/004308.html
+[2] http://lists.infradead.org/pipermail/linux-snps-arc/2018-September/004320.html
+
+Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com>
+Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
+Cc: Rob Herring <robh@kernel.org>
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arc/Makefile | 10 +---------
+ 1 file changed, 1 insertion(+), 9 deletions(-)
+
+--- a/arch/arc/Makefile
++++ b/arch/arc/Makefile
+@@ -6,14 +6,6 @@
+ # published by the Free Software Foundation.
+ #
+
+-ifeq ($(CROSS_COMPILE),)
+-ifndef CONFIG_CPU_BIG_ENDIAN
+-CROSS_COMPILE := arc-linux-
+-else
+-CROSS_COMPILE := arceb-linux-
+-endif
+-endif
+-
+ KBUILD_DEFCONFIG := nsim_700_defconfig
+
+ cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
+@@ -73,7 +65,7 @@ ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
+ # --build-id w/o "-marclinux". Default arc-elf32-ld is OK
+ ldflags-$(upto_gcc44) += -marclinux
+
+-LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
++LIBGCC = $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
+
+ # Modules with short calls might break for calls into builtin-kernel
+ KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode
--- /dev/null
+From 615f64458ad890ef94abc879a66d8b27236e733a Mon Sep 17 00:00:00 2001
+From: Alexey Brodkin <abrodkin@synopsys.com>
+Date: Thu, 13 Sep 2018 23:24:28 +0300
+Subject: ARC: build: Get rid of toolchain check
+
+From: Alexey Brodkin <abrodkin@synopsys.com>
+
+commit 615f64458ad890ef94abc879a66d8b27236e733a upstream.
+
+This check is very naive: we simply test whether GCC invoked without
+"-mcpu=XXX" has the ARC700 define set. In that case we assume that
+GCC was built with "--with-cpu=arc700" and has libgcc built for
+ARC700.
+
+Otherwise, if ARC700 is not defined, we assume that everything was
+built for ARCv2.
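+
+The probe boils down to inspecting the compiler's default predefined
+macros, i.e. something like this illustrative invocation:
+
+| $ arc-linux-gcc -dM -E - < /dev/null | grep ARC700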
+
+But in reality things are much more interesting.
+
+1. Regardless of GCC configuration (i.e. what we pass in
+   "--with-cpu"), it may generate code for any ARC core.
+
+2. libgcc might be built with an explicitly specified "-mcpu=YYY"
+
+That's exactly what happens in the case of multilibbed toolchains:
+ - GCC is configured with default settings
+ - all the libraries are built for many different CPU flavors
+
+I.e. that check gets in the way of using multilibbed toolchains,
+and even non-multilibbed toolchains are affected: OpenEmbedded also
+builds GCC without "--with-cpu", because each and every target
+component is later compiled with an explicitly set "-mcpu=ZZZ".
+
+Acked-by: Rob Herring <robh@kernel.org>
+Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com>
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arc/Makefile | 14 --------------
+ 1 file changed, 14 deletions(-)
+
+--- a/arch/arc/Makefile
++++ b/arch/arc/Makefile
+@@ -20,20 +20,6 @@ cflags-y += -fno-common -pipe -fno-built
+ cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
+ cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs
+
+-is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
+-
+-ifdef CONFIG_ISA_ARCOMPACT
+-ifeq ($(is_700), 0)
+- $(error Toolchain not configured for ARCompact builds)
+-endif
+-endif
+-
+-ifdef CONFIG_ISA_ARCV2
+-ifeq ($(is_700), 1)
+- $(error Toolchain not configured for ARCv2 builds)
+-endif
+-endif
+-
+ ifdef CONFIG_ARC_CURR_IN_REG
+ # For a global register defintion, make sure it gets passed to every file
+ # We had a customer reported bug where some code built in kernel was NOT using
--- /dev/null
+From eb66ae030829605d61fbef1909ce310e29f78821 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Fri, 12 Oct 2018 15:22:59 -0700
+Subject: mremap: properly flush TLB before releasing the page
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit eb66ae030829605d61fbef1909ce310e29f78821 upstream.
+
+Jann Horn points out that our TLB flushing was subtly wrong for the
+mremap() case. What makes mremap() special is that we don't follow the
+usual "add page to list of pages to be freed, then flush tlb, and then
+free pages". No, mremap() obviously just _moves_ the page from one page
+table location to another.
+
+That matters, because mremap() thus doesn't directly control the
+lifetime of the moved page with a freelist: instead, the lifetime of the
+page is controlled by the page table locking, which serializes access
+the entry.
+
+As a result, we need to flush the TLB not just before releasing the lock
+for the source location (to avoid any concurrent accesses to the entry),
+but also before we release the destination page table lock (to avoid the
+TLB being flushed after somebody else has already done something to that
+page).
+
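+In other words, the ordering the fix enforces looks like this
+(a simplified per-entry sketch, not the literal kernel code):
+
+|   pte = ptep_get_and_clear(mm, old_addr, old_pte); /* unmap source */
+|   set_pte_at(mm, new_addr, new_pte, pte);          /* map destination */
+|   if (pte_present(pte))                            /* flush while BOTH */
+|           flush_tlb_range(vma, old_addr, old_end); /* PTLs are held */
+|   spin_unlock(new_ptl); /* only now may others touch the new entry */
+|   spin_unlock(old_ptl);
+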
+This also makes the whole "need_flush" logic unnecessary, since we now
+always end up flushing the TLB for every valid entry.
+
+Reported-and-tested-by: Jann Horn <jannh@google.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Tested-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/huge_mm.h | 2 +-
+ mm/huge_memory.c | 10 ++++------
+ mm/mremap.c | 30 +++++++++++++-----------------
+ 3 files changed, 18 insertions(+), 24 deletions(-)
+
+--- a/include/linux/huge_mm.h
++++ b/include/linux/huge_mm.h
+@@ -42,7 +42,7 @@ extern int mincore_huge_pmd(struct vm_ar
+ unsigned char *vec);
+ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+ unsigned long new_addr, unsigned long old_end,
+- pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
++ pmd_t *old_pmd, pmd_t *new_pmd);
+ extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long addr, pgprot_t newprot,
+ int prot_numa);
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1765,7 +1765,7 @@ static pmd_t move_soft_dirty_pmd(pmd_t p
+
+ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+ unsigned long new_addr, unsigned long old_end,
+- pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
++ pmd_t *old_pmd, pmd_t *new_pmd)
+ {
+ spinlock_t *old_ptl, *new_ptl;
+ pmd_t pmd;
+@@ -1796,7 +1796,7 @@ bool move_huge_pmd(struct vm_area_struct
+ if (new_ptl != old_ptl)
+ spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+ pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
+- if (pmd_present(pmd) && pmd_dirty(pmd))
++ if (pmd_present(pmd))
+ force_flush = true;
+ VM_BUG_ON(!pmd_none(*new_pmd));
+
+@@ -1807,12 +1807,10 @@ bool move_huge_pmd(struct vm_area_struct
+ }
+ pmd = move_soft_dirty_pmd(pmd);
+ set_pmd_at(mm, new_addr, new_pmd, pmd);
+- if (new_ptl != old_ptl)
+- spin_unlock(new_ptl);
+ if (force_flush)
+ flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
+- else
+- *need_flush = true;
++ if (new_ptl != old_ptl)
++ spin_unlock(new_ptl);
+ spin_unlock(old_ptl);
+ return true;
+ }
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -115,7 +115,7 @@ static pte_t move_soft_dirty_pte(pte_t p
+ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
+ unsigned long old_addr, unsigned long old_end,
+ struct vm_area_struct *new_vma, pmd_t *new_pmd,
+- unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
++ unsigned long new_addr, bool need_rmap_locks)
+ {
+ struct mm_struct *mm = vma->vm_mm;
+ pte_t *old_pte, *new_pte, pte;
+@@ -163,15 +163,17 @@ static void move_ptes(struct vm_area_str
+
+ pte = ptep_get_and_clear(mm, old_addr, old_pte);
+ /*
+- * If we are remapping a dirty PTE, make sure
++ * If we are remapping a valid PTE, make sure
+ * to flush TLB before we drop the PTL for the
+- * old PTE or we may race with page_mkclean().
++ * PTE.
+ *
+- * This check has to be done after we removed the
+- * old PTE from page tables or another thread may
+- * dirty it after the check and before the removal.
++ * NOTE! Both old and new PTL matter: the old one
++ * for racing with page_mkclean(), the new one to
++ * make sure the physical page stays valid until
++ * the TLB entry for the old mapping has been
++ * flushed.
+ */
+- if (pte_present(pte) && pte_dirty(pte))
++ if (pte_present(pte))
+ force_flush = true;
+ pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
+ pte = move_soft_dirty_pte(pte);
+@@ -179,13 +181,11 @@ static void move_ptes(struct vm_area_str
+ }
+
+ arch_leave_lazy_mmu_mode();
++ if (force_flush)
++ flush_tlb_range(vma, old_end - len, old_end);
+ if (new_ptl != old_ptl)
+ spin_unlock(new_ptl);
+ pte_unmap(new_pte - 1);
+- if (force_flush)
+- flush_tlb_range(vma, old_end - len, old_end);
+- else
+- *need_flush = true;
+ pte_unmap_unlock(old_pte - 1, old_ptl);
+ if (need_rmap_locks)
+ drop_rmap_locks(vma);
+@@ -200,7 +200,6 @@ unsigned long move_page_tables(struct vm
+ {
+ unsigned long extent, next, old_end;
+ pmd_t *old_pmd, *new_pmd;
+- bool need_flush = false;
+ unsigned long mmun_start; /* For mmu_notifiers */
+ unsigned long mmun_end; /* For mmu_notifiers */
+
+@@ -231,8 +230,7 @@ unsigned long move_page_tables(struct vm
+ if (need_rmap_locks)
+ take_rmap_locks(vma);
+ moved = move_huge_pmd(vma, old_addr, new_addr,
+- old_end, old_pmd, new_pmd,
+- &need_flush);
++ old_end, old_pmd, new_pmd);
+ if (need_rmap_locks)
+ drop_rmap_locks(vma);
+ if (moved)
+@@ -250,10 +248,8 @@ unsigned long move_page_tables(struct vm
+ if (extent > LATENCY_LIMIT)
+ extent = LATENCY_LIMIT;
+ move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
+- new_pmd, new_addr, need_rmap_locks, &need_flush);
++ new_pmd, new_addr, need_rmap_locks);
+ }
+- if (need_flush)
+- flush_tlb_range(vma, old_end-len, old_addr);
+
+ mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
+