--- /dev/null
+From 5a44c71ccda60a50073c5d7fe3f694cdfa3ab0c2 Mon Sep 17 00:00:00 2001
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Date: Sun, 26 Jan 2020 11:44:29 +0100
+Subject: drivers: net: xgene: Fix the order of the arguments of 'alloc_etherdev_mqs()'
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+commit 5a44c71ccda60a50073c5d7fe3f694cdfa3ab0c2 upstream.
+
+'alloc_etherdev_mqs()' expects first 'tx', then 'rx'. The semantic here
+looks reversed.
+
+Reorder the arguments passed to 'alloc_etherdev_mqs()' in order to keep
+the correct semantic.
+
+In fact, this is a no-op because both XGENE_NUM_[RT]X_RING are 8.
+
+Fixes: 107dec2749fe ("drivers: net: xgene: Add support for multiple queues")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+@@ -2034,7 +2034,7 @@ static int xgene_enet_probe(struct platf
+ int ret;
+
+ ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
+- XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
++ XGENE_NUM_TX_RING, XGENE_NUM_RX_RING);
+ if (!ndev)
+ return -ENOMEM;
+
--- /dev/null
+From f66c0447cca1281116224d474cdb37d6a18e4b5b Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Wed, 27 Nov 2019 14:57:04 +0900
+Subject: kprobes: Set unoptimized flag after unoptimizing code
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit f66c0447cca1281116224d474cdb37d6a18e4b5b upstream.
+
+Set the unoptimized flag after confirming the code is completely
+unoptimized. Without this fix, when a kprobe hits the intermediate
+modified instruction (the first byte is replaced by an INT3, but
+later bytes can still be a jump address operand) while unoptimizing,
+it can return to the middle byte of the modified code, which causes
+an invalid instruction exception in the kernel.
+
+Usually, this is a rare case, but if we put a probe on the function
+call while text patching, it always causes a kernel panic as below:
+
+ # echo p text_poke+5 > kprobe_events
+ # echo 1 > events/kprobes/enable
+ # echo 0 > events/kprobes/enable
+
+invalid opcode: 0000 [#1] PREEMPT SMP PTI
+ RIP: 0010:text_poke+0x9/0x50
+ Call Trace:
+ arch_unoptimize_kprobe+0x22/0x28
+ arch_unoptimize_kprobes+0x39/0x87
+ kprobe_optimizer+0x6e/0x290
+ process_one_work+0x2a0/0x610
+ worker_thread+0x28/0x3d0
+ ? process_one_work+0x610/0x610
+ kthread+0x10d/0x130
+ ? kthread_park+0x80/0x80
+ ret_from_fork+0x3a/0x50
+
+text_poke() is used for patching the code in optprobes.
+
+This can happen even if we blacklist text_poke() and other functions,
+because there is a small time window during which we show the intermediate
+code to other CPUs.
+
+ [ mingo: Edited the changelog. ]
+
+Tested-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: bristot@redhat.com
+Fixes: 6274de4984a6 ("kprobes: Support delayed unoptimizing")
+Link: https://lkml.kernel.org/r/157483422375.25881.13508326028469515760.stgit@devnote2
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/kprobes.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -523,6 +523,8 @@ static void do_unoptimize_kprobes(void)
+ arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
+ /* Loop free_list for disarming */
+ list_for_each_entry_safe(op, tmp, &freeing_list, list) {
++ /* Switching from detour code to origin */
++ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+ /* Disarm probes if marked disabled */
+ if (kprobe_disabled(&op->kp))
+ arch_disarm_kprobe(&op->kp);
+@@ -662,6 +664,7 @@ static void force_unoptimize_kprobe(stru
+ {
+ lockdep_assert_cpus_held();
+ arch_unoptimize_kprobe(op);
++ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+ if (kprobe_disabled(&op->kp))
+ arch_disarm_kprobe(&op->kp);
+ }
+@@ -689,7 +692,6 @@ static void unoptimize_kprobe(struct kpr
+ return;
+ }
+
+- op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+ if (!list_empty(&op->list)) {
+ /* Dequeue from the optimization queue */
+ list_del_init(&op->list);
--- /dev/null
+From cb829624867b5ab10bc6a7036d183b1b82bfe9f8 Mon Sep 17 00:00:00 2001
+From: Wei Yang <richardw.yang@linux.intel.com>
+Date: Thu, 30 Jan 2020 22:14:29 -0800
+Subject: mm/huge_memory.c: use head to check huge zero page
+
+From: Wei Yang <richardw.yang@linux.intel.com>
+
+commit cb829624867b5ab10bc6a7036d183b1b82bfe9f8 upstream.
+
+The page could be a tail page, if this is the case, this BUG_ON will
+never be triggered.
+
+Link: http://lkml.kernel.org/r/20200110032610.26499-1-richardw.yang@linux.intel.com
+Fixes: e9b61f19858a ("thp: reintroduce split_huge_page()")
+
+Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/huge_memory.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2561,7 +2561,7 @@ int split_huge_page_to_list(struct page
+ unsigned long flags;
+ pgoff_t end;
+
+- VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
++ VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE(!PageCompound(page), page);
+
--- /dev/null
+From f42f25526502d851d0e3ca1e46297da8aafce8a7 Mon Sep 17 00:00:00 2001
+From: David Rientjes <rientjes@google.com>
+Date: Thu, 30 Jan 2020 22:14:48 -0800
+Subject: mm, thp: fix defrag setting if newline is not used
+
+From: David Rientjes <rientjes@google.com>
+
+commit f42f25526502d851d0e3ca1e46297da8aafce8a7 upstream.
+
+If thp defrag setting "defer" is used and a newline is *not* used when
+writing to the sysfs file, this is interpreted as the "defer+madvise"
+option.
+
+This is because we do prefix matching and if five characters are written
+without a newline, the current code ends up comparing to the first five
+bytes of the "defer+madvise" option and using that instead.
+
+Use the more appropriate sysfs_streq() that handles the trailing newline
+for us. Since this doubles as a nice cleanup, do it in enabled_store()
+as well.
+
+The current implementation relies on prefix matching: the number of
+bytes compared is either the number of bytes written or the length of
+the option being compared. With a newline, "defer\n" does not match
+"defer+madvise"; without a newline, however, "defer" is considered to
+match "defer+madvise" (prefix matching is only comparing the first five
+bytes). End result is that writing "defer" is broken unless it has an
+additional trailing character.
+
+This means that writing "madv" in the past would match and set
+"madvise". With strict checking, that no longer is the case but it is
+unlikely anybody is currently doing this.
+
+Link: http://lkml.kernel.org/r/alpine.DEB.2.21.2001171411020.56385@chino.kir.corp.google.com
+Fixes: 21440d7eb904 ("mm, thp: add new defer+madvise defrag option")
+Signed-off-by: David Rientjes <rientjes@google.com>
+Suggested-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/huge_memory.c | 24 ++++++++----------------
+ 1 file changed, 8 insertions(+), 16 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -163,16 +163,13 @@ static ssize_t enabled_store(struct kobj
+ {
+ ssize_t ret = count;
+
+- if (!memcmp("always", buf,
+- min(sizeof("always")-1, count))) {
++ if (sysfs_streq(buf, "always")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+- } else if (!memcmp("madvise", buf,
+- min(sizeof("madvise")-1, count))) {
++ } else if (sysfs_streq(buf, "madvise")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+- } else if (!memcmp("never", buf,
+- min(sizeof("never")-1, count))) {
++ } else if (sysfs_streq(buf, "never")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ } else
+@@ -236,32 +233,27 @@ static ssize_t defrag_store(struct kobje
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+ {
+- if (!memcmp("always", buf,
+- min(sizeof("always")-1, count))) {
++ if (sysfs_streq(buf, "always")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+- } else if (!memcmp("defer+madvise", buf,
+- min(sizeof("defer+madvise")-1, count))) {
++ } else if (sysfs_streq(buf, "defer+madvise")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+- } else if (!memcmp("defer", buf,
+- min(sizeof("defer")-1, count))) {
++ } else if (sysfs_streq(buf, "defer")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+- } else if (!memcmp("madvise", buf,
+- min(sizeof("madvise")-1, count))) {
++ } else if (sysfs_streq(buf, "madvise")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+- } else if (!memcmp("never", buf,
+- min(sizeof("never")-1, count))) {
++ } else if (sysfs_streq(buf, "never")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
--- /dev/null
+From 38228e8848cd7dd86ccb90406af32de0cad24be3 Mon Sep 17 00:00:00 2001
+From: Daniel Jordan <daniel.m.jordan@oracle.com>
+Date: Tue, 3 Dec 2019 14:31:11 -0500
+Subject: padata: always acquire cpu_hotplug_lock before pinst->lock
+
+From: Daniel Jordan <daniel.m.jordan@oracle.com>
+
+commit 38228e8848cd7dd86ccb90406af32de0cad24be3 upstream.
+
+lockdep complains when padata's paths to update cpumasks via CPU hotplug
+and sysfs are both taken:
+
+ # echo 0 > /sys/devices/system/cpu/cpu1/online
+ # echo ff > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
+
+ ======================================================
+ WARNING: possible circular locking dependency detected
+ 5.4.0-rc8-padata-cpuhp-v3+ #1 Not tainted
+ ------------------------------------------------------
+ bash/205 is trying to acquire lock:
+ ffffffff8286bcd0 (cpu_hotplug_lock.rw_sem){++++}, at: padata_set_cpumask+0x2b/0x120
+
+ but task is already holding lock:
+ ffff8880001abfa0 (&pinst->lock){+.+.}, at: padata_set_cpumask+0x26/0x120
+
+ which lock already depends on the new lock.
+
+padata doesn't take cpu_hotplug_lock and pinst->lock in a consistent
+order. Which should be first? CPU hotplug calls into padata with
+cpu_hotplug_lock already held, so it should have priority.
+
+Fixes: 6751fb3c0e0c ("padata: Use get_online_cpus/put_online_cpus")
+Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Cc: Eric Biggers <ebiggers@kernel.org>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Steffen Klassert <steffen.klassert@secunet.com>
+Cc: linux-crypto@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/padata.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -605,8 +605,8 @@ int padata_set_cpumask(struct padata_ins
+ struct cpumask *serial_mask, *parallel_mask;
+ int err = -EINVAL;
+
+- mutex_lock(&pinst->lock);
+ get_online_cpus();
++ mutex_lock(&pinst->lock);
+
+ switch (cpumask_type) {
+ case PADATA_CPU_PARALLEL:
+@@ -624,8 +624,8 @@ int padata_set_cpumask(struct padata_ins
+ err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);
+
+ out:
+- put_online_cpus();
+ mutex_unlock(&pinst->lock);
++ put_online_cpus();
+
+ return err;
+ }
--- /dev/null
+From 3f7774033e6820d25beee5cf7aefa11d4968b951 Mon Sep 17 00:00:00 2001
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+Date: Mon, 16 Dec 2019 13:22:33 -0300
+Subject: perf hists browser: Restore ESC as "Zoom out" of DSO/thread/etc
+
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+
+commit 3f7774033e6820d25beee5cf7aefa11d4968b951 upstream.
+
+We need to set actions->ms.map since 599a2f38a989 ("perf hists browser:
+Check sort keys before hot key actions"), as in that patch we bail out
+if map is NULL.
+
+Reviewed-by: Jiri Olsa <jolsa@kernel.org>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Fixes: 599a2f38a989 ("perf hists browser: Check sort keys before hot key actions")
+Link: https://lkml.kernel.org/n/tip-wp1ssoewy6zihwwexqpohv0j@git.kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/ui/browsers/hists.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/tools/perf/ui/browsers/hists.c
++++ b/tools/perf/ui/browsers/hists.c
+@@ -3142,6 +3142,7 @@ static int perf_evsel__hists_browse(stru
+
+ continue;
+ }
++ actions->ms.map = map;
+ top = pstack__peek(browser->pstack);
+ if (top == &browser->hists->dso_filter) {
+ /*
kvm-svm-override-default-mmio-mask-if-memory-encryption-is-enabled.patch
kvm-check-for-a-bad-hva-before-dropping-into-the-ghc-slow-path.patch
tuntap-correctly-set-sockwq_async_nospace.patch
+drivers-net-xgene-fix-the-order-of-the-arguments-of-alloc_etherdev_mqs.patch
+kprobes-set-unoptimized-flag-after-unoptimizing-code.patch
+perf-hists-browser-restore-esc-as-zoom-out-of-dso-thread-etc.patch
+padata-always-acquire-cpu_hotplug_lock-before-pinst-lock.patch
+mm-huge_memory.c-use-head-to-check-huge-zero-page.patch
+mm-thp-fix-defrag-setting-if-newline-is-not-used.patch