--- /dev/null
+From 5a44c71ccda60a50073c5d7fe3f694cdfa3ab0c2 Mon Sep 17 00:00:00 2001
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Date: Sun, 26 Jan 2020 11:44:29 +0100
+Subject: drivers: net: xgene: Fix the order of the arguments of 'alloc_etherdev_mqs()'
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+commit 5a44c71ccda60a50073c5d7fe3f694cdfa3ab0c2 upstream.
+
+'alloc_etherdev_mqs()' expects first 'tx', then 'rx'. The semantic here
+looks reversed.
+
+Reorder the arguments passed to 'alloc_etherdev_mqs()' in order to keep
+the correct semantic.
+
+In fact, this is a no-op because both XGENE_NUM_[RT]X_RING are 8.
+
+Fixes: 107dec2749fe ("drivers: net: xgene: Add support for multiple queues")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+@@ -2034,7 +2034,7 @@ static int xgene_enet_probe(struct platf
+ int ret;
+
+ ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
+- XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
++ XGENE_NUM_TX_RING, XGENE_NUM_RX_RING);
+ if (!ndev)
+ return -ENOMEM;
+
--- /dev/null
+From f66c0447cca1281116224d474cdb37d6a18e4b5b Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Wed, 27 Nov 2019 14:57:04 +0900
+Subject: kprobes: Set unoptimized flag after unoptimizing code
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit f66c0447cca1281116224d474cdb37d6a18e4b5b upstream.
+
+Set the unoptimized flag after confirming the code is completely
+unoptimized. Without this fix, when a kprobe hits the intermediate
+modified instruction (the first byte is replaced by an INT3, but
+later bytes can still be a jump address operand) while unoptimizing,
+it can return to the middle byte of the modified code, which causes
+an invalid instruction exception in the kernel.
+
+Usually, this is a rare case, but if we put a probe on the function
+call while text patching, it always causes a kernel panic as below:
+
+ # echo p text_poke+5 > kprobe_events
+ # echo 1 > events/kprobes/enable
+ # echo 0 > events/kprobes/enable
+
+invalid opcode: 0000 [#1] PREEMPT SMP PTI
+ RIP: 0010:text_poke+0x9/0x50
+ Call Trace:
+ arch_unoptimize_kprobe+0x22/0x28
+ arch_unoptimize_kprobes+0x39/0x87
+ kprobe_optimizer+0x6e/0x290
+ process_one_work+0x2a0/0x610
+ worker_thread+0x28/0x3d0
+ ? process_one_work+0x610/0x610
+ kthread+0x10d/0x130
+ ? kthread_park+0x80/0x80
+ ret_from_fork+0x3a/0x50
+
+text_poke() is used for patching the code in optprobes.
+
+This can happen even if we blacklist text_poke() and other functions,
+because there is a small time window during which we show the intermediate
+code to other CPUs.
+
+ [ mingo: Edited the changelog. ]
+
+Tested-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: bristot@redhat.com
+Fixes: 6274de4984a6 ("kprobes: Support delayed unoptimizing")
+Link: https://lkml.kernel.org/r/157483422375.25881.13508326028469515760.stgit@devnote2
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/kprobes.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -523,6 +523,8 @@ static void do_unoptimize_kprobes(void)
+ arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
+ /* Loop free_list for disarming */
+ list_for_each_entry_safe(op, tmp, &freeing_list, list) {
++ /* Switching from detour code to origin */
++ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+ /* Disarm probes if marked disabled */
+ if (kprobe_disabled(&op->kp))
+ arch_disarm_kprobe(&op->kp);
+@@ -662,6 +664,7 @@ static void force_unoptimize_kprobe(stru
+ {
+ lockdep_assert_cpus_held();
+ arch_unoptimize_kprobe(op);
++ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+ if (kprobe_disabled(&op->kp))
+ arch_disarm_kprobe(&op->kp);
+ }
+@@ -689,7 +692,6 @@ static void unoptimize_kprobe(struct kpr
+ return;
+ }
+
+- op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+ if (!list_empty(&op->list)) {
+ /* Dequeue from the optimization queue */
+ list_del_init(&op->list);
--- /dev/null
+From 208050dac5ef4de5cb83ffcafa78499c94d0b5ad Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Wed, 18 Dec 2019 13:55:06 -0800
+Subject: KVM: x86: Remove spurious clearing of async #PF MSR
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit 208050dac5ef4de5cb83ffcafa78499c94d0b5ad upstream.
+
+Remove a bogus clearing of apf.msr_val from kvm_arch_vcpu_destroy().
+
+apf.msr_val is only set to a non-zero value by kvm_pv_enable_async_pf(),
+which is only reachable by kvm_set_msr_common(), i.e. by writing
+MSR_KVM_ASYNC_PF_EN. KVM does not autonomously write said MSR, i.e.
+can only be written via KVM_SET_MSRS or KVM_RUN. Since KVM_SET_MSRS and
+KVM_RUN are vcpu ioctls, they require a valid vcpu file descriptor.
+kvm_arch_vcpu_destroy() is only called if KVM_CREATE_VCPU fails, and KVM
+declares KVM_CREATE_VCPU successful once the vcpu fd is installed and
+thus visible to userspace. Ergo, apf.msr_val cannot be non-zero when
+kvm_arch_vcpu_destroy() is called.
+
+Fixes: 344d9588a9df0 ("KVM: Add PV MSR to enable asynchronous page faults delivery.")
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8693,8 +8693,6 @@ void kvm_arch_vcpu_postcreate(struct kvm
+
+ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+ {
+- vcpu->arch.apf.msr_val = 0;
+-
+ kvm_arch_vcpu_free(vcpu);
+ }
+
--- /dev/null
+From 9d979c7e6ff43ca3200ffcb74f57415fd633a2da Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Wed, 18 Dec 2019 13:55:05 -0800
+Subject: KVM: x86: Remove spurious kvm_mmu_unload() from vcpu destruction path
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit 9d979c7e6ff43ca3200ffcb74f57415fd633a2da upstream.
+
+x86 does not load its MMU until KVM_RUN, which cannot be invoked until
+after vCPU creation succeeds. Given that kvm_arch_vcpu_destroy() is
+called if and only if vCPU creation fails, it is impossible for the MMU
+to be loaded.
+
+Note, the bogus kvm_mmu_unload() call was added during an unrelated
+refactoring of vCPU allocation, i.e. was presumably added as an
+opportunistic "fix" for a perceived leak.
+
+Fixes: fb3f0f51d92d1 ("KVM: Dynamically allocate vcpus")
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8695,10 +8695,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vc
+ {
+ vcpu->arch.apf.msr_val = 0;
+
+- vcpu_load(vcpu);
+- kvm_mmu_unload(vcpu);
+- vcpu_put(vcpu);
+-
+ kvm_arch_vcpu_free(vcpu);
+ }
+
--- /dev/null
+From cb829624867b5ab10bc6a7036d183b1b82bfe9f8 Mon Sep 17 00:00:00 2001
+From: Wei Yang <richardw.yang@linux.intel.com>
+Date: Thu, 30 Jan 2020 22:14:29 -0800
+Subject: mm/huge_memory.c: use head to check huge zero page
+
+From: Wei Yang <richardw.yang@linux.intel.com>
+
+commit cb829624867b5ab10bc6a7036d183b1b82bfe9f8 upstream.
+
+The page could be a tail page, if this is the case, this BUG_ON will
+never be triggered.
+
+Link: http://lkml.kernel.org/r/20200110032610.26499-1-richardw.yang@linux.intel.com
+Fixes: e9b61f19858a ("thp: reintroduce split_huge_page()")
+
+Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/huge_memory.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2661,7 +2661,7 @@ int split_huge_page_to_list(struct page
+ unsigned long flags;
+ pgoff_t end;
+
+- VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
++ VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE(!PageCompound(page), page);
+
--- /dev/null
+From f42f25526502d851d0e3ca1e46297da8aafce8a7 Mon Sep 17 00:00:00 2001
+From: David Rientjes <rientjes@google.com>
+Date: Thu, 30 Jan 2020 22:14:48 -0800
+Subject: mm, thp: fix defrag setting if newline is not used
+
+From: David Rientjes <rientjes@google.com>
+
+commit f42f25526502d851d0e3ca1e46297da8aafce8a7 upstream.
+
+If thp defrag setting "defer" is used and a newline is *not* used when
+writing to the sysfs file, this is interpreted as the "defer+madvise"
+option.
+
+This is because we do prefix matching and if five characters are written
+without a newline, the current code ends up comparing to the first five
+bytes of the "defer+madvise" option and using that instead.
+
+Use the more appropriate sysfs_streq() that handles the trailing newline
+for us. Since this doubles as a nice cleanup, do it in enabled_store()
+as well.
+
+The current implementation relies on prefix matching: the number of
+bytes compared is either the number of bytes written or the length of
+the option being compared. With a newline, "defer\n" does not match
+"defer+madvise"; without a newline, however, "defer" is considered to
+match "defer+madvise" (prefix matching is only comparing the first five
+bytes). End result is that writing "defer" is broken unless it has an
+additional trailing character.
+
+This means that writing "madv" in the past would match and set
+"madvise". With strict checking, that no longer is the case but it is
+unlikely anybody is currently doing this.
+
+Link: http://lkml.kernel.org/r/alpine.DEB.2.21.2001171411020.56385@chino.kir.corp.google.com
+Fixes: 21440d7eb904 ("mm, thp: add new defer+madvise defrag option")
+Signed-off-by: David Rientjes <rientjes@google.com>
+Suggested-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/huge_memory.c | 24 ++++++++----------------
+ 1 file changed, 8 insertions(+), 16 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -173,16 +173,13 @@ static ssize_t enabled_store(struct kobj
+ {
+ ssize_t ret = count;
+
+- if (!memcmp("always", buf,
+- min(sizeof("always")-1, count))) {
++ if (sysfs_streq(buf, "always")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+- } else if (!memcmp("madvise", buf,
+- min(sizeof("madvise")-1, count))) {
++ } else if (sysfs_streq(buf, "madvise")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+- } else if (!memcmp("never", buf,
+- min(sizeof("never")-1, count))) {
++ } else if (sysfs_streq(buf, "never")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ } else
+@@ -246,32 +243,27 @@ static ssize_t defrag_store(struct kobje
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+ {
+- if (!memcmp("always", buf,
+- min(sizeof("always")-1, count))) {
++ if (sysfs_streq(buf, "always")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+- } else if (!memcmp("defer+madvise", buf,
+- min(sizeof("defer+madvise")-1, count))) {
++ } else if (sysfs_streq(buf, "defer+madvise")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+- } else if (!memcmp("defer", buf,
+- min(sizeof("defer")-1, count))) {
++ } else if (sysfs_streq(buf, "defer")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+- } else if (!memcmp("madvise", buf,
+- min(sizeof("madvise")-1, count))) {
++ } else if (sysfs_streq(buf, "madvise")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+- } else if (!memcmp("never", buf,
+- min(sizeof("never")-1, count))) {
++ } else if (sysfs_streq(buf, "never")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
--- /dev/null
+From 78e06cf430934fc3768c342cbebdd1013dcd6fa7 Mon Sep 17 00:00:00 2001
+From: Matteo Croce <mcroce@redhat.com>
+Date: Thu, 30 Jan 2020 20:10:19 +0100
+Subject: netfilter: nf_flowtable: fix documentation
+
+From: Matteo Croce <mcroce@redhat.com>
+
+commit 78e06cf430934fc3768c342cbebdd1013dcd6fa7 upstream.
+
+In the flowtable documentation there is a missing semicolon, the command
+as is would give this error:
+
+ nftables.conf:5:27-33: Error: syntax error, unexpected devices, expecting newline or semicolon
+ hook ingress priority 0 devices = { br0, pppoe-data };
+ ^^^^^^^
+ nftables.conf:4:12-13: Error: invalid hook (null)
+ flowtable ft {
+ ^^
+
+Fixes: 19b351f16fd9 ("netfilter: add flowtable documentation")
+Signed-off-by: Matteo Croce <mcroce@redhat.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/networking/nf_flowtable.txt | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/Documentation/networking/nf_flowtable.txt
++++ b/Documentation/networking/nf_flowtable.txt
+@@ -76,7 +76,7 @@ flowtable and add one rule to your forwa
+
+ table inet x {
+ flowtable f {
+- hook ingress priority 0 devices = { eth0, eth1 };
++ hook ingress priority 0; devices = { eth0, eth1 };
+ }
+ chain y {
+ type filter hook forward priority 0; policy accept;
--- /dev/null
+From cf3e204a1ca5442190018a317d9ec181b4639bd6 Mon Sep 17 00:00:00 2001
+From: Xin Long <lucien.xin@gmail.com>
+Date: Fri, 13 Dec 2019 16:53:05 +0800
+Subject: netfilter: nft_tunnel: no need to call htons() when dumping ports
+
+From: Xin Long <lucien.xin@gmail.com>
+
+commit cf3e204a1ca5442190018a317d9ec181b4639bd6 upstream.
+
+info->key.tp_src and tp_dst are __be16, when using nla_put_be16()
+to dump them, htons() is not needed, so remove it in this patch.
+
+Fixes: af308b94a2a4 ("netfilter: nf_tables: add tunnel support")
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Reviewed-by: Simon Horman <simon.horman@netronome.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/nft_tunnel.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/netfilter/nft_tunnel.c
++++ b/net/netfilter/nft_tunnel.c
+@@ -467,8 +467,8 @@ static int nft_tunnel_opts_dump(struct s
+ static int nft_tunnel_ports_dump(struct sk_buff *skb,
+ struct ip_tunnel_info *info)
+ {
+- if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, htons(info->key.tp_src)) < 0 ||
+- nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, htons(info->key.tp_dst)) < 0)
++ if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
++ nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
+ return -1;
+
+ return 0;
--- /dev/null
+From 38228e8848cd7dd86ccb90406af32de0cad24be3 Mon Sep 17 00:00:00 2001
+From: Daniel Jordan <daniel.m.jordan@oracle.com>
+Date: Tue, 3 Dec 2019 14:31:11 -0500
+Subject: padata: always acquire cpu_hotplug_lock before pinst->lock
+
+From: Daniel Jordan <daniel.m.jordan@oracle.com>
+
+commit 38228e8848cd7dd86ccb90406af32de0cad24be3 upstream.
+
+lockdep complains when padata's paths to update cpumasks via CPU hotplug
+and sysfs are both taken:
+
+ # echo 0 > /sys/devices/system/cpu/cpu1/online
+ # echo ff > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
+
+ ======================================================
+ WARNING: possible circular locking dependency detected
+ 5.4.0-rc8-padata-cpuhp-v3+ #1 Not tainted
+ ------------------------------------------------------
+ bash/205 is trying to acquire lock:
+ ffffffff8286bcd0 (cpu_hotplug_lock.rw_sem){++++}, at: padata_set_cpumask+0x2b/0x120
+
+ but task is already holding lock:
+ ffff8880001abfa0 (&pinst->lock){+.+.}, at: padata_set_cpumask+0x26/0x120
+
+ which lock already depends on the new lock.
+
+padata doesn't take cpu_hotplug_lock and pinst->lock in a consistent
+order. Which should be first? CPU hotplug calls into padata with
+cpu_hotplug_lock already held, so it should have priority.
+
+Fixes: 6751fb3c0e0c ("padata: Use get_online_cpus/put_online_cpus")
+Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Cc: Eric Biggers <ebiggers@kernel.org>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Steffen Klassert <steffen.klassert@secunet.com>
+Cc: linux-crypto@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/padata.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -671,8 +671,8 @@ int padata_set_cpumask(struct padata_ins
+ struct cpumask *serial_mask, *parallel_mask;
+ int err = -EINVAL;
+
+- mutex_lock(&pinst->lock);
+ get_online_cpus();
++ mutex_lock(&pinst->lock);
+
+ switch (cpumask_type) {
+ case PADATA_CPU_PARALLEL:
+@@ -690,8 +690,8 @@ int padata_set_cpumask(struct padata_ins
+ err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);
+
+ out:
+- put_online_cpus();
+ mutex_unlock(&pinst->lock);
++ put_online_cpus();
+
+ return err;
+ }
--- /dev/null
+From 3f7774033e6820d25beee5cf7aefa11d4968b951 Mon Sep 17 00:00:00 2001
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+Date: Mon, 16 Dec 2019 13:22:33 -0300
+Subject: perf hists browser: Restore ESC as "Zoom out" of DSO/thread/etc
+
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+
+commit 3f7774033e6820d25beee5cf7aefa11d4968b951 upstream.
+
+We need to set actions->ms.map since 599a2f38a989 ("perf hists browser:
+Check sort keys before hot key actions"), as in that patch we bail out
+if map is NULL.
+
+Reviewed-by: Jiri Olsa <jolsa@kernel.org>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Fixes: 599a2f38a989 ("perf hists browser: Check sort keys before hot key actions")
+Link: https://lkml.kernel.org/n/tip-wp1ssoewy6zihwwexqpohv0j@git.kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/ui/browsers/hists.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/tools/perf/ui/browsers/hists.c
++++ b/tools/perf/ui/browsers/hists.c
+@@ -2931,6 +2931,7 @@ static int perf_evsel__hists_browse(stru
+
+ continue;
+ }
++ actions->ms.map = map;
+ top = pstack__peek(browser->pstack);
+ if (top == &browser->hists->dso_filter) {
+ /*
--- /dev/null
+From c7cb3a1dd53f63c64fb2b567d0be130b92a44d91 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= <u.kleine-koenig@pengutronix.de>
+Date: Mon, 11 Nov 2019 10:03:56 +0100
+Subject: pwm: omap-dmtimer: put_device() after of_find_device_by_node()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+commit c7cb3a1dd53f63c64fb2b567d0be130b92a44d91 upstream.
+
+This was found by coccicheck:
+
+ drivers/pwm/pwm-omap-dmtimer.c:304:2-8: ERROR: missing put_device;
+ call of_find_device_by_node on line 255, but without a corresponding
+ object release within this function.
+
+Reported-by: Markus Elfring <elfring@users.sourceforge.net>
+Fixes: 6604c6556db9 ("pwm: Add PWM driver for OMAP using dual-mode timers")
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Signed-off-by: Thierry Reding <thierry.reding@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pwm/pwm-omap-dmtimer.c | 21 +++++++++++++++------
+ 1 file changed, 15 insertions(+), 6 deletions(-)
+
+--- a/drivers/pwm/pwm-omap-dmtimer.c
++++ b/drivers/pwm/pwm-omap-dmtimer.c
+@@ -259,7 +259,7 @@ static int pwm_omap_dmtimer_probe(struct
+ if (!timer_pdev) {
+ dev_err(&pdev->dev, "Unable to find Timer pdev\n");
+ ret = -ENODEV;
+- goto put;
++ goto err_find_timer_pdev;
+ }
+
+ timer_pdata = dev_get_platdata(&timer_pdev->dev);
+@@ -267,7 +267,7 @@ static int pwm_omap_dmtimer_probe(struct
+ dev_dbg(&pdev->dev,
+ "dmtimer pdata structure NULL, deferring probe\n");
+ ret = -EPROBE_DEFER;
+- goto put;
++ goto err_platdata;
+ }
+
+ pdata = timer_pdata->timer_ops;
+@@ -286,19 +286,19 @@ static int pwm_omap_dmtimer_probe(struct
+ !pdata->write_counter) {
+ dev_err(&pdev->dev, "Incomplete dmtimer pdata structure\n");
+ ret = -EINVAL;
+- goto put;
++ goto err_platdata;
+ }
+
+ if (!of_get_property(timer, "ti,timer-pwm", NULL)) {
+ dev_err(&pdev->dev, "Missing ti,timer-pwm capability\n");
+ ret = -ENODEV;
+- goto put;
++ goto err_timer_property;
+ }
+
+ dm_timer = pdata->request_by_node(timer);
+ if (!dm_timer) {
+ ret = -EPROBE_DEFER;
+- goto put;
++ goto err_request_timer;
+ }
+
+ omap = devm_kzalloc(&pdev->dev, sizeof(*omap), GFP_KERNEL);
+@@ -355,7 +355,14 @@ err_pwmchip_add:
+ err_alloc_omap:
+
+ pdata->free(dm_timer);
+-put:
++err_request_timer:
++
++err_timer_property:
++err_platdata:
++
++ put_device(&timer_pdev->dev);
++err_find_timer_pdev:
++
+ of_node_put(timer);
+
+ return ret;
+@@ -375,6 +382,8 @@ static int pwm_omap_dmtimer_remove(struc
+
+ omap->pdata->free(omap->dm_timer);
+
++ put_device(&omap->dm_timer_pdev->dev);
++
+ mutex_destroy(&omap->mutex);
+
+ return 0;
sched-fair-fix-o-nr_cgroups-in-the-load-balancing-path.patch
perf-stat-use-perf_evsel__is_clocki-for-clock-events.patch
perf-stat-fix-shadow-stats-for-clock-events.patch
+drivers-net-xgene-fix-the-order-of-the-arguments-of-alloc_etherdev_mqs.patch
+kprobes-set-unoptimized-flag-after-unoptimizing-code.patch
+pwm-omap-dmtimer-put_device-after-of_find_device_by_node.patch
+perf-hists-browser-restore-esc-as-zoom-out-of-dso-thread-etc.patch
+kvm-x86-remove-spurious-kvm_mmu_unload-from-vcpu-destruction-path.patch
+kvm-x86-remove-spurious-clearing-of-async-pf-msr.patch
+thermal-brcmstb_thermal-do-not-use-dt-coefficients.patch
+netfilter-nft_tunnel-no-need-to-call-htons-when-dumping-ports.patch
+netfilter-nf_flowtable-fix-documentation.patch
+padata-always-acquire-cpu_hotplug_lock-before-pinst-lock.patch
+mm-huge_memory.c-use-head-to-check-huge-zero-page.patch
+mm-thp-fix-defrag-setting-if-newline-is-not-used.patch
--- /dev/null
+From e1ff6fc22f19e2af8adbad618526b80067911d40 Mon Sep 17 00:00:00 2001
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Tue, 14 Jan 2020 11:06:02 -0800
+Subject: thermal: brcmstb_thermal: Do not use DT coefficients
+
+From: Florian Fainelli <f.fainelli@gmail.com>
+
+commit e1ff6fc22f19e2af8adbad618526b80067911d40 upstream.
+
+At the time the brcmstb_thermal driver and its binding were merged, the
+DT binding did not make the coefficients properties a mandatory one,
+therefore all users of the brcmstb_thermal driver out there have a non
+functional implementation with zero coefficients. Even if these
+properties were provided, the formula used for computation is incorrect.
+
+The coefficients are entirely process specific (right now, only 28nm is
+supported) and not board or SoC specific, it is therefore appropriate to
+hard code them in the driver given the compatibility string we are
+probed with which has to be updated whenever a new process is
+introduced.
+
+We remove the existing coefficients definition since subsequent patches
+are going to add support for a new process and will introduce new
+coefficients as well.
+
+Fixes: 9e03cf1b2dd5 ("thermal: add brcmstb AVS TMON driver")
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+Reviewed-by: Amit Kucheria <amit.kucheria@linaro.org>
+Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
+Link: https://lore.kernel.org/r/20200114190607.29339-2-f.fainelli@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/thermal/broadcom/brcmstb_thermal.c | 31 ++++++++---------------------
+ 1 file changed, 9 insertions(+), 22 deletions(-)
+
+--- a/drivers/thermal/broadcom/brcmstb_thermal.c
++++ b/drivers/thermal/broadcom/brcmstb_thermal.c
+@@ -58,7 +58,7 @@
+ #define AVS_TMON_TP_TEST_ENABLE 0x20
+
+ /* Default coefficients */
+-#define AVS_TMON_TEMP_SLOPE -487
++#define AVS_TMON_TEMP_SLOPE 487
+ #define AVS_TMON_TEMP_OFFSET 410040
+
+ /* HW related temperature constants */
+@@ -117,23 +117,12 @@ struct brcmstb_thermal_priv {
+ struct thermal_zone_device *thermal;
+ };
+
+-static void avs_tmon_get_coeffs(struct thermal_zone_device *tz, int *slope,
+- int *offset)
+-{
+- *slope = thermal_zone_get_slope(tz);
+- *offset = thermal_zone_get_offset(tz);
+-}
+-
+ /* Convert a HW code to a temperature reading (millidegree celsius) */
+ static inline int avs_tmon_code_to_temp(struct thermal_zone_device *tz,
+ u32 code)
+ {
+- const int val = code & AVS_TMON_TEMP_MASK;
+- int slope, offset;
+-
+- avs_tmon_get_coeffs(tz, &slope, &offset);
+-
+- return slope * val + offset;
++ return (AVS_TMON_TEMP_OFFSET -
++ (int)((code & AVS_TMON_TEMP_MAX) * AVS_TMON_TEMP_SLOPE));
+ }
+
+ /*
+@@ -145,20 +134,18 @@ static inline int avs_tmon_code_to_temp(
+ static inline u32 avs_tmon_temp_to_code(struct thermal_zone_device *tz,
+ int temp, bool low)
+ {
+- int slope, offset;
+-
+ if (temp < AVS_TMON_TEMP_MIN)
+- return AVS_TMON_TEMP_MAX; /* Maximum code value */
+-
+- avs_tmon_get_coeffs(tz, &slope, &offset);
++ return AVS_TMON_TEMP_MAX; /* Maximum code value */
+
+- if (temp >= offset)
++ if (temp >= AVS_TMON_TEMP_OFFSET)
+ return 0; /* Minimum code value */
+
+ if (low)
+- return (u32)(DIV_ROUND_UP(offset - temp, abs(slope)));
++ return (u32)(DIV_ROUND_UP(AVS_TMON_TEMP_OFFSET - temp,
++ AVS_TMON_TEMP_SLOPE));
+ else
+- return (u32)((offset - temp) / abs(slope));
++ return (u32)((AVS_TMON_TEMP_OFFSET - temp) /
++ AVS_TMON_TEMP_SLOPE);
+ }
+
+ static int brcmstb_get_temp(void *data, int *temp)