--- /dev/null
+From 9e25ebfe56ece7541cd10a20d715cbdd148a2e06 Mon Sep 17 00:00:00 2001
+From: Doug Berger <opendmb@gmail.com>
+Date: Thu, 29 Jun 2017 18:41:36 +0100
+Subject: ARM: 8685/1: ensure memblock-limit is pmd-aligned
+
+From: Doug Berger <opendmb@gmail.com>
+
+commit 9e25ebfe56ece7541cd10a20d715cbdd148a2e06 upstream.
+
+The pmd containing memblock_limit is cleared by prepare_page_table(),
+which creates the opportunity for early_alloc() to allocate unmapped
+memory if memblock_limit is not pmd-aligned, causing a boot-time hang.
+
+Commit 965278dcb8ab ("ARM: 8356/1: mm: handle non-pmd-aligned end of RAM")
+attempted to resolve this problem, but there is a path through the
+adjust_lowmem_bounds() routine where, if all memory regions start and
+end on pmd-aligned addresses, memblock_limit is set to
+arm_lowmem_limit.
+
+Since arm_lowmem_limit can be affected by the vmalloc early parameter,
+the value of arm_lowmem_limit may not be pmd-aligned. This commit
+corrects this oversight such that memblock_limit is always rounded
+down to pmd-alignment.
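+
+As a rough illustration (the values here are hypothetical, not from the
+report): with 2 MiB pmds, a vmalloc= boundary can leave arm_lowmem_limit
+only 1 MiB-aligned, and rounding down picks the start of the last fully
+mapped pmd:
+
+  /* sketch only; round_down() is simplified from the kernel macro */
+  #define PMD_SIZE 0x200000UL                  /* 2 MiB sections  */
+  #define round_down(x, y) ((x) & ~((y) - 1))  /* y: power of two */
+
+  unsigned long memblock_limit = 0x2f700000UL; /* 1 MiB-aligned   */
+  memblock_limit = round_down(memblock_limit, PMD_SIZE);
+  /* memblock_limit == 0x2f600000, a pmd-aligned allocation limit */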
+
+Fixes: 965278dcb8ab ("ARM: 8356/1: mm: handle non-pmd-aligned end of RAM")
+Signed-off-by: Doug Berger <opendmb@gmail.com>
+Suggested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mm/mmu.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -1216,15 +1216,15 @@ void __init adjust_lowmem_bounds(void)
+
+ high_memory = __va(arm_lowmem_limit - 1) + 1;
+
++ if (!memblock_limit)
++ memblock_limit = arm_lowmem_limit;
++
+ /*
+ * Round the memblock limit down to a pmd size. This
+ * helps to ensure that we will allocate memory from the
+ * last full pmd, which should be mapped.
+ */
+- if (memblock_limit)
+- memblock_limit = round_down(memblock_limit, PMD_SIZE);
+- if (!memblock_limit)
+- memblock_limit = arm_lowmem_limit;
++ memblock_limit = round_down(memblock_limit, PMD_SIZE);
+
+ if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
+ if (memblock_end_of_DRAM() > arm_lowmem_limit) {
--- /dev/null
+From 95d7c1f18bf8ac03b0fc48eac1f1b11f867765b8 Mon Sep 17 00:00:00 2001
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Date: Sat, 13 May 2017 13:40:20 +0200
+Subject: ARM: davinci: PM: Do not free useful resources in normal path in 'davinci_pm_init'
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+commit 95d7c1f18bf8ac03b0fc48eac1f1b11f867765b8 upstream.
+
+It is wrong to iounmap resources in the normal path of davinci_pm_init().
+
+The 3 ioremap'ed fields of 'pm_config' can be accessed later on in other
+functions, so we should return 'success' instead of unrolling everything.
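+
+A minimal, hypothetical sketch of the return-before-unwind idiom the fix
+restores (names and addresses are illustrative, not the driver's):
+
+  static void __iomem *base_a, *base_b;
+
+  int __init example_init(void)
+  {
+          int ret;
+
+          base_a = ioremap(0x01c40000, SZ_4K);
+          if (!base_a)
+                  return -ENOMEM;
+
+          base_b = ioremap(0x01c41000, SZ_4K);
+          if (!base_b) {
+                  ret = -ENOMEM;
+                  goto err_unmap_a;
+          }
+
+          return 0;       /* success: keep the mappings for later use */
+
+  err_unmap_a:
+          iounmap(base_a);
+          return ret;
+  }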
+
+Fixes: aa9aa1ec2df6 ("ARM: davinci: PM: rework init, remove platform device")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+[nsekhar@ti.com: commit message and minor style fixes]
+Signed-off-by: Sekhar Nori <nsekhar@ti.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-davinci/pm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm/mach-davinci/pm.c
++++ b/arch/arm/mach-davinci/pm.c
+@@ -163,6 +163,8 @@ int __init davinci_pm_init(void)
+
+ suspend_set_ops(&davinci_pm_ops);
+
++ return 0;
++
+ no_sram_mem:
+ iounmap(pm_config.ddrpsc_reg_base);
+ no_ddrpsc_mem:
--- /dev/null
+From f3f6cc814f9cb61cfb738af2b126a8bf19e5ab4c Mon Sep 17 00:00:00 2001
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Date: Sat, 13 May 2017 13:40:05 +0200
+Subject: ARM: davinci: PM: Free resources in error handling path in 'davinci_pm_init'
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+commit f3f6cc814f9cb61cfb738af2b126a8bf19e5ab4c upstream.
+
+If 'sram_alloc' fails, we need to free already allocated resources.
+
+Fixes: aa9aa1ec2df6 ("ARM: davinci: PM: rework init, remove platform device")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: Sekhar Nori <nsekhar@ti.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-davinci/pm.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/mach-davinci/pm.c
++++ b/arch/arm/mach-davinci/pm.c
+@@ -154,7 +154,8 @@ int __init davinci_pm_init(void)
+ davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL);
+ if (!davinci_sram_suspend) {
+ pr_err("PM: cannot allocate SRAM memory\n");
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto no_sram_mem;
+ }
+
+ davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend,
+@@ -162,6 +163,8 @@ int __init davinci_pm_init(void)
+
+ suspend_set_ops(&davinci_pm_ops);
+
++no_sram_mem:
++ iounmap(pm_config.ddrpsc_reg_base);
+ no_ddrpsc_mem:
+ iounmap(pm_config.ddrpll_reg_base);
+ no_ddrpll_mem:
--- /dev/null
+From cb7cf772d83d2d4e6995c5bb9e0fb59aea8f7080 Mon Sep 17 00:00:00 2001
+From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Date: Fri, 26 May 2017 17:40:02 +0100
+Subject: ARM64/ACPI: Fix BAD_MADT_GICC_ENTRY() macro implementation
+
+From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+
+commit cb7cf772d83d2d4e6995c5bb9e0fb59aea8f7080 upstream.
+
+The BAD_MADT_GICC_ENTRY() macro checks whether a GICC MADT entry passes
+muster from an ACPI specification standpoint. The current macro detects
+the MADT GICC entry length through the ACPI firmware version (it changed
+from 76 to 80 bytes in the transition from ACPI 5.1 to ACPI 6.0), but it
+erroneously always uses the length of the latest ACPICA struct (ie
+struct acpi_madt_generic_interrupt, which is 80 bytes long) to check
+whether the current GICC entry record exceeds the MADT table end in
+memory, as defined by the MADT table header itself. This may result in
+false negatives depending on the ACPI firmware version and how the MADT
+entries are laid out in memory: on ACPI 5.1 firmware, MADT GICC entries
+are 76 bytes long, so adding 80 to a GICC entry start address may well
+yield an address past the actual MADT end, triggering a false negative.
+
+Fix the BAD_MADT_GICC_ENTRY() macro by reshuffling the condition checks
+and updating them to always use the firmware-version-specific MADT GICC
+entry length to carry out the boundary checks.
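+
+A worked illustration (addresses are hypothetical): suppose an ACPI 5.1
+MADT ends at 0x1000 and its last 76-byte GICC entry starts at 0xfb4:
+
+  unsigned long end   = 0x1000;   /* MADT end, per the table header */
+  unsigned long entry = 0x0fb4;   /* start of the last GICC entry   */
+
+  int old_bad = entry + 80 > end; /* 1: 0x1004 > 0x1000, the valid
+                                     last entry is wrongly rejected */
+  int new_bad = entry + 76 > end; /* 0: 0x1000 > 0x1000 is false    */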
+
+Fixes: b6cfb277378e ("ACPI / ARM64: add BAD_MADT_GICC_ENTRY() macro")
+Reported-by: Julien Grall <julien.grall@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Cc: Julien Grall <julien.grall@arm.com>
+Cc: Hanjun Guo <hanjun.guo@linaro.org>
+Cc: Al Stone <ahs3@redhat.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/acpi.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/include/asm/acpi.h
++++ b/arch/arm64/include/asm/acpi.h
+@@ -23,9 +23,9 @@
+ #define ACPI_MADT_GICC_LENGTH \
+ (acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
+
+-#define BAD_MADT_GICC_ENTRY(entry, end) \
+- (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \
+- (entry)->header.length != ACPI_MADT_GICC_LENGTH)
++#define BAD_MADT_GICC_ENTRY(entry, end) \
++ (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \
++ (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
+
+ /* Basic configuration for ACPI */
+ #ifdef CONFIG_ACPI
--- /dev/null
+From 717902cc93118119a6fce7765da6cf2786987418 Mon Sep 17 00:00:00 2001
+From: Timmy Li <lixiaoping3@huawei.com>
+Date: Mon, 22 May 2017 16:48:28 +0100
+Subject: ARM64: PCI: Fix struct acpi_pci_root_ops allocation failure path
+
+From: Timmy Li <lixiaoping3@huawei.com>
+
+commit 717902cc93118119a6fce7765da6cf2786987418 upstream.
+
+Commit 093d24a20442 ("arm64: PCI: Manage controller-specific data on
+per-controller basis") added code to allocate ACPI PCI root_ops
+dynamically on a per host bridge basis but failed to update the
+corresponding memory allocation failure path in pci_acpi_scan_root(),
+leading to a potential memory leak.
+
+Fix it by adding the required kfree call.
+
+Fixes: 093d24a20442 ("arm64: PCI: Manage controller-specific data on per-controller basis")
+Reviewed-by: Tomasz Nowicki <tn@semihalf.com>
+Signed-off-by: Timmy Li <lixiaoping3@huawei.com>
+[lorenzo.pieralisi@arm.com: refactored code, rewrote commit log]
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+CC: Will Deacon <will.deacon@arm.com>
+CC: Bjorn Helgaas <bhelgaas@google.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/pci.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/pci.c
++++ b/arch/arm64/kernel/pci.c
+@@ -191,8 +191,10 @@ struct pci_bus *pci_acpi_scan_root(struc
+ return NULL;
+
+ root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
+- if (!root_ops)
++ if (!root_ops) {
++ kfree(ri);
+ return NULL;
++ }
+
+ ri->cfg = pci_acpi_setup_ecam_mapping(root);
+ if (!ri->cfg) {
--- /dev/null
+From 8818efaaacb78c60a9d90c5705b6c99b75d7d442 Mon Sep 17 00:00:00 2001
+From: Eric Ren <zren@suse.com>
+Date: Fri, 23 Jun 2017 15:08:55 -0700
+Subject: ocfs2: fix deadlock caused by recursive locking in xattr
+
+From: Eric Ren <zren@suse.com>
+
+commit 8818efaaacb78c60a9d90c5705b6c99b75d7d442 upstream.
+
+Another deadlock path caused by recursive locking has been reported.
+This kind of issue was introduced by commit 743b5f1434f5 ("ocfs2: take
+inode lock in ocfs2_iop_set/get_acl()"). Two deadlock paths have
+already been fixed by commit b891fa5024a9 ("ocfs2: fix deadlock issue
+when taking inode lock at vfs entry points"). We intend to fix this
+kind of case incrementally, because it is hard to find all possible
+paths at once.
+
+This one can be reproduced as follows: on node1, cp a large file from
+the home directory to the ocfs2 mountpoint; meanwhile, on node2, run
+setfacl/getfacl. Both nodes will hang. The backtraces:
+
+On node1:
+ __ocfs2_cluster_lock.isra.39+0x357/0x740 [ocfs2]
+ ocfs2_inode_lock_full_nested+0x17d/0x840 [ocfs2]
+ ocfs2_write_begin+0x43/0x1a0 [ocfs2]
+ generic_perform_write+0xa9/0x180
+ __generic_file_write_iter+0x1aa/0x1d0
+ ocfs2_file_write_iter+0x4f4/0xb40 [ocfs2]
+ __vfs_write+0xc3/0x130
+ vfs_write+0xb1/0x1a0
+ SyS_write+0x46/0xa0
+
+On node2:
+ __ocfs2_cluster_lock.isra.39+0x357/0x740 [ocfs2]
+ ocfs2_inode_lock_full_nested+0x17d/0x840 [ocfs2]
+ ocfs2_xattr_set+0x12e/0xe80 [ocfs2]
+ ocfs2_set_acl+0x22d/0x260 [ocfs2]
+ ocfs2_iop_set_acl+0x65/0xb0 [ocfs2]
+ set_posix_acl+0x75/0xb0
+ posix_acl_xattr_set+0x49/0xa0
+ __vfs_setxattr+0x69/0x80
+ __vfs_setxattr_noperm+0x72/0x1a0
+ vfs_setxattr+0xa7/0xb0
+ setxattr+0x12d/0x190
+ path_setxattr+0x9f/0xb0
+ SyS_setxattr+0x14/0x20
+
+Fix this one by using ocfs2_inode_{lock|unlock}_tracker, which is
+exported by commit 439a36b8ef38 ("ocfs2/dlmglue: prepare tracking logic
+to avoid recursive cluster lock").
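+
+A condensed sketch of the pattern (taken from the hunks below, with the
+surrounding code elided):
+
+  struct ocfs2_lock_holder oh;
+  int had_lock;
+
+  had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh);
+  if (had_lock < 0)               /* a real locking failure          */
+          return had_lock;
+
+  /* ... xattr work; had_lock == 1 means we already held the lock    */
+
+  /* unlocks only if the lock was taken above (had_lock == 0);
+   * a recursive holder leaves it for the outer call site
+   */
+  ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);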
+
+Link: http://lkml.kernel.org/r/20170622014746.5815-1-zren@suse.com
+Fixes: 743b5f1434f5 ("ocfs2: take inode lock in ocfs2_iop_set/get_acl()")
+Signed-off-by: Eric Ren <zren@suse.com>
+Reported-by: Thomas Voegtle <tv@lio96.de>
+Tested-by: Thomas Voegtle <tv@lio96.de>
+Reviewed-by: Joseph Qi <jiangqi903@gmail.com>
+Cc: Mark Fasheh <mfasheh@versity.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ocfs2/dlmglue.c | 4 ++++
+ fs/ocfs2/xattr.c | 23 +++++++++++++----------
+ 2 files changed, 17 insertions(+), 10 deletions(-)
+
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -2591,6 +2591,10 @@ void ocfs2_inode_unlock_tracker(struct i
+ struct ocfs2_lock_res *lockres;
+
+ lockres = &OCFS2_I(inode)->ip_inode_lockres;
++ /* had_lock means that the current process already takes the cluster
++ * lock previously. If had_lock is 1, we have nothing to do here, and
++ * it will get unlocked where we got the lock.
++ */
+ if (!had_lock) {
+ ocfs2_remove_holder(lockres, oh);
+ ocfs2_inode_unlock(inode, ex);
+--- a/fs/ocfs2/xattr.c
++++ b/fs/ocfs2/xattr.c
+@@ -1328,20 +1328,21 @@ static int ocfs2_xattr_get(struct inode
+ void *buffer,
+ size_t buffer_size)
+ {
+- int ret;
++ int ret, had_lock;
+ struct buffer_head *di_bh = NULL;
++ struct ocfs2_lock_holder oh;
+
+- ret = ocfs2_inode_lock(inode, &di_bh, 0);
+- if (ret < 0) {
+- mlog_errno(ret);
+- return ret;
++ had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 0, &oh);
++ if (had_lock < 0) {
++ mlog_errno(had_lock);
++ return had_lock;
+ }
+ down_read(&OCFS2_I(inode)->ip_xattr_sem);
+ ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
+ name, buffer, buffer_size);
+ up_read(&OCFS2_I(inode)->ip_xattr_sem);
+
+- ocfs2_inode_unlock(inode, 0);
++ ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
+
+ brelse(di_bh);
+
+@@ -3537,11 +3538,12 @@ int ocfs2_xattr_set(struct inode *inode,
+ {
+ struct buffer_head *di_bh = NULL;
+ struct ocfs2_dinode *di;
+- int ret, credits, ref_meta = 0, ref_credits = 0;
++ int ret, credits, had_lock, ref_meta = 0, ref_credits = 0;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct inode *tl_inode = osb->osb_tl_inode;
+ struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, };
+ struct ocfs2_refcount_tree *ref_tree = NULL;
++ struct ocfs2_lock_holder oh;
+
+ struct ocfs2_xattr_info xi = {
+ .xi_name_index = name_index,
+@@ -3572,8 +3574,9 @@ int ocfs2_xattr_set(struct inode *inode,
+ return -ENOMEM;
+ }
+
+- ret = ocfs2_inode_lock(inode, &di_bh, 1);
+- if (ret < 0) {
++ had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh);
++ if (had_lock < 0) {
++ ret = had_lock;
+ mlog_errno(ret);
+ goto cleanup_nolock;
+ }
+@@ -3670,7 +3673,7 @@ cleanup:
+ if (ret)
+ mlog_errno(ret);
+ }
+- ocfs2_inode_unlock(inode, 1);
++ ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
+ cleanup_nolock:
+ brelse(di_bh);
+ brelse(xbs.xattr_bh);
--- /dev/null
+From 33496c3c3d7b88dcbe5e55aa01288b05646c6aca Mon Sep 17 00:00:00 2001
+From: Junxiao Bi <junxiao.bi@oracle.com>
+Date: Wed, 3 May 2017 14:51:41 -0700
+Subject: ocfs2: o2hb: revert hb threshold to keep compatible
+
+From: Junxiao Bi <junxiao.bi@oracle.com>
+
+commit 33496c3c3d7b88dcbe5e55aa01288b05646c6aca upstream.
+
+Configfs is the interface ocfs2-tools uses to pass configuration to the
+kernel, and $configfs_dir/cluster/$clustername/heartbeat/dead_threshold
+is the file used to configure the heartbeat dead threshold. The kernel
+has a default value for it, but the user can set O2CB_HEARTBEAT_THRESHOLD
+in /etc/sysconfig/o2cb to override it.
+
+Commit 45b997737a80 ("ocfs2/cluster: use per-attribute show and store
+methods") changed the heartbeat dead threshold attribute name while
+ocfs2-tools did not, so ocfs2-tools can no longer set this tunable and
+the default value is always used. So revert it.
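+
+The configfs file name comes from the attribute suffix passed to
+CONFIGFS_ATTR(); a simplified sketch of the expansion (condensed from
+the real macro, which also sets mode and owner):
+
+  #define CONFIGFS_ATTR(_pfx, _name)                             \
+  static struct configfs_attribute _pfx##attr_##_name = {        \
+          .ca_name = __stringify(_name),  /* visible file name */\
+          .show    = _pfx##_name##_show,                         \
+          .store   = _pfx##_name##_store,                        \
+  }
+
+  /* CONFIGFS_ATTR(o2hb_heartbeat_group_, threshold) exposed a file
+   * named "threshold"; reverting to dead_threshold restores the
+   * "dead_threshold" file that ocfs2-tools writes.
+   */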
+
+Fixes: 45b997737a80 ("ocfs2/cluster: use per-attribute show and store methods")
+Link: http://lkml.kernel.org/r/1490665245-15374-1-git-send-email-junxiao.bi@oracle.com
+Signed-off-by: Junxiao Bi <junxiao.bi@oracle.com>
+Acked-by: Joseph Qi <jiangqi903@gmail.com>
+Cc: Mark Fasheh <mfasheh@versity.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ocfs2/cluster/heartbeat.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/fs/ocfs2/cluster/heartbeat.c
++++ b/fs/ocfs2/cluster/heartbeat.c
+@@ -2242,13 +2242,13 @@ unlock:
+ spin_unlock(&o2hb_live_lock);
+ }
+
+-static ssize_t o2hb_heartbeat_group_threshold_show(struct config_item *item,
++static ssize_t o2hb_heartbeat_group_dead_threshold_show(struct config_item *item,
+ char *page)
+ {
+ return sprintf(page, "%u\n", o2hb_dead_threshold);
+ }
+
+-static ssize_t o2hb_heartbeat_group_threshold_store(struct config_item *item,
++static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *item,
+ const char *page, size_t count)
+ {
+ unsigned long tmp;
+@@ -2297,11 +2297,11 @@ static ssize_t o2hb_heartbeat_group_mode
+
+ }
+
+-CONFIGFS_ATTR(o2hb_heartbeat_group_, threshold);
++CONFIGFS_ATTR(o2hb_heartbeat_group_, dead_threshold);
+ CONFIGFS_ATTR(o2hb_heartbeat_group_, mode);
+
+ static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = {
+- &o2hb_heartbeat_group_attr_threshold,
++ &o2hb_heartbeat_group_attr_dead_threshold,
+ &o2hb_heartbeat_group_attr_mode,
+ NULL,
+ };
--- /dev/null
+From fd583ad1563bec5f00140e1f2444adbcd331caad Mon Sep 17 00:00:00 2001
+From: Kan Liang <kan.liang@intel.com>
+Date: Tue, 4 Apr 2017 15:14:06 -0400
+Subject: perf/x86: Fix spurious NMI with PEBS Load Latency event
+
+From: Kan Liang <kan.liang@intel.com>
+
+commit fd583ad1563bec5f00140e1f2444adbcd331caad upstream.
+
+Spurious NMIs will be observed with the following command:
+
+ while :; do
+ perf record -bae "cpu/umask=0x01,event=0xcd,ldlat=0x80/pp"
+ -e "cpu/umask=0x03,event=0x0/"
+ -e "cpu/umask=0x02,event=0x0/"
+ -e cycles,branches,cache-misses
+ -e cache-references -- sleep 10
+ done
+
+The bug was introduced by commit:
+
+ 8077eca079a2 ("perf/x86/pebs: Add workaround for broken OVFL status on HSW+")
+
+That commit clears the status bits for the counters used for PEBS
+events by masking with the whole 64 bits of pebs_enabled. However, only
+the low 32 bits of both status and pebs_enabled are reserved for
+PEBS-able counters.
+
+In status, bits 32-34 are the fixed counter overflow bits. In
+pebs_enabled, bits 32-34 are for PEBS Load Latency.
+
+In the test case, the PEBS Load Latency event and a fixed counter event
+could overflow at the same time, and the fixed counter overflow bit
+would be cleared by mistake. Once cleared, the fixed counter overflow
+is never processed, which finally triggers a spurious NMI.
+
+Correct the PEBS enabled mask by ignoring the non-PEBS bits.
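+
+A bit-level sketch of the fix (the set bits are illustrative):
+
+  u64 pebs_enabled = (1ULL << 32) | (1ULL << 0); /* LL cfg + GP ctr 0 */
+  u64 status       = (1ULL << 32) | (1ULL << 0); /* fixed 0 + GP 0    */
+  u64 mask         = (1ULL << 8) - 1;            /* PEBS_COUNTER_MASK */
+
+  u64 old_status = status & ~pebs_enabled;
+  /* old_status == 0: the fixed counter overflow (bit 32) is lost     */
+
+  u64 new_status = status & ~(pebs_enabled & mask);
+  /* new_status == 1ULL << 32: the fixed counter overflow survives    */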
+
+Signed-off-by: Kan Liang <kan.liang@intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Fixes: 8077eca079a2 ("perf/x86/pebs: Add workaround for broken OVFL status on HSW+")
+Link: http://lkml.kernel.org/r/1491333246-3965-1-git-send-email-kan.liang@intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/intel/core.c | 2 +-
+ arch/x86/events/intel/ds.c | 2 +-
+ arch/x86/events/perf_event.h | 1 +
+ 3 files changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2130,7 +2130,7 @@ again:
+ * counters from the GLOBAL_STATUS mask and we always process PEBS
+ * events via drain_pebs().
+ */
+- status &= ~cpuc->pebs_enabled;
++ status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
+
+ /*
+ * PEBS overflow sets bit 62 in the global status register
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -1222,7 +1222,7 @@ get_next_pebs_record_by_bit(void *base,
+
+ /* clear non-PEBS bit and re-check */
+ pebs_status = p->status & cpuc->pebs_enabled;
+- pebs_status &= (1ULL << MAX_PEBS_EVENTS) - 1;
++ pebs_status &= PEBS_COUNTER_MASK;
+ if (pebs_status == (1 << bit))
+ return at;
+ }
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -79,6 +79,7 @@ struct amd_nb {
+
+ /* The maximal number of PEBS events: */
+ #define MAX_PEBS_EVENTS 8
++#define PEBS_COUNTER_MASK ((1ULL << MAX_PEBS_EVENTS) - 1)
+
+ /*
+ * Flags PEBS can handle without an PMI.
--- /dev/null
+From ebd574994c63164d538a197172157318f58ac647 Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Tue, 23 May 2017 10:37:29 -0500
+Subject: Revert "x86/entry: Fix the end of the stack for newly forked tasks"
+
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+
+commit ebd574994c63164d538a197172157318f58ac647 upstream.
+
+Petr Mladek reported the following warning when loading the livepatch
+sample module:
+
+ WARNING: CPU: 1 PID: 3699 at arch/x86/kernel/stacktrace.c:132 save_stack_trace_tsk_reliable+0x133/0x1a0
+ ...
+ Call Trace:
+ __schedule+0x273/0x820
+ schedule+0x36/0x80
+ kthreadd+0x305/0x310
+ ? kthread_create_on_cpu+0x80/0x80
+ ? icmp_echo.part.32+0x50/0x50
+ ret_from_fork+0x2c/0x40
+
+That warning means the end of the stack is no longer recognized as such
+for newly forked tasks. The problem was introduced with the following
+commit:
+
+ ff3f7e2475bb ("x86/entry: Fix the end of the stack for newly forked tasks")
+
+... which was completely misguided. It only partially fixed the
+reported issue, and it introduced another bug in the process. None of
+the other entry code saves the frame pointer before calling into C code,
+so it doesn't make sense for ret_from_fork to do so either.
+
+Contrary to what I originally thought, the original issue wasn't related
+to newly forked tasks. It was actually related to ftrace. When entry
+code calls into a function which then calls into an ftrace handler, the
+stack frame looks different than normal.
+
+The original issue will be fixed in the unwinder, in a subsequent patch.
+
+Reported-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Dave Jones <davej@codemonkey.org.uk>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: live-patching@vger.kernel.org
+Fixes: ff3f7e2475bb ("x86/entry: Fix the end of the stack for newly forked tasks")
+Link: http://lkml.kernel.org/r/f350760f7e82f0750c8d1dd093456eb212751caa.1495553739.git.jpoimboe@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/entry/entry_32.S | 30 +++++++++++++++++++-----------
+ arch/x86/entry/entry_64.S | 11 ++++-------
+ 2 files changed, 23 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -255,6 +255,23 @@ ENTRY(__switch_to_asm)
+ END(__switch_to_asm)
+
+ /*
++ * The unwinder expects the last frame on the stack to always be at the same
++ * offset from the end of the page, which allows it to validate the stack.
++ * Calling schedule_tail() directly would break that convention because it's an
++ * asmlinkage function so its argument has to be pushed on the stack. This
++ * wrapper creates a proper "end of stack" frame header before the call.
++ */
++ENTRY(schedule_tail_wrapper)
++ FRAME_BEGIN
++
++ pushl %eax
++ call schedule_tail
++ popl %eax
++
++ FRAME_END
++ ret
++ENDPROC(schedule_tail_wrapper)
++/*
+ * A newly forked process directly context switches into this address.
+ *
+ * eax: prev task we switched from
+@@ -262,24 +279,15 @@ END(__switch_to_asm)
+ * edi: kernel thread arg
+ */
+ ENTRY(ret_from_fork)
+- FRAME_BEGIN /* help unwinder find end of stack */
+-
+- /*
+- * schedule_tail() is asmlinkage so we have to put its 'prev' argument
+- * on the stack.
+- */
+- pushl %eax
+- call schedule_tail
+- popl %eax
++ call schedule_tail_wrapper
+
+ testl %ebx, %ebx
+ jnz 1f /* kernel threads are uncommon */
+
+ 2:
+ /* When we fork, we trace the syscall return in the child, too. */
+- leal FRAME_OFFSET(%esp), %eax
++ movl %esp, %eax
+ call syscall_return_slowpath
+- FRAME_END
+ jmp restore_all
+
+ /* kernel thread */
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -36,7 +36,6 @@
+ #include <asm/smap.h>
+ #include <asm/pgtable_types.h>
+ #include <asm/export.h>
+-#include <asm/frame.h>
+ #include <linux/err.h>
+
+ .code64
+@@ -409,19 +408,17 @@ END(__switch_to_asm)
+ * r12: kernel thread arg
+ */
+ ENTRY(ret_from_fork)
+- FRAME_BEGIN /* help unwinder find end of stack */
+ movq %rax, %rdi
+- call schedule_tail /* rdi: 'prev' task parameter */
++ call schedule_tail /* rdi: 'prev' task parameter */
+
+- testq %rbx, %rbx /* from kernel_thread? */
+- jnz 1f /* kernel threads are uncommon */
++ testq %rbx, %rbx /* from kernel_thread? */
++ jnz 1f /* kernel threads are uncommon */
+
+ 2:
+- leaq FRAME_OFFSET(%rsp),%rdi /* pt_regs pointer */
++ movq %rsp, %rdi
+ call syscall_return_slowpath /* returns with IRQs disabled */
+ TRACE_IRQS_ON /* user mode is traced as IRQS on */
+ SWAPGS
+- FRAME_END
+ jmp restore_regs_and_iret
+
+ 1:
xfrm-fix-stack-access-out-of-bounds-with-config_xfrm_sub_policy.patch
xfrm-null-dereference-on-allocation-failure.patch
xfrm-oops-on-error-in-pfkey_msg2xfrm_state.patch
+watchdog-bcm281xx-fix-use-of-uninitialized-spinlock.patch
+arm64-pci-fix-struct-acpi_pci_root_ops-allocation-failure-path.patch
+arm64-acpi-fix-bad_madt_gicc_entry-macro-implementation.patch
+arm-8685-1-ensure-memblock-limit-is-pmd-aligned.patch
+arm-davinci-pm-free-resources-in-error-handling-path-in-davinci_pm_init.patch
+arm-davinci-pm-do-not-free-useful-resources-in-normal-path-in-davinci_pm_init.patch
+tools-arch-sync-arch-x86-lib-memcpy_64.s-with-the-kernel.patch
+revert-x86-entry-fix-the-end-of-the-stack-for-newly-forked-tasks.patch
+x86-mshyperv-remove-excess-includes-from-mshyperv.h.patch
+x86-boot-kaslr-fix-kexec-crash-due-to-virt_addr-calculation-bug.patch
+perf-x86-fix-spurious-nmi-with-pebs-load-latency-event.patch
+x86-mpx-correctly-report-do_mpx_bt_fault-failures-to-user-space.patch
+x86-mm-fix-flush_tlb_page-on-xen.patch
+ocfs2-o2hb-revert-hb-threshold-to-keep-compatible.patch
+ocfs2-fix-deadlock-caused-by-recursive-locking-in-xattr.patch
--- /dev/null
+From e883d09c9eb2ffddfd057c17e6a0cef446ec8c9b Mon Sep 17 00:00:00 2001
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+Date: Mon, 24 Apr 2017 11:58:54 -0300
+Subject: tools arch: Sync arch/x86/lib/memcpy_64.S with the kernel
+
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+
+commit e883d09c9eb2ffddfd057c17e6a0cef446ec8c9b upstream.
+
+Just a minor fix done in:
+
+ Fixes: 26a37ab319a2 ("x86/mce: Fix copy/paste error in exception table entries")
+
+Cc: Tony Luck <tony.luck@intel.com>
+Link: http://lkml.kernel.org/n/tip-ni9jzdd5yxlail6pq8cuexw2@git.kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/arch/x86/lib/memcpy_64.S | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/arch/x86/lib/memcpy_64.S
++++ b/tools/arch/x86/lib/memcpy_64.S
+@@ -286,7 +286,7 @@ ENDPROC(memcpy_mcsafe_unrolled)
+ _ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
+ _ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
+ _ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
+- _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
++ _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
+ _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
+ _ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
+ _ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
--- /dev/null
+From fedf266f9955d9a019643cde199a2fd9a0259f6f Mon Sep 17 00:00:00 2001
+From: Eric Anholt <eric@anholt.net>
+Date: Thu, 27 Apr 2017 18:02:32 -0700
+Subject: watchdog: bcm281xx: Fix use of uninitialized spinlock.
+
+From: Eric Anholt <eric@anholt.net>
+
+commit fedf266f9955d9a019643cde199a2fd9a0259f6f upstream.
+
+The bcm_kona_wdt_set_resolution_reg() call takes the spinlock, so
+initialize it earlier. This fixes a warning at boot with lock debugging
+enabled.
+
+Fixes: 6adb730dc208 ("watchdog: bcm281xx: Watchdog Driver")
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Reviewed-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Wim Van Sebroeck <wim@iguana.be>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/watchdog/bcm_kona_wdt.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/watchdog/bcm_kona_wdt.c
++++ b/drivers/watchdog/bcm_kona_wdt.c
+@@ -304,6 +304,8 @@ static int bcm_kona_wdt_probe(struct pla
+ if (!wdt)
+ return -ENOMEM;
+
++ spin_lock_init(&wdt->lock);
++
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ wdt->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(wdt->base))
+@@ -316,7 +318,6 @@ static int bcm_kona_wdt_probe(struct pla
+ return ret;
+ }
+
+- spin_lock_init(&wdt->lock);
+ platform_set_drvdata(pdev, wdt);
+ watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
+ bcm_kona_wdt_wdd.parent = &pdev->dev;
--- /dev/null
+From 8eabf42ae5237e6b699aeac687b5b629e3537c8d Mon Sep 17 00:00:00 2001
+From: Baoquan He <bhe@redhat.com>
+Date: Tue, 27 Jun 2017 20:39:06 +0800
+Subject: x86/boot/KASLR: Fix kexec crash due to 'virt_addr' calculation bug
+
+From: Baoquan He <bhe@redhat.com>
+
+commit 8eabf42ae5237e6b699aeac687b5b629e3537c8d upstream.
+
+Kernel text KASLR is separated into physical address and virtual
+address randomization, and for virtual address randomization we only
+randomize to get an offset between 16M and KERNEL_IMAGE_SIZE. So the
+initial value of 'virt_addr' should be LOAD_PHYSICAL_ADDR, not the
+original kernel loading address 'output'.
+
+The bug will cause a kernel boot failure if the kernel is loaded at a
+position other than the 16M address decided at compile time.
+Kexec/kdump is one such practical case.
+
+To fix it, just assign LOAD_PHYSICAL_ADDR to 'virt_addr' as its
+initial value.
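+
+A hedged illustration (the 512M load address is hypothetical; 16M is
+the default LOAD_PHYSICAL_ADDR):
+
+  unsigned long output   = 0x20000000UL; /* kexec loads kernel at 512M */
+
+  unsigned long virt_old = output;       /* bug: virtual base follows
+                                            the physical load address  */
+  unsigned long virt_new = 0x1000000UL;  /* fix: LOAD_PHYSICAL_ADDR    */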
+
+Tested-by: Dave Young <dyoung@redhat.com>
+Signed-off-by: Baoquan He <bhe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: 8391c73 ("x86/KASLR: Randomize virtual address separately")
+Link: http://lkml.kernel.org/r/1498567146-11990-3-git-send-email-bhe@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/boot/compressed/kaslr.c | 3 ---
+ arch/x86/boot/compressed/misc.c | 4 ++--
+ arch/x86/boot/compressed/misc.h | 2 --
+ 3 files changed, 2 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/boot/compressed/kaslr.c
++++ b/arch/x86/boot/compressed/kaslr.c
+@@ -564,9 +564,6 @@ void choose_random_location(unsigned lon
+ {
+ unsigned long random_addr, min_addr;
+
+- /* By default, keep output position unchanged. */
+- *virt_addr = *output;
+-
+ if (cmdline_find_option_bool("nokaslr")) {
+ warn("KASLR disabled: 'nokaslr' on cmdline.");
+ return;
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -338,7 +338,7 @@ asmlinkage __visible void *extract_kerne
+ unsigned long output_len)
+ {
+ const unsigned long kernel_total_size = VO__end - VO__text;
+- unsigned long virt_addr = (unsigned long)output;
++ unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
+
+ /* Retain x86 boot parameters pointer passed from startup_32/64. */
+ boot_params = rmode;
+@@ -397,7 +397,7 @@ asmlinkage __visible void *extract_kerne
+ #ifndef CONFIG_RELOCATABLE
+ if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
+ error("Destination address does not match LOAD_PHYSICAL_ADDR");
+- if ((unsigned long)output != virt_addr)
++ if (virt_addr != LOAD_PHYSICAL_ADDR)
+ error("Destination virtual address changed when not relocatable");
+ #endif
+
+--- a/arch/x86/boot/compressed/misc.h
++++ b/arch/x86/boot/compressed/misc.h
+@@ -81,8 +81,6 @@ static inline void choose_random_locatio
+ unsigned long output_size,
+ unsigned long *virt_addr)
+ {
+- /* No change from existing output location. */
+- *virt_addr = *output;
+ }
+ #endif
+
--- /dev/null
+From dbd68d8e84c606673ebbcf15862f8c155fa92326 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Sat, 22 Apr 2017 00:01:22 -0700
+Subject: x86/mm: Fix flush_tlb_page() on Xen
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit dbd68d8e84c606673ebbcf15862f8c155fa92326 upstream.
+
+flush_tlb_page() passes a bogus range to flush_tlb_others() and
+expects the latter to fix it up. native_flush_tlb_others() has the
+fixup but Xen's version doesn't. Move the fixup to
+flush_tlb_others().
+
+AFAICS the only real effect is that, without this fix, Xen would
+flush everything instead of just the one page on remote vCPUs when
+flush_tlb_page() was called.
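+
+A simplified sketch of the convention change (signatures condensed):
+
+  /* old: callee-side fixup, present only in the native version */
+  if (end == 0)
+          end = start + PAGE_SIZE;
+
+  /* new: the caller passes an explicit one-page range, so Xen's
+   * flush_tlb_others(), which lacked the fixup, also flushes just
+   * the one page on remote vCPUs
+   */
+  flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE);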
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Nadav Amit <namit@vmware.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: e7b52ffd45a6 ("x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range")
+Link: http://lkml.kernel.org/r/10ed0e4dfea64daef10b87fb85df1746999b4dba.1492844372.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/tlb.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -263,8 +263,6 @@ void native_flush_tlb_others(const struc
+ {
+ struct flush_tlb_info info;
+
+- if (end == 0)
+- end = start + PAGE_SIZE;
+ info.flush_mm = mm;
+ info.flush_start = start;
+ info.flush_end = end;
+@@ -393,7 +391,7 @@ void flush_tlb_page(struct vm_area_struc
+ }
+
+ if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+- flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);
++ flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE);
+
+ preempt_enable();
+ }
--- /dev/null
+From 5ed386ec09a5d75bcf073967e55e895c2607a5c3 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <jroedel@suse.de>
+Date: Thu, 6 Apr 2017 16:19:22 +0200
+Subject: x86/mpx: Correctly report do_mpx_bt_fault() failures to user-space
+
+From: Joerg Roedel <jroedel@suse.de>
+
+commit 5ed386ec09a5d75bcf073967e55e895c2607a5c3 upstream.
+
+When this function fails, it just sends a SIGSEGV signal to
+user-space using force_sig(). This signal is missing
+essential information about the cause, e.g. the trap_nr or
+an error code.
+
+Fix this by propagating the error to the only caller of
+mpx_handle_bd_fault(), do_bounds(), which sends the correct
+SIGSEGV signal to the process.
+
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: fe3d197f84319 ('x86, mpx: On-demand kernel allocation of bounds tables')
+Link: http://lkml.kernel.org/r/1491488362-27198-1-git-send-email-joro@8bytes.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/mpx.c | 10 +---------
+ 1 file changed, 1 insertion(+), 9 deletions(-)
+
+--- a/arch/x86/mm/mpx.c
++++ b/arch/x86/mm/mpx.c
+@@ -526,15 +526,7 @@ int mpx_handle_bd_fault(void)
+ if (!kernel_managing_mpx_tables(current->mm))
+ return -EINVAL;
+
+- if (do_mpx_bt_fault()) {
+- force_sig(SIGSEGV, current);
+- /*
+- * The force_sig() is essentially "handling" this
+- * exception, so we do not pass up the error
+- * from do_mpx_bt_fault().
+- */
+- }
+- return 0;
++ return do_mpx_bt_fault();
+ }
+
+ /*
--- /dev/null
+From 26fcd952d5c977a94ac64bb44ed409e37607b2c9 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 23 Jun 2017 10:50:38 +0200
+Subject: x86/mshyperv: Remove excess #includes from mshyperv.h
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 26fcd952d5c977a94ac64bb44ed409e37607b2c9 upstream.
+
+A recent commit included linux/slab.h in linux/irq.h. This breaks the build
+of vdso32 on a 64-bit kernel.
+
+The reason is that linux/irq.h gets included into the vdso code via
+linux/interrupt.h which is included from asm/mshyperv.h. That makes the
+32-bit vdso compile fail, because slab.h includes the pgtable headers for
+64-bit on a 64-bit build.
+
+Neither linux/clocksource.h nor linux/interrupt.h is needed in the
+mshyperv.h header file itself - it only has a dependency on
+<linux/atomic.h>.
+
+Remove the includes and unbreak the build.
+
+Reported-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: K. Y. Srinivasan <kys@microsoft.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
+Cc: devel@linuxdriverproject.org
+Fixes: dee863b571b0 ("hv: export current Hyper-V clocksource")
+Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1706231038460.2647@nanos
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/mshyperv.h | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/mshyperv.h
++++ b/arch/x86/include/asm/mshyperv.h
+@@ -2,8 +2,7 @@
+ #define _ASM_X86_MSHYPER_H
+
+ #include <linux/types.h>
+-#include <linux/interrupt.h>
+-#include <linux/clocksource.h>
++#include <linux/atomic.h>
+ #include <asm/hyperv.h>
+
+ /*