--- /dev/null
+From 5e545df3292fbd3d5963c68980f1527ead2a2b3f Mon Sep 17 00:00:00 2001
+From: Mike Rapoport <rppt@linux.ibm.com>
+Date: Mon, 14 Dec 2020 19:09:55 -0800
+Subject: arm: remove CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
+
+From: Mike Rapoport <rppt@linux.ibm.com>
+
+commit 5e545df3292fbd3d5963c68980f1527ead2a2b3f upstream.
+
+ARM is the only architecture that defines CONFIG_ARCH_HAS_HOLES_MEMORYMODEL,
+which in turn enables the memmap_valid_within() function that is intended
+to verify the existence of the struct page associated with a pfn when there
+are holes in the memory map.
+
+However, ARCH_HAS_HOLES_MEMORYMODEL also enables HAVE_ARCH_PFN_VALID and
+an arch-specific pfn_valid() implementation that likewise deals with the
+holes in the memory map.
+
+The only two users of memmap_valid_within() call this function after
+a call to pfn_valid(), so the memmap_valid_within() check is redundant.
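+
+For illustration, both call sites follow roughly this pattern (a
+simplified sketch, not the exact code from either caller):
+
+	if (!pfn_valid(pfn))
+		continue;
+	page = pfn_to_page(pfn);
+	/* redundant: the arch pfn_valid() already accounts for the holes */
+	if (!memmap_valid_within(pfn, page, zone))
+		continue;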
+
+Remove CONFIG_ARCH_HAS_HOLES_MEMORYMODEL and memmap_valid_within() and rely
+entirely on ARM's implementation of pfn_valid(), which is now enabled
+unconditionally.
+
+Link: https://lkml.kernel.org/r/20201101170454.9567-9-rppt@kernel.org
+Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
+Cc: Alexey Dobriyan <adobriyan@gmail.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: Greg Ungerer <gerg@linux-m68k.org>
+Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Matt Turner <mattst88@gmail.com>
+Cc: Meelis Roos <mroos@linux.ee>
+Cc: Michael Schmitz <schmitzmic@gmail.com>
+Cc: Russell King <linux@armlinux.org.uk>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Vineet Gupta <vgupta@synopsys.com>
+Cc: Will Deacon <will@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Reported-by: kernel test robot <lkp@intel.com>
+Fixes: 8dd559d53b3b ("arm: ioremap: don't abuse pfn_valid() to check if pfn is in RAM")
+Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/vm/memory-model.rst | 3 +--
+ arch/arm/Kconfig | 8 ++------
+ arch/arm/mach-bcm/Kconfig | 1 -
+ arch/arm/mach-davinci/Kconfig | 1 -
+ arch/arm/mach-exynos/Kconfig | 1 -
+ arch/arm/mach-highbank/Kconfig | 1 -
+ arch/arm/mach-omap2/Kconfig | 2 +-
+ arch/arm/mach-s5pv210/Kconfig | 1 -
+ arch/arm/mach-tango/Kconfig | 1 -
+ fs/proc/kcore.c | 2 --
+ include/linux/mmzone.h | 31 -------------------------------
+ mm/mmzone.c | 14 --------------
+ mm/vmstat.c | 4 ----
+ 13 files changed, 4 insertions(+), 66 deletions(-)
+
+--- a/Documentation/vm/memory-model.rst
++++ b/Documentation/vm/memory-model.rst
+@@ -52,8 +52,7 @@ wrapper :c:func:`free_area_init`. Yet, t
+ usable until the call to :c:func:`memblock_free_all` that hands all
+ the memory to the page allocator.
+
+-If an architecture enables `CONFIG_ARCH_HAS_HOLES_MEMORYMODEL` option,
+-it may free parts of the `mem_map` array that do not cover the
++An architecture may free parts of the `mem_map` array that do not cover the
+ actual physical pages. In such case, the architecture specific
+ :c:func:`pfn_valid` implementation should take the holes in the
+ `mem_map` into account.
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -26,7 +26,7 @@ config ARM
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAVE_CUSTOM_GPIO_H
+ select ARCH_HAS_GCOV_PROFILE_ALL
+- select ARCH_KEEP_MEMBLOCK if HAVE_ARCH_PFN_VALID || KEXEC
++ select ARCH_KEEP_MEMBLOCK
+ select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_NO_SG_CHAIN if !ARM_HAS_SG_CHAIN
+ select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
+@@ -521,7 +521,6 @@ config ARCH_S3C24XX
+ config ARCH_OMAP1
+ bool "TI OMAP1"
+ depends on MMU
+- select ARCH_HAS_HOLES_MEMORYMODEL
+ select ARCH_OMAP
+ select CLKDEV_LOOKUP
+ select CLKSRC_MMIO
+@@ -1518,9 +1517,6 @@ config OABI_COMPAT
+ UNPREDICTABLE (in fact it can be predicted that it won't work
+ at all). If in doubt say N.
+
+-config ARCH_HAS_HOLES_MEMORYMODEL
+- bool
+-
+ config ARCH_SPARSEMEM_ENABLE
+ bool
+
+@@ -1528,7 +1524,7 @@ config ARCH_SPARSEMEM_DEFAULT
+ def_bool ARCH_SPARSEMEM_ENABLE
+
+ config HAVE_ARCH_PFN_VALID
+- def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
++ def_bool y
+
+ config HIGHMEM
+ bool "High Memory Support"
+--- a/arch/arm/mach-bcm/Kconfig
++++ b/arch/arm/mach-bcm/Kconfig
+@@ -214,7 +214,6 @@ config ARCH_BRCMSTB
+ select HAVE_ARM_ARCH_TIMER
+ select BRCMSTB_L2_IRQ
+ select BCM7120_L2_IRQ
+- select ARCH_HAS_HOLES_MEMORYMODEL
+ select ZONE_DMA if ARM_LPAE
+ select SOC_BRCMSTB
+ select SOC_BUS
+--- a/arch/arm/mach-davinci/Kconfig
++++ b/arch/arm/mach-davinci/Kconfig
+@@ -5,7 +5,6 @@ menuconfig ARCH_DAVINCI
+ depends on ARCH_MULTI_V5
+ select DAVINCI_TIMER
+ select ZONE_DMA
+- select ARCH_HAS_HOLES_MEMORYMODEL
+ select PM_GENERIC_DOMAINS if PM
+ select PM_GENERIC_DOMAINS_OF if PM && OF
+ select REGMAP_MMIO
+--- a/arch/arm/mach-exynos/Kconfig
++++ b/arch/arm/mach-exynos/Kconfig
+@@ -8,7 +8,6 @@
+ menuconfig ARCH_EXYNOS
+ bool "Samsung EXYNOS"
+ depends on ARCH_MULTI_V7
+- select ARCH_HAS_HOLES_MEMORYMODEL
+ select ARCH_SUPPORTS_BIG_ENDIAN
+ select ARM_AMBA
+ select ARM_GIC
+--- a/arch/arm/mach-highbank/Kconfig
++++ b/arch/arm/mach-highbank/Kconfig
+@@ -2,7 +2,6 @@
+ config ARCH_HIGHBANK
+ bool "Calxeda ECX-1000/2000 (Highbank/Midway)"
+ depends on ARCH_MULTI_V7
+- select ARCH_HAS_HOLES_MEMORYMODEL
+ select ARCH_SUPPORTS_BIG_ENDIAN
+ select ARM_AMBA
+ select ARM_ERRATA_764369 if SMP
+--- a/arch/arm/mach-omap2/Kconfig
++++ b/arch/arm/mach-omap2/Kconfig
+@@ -94,7 +94,7 @@ config SOC_DRA7XX
+ config ARCH_OMAP2PLUS
+ bool
+ select ARCH_HAS_BANDGAP
+- select ARCH_HAS_HOLES_MEMORYMODEL
++ select ARCH_HAS_RESET_CONTROLLER
+ select ARCH_OMAP
+ select CLKSRC_MMIO
+ select GENERIC_IRQ_CHIP
+--- a/arch/arm/mach-s5pv210/Kconfig
++++ b/arch/arm/mach-s5pv210/Kconfig
+@@ -8,7 +8,6 @@
+ config ARCH_S5PV210
+ bool "Samsung S5PV210/S5PC110"
+ depends on ARCH_MULTI_V7
+- select ARCH_HAS_HOLES_MEMORYMODEL
+ select ARM_VIC
+ select CLKSRC_SAMSUNG_PWM
+ select COMMON_CLK_SAMSUNG
+--- a/arch/arm/mach-tango/Kconfig
++++ b/arch/arm/mach-tango/Kconfig
+@@ -3,7 +3,6 @@ config ARCH_TANGO
+ bool "Sigma Designs Tango4 (SMP87xx)"
+ depends on ARCH_MULTI_V7
+ # Cortex-A9 MPCore r3p0, PL310 r3p2
+- select ARCH_HAS_HOLES_MEMORYMODEL
+ select ARM_ERRATA_754322
+ select ARM_ERRATA_764369 if SMP
+ select ARM_ERRATA_775420
+--- a/fs/proc/kcore.c
++++ b/fs/proc/kcore.c
+@@ -193,8 +193,6 @@ kclist_add_private(unsigned long pfn, un
+ return 1;
+
+ p = pfn_to_page(pfn);
+- if (!memmap_valid_within(pfn, p, page_zone(p)))
+- return 1;
+
+ ent = kmalloc(sizeof(*ent), GFP_KERNEL);
+ if (!ent)
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -1438,37 +1438,6 @@ void memory_present(int nid, unsigned lo
+ #define pfn_valid_within(pfn) (1)
+ #endif
+
+-#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
+-/*
+- * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
+- * associated with it or not. This means that a struct page exists for this
+- * pfn. The caller cannot assume the page is fully initialized in general.
+- * Hotplugable pages might not have been onlined yet. pfn_to_online_page()
+- * will ensure the struct page is fully online and initialized. Special pages
+- * (e.g. ZONE_DEVICE) are never onlined and should be treated accordingly.
+- *
+- * In FLATMEM, it is expected that holes always have valid memmap as long as
+- * there is valid PFNs either side of the hole. In SPARSEMEM, it is assumed
+- * that a valid section has a memmap for the entire section.
+- *
+- * However, an ARM, and maybe other embedded architectures in the future
+- * free memmap backing holes to save memory on the assumption the memmap is
+- * never used. The page_zone linkages are then broken even though pfn_valid()
+- * returns true. A walker of the full memmap must then do this additional
+- * check to ensure the memmap they are looking at is sane by making sure
+- * the zone and PFN linkages are still valid. This is expensive, but walkers
+- * of the full memmap are extremely rare.
+- */
+-bool memmap_valid_within(unsigned long pfn,
+- struct page *page, struct zone *zone);
+-#else
+-static inline bool memmap_valid_within(unsigned long pfn,
+- struct page *page, struct zone *zone)
+-{
+- return true;
+-}
+-#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+-
+ #endif /* !__GENERATING_BOUNDS.H */
+ #endif /* !__ASSEMBLY__ */
+ #endif /* _LINUX_MMZONE_H */
+--- a/mm/mmzone.c
++++ b/mm/mmzone.c
+@@ -72,20 +72,6 @@ struct zoneref *__next_zones_zonelist(st
+ return z;
+ }
+
+-#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
+-bool memmap_valid_within(unsigned long pfn,
+- struct page *page, struct zone *zone)
+-{
+- if (page_to_pfn(page) != pfn)
+- return false;
+-
+- if (page_zone(page) != zone)
+- return false;
+-
+- return true;
+-}
+-#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+-
+ void lruvec_init(struct lruvec *lruvec)
+ {
+ enum lru_list lru;
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -1444,10 +1444,6 @@ static void pagetypeinfo_showblockcount_
+ if (!page)
+ continue;
+
+- /* Watch for unexpected holes punched in the memmap */
+- if (!memmap_valid_within(pfn, page, zone))
+- continue;
+-
+ if (page_zone(page) != zone)
+ continue;
+
--- /dev/null
+From 7c174f305cbee6bdba5018aae02b84369e7ab995 Mon Sep 17 00:00:00 2001
+From: Like Xu <likexu@tencent.com>
+Date: Tue, 30 Nov 2021 15:42:17 +0800
+Subject: KVM: x86/pmu: Refactoring find_arch_event() to pmc_perf_hw_id()
+
+From: Like Xu <likexu@tencent.com>
+
+commit 7c174f305cbee6bdba5018aae02b84369e7ab995 upstream.
+
+find_arch_event() returns an "unsigned int" value,
+which is used by pmc_reprogram_counter() to
+program a PERF_TYPE_HARDWARE type perf_event.
+
+The returned value is actually the kernel-defined generic
+perf_hw_id, so rename the function to pmc_perf_hw_id() and
+simplify its incoming parameters to make it self-explanatory.
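+
+A minimal sketch of the call-site change (simplified; the hunks below
+are authoritative):
+
+	/* before: the caller decodes eventsel and passes the pieces */
+	config = ops->find_arch_event(pmu, event_select, unit_mask);
+
+	/* after: the callee extracts the fields from pmc->eventsel */
+	config = ops->pmc_perf_hw_id(pmc);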
+
+Signed-off-by: Like Xu <likexu@tencent.com>
+Message-Id: <20211130074221.93635-3-likexu@tencent.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+[Backport to 5.4: kvm_x86_ops is a pointer here]
+Signed-off-by: Kyle Huey <me@kylehuey.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/pmu.c | 8 +-------
+ arch/x86/kvm/pmu.h | 3 +--
+ arch/x86/kvm/pmu_amd.c | 8 ++++----
+ arch/x86/kvm/vmx/pmu_intel.c | 9 +++++----
+ 4 files changed, 11 insertions(+), 17 deletions(-)
+
+--- a/arch/x86/kvm/pmu.c
++++ b/arch/x86/kvm/pmu.c
+@@ -143,7 +143,6 @@ static void pmc_reprogram_counter(struct
+ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+ {
+ unsigned config, type = PERF_TYPE_RAW;
+- u8 event_select, unit_mask;
+ struct kvm *kvm = pmc->vcpu->kvm;
+ struct kvm_pmu_event_filter *filter;
+ int i;
+@@ -175,17 +174,12 @@ void reprogram_gp_counter(struct kvm_pmc
+ if (!allow_event)
+ return;
+
+- event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+- unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+-
+ if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
+ ARCH_PERFMON_EVENTSEL_INV |
+ ARCH_PERFMON_EVENTSEL_CMASK |
+ HSW_IN_TX |
+ HSW_IN_TX_CHECKPOINTED))) {
+- config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
+- event_select,
+- unit_mask);
++ config = kvm_x86_ops->pmu_ops->pmc_perf_hw_id(pmc);
+ if (config != PERF_COUNT_HW_MAX)
+ type = PERF_TYPE_HARDWARE;
+ }
+--- a/arch/x86/kvm/pmu.h
++++ b/arch/x86/kvm/pmu.h
+@@ -22,8 +22,7 @@ struct kvm_event_hw_type_mapping {
+ };
+
+ struct kvm_pmu_ops {
+- unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
+- u8 unit_mask);
++ unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc);
+ unsigned (*find_fixed_event)(int idx);
+ bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
+ struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
+--- a/arch/x86/kvm/pmu_amd.c
++++ b/arch/x86/kvm/pmu_amd.c
+@@ -126,10 +126,10 @@ static inline struct kvm_pmc *get_gp_pmc
+ return &pmu->gp_counters[msr_to_index(msr)];
+ }
+
+-static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
+- u8 event_select,
+- u8 unit_mask)
++static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
+ {
++ u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
++ u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
+@@ -300,7 +300,7 @@ static void amd_pmu_reset(struct kvm_vcp
+ }
+
+ struct kvm_pmu_ops amd_pmu_ops = {
+- .find_arch_event = amd_find_arch_event,
++ .pmc_perf_hw_id = amd_pmc_perf_hw_id,
+ .find_fixed_event = amd_find_fixed_event,
+ .pmc_is_enabled = amd_pmc_is_enabled,
+ .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
+--- a/arch/x86/kvm/vmx/pmu_intel.c
++++ b/arch/x86/kvm/vmx/pmu_intel.c
+@@ -64,10 +64,11 @@ static void global_ctrl_changed(struct k
+ reprogram_counter(pmu, bit);
+ }
+
+-static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
+- u8 event_select,
+- u8 unit_mask)
++static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
+ {
++ struct kvm_pmu *pmu = pmc_to_pmu(pmc);
++ u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
++ u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
+@@ -374,7 +375,7 @@ static void intel_pmu_reset(struct kvm_v
+ }
+
+ struct kvm_pmu_ops intel_pmu_ops = {
+- .find_arch_event = intel_find_arch_event,
++ .pmc_perf_hw_id = intel_pmc_perf_hw_id,
+ .find_fixed_event = intel_find_fixed_event,
+ .pmc_is_enabled = intel_pmc_is_enabled,
+ .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
nfp-bpf-silence-bitwise-vs.-logical-or-warning.patch
can-grcan-grcan_probe-fix-broken-system-id-check-for-errata-workaround-needs.patch
can-grcan-only-use-the-napi-poll-budget-for-rx.patch
+arm-remove-config_arch_has_holes_memorymodel.patch
+kvm-x86-pmu-refactoring-find_arch_event-to-pmc_perf_hw_id.patch
+x86-asm-allow-to-pass-macros-to-__asm_form.patch
+x86-xen-kvm-gather-the-definition-of-emulate-prefixes.patch
+x86-xen-insn-decode-xen-and-kvm-emulate-prefix-signature.patch
+x86-kprobes-prohibit-probing-on-instruction-which-has-emulate-prefix.patch
--- /dev/null
+From f7919fd943abf0c77aed4441ea9897a323d132f5 Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Fri, 6 Sep 2019 22:13:48 +0900
+Subject: x86/asm: Allow to pass macros to __ASM_FORM()
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit f7919fd943abf0c77aed4441ea9897a323d132f5 upstream.
+
+Use __stringify() in __ASM_FORM() so that users can pass
+code that includes macros to __ASM_FORM().
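+
+For illustration, with a hypothetical BYTES macro: plain "#x"
+stringifies its argument without expanding it, while __stringify()
+expands the argument first and then stringifies:
+
+	#define BYTES 0x0f,0x0b
+
+	__ASM_FORM(BYTES)	/* old, #x:          " BYTES "     */
+	__ASM_FORM(BYTES)	/* new, __stringify: " 0x0f,0x0b " */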
+
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: x86@kernel.org
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Stefano Stabellini <sstabellini@kernel.org>
+Cc: Andrew Cooper <andrew.cooper3@citrix.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: xen-devel@lists.xenproject.org
+Cc: Randy Dunlap <rdunlap@infradead.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/156777562873.25081.2288083344657460959.stgit@devnote2
+Signed-off-by: Maximilian Heyne <mheyne@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/asm.h | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/asm.h
++++ b/arch/x86/include/asm/asm.h
+@@ -7,9 +7,11 @@
+ # define __ASM_FORM_RAW(x) x
+ # define __ASM_FORM_COMMA(x) x,
+ #else
+-# define __ASM_FORM(x) " " #x " "
+-# define __ASM_FORM_RAW(x) #x
+-# define __ASM_FORM_COMMA(x) " " #x ","
++#include <linux/stringify.h>
++
++# define __ASM_FORM(x) " " __stringify(x) " "
++# define __ASM_FORM_RAW(x) __stringify(x)
++# define __ASM_FORM_COMMA(x) " " __stringify(x) ","
+ #endif
+
+ #ifndef __x86_64__
--- /dev/null
+From 004e8dce9c5595697951f7cd0e9f66b35c92265e Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Fri, 6 Sep 2019 22:14:20 +0900
+Subject: x86: kprobes: Prohibit probing on instruction which has emulate prefix
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit 004e8dce9c5595697951f7cd0e9f66b35c92265e upstream.
+
+Prohibit probing on an instruction which has the XEN_EMULATE_PREFIX
+or KVM's emulate prefix. Since that prefix is a marker for Xen
+and KVM, overwriting the marker with kprobe's int3 breaks the
+expected behavior.
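+
+Concretely, with the marker bytes from emulate_prefix.h and the int3
+opcode 0xcc:
+
+	XEN_EMULATE_PREFIX:    0f 0b 78 65 6e   /* ud2 ; .ascii "xen" */
+	after a kprobe int3:   cc 0b 78 65 6e   /* marker destroyed   */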
+
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: x86@kernel.org
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Stefano Stabellini <sstabellini@kernel.org>
+Cc: Andrew Cooper <andrew.cooper3@citrix.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: xen-devel@lists.xenproject.org
+Cc: Randy Dunlap <rdunlap@infradead.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/156777566048.25081.6296162369492175325.stgit@devnote2
+Signed-off-by: Maximilian Heyne <mheyne@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/kprobes/core.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -358,6 +358,10 @@ int __copy_instruction(u8 *dest, u8 *src
+ kernel_insn_init(insn, dest, MAX_INSN_SIZE);
+ insn_get_length(insn);
+
++	/* We cannot probe an instruction that has a force-emulate prefix */
++ if (insn_has_emulate_prefix(insn))
++ return 0;
++
+ /* Another subsystem puts a breakpoint, failed to recover */
+ if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+ return 0;
--- /dev/null
+From 4d65adfcd1196818659d3bd9b42dccab291e1751 Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Fri, 6 Sep 2019 22:14:10 +0900
+Subject: x86: xen: insn: Decode Xen and KVM emulate-prefix signature
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit 4d65adfcd1196818659d3bd9b42dccab291e1751 upstream.
+
+Decode the Xen and KVM emulate-prefix signatures with the x86 insn
+decoder. They are called "prefixes" but are not actually x86
+instruction prefixes, so add an insn.emulate_prefix_size field
+instead of reusing insn.prefixes.
+
+If the x86 decoder finds one of the special instruction sequences,
+XEN_EMULATE_PREFIX or 'ud2a; .ascii "kvm"', it simply counts the
+length, sets insn.emulate_prefix_size and folds the signature into
+the next instruction. In other words, the signature and the next
+instruction are treated as a single instruction.
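+
+For example, a KVM-prefixed CPUID would roughly decode like this (byte
+values from emulate_prefix.h; cpuid is 0f a2):
+
+	0f 0b 6b 76 6d 0f a2	/* ud2 ; .ascii "kvm" ; cpuid */
+
+	-> one decoded insn with emulate_prefix_size = 5 and cpuid
+	   as the instruction proper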
+
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: x86@kernel.org
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Stefano Stabellini <sstabellini@kernel.org>
+Cc: Andrew Cooper <andrew.cooper3@citrix.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: xen-devel@lists.xenproject.org
+Cc: Randy Dunlap <rdunlap@infradead.org>
+Link: https://lkml.kernel.org/r/156777564986.25081.4964537658500952557.stgit@devnote2
+[mheyne: resolved contextual conflict in tools/objtool/sync-check.sh]
+Signed-off-by: Maximilian Heyne <mheyne@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/insn.h | 6 ++++
+ arch/x86/lib/insn.c | 34 ++++++++++++++++++++++++++++
+ tools/arch/x86/include/asm/emulate_prefix.h | 14 +++++++++++
+ tools/arch/x86/include/asm/insn.h | 6 ++++
+ tools/arch/x86/lib/insn.c | 34 ++++++++++++++++++++++++++++
+ tools/objtool/sync-check.sh | 3 +-
+ tools/perf/check-headers.sh | 3 +-
+ 7 files changed, 98 insertions(+), 2 deletions(-)
+ create mode 100644 tools/arch/x86/include/asm/emulate_prefix.h
+
+--- a/arch/x86/include/asm/insn.h
++++ b/arch/x86/include/asm/insn.h
+@@ -45,6 +45,7 @@ struct insn {
+ struct insn_field immediate2; /* for 64bit imm or seg16 */
+ };
+
++ int emulate_prefix_size;
+ insn_attr_t attr;
+ unsigned char opnd_bytes;
+ unsigned char addr_bytes;
+@@ -128,6 +129,11 @@ static inline int insn_is_evex(struct in
+ return (insn->vex_prefix.nbytes == 4);
+ }
+
++static inline int insn_has_emulate_prefix(struct insn *insn)
++{
++ return !!insn->emulate_prefix_size;
++}
++
+ /* Ensure this instruction is decoded completely */
+ static inline int insn_complete(struct insn *insn)
+ {
+--- a/arch/x86/lib/insn.c
++++ b/arch/x86/lib/insn.c
+@@ -13,6 +13,8 @@
+ #include <asm/inat.h>
+ #include <asm/insn.h>
+
++#include <asm/emulate_prefix.h>
++
+ /* Verify next sizeof(t) bytes can be on the same instruction */
+ #define validate_next(t, insn, n) \
+ ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
+@@ -58,6 +60,36 @@ void insn_init(struct insn *insn, const
+ insn->addr_bytes = 4;
+ }
+
++static const insn_byte_t xen_prefix[] = { __XEN_EMULATE_PREFIX };
++static const insn_byte_t kvm_prefix[] = { __KVM_EMULATE_PREFIX };
++
++static int __insn_get_emulate_prefix(struct insn *insn,
++ const insn_byte_t *prefix, size_t len)
++{
++ size_t i;
++
++ for (i = 0; i < len; i++) {
++ if (peek_nbyte_next(insn_byte_t, insn, i) != prefix[i])
++ goto err_out;
++ }
++
++ insn->emulate_prefix_size = len;
++ insn->next_byte += len;
++
++ return 1;
++
++err_out:
++ return 0;
++}
++
++static void insn_get_emulate_prefix(struct insn *insn)
++{
++ if (__insn_get_emulate_prefix(insn, xen_prefix, sizeof(xen_prefix)))
++ return;
++
++ __insn_get_emulate_prefix(insn, kvm_prefix, sizeof(kvm_prefix));
++}
++
+ /**
+ * insn_get_prefixes - scan x86 instruction prefix bytes
+ * @insn: &struct insn containing instruction
+@@ -76,6 +108,8 @@ void insn_get_prefixes(struct insn *insn
+ if (prefixes->got)
+ return;
+
++ insn_get_emulate_prefix(insn);
++
+ nb = 0;
+ lb = 0;
+ b = peek_next(insn_byte_t, insn);
+--- /dev/null
++++ b/tools/arch/x86/include/asm/emulate_prefix.h
+@@ -0,0 +1,14 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_X86_EMULATE_PREFIX_H
++#define _ASM_X86_EMULATE_PREFIX_H
++
++/*
++ * Virt escape sequences to trigger instruction emulation;
++ * ideally these would decode to 'whole' instruction and not destroy
++ * the instruction stream; sadly this is not true for the 'kvm' one :/
++ */
++
++#define __XEN_EMULATE_PREFIX 0x0f,0x0b,0x78,0x65,0x6e /* ud2 ; .ascii "xen" */
++#define __KVM_EMULATE_PREFIX 0x0f,0x0b,0x6b,0x76,0x6d /* ud2 ; .ascii "kvm" */
++
++#endif
+--- a/tools/arch/x86/include/asm/insn.h
++++ b/tools/arch/x86/include/asm/insn.h
+@@ -45,6 +45,7 @@ struct insn {
+ struct insn_field immediate2; /* for 64bit imm or seg16 */
+ };
+
++ int emulate_prefix_size;
+ insn_attr_t attr;
+ unsigned char opnd_bytes;
+ unsigned char addr_bytes;
+@@ -128,6 +129,11 @@ static inline int insn_is_evex(struct in
+ return (insn->vex_prefix.nbytes == 4);
+ }
+
++static inline int insn_has_emulate_prefix(struct insn *insn)
++{
++ return !!insn->emulate_prefix_size;
++}
++
+ /* Ensure this instruction is decoded completely */
+ static inline int insn_complete(struct insn *insn)
+ {
+--- a/tools/arch/x86/lib/insn.c
++++ b/tools/arch/x86/lib/insn.c
+@@ -13,6 +13,8 @@
+ #include "../include/asm/inat.h"
+ #include "../include/asm/insn.h"
+
++#include "../include/asm/emulate_prefix.h"
++
+ /* Verify next sizeof(t) bytes can be on the same instruction */
+ #define validate_next(t, insn, n) \
+ ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
+@@ -58,6 +60,36 @@ void insn_init(struct insn *insn, const
+ insn->addr_bytes = 4;
+ }
+
++static const insn_byte_t xen_prefix[] = { __XEN_EMULATE_PREFIX };
++static const insn_byte_t kvm_prefix[] = { __KVM_EMULATE_PREFIX };
++
++static int __insn_get_emulate_prefix(struct insn *insn,
++ const insn_byte_t *prefix, size_t len)
++{
++ size_t i;
++
++ for (i = 0; i < len; i++) {
++ if (peek_nbyte_next(insn_byte_t, insn, i) != prefix[i])
++ goto err_out;
++ }
++
++ insn->emulate_prefix_size = len;
++ insn->next_byte += len;
++
++ return 1;
++
++err_out:
++ return 0;
++}
++
++static void insn_get_emulate_prefix(struct insn *insn)
++{
++ if (__insn_get_emulate_prefix(insn, xen_prefix, sizeof(xen_prefix)))
++ return;
++
++ __insn_get_emulate_prefix(insn, kvm_prefix, sizeof(kvm_prefix));
++}
++
+ /**
+ * insn_get_prefixes - scan x86 instruction prefix bytes
+ * @insn: &struct insn containing instruction
+@@ -76,6 +108,8 @@ void insn_get_prefixes(struct insn *insn
+ if (prefixes->got)
+ return;
+
++ insn_get_emulate_prefix(insn);
++
+ nb = 0;
+ lb = 0;
+ b = peek_next(insn_byte_t, insn);
+--- a/tools/objtool/sync-check.sh
++++ b/tools/objtool/sync-check.sh
+@@ -4,6 +4,7 @@
+ FILES='
+ arch/x86/include/asm/inat_types.h
+ arch/x86/include/asm/orc_types.h
++arch/x86/include/asm/emulate_prefix.h
+ arch/x86/lib/x86-opcode-map.txt
+ arch/x86/tools/gen-insn-attr-x86.awk
+ '
+@@ -46,4 +47,4 @@ done
+ check arch/x86/include/asm/inat.h '-I "^#include [\"<]\(asm/\)*inat_types.h[\">]"'
+ check arch/x86/include/asm/insn.h '-I "^#include [\"<]\(asm/\)*inat.h[\">]"'
+ check arch/x86/lib/inat.c '-I "^#include [\"<]\(../include/\)*asm/insn.h[\">]"'
+-check arch/x86/lib/insn.c '-I "^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]"'
++check arch/x86/lib/insn.c '-I "^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]" -I "^#include [\"<]\(../include/\)*asm/emulate_prefix.h[\">]"'
+--- a/tools/perf/check-headers.sh
++++ b/tools/perf/check-headers.sh
+@@ -28,6 +28,7 @@ arch/x86/include/asm/disabled-features.h
+ arch/x86/include/asm/required-features.h
+ arch/x86/include/asm/cpufeatures.h
+ arch/x86/include/asm/inat_types.h
++arch/x86/include/asm/emulate_prefix.h
+ arch/x86/include/uapi/asm/prctl.h
+ arch/x86/lib/x86-opcode-map.txt
+ arch/x86/tools/gen-insn-attr-x86.awk
+@@ -116,7 +117,7 @@ check lib/ctype.c '-I "^EXPORT_SY
+ check arch/x86/include/asm/inat.h '-I "^#include [\"<]\(asm/\)*inat_types.h[\">]"'
+ check arch/x86/include/asm/insn.h '-I "^#include [\"<]\(asm/\)*inat.h[\">]"'
+ check arch/x86/lib/inat.c '-I "^#include [\"<]\(../include/\)*asm/insn.h[\">]"'
+-check arch/x86/lib/insn.c '-I "^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]"'
++check arch/x86/lib/insn.c '-I "^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]" -I "^#include [\"<]\(../include/\)*asm/emulate_prefix.h[\">]"'
+
+ # diff non-symmetric files
+ check_2 tools/perf/arch/x86/entry/syscalls/syscall_64.tbl arch/x86/entry/syscalls/syscall_64.tbl
--- /dev/null
+From b3dc0695fa40c3b280230fb6fb7fb7a94ce28bf4 Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Fri, 6 Sep 2019 22:13:59 +0900
+Subject: x86: xen: kvm: Gather the definition of emulate prefixes
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit b3dc0695fa40c3b280230fb6fb7fb7a94ce28bf4 upstream.
+
+Gather the emulate prefixes, which force the following instruction
+to be emulated under virtualization, into one place.
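+
+A sketch of how the shared byte list serves both C and assembly once it
+goes through __ASM_FORM() (see the xen/interface.h hunk below):
+
+	#define XEN_EMULATE_PREFIX __ASM_FORM(.byte __XEN_EMULATE_PREFIX ;)
+
+	/* C:   " .byte 0x0f,0x0b,0x78,0x65,0x6e ; "  - inline-asm string */
+	/* asm:   .byte 0x0f,0x0b,0x78,0x65,0x6e ;    - raw directive     */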
+
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: x86@kernel.org
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: Andrew Cooper <andrew.cooper3@citrix.com>
+Cc: Stefano Stabellini <sstabellini@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: xen-devel@lists.xenproject.org
+Cc: Randy Dunlap <rdunlap@infradead.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/156777563917.25081.7286628561790289995.stgit@devnote2
+Signed-off-by: Maximilian Heyne <mheyne@amazon.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/emulate_prefix.h | 14 ++++++++++++++
+ arch/x86/include/asm/xen/interface.h | 11 ++++-------
+ arch/x86/kvm/x86.c | 4 +++-
+ 3 files changed, 21 insertions(+), 8 deletions(-)
+ create mode 100644 arch/x86/include/asm/emulate_prefix.h
+
+--- /dev/null
++++ b/arch/x86/include/asm/emulate_prefix.h
+@@ -0,0 +1,14 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_X86_EMULATE_PREFIX_H
++#define _ASM_X86_EMULATE_PREFIX_H
++
++/*
++ * Virt escape sequences to trigger instruction emulation;
++ * ideally these would decode to 'whole' instruction and not destroy
++ * the instruction stream; sadly this is not true for the 'kvm' one :/
++ */
++
++#define __XEN_EMULATE_PREFIX 0x0f,0x0b,0x78,0x65,0x6e /* ud2 ; .ascii "xen" */
++#define __KVM_EMULATE_PREFIX 0x0f,0x0b,0x6b,0x76,0x6d /* ud2 ; .ascii "kvm" */
++
++#endif
+--- a/arch/x86/include/asm/xen/interface.h
++++ b/arch/x86/include/asm/xen/interface.h
+@@ -379,12 +379,9 @@ struct xen_pmu_arch {
+ * Prefix forces emulation of some non-trapping instructions.
+ * Currently only CPUID.
+ */
+-#ifdef __ASSEMBLY__
+-#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
+-#define XEN_CPUID XEN_EMULATE_PREFIX cpuid
+-#else
+-#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
+-#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
+-#endif
++#include <asm/emulate_prefix.h>
++
++#define XEN_EMULATE_PREFIX __ASM_FORM(.byte __XEN_EMULATE_PREFIX ;)
++#define XEN_CPUID XEN_EMULATE_PREFIX __ASM_FORM(cpuid)
+
+ #endif /* _ASM_X86_XEN_INTERFACE_H */
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -68,6 +68,7 @@
+ #include <asm/mshyperv.h>
+ #include <asm/hypervisor.h>
+ #include <asm/intel_pt.h>
++#include <asm/emulate_prefix.h>
+ #include <clocksource/hyperv_timer.h>
+
+ #define CREATE_TRACE_POINTS
+@@ -5583,6 +5584,7 @@ EXPORT_SYMBOL_GPL(kvm_write_guest_virt_s
+
+ int handle_ud(struct kvm_vcpu *vcpu)
+ {
++ static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX };
+ int emul_type = EMULTYPE_TRAP_UD;
+ char sig[5]; /* ud2; .ascii "kvm" */
+ struct x86_exception e;
+@@ -5590,7 +5592,7 @@ int handle_ud(struct kvm_vcpu *vcpu)
+ if (force_emulation_prefix &&
+ kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu),
+ sig, sizeof(sig), &e) == 0 &&
+- memcmp(sig, "\xf\xbkvm", sizeof(sig)) == 0) {
++ memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) {
+ kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig));
+ emul_type = EMULTYPE_TRAP_UD_FORCED;
+ }