--- /dev/null
+From 2dfe63e61cc31ee59ce951672b0850b5229cd5b0 Mon Sep 17 00:00:00 2001
+From: Marco Elver <elver@google.com>
+Date: Thu, 14 Apr 2022 19:13:40 -0700
+Subject: mm, kfence: support kmem_dump_obj() for KFENCE objects
+
+From: Marco Elver <elver@google.com>
+
+commit 2dfe63e61cc31ee59ce951672b0850b5229cd5b0 upstream.
+
+Calling kmem_obj_info() via kmem_dump_obj() on KFENCE objects has been
+producing garbage data due to the object not actually being maintained
+by SLAB or SLUB.
+
+Fix this by implementing __kfence_obj_info() that copies relevant
+information to struct kmem_obj_info when the object was allocated by
+KFENCE; this is called by a common kmem_obj_info(), which also calls the
+slab/slub/slob specific variant now called __kmem_obj_info().
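+
+In short (this mirrors the mm/slab_common.c hunk below), the common
+helper tries KFENCE first and falls back to the allocator-specific
+variant:
+
+	static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
+	{
+		/* KFENCE objects are not maintained by SLAB/SLUB/SLOB. */
+		if (__kfence_obj_info(kpp, object, page))
+			return;
+		__kmem_obj_info(kpp, object, page);
+	}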
+
+For completeness, kmem_dump_obj() now also indicates whether the object
+was allocated by KFENCE.
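+
+For example, a debugging call site (a minimal sketch; whether kmalloc()
+hands out a KFENCE object is probabilistic) would now see the KFENCE
+origin instead of garbage:
+
+	void *p = kmalloc(32, GFP_KERNEL);	/* may be KFENCE-allocated */
+
+	if (kmem_valid_obj(p))
+		kmem_dump_obj(p);	/* output now notes "(kfence)" */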
+
+Link: https://lore.kernel.org/all/20220323090520.GG16885@xsang-OptiPlex-9020/
+Link: https://lkml.kernel.org/r/20220406131558.3558585-1-elver@google.com
+Fixes: b89fb5ef0ce6 ("mm, kfence: insert KFENCE hooks for SLUB")
+Fixes: d3fb45f370d9 ("mm, kfence: insert KFENCE hooks for SLAB")
+Signed-off-by: Marco Elver <elver@google.com>
+Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
+Reported-by: kernel test robot <oliver.sang@intel.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz> [slab]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/kfence.h | 24 ++++++++++++++++++++++++
+ mm/kfence/core.c | 21 ---------------------
+ mm/kfence/kfence.h | 21 +++++++++++++++++++++
+ mm/kfence/report.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++
+ mm/slab.c | 2 +-
+ mm/slab.h | 2 +-
+ mm/slab_common.c | 9 +++++++++
+ mm/slob.c | 2 +-
+ mm/slub.c | 2 +-
+ 9 files changed, 105 insertions(+), 25 deletions(-)
+
+--- a/include/linux/kfence.h
++++ b/include/linux/kfence.h
+@@ -202,6 +202,22 @@ static __always_inline __must_check bool
+ */
+ bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
+
++#ifdef CONFIG_PRINTK
++struct kmem_obj_info;
++/**
++ * __kfence_obj_info() - fill kmem_obj_info struct
++ * @kpp: kmem_obj_info to be filled
++ * @object: the object
++ *
++ * Return:
++ * * false - not a KFENCE object
++ * * true - a KFENCE object; @kpp has been filled
++ *
++ * Copies information to @kpp for KFENCE objects.
++ */
++bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page);
++#endif
++
+ #else /* CONFIG_KFENCE */
+
+ static inline bool is_kfence_address(const void *addr) { return false; }
+@@ -219,6 +235,14 @@ static inline bool __must_check kfence_h
+ return false;
+ }
+
++#ifdef CONFIG_PRINTK
++struct kmem_obj_info;
++static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
++{
++ return false;
++}
++#endif
++
+ #endif
+
+ #endif /* _LINUX_KFENCE_H */
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -221,27 +221,6 @@ static bool kfence_unprotect(unsigned lo
+ return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
+ }
+
+-static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
+-{
+- long index;
+-
+- /* The checks do not affect performance; only called from slow-paths. */
+-
+- if (!is_kfence_address((void *)addr))
+- return NULL;
+-
+- /*
+- * May be an invalid index if called with an address at the edge of
+- * __kfence_pool, in which case we would report an "invalid access"
+- * error.
+- */
+- index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
+- if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
+- return NULL;
+-
+- return &kfence_metadata[index];
+-}
+-
+ static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
+ {
+ unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
+--- a/mm/kfence/kfence.h
++++ b/mm/kfence/kfence.h
+@@ -93,6 +93,27 @@ struct kfence_metadata {
+
+ extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
+
++static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
++{
++ long index;
++
++ /* The checks do not affect performance; only called from slow-paths. */
++
++ if (!is_kfence_address((void *)addr))
++ return NULL;
++
++ /*
++ * May be an invalid index if called with an address at the edge of
++ * __kfence_pool, in which case we would report an "invalid access"
++ * error.
++ */
++ index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
++ if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
++ return NULL;
++
++ return &kfence_metadata[index];
++}
++
+ /* KFENCE error types for report generation. */
+ enum kfence_error_type {
+ KFENCE_ERROR_OOB, /* Detected an out-of-bounds access. */
+--- a/mm/kfence/report.c
++++ b/mm/kfence/report.c
+@@ -273,3 +273,50 @@ void kfence_report_error(unsigned long a
+ /* We encountered a memory safety error, taint the kernel! */
+ add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
+ }
++
++#ifdef CONFIG_PRINTK
++static void kfence_to_kp_stack(const struct kfence_track *track, void **kp_stack)
++{
++ int i, j;
++
++ i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
++ for (j = 0; i < track->num_stack_entries && j < KS_ADDRS_COUNT; ++i, ++j)
++ kp_stack[j] = (void *)track->stack_entries[i];
++ if (j < KS_ADDRS_COUNT)
++ kp_stack[j] = NULL;
++}
++
++bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
++{
++ struct kfence_metadata *meta = addr_to_metadata((unsigned long)object);
++ unsigned long flags;
++
++ if (!meta)
++ return false;
++
++ /*
++ * If state is UNUSED, at least show the pointer requested; the rest
++ * would be garbage data.
++ */
++ kpp->kp_ptr = object;
++
++ /* Requesting info on a never-used object is almost certainly a bug. */
++ if (WARN_ON(meta->state == KFENCE_OBJECT_UNUSED))
++ return true;
++
++ raw_spin_lock_irqsave(&meta->lock, flags);
++
++ kpp->kp_page = page;
++ kpp->kp_slab_cache = meta->cache;
++ kpp->kp_objp = (void *)meta->addr;
++ kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
++ if (meta->state == KFENCE_OBJECT_FREED)
++ kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
++ /* get_stack_skipnr() ensures the first entry is outside allocator. */
++ kpp->kp_ret = kpp->kp_stack[0];
++
++ raw_spin_unlock_irqrestore(&meta->lock, flags);
++
++ return true;
++}
++#endif
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -3658,7 +3658,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_calle
+ #endif /* CONFIG_NUMA */
+
+ #ifdef CONFIG_PRINTK
+-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
++void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
+ {
+ struct kmem_cache *cachep;
+ unsigned int objnr;
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -643,7 +643,7 @@ struct kmem_obj_info {
+ void *kp_stack[KS_ADDRS_COUNT];
+ void *kp_free_stack[KS_ADDRS_COUNT];
+ };
+-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page);
++void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page);
+ #endif
+
+ #endif /* MM_SLAB_H */
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -568,6 +568,13 @@ bool kmem_valid_obj(void *object)
+ }
+ EXPORT_SYMBOL_GPL(kmem_valid_obj);
+
++static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
++{
++ if (__kfence_obj_info(kpp, object, page))
++ return;
++ __kmem_obj_info(kpp, object, page);
++}
++
+ /**
+ * kmem_dump_obj - Print available slab provenance information
+ * @object: slab object for which to find provenance information.
+@@ -603,6 +610,8 @@ void kmem_dump_obj(void *object)
+ pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
+ else
+ pr_cont(" slab%s", cp);
++ if (is_kfence_address(object))
++ pr_cont(" (kfence)");
+ if (kp.kp_objp)
+ pr_cont(" start %px", kp.kp_objp);
+ if (kp.kp_data_offset)
+--- a/mm/slob.c
++++ b/mm/slob.c
+@@ -462,7 +462,7 @@ out:
+ }
+
+ #ifdef CONFIG_PRINTK
+-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
++void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
+ {
+ kpp->kp_ptr = object;
+ kpp->kp_page = page;
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -4299,7 +4299,7 @@ int __kmem_cache_shutdown(struct kmem_ca
+ }
+
+ #ifdef CONFIG_PRINTK
+-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
++void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
+ {
+ void *base;
+ int __maybe_unused i;
--- /dev/null
+From a668cc07f990d2ed19424d5c1a529521a9d1cee1 Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Wed, 13 Apr 2022 14:42:32 +0300
+Subject: perf tools: Fix segfault accessing sample_id xyarray
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+commit a668cc07f990d2ed19424d5c1a529521a9d1cee1 upstream.
+
+perf_evsel::sample_id is an xyarray which can cause a segfault when
+accessed beyond its size. e.g.
+
+ # perf record -e intel_pt// -C 1 sleep 1
+ Segmentation fault (core dumped)
+ #
+
+That is happening because a dummy event is opened to capture text poke
+events across all CPUs, whereas the mmap logic allocates according
+to the number of user_requested_cpus.
+
+In general, perf sometimes uses the evsel cpus to open events, and
+sometimes the evlist user_requested_cpus. However, it is not necessary
+to determine which case is which, because the opened event file
+descriptors are also kept in an xyarray whose size can be used to
+correctly size the sample_id xyarray, since there is one ID per file
+descriptor.
+
+Note: in the affected code path, the perf_evsel fd array is subsequently
+used to get the file descriptor for the mmap, so it makes sense for the
+xyarrays to be the same size there.
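+
+A sketch of the resulting sizing logic (mirroring the hunk below):
+
+	/* Before: sized from the evlist's CPU/thread maps. */
+	perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr);
+
+	/* After: one ID per opened fd, so reuse the fd xyarray's size. */
+	perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y);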
+
+Fixes: d1a177595b3a824c ("libperf: Adopt perf_evlist__mmap()/munmap() from tools/perf")
+Fixes: 246eba8e9041c477 ("perf tools: Add support for PERF_RECORD_TEXT_POKE")
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Acked-by: Ian Rogers <irogers@google.com>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: stable@vger.kernel.org # 5.5+
+Link: https://lore.kernel.org/r/20220413114232.26914-1-adrian.hunter@intel.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/lib/perf/evlist.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/tools/lib/perf/evlist.c
++++ b/tools/lib/perf/evlist.c
+@@ -577,7 +577,6 @@ int perf_evlist__mmap_ops(struct perf_ev
+ {
+ struct perf_evsel *evsel;
+ const struct perf_cpu_map *cpus = evlist->cpus;
+- const struct perf_thread_map *threads = evlist->threads;
+
+ if (!ops || !ops->get || !ops->mmap)
+ return -EINVAL;
+@@ -589,7 +588,7 @@ int perf_evlist__mmap_ops(struct perf_ev
+ perf_evlist__for_each_entry(evlist, evsel) {
+ if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+ evsel->sample_id == NULL &&
+- perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
++ perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
+ return -ENOMEM;
+ }
+