--- /dev/null
+From 932196dcaf15a72980b35bc4f29a9504f10f4212 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Apr 2022 18:25:12 +0200
+Subject: drm/panel/raspberrypi-touchscreen: Avoid NULL deref if not
+ initialised
+
+From: Dave Stevenson <dave.stevenson@raspberrypi.com>
+
+[ Upstream commit f92055ae0acb035891e988ce345d6b81a0316423 ]
+
+If a call to rpi_touchscreen_i2c_write from rpi_touchscreen_probe
+fails before mipi_dsi_device_register_full is called, then
+in trying to log the error message it uses ts->dsi->dev when
+it is still NULL.
+
+Use ts->i2c->dev instead, which is initialised earlier in probe.
+
+Fixes: 2f733d6194bd ("drm/panel: Add support for the Raspberry Pi 7" Touchscreen.")
+Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.com>
+Signed-off-by: Stefan Wahren <stefan.wahren@i2se.com>
+Signed-off-by: Maxime Ripard <maxime@cerno.tech>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220415162513.42190-2-stefan.wahren@i2se.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+index bbdd086be7f5..90487df62480 100644
+--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
++++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+@@ -229,7 +229,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
+
+ ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
+ if (ret)
+- dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
++ dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret);
+ }
+
+ static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
+--
+2.35.1
+
--- /dev/null
+From ecb4d3735937509623b12cca0c7ee3e86f5f66c4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Apr 2022 18:25:13 +0200
+Subject: drm/panel/raspberrypi-touchscreen: Initialise the bridge in prepare
+
+From: Dave Stevenson <dave.stevenson@raspberrypi.com>
+
+[ Upstream commit 5f18c0782b99e26121efa93d20b76c19e17aa1dd ]
+
+The panel has a prepare call which is before video starts, and an
+enable call which is after.
+The Toshiba bridge should be configured before video, so move
+the relevant power and initialisation calls to prepare.
+
+Fixes: 2f733d6194bd ("drm/panel: Add support for the Raspberry Pi 7" Touchscreen.")
+Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.com>
+Signed-off-by: Stefan Wahren <stefan.wahren@i2se.com>
+Signed-off-by: Maxime Ripard <maxime@cerno.tech>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220415162513.42190-3-stefan.wahren@i2se.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+index 90487df62480..4b92c6341490 100644
+--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
++++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+@@ -265,7 +265,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel)
+ return 0;
+ }
+
+-static int rpi_touchscreen_enable(struct drm_panel *panel)
++static int rpi_touchscreen_prepare(struct drm_panel *panel)
+ {
+ struct rpi_touchscreen *ts = panel_to_ts(panel);
+ int i;
+@@ -295,6 +295,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel)
+ rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
+ msleep(100);
+
++ return 0;
++}
++
++static int rpi_touchscreen_enable(struct drm_panel *panel)
++{
++ struct rpi_touchscreen *ts = panel_to_ts(panel);
++
+ /* Turn on the backlight. */
+ rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
+
+@@ -349,7 +356,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel,
+ static const struct drm_panel_funcs rpi_touchscreen_funcs = {
+ .disable = rpi_touchscreen_disable,
+ .unprepare = rpi_touchscreen_noop,
+- .prepare = rpi_touchscreen_noop,
++ .prepare = rpi_touchscreen_prepare,
+ .enable = rpi_touchscreen_enable,
+ .get_modes = rpi_touchscreen_get_modes,
+ };
+--
+2.35.1
+
--- /dev/null
+From dd3235df9cfe9a7a98d18a6f8b1fd79256cb89e8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Apr 2022 21:50:07 +0800
+Subject: drm/vc4: Use pm_runtime_resume_and_get to fix pm_runtime_get_sync()
+ usage
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+[ Upstream commit 3d0b93d92a2790337aa9d18cb332d02356a24126 ]
+
+If the device is already in a runtime PM enabled state
+pm_runtime_get_sync() will return 1.
+
+Also, we need to call pm_runtime_put_noidle() when pm_runtime_get_sync()
+fails, so use pm_runtime_resume_and_get() instead. This function
+will handle this.
+
+Fixes: 4078f5757144 ("drm/vc4: Add DSI driver")
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Maxime Ripard <maxime@cerno.tech>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220420135008.2757-1-linmq006@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vc4/vc4_dsi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
+index eaf276978ee7..ad84b56f4091 100644
+--- a/drivers/gpu/drm/vc4/vc4_dsi.c
++++ b/drivers/gpu/drm/vc4/vc4_dsi.c
+@@ -835,7 +835,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
+ unsigned long phy_clock;
+ int ret;
+
+- ret = pm_runtime_get_sync(dev);
++ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->port);
+ return;
+--
+2.35.1
+
--- /dev/null
+From 210bdb099010e34fd3417a88737f233ac1293147 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Apr 2022 15:08:40 +1000
+Subject: KVM: PPC: Fix TCE handling for VFIO
+
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+
+[ Upstream commit 26a62b750a4e6364b0393562f66759b1494c3a01 ]
+
+The LoPAPR spec defines a guest visible IOMMU with a variable page size.
+Currently QEMU advertises 4K, 64K, 2M, 16MB pages, a Linux VM picks
+the biggest (16MB). In the case of a passed though PCI device, there is
+a hardware IOMMU which does not support all pages sizes from the above -
+P8 cannot do 2MB and P9 cannot do 16MB. So for each emulated
+16M IOMMU page we may create several smaller mappings ("TCEs") in
+the hardware IOMMU.
+
+The code wrongly uses the emulated TCE index instead of hardware TCE
+index in error handling. The problem is easier to see on POWER8 with
+multi-level TCE tables (when only the first level is preallocated)
+as hash mode uses real mode TCE hypercalls handlers.
+The kernel starts using indirect tables when VMs get bigger than 128GB
+(depends on the max page order).
+The very first real mode hcall is going to fail with H_TOO_HARD as
+in the real mode we cannot allocate memory for TCEs (we can in the virtual
+mode) but on the way out the code attempts to clear hardware TCEs using
+emulated TCE indexes which corrupts random kernel memory because
+it_offset==1<<59 is subtracted from those indexes and the resulting index
+is out of the TCE table bounds.
+
+This fixes kvmppc_clear_tce() to use the correct TCE indexes.
+
+While at it, this fixes TCE cache invalidation which uses emulated TCE
+indexes instead of the hardware ones. This went unnoticed as 64bit DMA
+is used these days and VMs map all RAM in one go and only then do DMA
+and this is when the TCE cache gets populated.
+
+Potentially this could slow down mapping, however normally 16MB
+emulated pages are backed by 64K hardware pages so it is one write to
+the "TCE Kill" per 256 updates which is not that bad considering the size
+of the cache (1024 TCEs or so).
+
+Fixes: ca1fc489cfa0 ("KVM: PPC: Book3S: Allow backing bigger guest IOMMU pages with smaller physical pages")
+
+Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
+Tested-by: David Gibson <david@gibson.dropbear.id.au>
+Reviewed-by: Frederic Barrat <fbarrat@linux.ibm.com>
+Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20220420050840.328223-1-aik@ozlabs.ru
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kvm/book3s_64_vio.c | 45 +++++++++++++++--------------
+ arch/powerpc/kvm/book3s_64_vio_hv.c | 44 ++++++++++++++--------------
+ 2 files changed, 45 insertions(+), 44 deletions(-)
+
+diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
+index 8da93fdfa59e..c640053ab03f 100644
+--- a/arch/powerpc/kvm/book3s_64_vio.c
++++ b/arch/powerpc/kvm/book3s_64_vio.c
+@@ -421,13 +421,19 @@ static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
+ tbl[idx % TCES_PER_PAGE] = tce;
+ }
+
+-static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
+- unsigned long entry)
++static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
++ struct iommu_table *tbl, unsigned long entry)
+ {
+- unsigned long hpa = 0;
+- enum dma_data_direction dir = DMA_NONE;
++ unsigned long i;
++ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
++ unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
++
++ for (i = 0; i < subpages; ++i) {
++ unsigned long hpa = 0;
++ enum dma_data_direction dir = DMA_NONE;
+
+- iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
++ iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
++ }
+ }
+
+ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
+@@ -486,6 +492,8 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
+ break;
+ }
+
++ iommu_tce_kill(tbl, io_entry, subpages);
++
+ return ret;
+ }
+
+@@ -545,6 +553,8 @@ static long kvmppc_tce_iommu_map(struct kvm *kvm,
+ break;
+ }
+
++ iommu_tce_kill(tbl, io_entry, subpages);
++
+ return ret;
+ }
+
+@@ -591,10 +601,9 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
+ entry, ua, dir);
+
+- iommu_tce_kill(stit->tbl, entry, 1);
+
+ if (ret != H_SUCCESS) {
+- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
++ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
+ goto unlock_exit;
+ }
+ }
+@@ -670,13 +679,13 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ */
+ if (get_user(tce, tces + i)) {
+ ret = H_TOO_HARD;
+- goto invalidate_exit;
++ goto unlock_exit;
+ }
+ tce = be64_to_cpu(tce);
+
+ if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
+ ret = H_PARAMETER;
+- goto invalidate_exit;
++ goto unlock_exit;
+ }
+
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+@@ -685,19 +694,15 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ iommu_tce_direction(tce));
+
+ if (ret != H_SUCCESS) {
+- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
+- entry);
+- goto invalidate_exit;
++ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
++ entry + i);
++ goto unlock_exit;
+ }
+ }
+
+ kvmppc_tce_put(stt, entry + i, tce);
+ }
+
+-invalidate_exit:
+- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
+- iommu_tce_kill(stit->tbl, entry, npages);
+-
+ unlock_exit:
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
+@@ -736,20 +741,16 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
+ continue;
+
+ if (ret == H_TOO_HARD)
+- goto invalidate_exit;
++ return ret;
+
+ WARN_ON_ONCE(1);
+- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
++ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
+ }
+ }
+
+ for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
+ kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
+
+-invalidate_exit:
+- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
+- iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);
+-
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
+diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
+index e5ba96c41f3f..57af53a6a2d8 100644
+--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
++++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
+@@ -247,13 +247,19 @@ static void iommu_tce_kill_rm(struct iommu_table *tbl,
+ tbl->it_ops->tce_kill(tbl, entry, pages, true);
+ }
+
+-static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
+- unsigned long entry)
++static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt,
++ struct iommu_table *tbl, unsigned long entry)
+ {
+- unsigned long hpa = 0;
+- enum dma_data_direction dir = DMA_NONE;
++ unsigned long i;
++ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
++ unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
++
++ for (i = 0; i < subpages; ++i) {
++ unsigned long hpa = 0;
++ enum dma_data_direction dir = DMA_NONE;
+
+- iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
++ iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir);
++ }
+ }
+
+ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
+@@ -316,6 +322,8 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
+ break;
+ }
+
++ iommu_tce_kill_rm(tbl, io_entry, subpages);
++
+ return ret;
+ }
+
+@@ -379,6 +387,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
+ break;
+ }
+
++ iommu_tce_kill_rm(tbl, io_entry, subpages);
++
+ return ret;
+ }
+
+@@ -424,10 +434,8 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
+ stit->tbl, entry, ua, dir);
+
+- iommu_tce_kill_rm(stit->tbl, entry, 1);
+-
+ if (ret != H_SUCCESS) {
+- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
++ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry);
+ return ret;
+ }
+ }
+@@ -569,7 +577,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ ua = 0;
+ if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
+ ret = H_PARAMETER;
+- goto invalidate_exit;
++ goto unlock_exit;
+ }
+
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+@@ -578,19 +586,15 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ iommu_tce_direction(tce));
+
+ if (ret != H_SUCCESS) {
+- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
+- entry);
+- goto invalidate_exit;
++ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl,
++ entry + i);
++ goto unlock_exit;
+ }
+ }
+
+ kvmppc_rm_tce_put(stt, entry + i, tce);
+ }
+
+-invalidate_exit:
+- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
+- iommu_tce_kill_rm(stit->tbl, entry, npages);
+-
+ unlock_exit:
+ if (!prereg)
+ arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
+@@ -632,20 +636,16 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
+ continue;
+
+ if (ret == H_TOO_HARD)
+- goto invalidate_exit;
++ return ret;
+
+ WARN_ON_ONCE_RM(1);
+- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
++ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i);
+ }
+ }
+
+ for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
+ kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);
+
+-invalidate_exit:
+- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
+- iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);
+-
+ return ret;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 4a797aa231b09ecf8d6c84fcb2630d305643c63d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Feb 2022 09:54:17 -0500
+Subject: perf/core: Fix perf_mmap fail when CONFIG_PERF_USE_VMALLOC enabled
+
+From: Zhipeng Xie <xiezhipeng1@huawei.com>
+
+[ Upstream commit 60490e7966659b26d74bf1fa4aa8693d9a94ca88 ]
+
+This problem can be reproduced with CONFIG_PERF_USE_VMALLOC enabled on
+both x86_64 and aarch64 arch when using sysdig -B(using ebpf)[1].
+sysdig -B works fine after rebuilding the kernel with
+CONFIG_PERF_USE_VMALLOC disabled.
+
+I tracked it down to the if condition event->rb->nr_pages != nr_pages
+in perf_mmap is true when CONFIG_PERF_USE_VMALLOC is enabled where
+event->rb->nr_pages = 1 and nr_pages = 2048 resulting perf_mmap to
+return -EINVAL. This is because when CONFIG_PERF_USE_VMALLOC is
+enabled, rb->nr_pages is always equal to 1.
+
+Arch with CONFIG_PERF_USE_VMALLOC enabled by default:
+ arc/arm/csky/mips/sh/sparc/xtensa
+
+Arch with CONFIG_PERF_USE_VMALLOC disabled by default:
+ x86_64/aarch64/...
+
+Fix this problem by using data_page_nr()
+
+[1] https://github.com/draios/sysdig
+
+Fixes: 906010b2134e ("perf_event: Provide vmalloc() based mmap() backing")
+Signed-off-by: Zhipeng Xie <xiezhipeng1@huawei.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20220209145417.6495-1-xiezhipeng1@huawei.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/events/core.c | 2 +-
+ kernel/events/internal.h | 5 +++++
+ kernel/events/ring_buffer.c | 5 -----
+ 3 files changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 79d8b27cf2fc..9aa6563587d8 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -6221,7 +6221,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ again:
+ mutex_lock(&event->mmap_mutex);
+ if (event->rb) {
+- if (event->rb->nr_pages != nr_pages) {
++ if (data_page_nr(event->rb) != nr_pages) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+diff --git a/kernel/events/internal.h b/kernel/events/internal.h
+index 228801e20788..aa23ffdaf819 100644
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -116,6 +116,11 @@ static inline int page_order(struct perf_buffer *rb)
+ }
+ #endif
+
++static inline int data_page_nr(struct perf_buffer *rb)
++{
++ return rb->nr_pages << page_order(rb);
++}
++
+ static inline unsigned long perf_data_size(struct perf_buffer *rb)
+ {
+ return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index ef91ae75ca56..4032cd475000 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -856,11 +856,6 @@ void rb_free(struct perf_buffer *rb)
+ }
+
+ #else
+-static int data_page_nr(struct perf_buffer *rb)
+-{
+- return rb->nr_pages << page_order(rb);
+-}
+-
+ static struct page *
+ __perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
+ {
+--
+2.35.1
+
--- /dev/null
+From 04d83662756415e43006e17d0e5844e03aef1803 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Apr 2022 20:32:01 +0800
+Subject: perf report: Set PERF_SAMPLE_DATA_SRC bit for Arm SPE event
+
+From: Leo Yan <leo.yan@linaro.org>
+
+[ Upstream commit ccb17caecfbd542f49a2a79ae088136ba8bfb794 ]
+
+Since commit bb30acae4c4dacfa ("perf report: Bail out --mem-mode if mem
+info is not available") "perf mem report" and "perf report --mem-mode"
+don't report result if the PERF_SAMPLE_DATA_SRC bit is missed in sample
+type.
+
+The commit ffab487052054162 ("perf: arm-spe: Fix perf report
+--mem-mode") partially fixes the issue. It adds PERF_SAMPLE_DATA_SRC
+bit for Arm SPE event, this allows the perf data file generated by
+kernel v5.18-rc1 or later version can be reported properly.
+
+On the other hand, perf tool still fails to be backward compatible
+with a data file recorded by an older version's perf which contains Arm
+SPE trace data. This patch is a workaround in the reporting phase: when
+it detects an ARM SPE PMU event without the PERF_SAMPLE_DATA_SRC bit, it
+will force-set the bit in the sample type and print a warning message.
+
+Fixes: bb30acae4c4dacfa ("perf report: Bail out --mem-mode if mem info is not available")
+Reviewed-by: James Clark <james.clark@arm.com>
+Signed-off-by: Leo Yan <leo.yan@linaro.org>
+Tested-by: German Gomez <german.gomez@arm.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
+Link: https://lore.kernel.org/r/20220414123201.842754-1-leo.yan@linaro.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/builtin-report.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
+index 91cab5cdfbc1..b55ee073c2f7 100644
+--- a/tools/perf/builtin-report.c
++++ b/tools/perf/builtin-report.c
+@@ -340,6 +340,7 @@ static int report__setup_sample_type(struct report *rep)
+ struct perf_session *session = rep->session;
+ u64 sample_type = evlist__combined_sample_type(session->evlist);
+ bool is_pipe = perf_data__is_pipe(session->data);
++ struct evsel *evsel;
+
+ if (session->itrace_synth_opts->callchain ||
+ session->itrace_synth_opts->add_callchain ||
+@@ -394,6 +395,19 @@ static int report__setup_sample_type(struct report *rep)
+ }
+
+ if (sort__mode == SORT_MODE__MEMORY) {
++ /*
++ * FIXUP: prior to kernel 5.18, Arm SPE missed to set
++ * PERF_SAMPLE_DATA_SRC bit in sample type. For backward
++ * compatibility, set the bit if it's an old perf data file.
++ */
++ evlist__for_each_entry(session->evlist, evsel) {
++ if (strstr(evsel->name, "arm_spe") &&
++ !(sample_type & PERF_SAMPLE_DATA_SRC)) {
++ evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
++ sample_type |= PERF_SAMPLE_DATA_SRC;
++ }
++ }
++
+ if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) {
+ ui__error("Selected --mem-mode but no mem data. "
+ "Did you call perf record without -d?\n");
+--
+2.35.1
+
--- /dev/null
+From 8c18f931051077e92a6606606f263c88aa17fa07 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Apr 2022 17:18:27 +0530
+Subject: powerpc/perf: Fix power9 event alternatives
+
+From: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
+
+[ Upstream commit 0dcad700bb2776e3886fe0a645a4bf13b1e747cd ]
+
+When scheduling a group of events, there are constraint checks done to
+make sure all events can go in a group. Example, one of the criteria is
+that events in a group cannot use the same PMC. But platform specific
+PMU supports alternative event for some of the event codes. During
+perf_event_open(), if any event group doesn't match constraint check
+criteria, further lookup is done to find alternative event.
+
+By current design, the array of alternatives events in PMU code is
+expected to be sorted by column 0. This is because in
+find_alternative() the return criteria is based on event code
+comparison. ie. "event < ev_alt[i][0])". This optimisation is there
+since find_alternative() can be called multiple times. In power9 PMU
+code, the alternative event array is not sorted properly and hence there
+is breakage in finding alternative events.
+
+To work with existing logic, fix the alternative event array to be
+sorted by column 0 for power9-pmu.c
+
+Results:
+
+With alternative events, multiplexing can be avoided. That is, for
+example, in power9 PM_LD_MISS_L1 (0x3e054) has alternative event,
+PM_LD_MISS_L1_ALT (0x400f0). This is an identical event which can be
+programmed in a different PMC.
+
+Before:
+
+ # perf stat -e r3e054,r300fc
+
+ Performance counter stats for 'system wide':
+
+ 1057860 r3e054 (50.21%)
+ 379 r300fc (49.79%)
+
+ 0.944329741 seconds time elapsed
+
+Since both the events are using PMC3 in this case, they are
+multiplexed here.
+
+After:
+
+ # perf stat -e r3e054,r300fc
+
+ Performance counter stats for 'system wide':
+
+ 1006948 r3e054
+ 182 r300fc
+
+Fixes: 91e0bd1e6251 ("powerpc/perf: Add PM_LD_MISS_L1 and PM_BR_2PATH to power9 event list")
+Signed-off-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
+Reviewed-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20220419114828.89843-1-atrajeev@linux.vnet.ibm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/perf/power9-pmu.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
+index 2a57e93a79dc..7245355bee28 100644
+--- a/arch/powerpc/perf/power9-pmu.c
++++ b/arch/powerpc/perf/power9-pmu.c
+@@ -133,11 +133,11 @@ int p9_dd22_bl_ev[] = {
+
+ /* Table of alternatives, sorted by column 0 */
+ static const unsigned int power9_event_alternatives[][MAX_ALT] = {
+- { PM_INST_DISP, PM_INST_DISP_ALT },
+- { PM_RUN_CYC_ALT, PM_RUN_CYC },
+- { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
+- { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
+ { PM_BR_2PATH, PM_BR_2PATH_ALT },
++ { PM_INST_DISP, PM_INST_DISP_ALT },
++ { PM_RUN_CYC_ALT, PM_RUN_CYC },
++ { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
++ { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
+ };
+
+ static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+--
+2.35.1
+
--- /dev/null
+From c19d56fb938b1c461653f3801f301df2befecdb9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Apr 2022 17:02:20 +0800
+Subject: sched/pelt: Fix attach_entity_load_avg() corner case
+
+From: kuyo chang <kuyo.chang@mediatek.com>
+
+[ Upstream commit 40f5aa4c5eaebfeaca4566217cb9c468e28ed682 ]
+
+The warning in cfs_rq_is_decayed() triggered:
+
+ SCHED_WARN_ON(cfs_rq->avg.load_avg ||
+ cfs_rq->avg.util_avg ||
+ cfs_rq->avg.runnable_avg)
+
+There exists a corner case in attach_entity_load_avg() which will
+cause load_sum to be zero while load_avg will not be.
+
+Consider se_weight is 88761 as per the sched_prio_to_weight[] table.
+Further assume the get_pelt_divider() is 47742, this gives:
+se->avg.load_avg is 1.
+
+However, calculating load_sum:
+
+ se->avg.load_sum = div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se));
+ se->avg.load_sum = 1*47742/88761 = 0.
+
+Then enqueue_load_avg() adds this to the cfs_rq totals:
+
+ cfs_rq->avg.load_avg += se->avg.load_avg;
+ cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
+
+Resulting in load_avg being 1 with load_sum is 0, which will trigger
+the WARN.
+
+Fixes: f207934fb79d ("sched/fair: Align PELT windows between cfs_rq and its se")
+Signed-off-by: kuyo chang <kuyo.chang@mediatek.com>
+[peterz: massage changelog]
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
+Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Link: https://lkml.kernel.org/r/20220414090229.342-1-kuyo.chang@mediatek.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index acd9833b8ec2..1a306ef51bbe 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3748,11 +3748,11 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
+
+ se->avg.runnable_sum = se->avg.runnable_avg * divider;
+
+- se->avg.load_sum = divider;
+- if (se_weight(se)) {
+- se->avg.load_sum =
+- div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se));
+- }
++ se->avg.load_sum = se->avg.load_avg * divider;
++ if (se_weight(se) < se->avg.load_sum)
++ se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
++ else
++ se->avg.load_sum = 1;
+
+ enqueue_load_avg(cfs_rq, se);
+ cfs_rq->avg.util_avg += se->avg.util_avg;
+--
+2.35.1
+
stat-fix-inconsistency-between-struct-stat-and-struc.patch
nvme-add-a-quirk-to-disable-namespace-identifiers.patch
nvme-pci-disable-namespace-identifiers-for-qemu-cont.patch
+sched-pelt-fix-attach_entity_load_avg-corner-case.patch
+perf-core-fix-perf_mmap-fail-when-config_perf_use_vm.patch
+drm-panel-raspberrypi-touchscreen-avoid-null-deref-i.patch
+drm-panel-raspberrypi-touchscreen-initialise-the-bri.patch
+kvm-ppc-fix-tce-handling-for-vfio.patch
+drm-vc4-use-pm_runtime_resume_and_get-to-fix-pm_runt.patch
+powerpc-perf-fix-power9-event-alternatives.patch
+perf-report-set-perf_sample_data_src-bit-for-arm-spe.patch