--- /dev/null
+From 3975e72b164dc8347a28dd0d5f11b346af534635 Mon Sep 17 00:00:00 2001
+From: Christopher Obbard <chris.obbard@collabora.com>
+Date: Fri, 13 Oct 2023 12:47:26 +0100
+Subject: arm64: dts: rockchip: Add i2s0-2ch-bus-bclk-off pins to RK3399
+
+From: Christopher Obbard <chris.obbard@collabora.com>
+
+commit 3975e72b164dc8347a28dd0d5f11b346af534635 upstream.
+
+Commit 0efaf8078393 ("arm64: dts: rockchip: add i2s0-2ch-bus pins on
+rk3399") introduced a pinctrl node for i2s0 in two-channel mode. Commit
+91419ae0420f ("arm64: dts: rockchip: use BCLK to GPIO switch on rk3399")
+modified i2s0 to switch the corresponding pins off when idle.
+
+Although an idle pinctrl node was added for i2s0 in 8-channel mode, a
+similar idle pinctrl node for i2s0 in 2-channel mode was not added. Add
+it.
+
+Fixes: 91419ae0420f ("arm64: dts: rockchip: use BCLK to GPIO switch on rk3399")
+Signed-off-by: Christopher Obbard <chris.obbard@collabora.com>
+Link: https://lore.kernel.org/r/20231013114737.494410-2-chris.obbard@collabora.com
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/rockchip/rk3399.dtsi | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+@@ -2396,6 +2396,16 @@
+ <4 RK_PA0 1 &pcfg_pull_none>;
+ };
+
++ i2s0_2ch_bus_bclk_off: i2s0-2ch-bus-bclk-off {
++ rockchip,pins =
++ <3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>,
++ <3 RK_PD1 1 &pcfg_pull_none>,
++ <3 RK_PD2 1 &pcfg_pull_none>,
++ <3 RK_PD3 1 &pcfg_pull_none>,
++ <3 RK_PD7 1 &pcfg_pull_none>,
++ <4 RK_PA0 1 &pcfg_pull_none>;
++ };
++
+ i2s0_8ch_bus: i2s0-8ch-bus {
+ rockchip,pins =
+ <3 RK_PD0 1 &pcfg_pull_none>,
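
For reference, the "bclk_on"/"bclk_off" pinctrl-names pairing that this
idle node serves is driven from C through the standard pinctrl consumer
API. A minimal sketch, with a hypothetical consumer function (not from
this patch):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/pinctrl/consumer.h>

    /* sketch: switch a device between its named pinctrl states */
    static int example_set_bclk(struct device *dev, bool on)
    {
            struct pinctrl *p = devm_pinctrl_get(dev);
            struct pinctrl_state *s;

            if (IS_ERR(p))
                    return PTR_ERR(p);

            s = pinctrl_lookup_state(p, on ? "bclk_on" : "bclk_off");
            if (IS_ERR(s))
                    return PTR_ERR(s);

            /* every pin in the selected state must be claimable by this
             * device -- the motivation for a dedicated 2-channel idle
             * node in this patch */
            return pinctrl_select_state(p, s);
    }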
--- /dev/null
+From 8cd79b729e746cb167f1563d015a93fc0a079899 Mon Sep 17 00:00:00 2001
+From: Christopher Obbard <chris.obbard@collabora.com>
+Date: Fri, 13 Oct 2023 12:47:27 +0100
+Subject: arm64: dts: rockchip: Fix i2s0 pin conflict on ROCK Pi 4 boards
+
+From: Christopher Obbard <chris.obbard@collabora.com>
+
+commit 8cd79b729e746cb167f1563d015a93fc0a079899 upstream.
+
+Commit 91419ae0420f ("arm64: dts: rockchip: use BCLK to GPIO switch on
+rk3399") modified i2s0 to switch the corresponding pins off when idle.
+For the ROCK Pi 4 boards, this means that i2s0 has the following pinctrl
+setting:
+
+ pinctrl-names = "bclk_on", "bclk_off";
+ pinctrl-0 = <&i2s0_2ch_bus>;
+ pinctrl-1 = <&i2s0_8ch_bus_bclk_off>;
+
+Due to this change, i2s0 fails to probe on my Radxa ROCK 4SE and ROCK Pi
+4B boards:
+
+ rockchip-pinctrl pinctrl: pin gpio3-29 already requested by leds; cannot claim for ff880000.i2s
+ rockchip-pinctrl pinctrl: pin-125 (ff880000.i2s) status -22
+ rockchip-pinctrl pinctrl: could not request pin 125 (gpio3-29) from group i2s0-8ch-bus-bclk-off on device rockchip-pinctrl
+ rockchip-i2s ff880000.i2s: Error applying setting, reverse things back
+ rockchip-i2s ff880000.i2s: bclk disable failed -22
+
+A pin requested for i2s0_8ch_bus_bclk_off has already been requested by
+user_led2, so whichever driver probes first will have the pin allocated.
+
+The hardware uses 2-channel i2s, so fix this error by setting pinctrl-1 to
+i2s0_2ch_bus_bclk_off, which doesn't contain the pin allocated to user_led2.
+
+I checked the schematics for all Radxa boards based on ROCK Pi 4 and this
+change is compatible with all boards.
+
+Fixes: 91419ae0420f ("arm64: dts: rockchip: use BCLK to GPIO switch on rk3399")
+Signed-off-by: Christopher Obbard <chris.obbard@collabora.com>
+Link: https://lore.kernel.org/r/20231013114737.494410-3-chris.obbard@collabora.com
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+@@ -493,6 +493,7 @@
+
+ &i2s0 {
+ pinctrl-0 = <&i2s0_2ch_bus>;
++ pinctrl-1 = <&i2s0_2ch_bus_bclk_off>;
+ rockchip,capture-channels = <2>;
+ rockchip,playback-channels = <2>;
+ status = "okay";
--- /dev/null
+From 64ffd2f1d00c6235dabe9704bbb0d9ce3e28147f Mon Sep 17 00:00:00 2001
+From: Mario Limonciello <mario.limonciello@amd.com>
+Date: Fri, 20 Oct 2023 10:26:29 -0500
+Subject: drm/amd: Disable ASPM for VI w/ all Intel systems
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+commit 64ffd2f1d00c6235dabe9704bbb0d9ce3e28147f upstream.
+
+Originally we quirked ASPM off specifically for VI when used with
+Alder Lake, but VI appears to have problems with Rocket Lake as
+well.
+
+Like we've done in the case of dpm for newer platforms, disable
+ASPM for all Intel systems.
+
+Cc: stable@vger.kernel.org # 5.15+
+Fixes: 0064b0ce85bb ("drm/amd/pm: enable ASPM by default")
+Reported-and-tested-by: Paolo Gentili <paolo.gentili@canonical.com>
+Closes: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/2036742
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/vi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
+@@ -1147,7 +1147,7 @@ static void vi_program_aspm(struct amdgp
+ bool bL1SS = false;
+ bool bClkReqSupport = true;
+
+- if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
++ if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_pcie_dynamic_switching_supported())
+ return;
+
+ if (adev->flags & AMD_IS_APU ||
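
A hedged sketch of the kind of host check the replacement helper
performs (simplified and illustrative; the real
amdgpu_device_pcie_dynamic_switching_supported() may differ in detail):

    #ifdef CONFIG_X86
    #include <asm/processor.h>
    #endif

    /* sketch: treat any Intel host as unable to handle dynamic PCIe
     * switching for these parts, regardless of CPU generation */
    static bool example_pcie_dynamic_switching_supported(void)
    {
    #ifdef CONFIG_X86
            if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                    return false;
    #endif
            return true;
    }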
--- /dev/null
+From 3d887d512494d678b17c57b835c32f4e48d34f26 Mon Sep 17 00:00:00 2001
+From: Lukasz Majczak <lma@semihalf.com>
+Date: Fri, 22 Sep 2023 08:34:10 +0200
+Subject: drm/dp_mst: Fix NULL deref in get_mst_branch_device_by_guid_helper()
+
+From: Lukasz Majczak <lma@semihalf.com>
+
+commit 3d887d512494d678b17c57b835c32f4e48d34f26 upstream.
+
+As get_mst_branch_device_by_guid_helper() is called recursively, from
+itself and from drm_dp_get_mst_branch_device_by_guid(), the mstb
+parameter has to be checked, otherwise a NULL dereference may occur in
+the call to memcmp() and cause the following:
+
+[12579.365869] BUG: kernel NULL pointer dereference, address: 0000000000000049
+[12579.365878] #PF: supervisor read access in kernel mode
+[12579.365880] #PF: error_code(0x0000) - not-present page
+[12579.365882] PGD 0 P4D 0
+[12579.365887] Oops: 0000 [#1] PREEMPT SMP NOPTI
+...
+[12579.365895] Workqueue: events_long drm_dp_mst_up_req_work
+[12579.365899] RIP: 0010:memcmp+0xb/0x29
+[12579.365921] Call Trace:
+[12579.365927] get_mst_branch_device_by_guid_helper+0x22/0x64
+[12579.365930] drm_dp_mst_up_req_work+0x137/0x416
+[12579.365933] process_one_work+0x1d0/0x419
+[12579.365935] worker_thread+0x11a/0x289
+[12579.365938] kthread+0x13e/0x14f
+[12579.365941] ? process_one_work+0x419/0x419
+[12579.365943] ? kthread_blkcg+0x31/0x31
+[12579.365946] ret_from_fork+0x1f/0x30
+
+As get_mst_branch_device_by_guid_helper() is recursive, moving the
+condition to the first line allows us to remove the similar check that
+stepped over NULL elements inside the loop.
+
+Fixes: 5e93b8208d3c ("drm/dp/mst: move GUID storage from mgr, port to only mst branch")
+Cc: <stable@vger.kernel.org> # 4.14+
+Signed-off-by: Lukasz Majczak <lma@semihalf.com>
+Reviewed-by: Radoslaw Biernacki <rad@chromium.org>
+Signed-off-by: Manasi Navare <navaremanasi@chromium.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230922063410.23626-1-lma@semihalf.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/display/drm_dp_mst_topology.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -2574,14 +2574,14 @@ static struct drm_dp_mst_branch *get_mst
+ struct drm_dp_mst_branch *found_mstb;
+ struct drm_dp_mst_port *port;
+
++ if (!mstb)
++ return NULL;
++
+ if (memcmp(mstb->guid, guid, 16) == 0)
+ return mstb;
+
+
+ list_for_each_entry(port, &mstb->ports, next) {
+- if (!port->mstb)
+- continue;
+-
+ found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
+
+ if (found_mstb)
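
The pattern at work: in a recursive tree search, a NULL guard at
function entry covers both the top-level call and every recursive call
on a child, which is why the per-child check inside the loop becomes
redundant. A generic sketch with hypothetical types:

    #include <linux/string.h>
    #include <linux/types.h>

    struct node {
            u8 key[16];
            struct node *children[8];
    };

    static struct node *find_by_key(struct node *n, const u8 *key)
    {
            int i;

            if (!n)         /* guards the root and every child */
                    return NULL;

            if (memcmp(n->key, key, 16) == 0)
                    return n;

            for (i = 0; i < 8; i++) {
                    struct node *found = find_by_key(n->children[i], key);

                    if (found)
                            return found;
            }

            return NULL;
    }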
--- /dev/null
+From 4cbed7702eb775cca22fff6827a549092cb59f61 Mon Sep 17 00:00:00 2001
+From: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
+Date: Fri, 20 Oct 2023 08:24:41 -0700
+Subject: drm/i915/pmu: Check if pmu is closed before stopping event
+
+From: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
+
+commit 4cbed7702eb775cca22fff6827a549092cb59f61 upstream.
+
+When the driver unbinds, the PMU is unregistered and i915->uabi_engines
+is set to RB_ROOT. Due to this, when the i915 PMU tries to stop the
+engine events, it hits a WARN_ON because the engine lookup fails.
+
+All other perf hooks handle this with a pmu->closed flag that is set
+when the PMU unregisters. The stop hook seems to have been left out.
+
+Check for pmu->closed in pmu_event_stop as well.
+
+Based on discussion here -
+https://patchwork.freedesktop.org/patch/492079/?series=105790&rev=2
+
+v2: s/is/if/ in commit title
+v3: Add fixes tag and cc stable
+
+Cc: <stable@vger.kernel.org> # v5.11+
+Fixes: b00bccb3f0bb ("drm/i915/pmu: Handle PCI unbind")
+Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
+Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20231020152441.3764850-1-umesh.nerlige.ramappa@intel.com
+(cherry picked from commit 31f6a06f0c543b43a38fab10f39e5fc45ad62aa2)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_pmu.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/gpu/drm/i915/i915_pmu.c
++++ b/drivers/gpu/drm/i915/i915_pmu.c
+@@ -760,9 +760,18 @@ static void i915_pmu_event_start(struct
+
+ static void i915_pmu_event_stop(struct perf_event *event, int flags)
+ {
++ struct drm_i915_private *i915 =
++ container_of(event->pmu, typeof(*i915), pmu.base);
++ struct i915_pmu *pmu = &i915->pmu;
++
++ if (pmu->closed)
++ goto out;
++
+ if (flags & PERF_EF_UPDATE)
+ i915_pmu_event_read(event);
+ i915_pmu_disable(event);
++
++out:
+ event->hw.state = PERF_HES_STOPPED;
+ }
+
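
The shape of the fix is the usual teardown-flag guard for PMU
callbacks: bail out before touching driver state once the device is
gone, while still leaving the perf core bookkeeping consistent. A
hedged sketch with a hypothetical driver struct:

    #include <linux/perf_event.h>

    struct example_pmu {
            struct pmu base;
            bool closed;    /* set when the PMU unregisters */
    };

    static void example_pmu_event_stop(struct perf_event *event, int flags)
    {
            struct example_pmu *pmu =
                    container_of(event->pmu, struct example_pmu, base);

            if (pmu->closed)        /* device already unbound */
                    goto out;

            /* ... read the final counter value, disable the event ... */
    out:
            /* always mark the event stopped so perf core bookkeeping
             * stays consistent even after unbind */
            event->hw.state = PERF_HES_STOPPED;
    }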
--- /dev/null
+From 92fe9dcbe4e109a7ce6bab3e452210a35b0ab493 Mon Sep 17 00:00:00 2001
+From: Rik van Riel <riel@surriel.com>
+Date: Thu, 5 Oct 2023 23:59:06 -0400
+Subject: hugetlbfs: clear resv_map pointer if mmap fails
+
+From: Rik van Riel <riel@surriel.com>
+
+commit 92fe9dcbe4e109a7ce6bab3e452210a35b0ab493 upstream.
+
+Patch series "hugetlbfs: close race between MADV_DONTNEED and page fault", v7.
+
+Malloc libraries, like jemalloc and tcmalloc, make decisions on when to
+call madvise independently from the code in the main application.
+
+This sometimes results in the application page faulting on an address,
+right after the malloc library has shot down the backing memory with
+MADV_DONTNEED.
+
+Usually this is harmless, because we always have some 4kB pages sitting
+around to satisfy a page fault. However, with hugetlbfs, systems often
+allocate only the exact number of huge pages that the application wants.
+
+Due to TLB batching, hugetlbfs MADV_DONTNEED will free pages outside of
+any lock taken on the page fault path, which can open up the following
+race condition:
+
+ CPU 1 CPU 2
+
+ MADV_DONTNEED
+ unmap page
+ shoot down TLB entry
+ page fault
+ fail to allocate a huge page
+ killed with SIGBUS
+ free page
+
+Fix that race by extending the hugetlb_vma_lock locking scheme to also
+cover private hugetlb mappings (with resv_map), and pulling the locking
+from __unmap_hugepage_range_final into helper functions called from
+zap_page_range_single. This ensures page faults stay locked out of the
+MADV_DONTNEED VMA until the huge pages have actually been freed.
+
+
+This patch (of 3):
+
+Hugetlbfs leaves a dangling pointer in the VMA if mmap fails. This has
+not been a problem so far, but other code in this patch series tries to
+follow that pointer.
+
+Link: https://lkml.kernel.org/r/20231006040020.3677377-1-riel@surriel.com
+Link: https://lkml.kernel.org/r/20231006040020.3677377-2-riel@surriel.com
+Fixes: 04ada095dcfc ("hugetlb: don't delete vma_lock in hugetlb MADV_DONTNEED processing")
+Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
+Signed-off-by: Rik van Riel <riel@surriel.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/hugetlb.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1138,8 +1138,7 @@ static void set_vma_resv_map(struct vm_a
+ VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
+ VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
+
+- set_vma_private_data(vma, (get_vma_private_data(vma) &
+- HPAGE_RESV_MASK) | (unsigned long)map);
++ set_vma_private_data(vma, (unsigned long)map);
+ }
+
+ static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
+@@ -6898,8 +6897,10 @@ out_err:
+ */
+ if (chg >= 0 && add < 0)
+ region_abort(resv_map, from, to, regions_needed);
+- if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
++ if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+ kref_put(&resv_map->refs, resv_map_release);
++ set_vma_resv_map(vma, NULL);
++ }
+ return false;
+ }
+
--- /dev/null
+From bf4916922c60f43efaa329744b3eef539aa6a2b2 Mon Sep 17 00:00:00 2001
+From: Rik van Riel <riel@surriel.com>
+Date: Thu, 5 Oct 2023 23:59:07 -0400
+Subject: hugetlbfs: extend hugetlb_vma_lock to private VMAs
+
+From: Rik van Riel <riel@surriel.com>
+
+commit bf4916922c60f43efaa329744b3eef539aa6a2b2 upstream.
+
+Extend the locking scheme used to protect shared hugetlb mappings from
+truncate vs page fault races, in order to protect private hugetlb mappings
+(with resv_map) against MADV_DONTNEED.
+
+Add a read-write semaphore to the resv_map data structure, and use that
+from the hugetlb_vma_(un)lock_* functions, in preparation for closing the
+race between MADV_DONTNEED and page faults.
+
+Link: https://lkml.kernel.org/r/20231006040020.3677377-3-riel@surriel.com
+Fixes: 04ada095dcfc ("hugetlb: don't delete vma_lock in hugetlb MADV_DONTNEED processing")
+Signed-off-by: Rik van Riel <riel@surriel.com>
+Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/hugetlb.h | 6 ++++++
+ mm/hugetlb.c | 41 +++++++++++++++++++++++++++++++++++++----
+ 2 files changed, 43 insertions(+), 4 deletions(-)
+
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -70,6 +70,7 @@ struct resv_map {
+ long adds_in_progress;
+ struct list_head region_cache;
+ long region_cache_count;
++ struct rw_semaphore rw_sema;
+ #ifdef CONFIG_CGROUP_HUGETLB
+ /*
+ * On private mappings, the counter to uncharge reservations is stored
+@@ -879,6 +880,11 @@ static inline bool hugepage_migration_su
+ return arch_hugetlb_migration_supported(h);
+ }
+
++static inline bool __vma_private_lock(struct vm_area_struct *vma)
++{
++ return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
++}
++
+ /*
+ * Movability check is different as compared to migration check.
+ * It determines whether or not a huge page should be placed on
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -96,6 +96,7 @@ static void hugetlb_vma_lock_alloc(struc
+ static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
+ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
++static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
+
+ static inline bool subpool_is_free(struct hugepage_subpool *spool)
+ {
+@@ -272,6 +273,10 @@ void hugetlb_vma_lock_read(struct vm_are
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+ down_read(&vma_lock->rw_sema);
++ } else if (__vma_private_lock(vma)) {
++ struct resv_map *resv_map = vma_resv_map(vma);
++
++ down_read(&resv_map->rw_sema);
+ }
+ }
+
+@@ -281,6 +286,10 @@ void hugetlb_vma_unlock_read(struct vm_a
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+ up_read(&vma_lock->rw_sema);
++ } else if (__vma_private_lock(vma)) {
++ struct resv_map *resv_map = vma_resv_map(vma);
++
++ up_read(&resv_map->rw_sema);
+ }
+ }
+
+@@ -290,6 +299,10 @@ void hugetlb_vma_lock_write(struct vm_ar
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+ down_write(&vma_lock->rw_sema);
++ } else if (__vma_private_lock(vma)) {
++ struct resv_map *resv_map = vma_resv_map(vma);
++
++ down_write(&resv_map->rw_sema);
+ }
+ }
+
+@@ -299,17 +312,27 @@ void hugetlb_vma_unlock_write(struct vm_
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+ up_write(&vma_lock->rw_sema);
++ } else if (__vma_private_lock(vma)) {
++ struct resv_map *resv_map = vma_resv_map(vma);
++
++ up_write(&resv_map->rw_sema);
+ }
+ }
+
+ int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
+ {
+- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+- if (!__vma_shareable_lock(vma))
+- return 1;
++ if (__vma_shareable_lock(vma)) {
++ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+- return down_write_trylock(&vma_lock->rw_sema);
++ return down_write_trylock(&vma_lock->rw_sema);
++ } else if (__vma_private_lock(vma)) {
++ struct resv_map *resv_map = vma_resv_map(vma);
++
++ return down_write_trylock(&resv_map->rw_sema);
++ }
++
++ return 1;
+ }
+
+ void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
+@@ -318,6 +341,10 @@ void hugetlb_vma_assert_locked(struct vm
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+ lockdep_assert_held(&vma_lock->rw_sema);
++ } else if (__vma_private_lock(vma)) {
++ struct resv_map *resv_map = vma_resv_map(vma);
++
++ lockdep_assert_held(&resv_map->rw_sema);
+ }
+ }
+
+@@ -350,6 +377,11 @@ static void __hugetlb_vma_unlock_write_f
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+ __hugetlb_vma_unlock_write_put(vma_lock);
++ } else if (__vma_private_lock(vma)) {
++ struct resv_map *resv_map = vma_resv_map(vma);
++
++ /* no free for anon vmas, but still need to unlock */
++ up_write(&resv_map->rw_sema);
+ }
+ }
+
+@@ -1068,6 +1100,7 @@ struct resv_map *resv_map_alloc(void)
+ kref_init(&resv_map->refs);
+ spin_lock_init(&resv_map->lock);
+ INIT_LIST_HEAD(&resv_map->regions);
++ init_rwsem(&resv_map->rw_sema);
+
+ resv_map->adds_in_progress = 0;
+ /*
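
The intended usage, sketched (illustrative helpers only; the real call
sites are wired up elsewhere in this series): the unmap side takes the
VMA lock for write while the fault path takes it for read, so a fault
can no longer race with MADV_DONTNEED freeing the huge pages under it.

    #include <linux/hugetlb.h>

    /* sketch: writer side (MADV_DONTNEED / unmap path) */
    static void example_zap_side(struct vm_area_struct *vma)
    {
            hugetlb_vma_lock_write(vma);
            /* ... unmap and free the huge pages ... */
            hugetlb_vma_unlock_write(vma);
    }

    /* sketch: reader side (page fault path) */
    static void example_fault_side(struct vm_area_struct *vma)
    {
            hugetlb_vma_lock_read(vma);
            /* ... find or allocate the page; freeing is locked out ... */
            hugetlb_vma_unlock_read(vma);
    }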
--- /dev/null
+From 099d7439ce03d0e7bc8f0c3d7878b562f3a48d3d Mon Sep 17 00:00:00 2001
+From: "Liam R. Howlett" <Liam.Howlett@oracle.com>
+Date: Thu, 12 Oct 2023 11:52:33 -0400
+Subject: maple_tree: add GFP_KERNEL to allocations in mas_expected_entries()
+
+From: Liam R. Howlett <Liam.Howlett@oracle.com>
+
+commit 099d7439ce03d0e7bc8f0c3d7878b562f3a48d3d upstream.
+
+Users complained about OOM errors during fork without triggering
+compaction. This can be fixed by modifying the flags used in
+mas_expected_entries() so that compaction will be triggered in low
+memory situations. Since mas_expected_entries() is only used during fork,
+the extra argument does not need to be passed through.
+
+Additionally, the two test_maple_tree test cases and one benchmark test
+were altered to use the correct locking type so that allocations would not
+trigger sleeping and thus fail. Testing was completed with lockdep atomic
+sleep detection.
+
+The additional locking change requires rwsem support additions to the
+tools/ directory through the use of POSIX pthread_rwlock_t. With this
+change test_maple_tree works in userspace, as a module, and in-kernel.
+
+Users may notice that the system gave up early on attempting to start new
+processes instead of attempting to reclaim memory.
+
+Link: https://lkml.kernel.org/r/20230915093243epcms1p46fa00bbac1ab7b7dca94acb66c44c456@epcms1p4
+Link: https://lkml.kernel.org/r/20231012155233.2272446-1-Liam.Howlett@oracle.com
+Fixes: 54a611b60590 ("Maple Tree: add new data structure")
+Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
+Reviewed-by: Peng Zhang <zhangpeng.00@bytedance.com>
+Cc: <jason.sim@samsung.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/maple_tree.c | 2 +-
+ lib/test_maple_tree.c | 35 ++++++++++++++++++++++++-----------
+ tools/include/linux/rwsem.h | 40 ++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 65 insertions(+), 12 deletions(-)
+ create mode 100644 tools/include/linux/rwsem.h
+
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -5913,7 +5913,7 @@ int mas_expected_entries(struct ma_state
+ /* Internal nodes */
+ nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
+ /* Add working room for split (2 nodes) + new parents */
+- mas_node_count(mas, nr_nodes + 3);
++ mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
+
+ /* Detect if allocations run out */
+ mas->mas_flags |= MA_STATE_PREALLOC;
+--- a/lib/test_maple_tree.c
++++ b/lib/test_maple_tree.c
+@@ -9,6 +9,7 @@
+
+ #include <linux/maple_tree.h>
+ #include <linux/module.h>
++#include <linux/rwsem.h>
+
+ #define MTREE_ALLOC_MAX 0x2000000000000Ul
+ #ifndef CONFIG_DEBUG_MAPLE_TREE
+@@ -1678,17 +1679,21 @@ static noinline void __init check_forkin
+ void *val;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(newmas, mt, 0, 0);
++ struct rw_semaphore newmt_lock;
++
++ init_rwsem(&newmt_lock);
+
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
+ mt_set_non_kernel(99999);
+- mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
++ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
++ mt_set_external_lock(&newmt, &newmt_lock);
+ newmas.tree = &newmt;
+ mas_reset(&newmas);
+ mas_reset(&mas);
+- mas_lock(&newmas);
++ down_write(&newmt_lock);
+ mas.index = 0;
+ mas.last = 0;
+ if (mas_expected_entries(&newmas, nr_entries)) {
+@@ -1703,10 +1708,10 @@ static noinline void __init check_forkin
+ }
+ rcu_read_unlock();
+ mas_destroy(&newmas);
+- mas_unlock(&newmas);
+ mt_validate(&newmt);
+ mt_set_non_kernel(0);
+- mtree_destroy(&newmt);
++ __mt_destroy(&newmt);
++ up_write(&newmt_lock);
+ }
+
+ static noinline void __init check_iteration(struct maple_tree *mt)
+@@ -1818,6 +1823,10 @@ static noinline void __init bench_forkin
+ void *val;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(newmas, mt, 0, 0);
++ struct rw_semaphore newmt_lock;
++
++ init_rwsem(&newmt_lock);
++ mt_set_external_lock(&newmt, &newmt_lock);
+
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+@@ -1832,7 +1841,7 @@ static noinline void __init bench_forkin
+ mas.index = 0;
+ mas.last = 0;
+ rcu_read_lock();
+- mas_lock(&newmas);
++ down_write(&newmt_lock);
+ if (mas_expected_entries(&newmas, nr_entries)) {
+ printk("OOM!");
+ BUG_ON(1);
+@@ -1843,11 +1852,11 @@ static noinline void __init bench_forkin
+ mas_store(&newmas, val);
+ }
+ mas_destroy(&newmas);
+- mas_unlock(&newmas);
+ rcu_read_unlock();
+ mt_validate(&newmt);
+ mt_set_non_kernel(0);
+- mtree_destroy(&newmt);
++ __mt_destroy(&newmt);
++ up_write(&newmt_lock);
+ }
+ }
+ #endif
+@@ -2453,6 +2462,10 @@ static noinline void __init check_dup_ga
+ void *tmp;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(newmas, &newmt, 0, 0);
++ struct rw_semaphore newmt_lock;
++
++ init_rwsem(&newmt_lock);
++ mt_set_external_lock(&newmt, &newmt_lock);
+
+ if (!zero_start)
+ i = 1;
+@@ -2462,9 +2475,9 @@ static noinline void __init check_dup_ga
+ mtree_store_range(mt, i*10, (i+1)*10 - gap,
+ xa_mk_value(i), GFP_KERNEL);
+
+- mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
++ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_non_kernel(99999);
+- mas_lock(&newmas);
++ down_write(&newmt_lock);
+ ret = mas_expected_entries(&newmas, nr_entries);
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, ret != 0);
+@@ -2477,9 +2490,9 @@ static noinline void __init check_dup_ga
+ }
+ rcu_read_unlock();
+ mas_destroy(&newmas);
+- mas_unlock(&newmas);
+
+- mtree_destroy(&newmt);
++ __mt_destroy(&newmt);
++ up_write(&newmt_lock);
+ }
+
+ /* Duplicate many sizes of trees. Mainly to test expected entry values */
+--- /dev/null
++++ b/tools/include/linux/rwsem.h
+@@ -0,0 +1,40 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++#ifndef _TOOLS__RWSEM_H
++#define _TOOLS__RWSEM_H
++
++#include <pthread.h>
++
++struct rw_semaphore {
++ pthread_rwlock_t lock;
++};
++
++static inline int init_rwsem(struct rw_semaphore *sem)
++{
++ return pthread_rwlock_init(&sem->lock, NULL);
++}
++
++static inline int exit_rwsem(struct rw_semaphore *sem)
++{
++ return pthread_rwlock_destroy(&sem->lock);
++}
++
++static inline int down_read(struct rw_semaphore *sem)
++{
++ return pthread_rwlock_rdlock(&sem->lock);
++}
++
++static inline int up_read(struct rw_semaphore *sem)
++{
++ return pthread_rwlock_unlock(&sem->lock);
++}
++
++static inline int down_write(struct rw_semaphore *sem)
++{
++ return pthread_rwlock_wrlock(&sem->lock);
++}
++
++static inline int up_write(struct rw_semaphore *sem)
++{
++ return pthread_rwlock_unlock(&sem->lock);
++}
++#endif /* _TOOLS__RWSEM_H */
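
Because the shim maps directly onto pthread_rwlock_t, kernel-style
rwsem code compiles unchanged in the userspace test harness. A small,
runnable usage sketch (build with -Itools/include and -lpthread):

    #include <linux/rwsem.h>

    int main(void)
    {
            struct rw_semaphore sem;

            init_rwsem(&sem);

            down_write(&sem);
            /* ... exclusive access, e.g. bulk-loading a tree ... */
            up_write(&sem);

            down_read(&sem);
            /* ... shared access ... */
            up_read(&sem);

            return exit_rwsem(&sem);
    }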
--- /dev/null
+From e0f81ab1e4f42ffece6440dc78f583eb352b9a71 Mon Sep 17 00:00:00 2001
+From: Sebastian Ott <sebott@redhat.com>
+Date: Fri, 29 Sep 2023 10:19:41 -0700
+Subject: mm: fix vm_brk_flags() to not bail out while holding lock
+
+From: Sebastian Ott <sebott@redhat.com>
+
+commit e0f81ab1e4f42ffece6440dc78f583eb352b9a71 upstream.
+
+Calling vm_brk_flags() with flags set other than VM_EXEC will exit the
+function without releasing the mmap_write_lock.
+
+Just do the sanity check before the lock is acquired. This doesn't fix an
+actual issue since no caller sets a flag other than VM_EXEC.
+
+Link: https://lkml.kernel.org/r/20230929171937.work.697-kees@kernel.org
+Fixes: 2e7ce7d354f2 ("mm/mmap: change do_brk_flags() to expand existing VMA and add do_brk_munmap()")
+Signed-off-by: Sebastian Ott <sebott@redhat.com>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
+Cc: Yu Zhao <yuzhao@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mmap.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -3147,13 +3147,13 @@ int vm_brk_flags(unsigned long addr, uns
+ if (!len)
+ return 0;
+
+- if (mmap_write_lock_killable(mm))
+- return -EINTR;
+-
+ /* Until we need other flags, refuse anything except VM_EXEC. */
+ if ((flags & (~VM_EXEC)) != 0)
+ return -EINVAL;
+
++ if (mmap_write_lock_killable(mm))
++ return -EINTR;
++
+ ret = check_brk_limits(addr, len);
+ if (ret)
+ goto limits_failed;
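
The bug class here is the classic "validate after lock, return without
unlock". The rule the fix applies: do pure argument checks before
taking the lock, so failure paths have nothing to undo. A generic
sketch (hypothetical function):

    #include <linux/mm.h>

    static int example_op(struct mm_struct *mm, unsigned long flags)
    {
            /* pure sanity checks first: nothing to release on failure */
            if (flags & ~VM_EXEC)
                    return -EINVAL;

            if (mmap_write_lock_killable(mm))
                    return -EINTR;

            /* ... work that actually needs the lock ... */

            mmap_write_unlock(mm);
            return 0;
    }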
--- /dev/null
+From 229e2253766c7cdfe024f1fe280020cc4711087c Mon Sep 17 00:00:00 2001
+From: Gregory Price <gourry.memverge@gmail.com>
+Date: Tue, 3 Oct 2023 10:48:56 -0400
+Subject: mm/migrate: fix do_pages_move for compat pointers
+
+From: Gregory Price <gourry.memverge@gmail.com>
+
+commit 229e2253766c7cdfe024f1fe280020cc4711087c upstream.
+
+do_pages_move() does not handle compat pointers for the page list
+correctly. Add an in_compat_syscall() check and the appropriate
+get_user() fetch when iterating the page list.
+
+It makes the syscall in compat mode (32-bit userspace, 64-bit kernel)
+work the same way as the native 32-bit syscall again, restoring the
+behavior before my broken commit 5b1b561ba73c ("mm: simplify
+compat_sys_move_pages").
+
+More specifically, my patch moved the parsing of the 'pages' array from
+the main entry point into do_pages_stat(), which left the syscall
+working correctly for the 'stat' operation (nodes = NULL), while the
+'move' operation (nodes != NULL) is now missing the conversion and
+interprets 'pages' as an array of 64-bit pointers instead of the
+intended 32-bit userspace pointers.
+
+It is possible that nobody noticed this bug because the few
+applications that actually call move_pages are unlikely to run in
+compat mode because of their large memory requirements, but this
+clearly fixes a user-visible regression and should have been caught by
+ltp.
+
+Link: https://lkml.kernel.org/r/20231003144857.752952-1-gregory.price@memverge.com
+Fixes: 5b1b561ba73c ("mm: simplify compat_sys_move_pages")
+Signed-off-by: Gregory Price <gregory.price@memverge.com>
+Reported-by: Arnd Bergmann <arnd@arndb.de>
+Co-developed-by: Arnd Bergmann <arnd@arndb.de>
+Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/migrate.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1787,6 +1787,7 @@ static int do_pages_move(struct mm_struc
+ const int __user *nodes,
+ int __user *status, int flags)
+ {
++ compat_uptr_t __user *compat_pages = (void __user *)pages;
+ int current_node = NUMA_NO_NODE;
+ LIST_HEAD(pagelist);
+ int start, i;
+@@ -1800,8 +1801,17 @@ static int do_pages_move(struct mm_struc
+ int node;
+
+ err = -EFAULT;
+- if (get_user(p, pages + i))
+- goto out_flush;
++ if (in_compat_syscall()) {
++ compat_uptr_t cp;
++
++ if (get_user(cp, compat_pages + i))
++ goto out_flush;
++
++ p = compat_ptr(cp);
++ } else {
++ if (get_user(p, pages + i))
++ goto out_flush;
++ }
+ if (get_user(node, nodes + i))
+ goto out_flush;
+ addr = (unsigned long)untagged_addr(p);
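
The fetch pattern, isolated: a user-supplied array of pointers is an
array of 32-bit compat_uptr_t values when the caller is a compat task,
so each element must be read at its native width and widened with
compat_ptr(). A hedged sketch with a hypothetical helper:

    #include <linux/compat.h>
    #include <linux/uaccess.h>

    /* sketch: fetch the i-th user pointer from an array that may have
     * been passed in by a 32-bit task */
    static int example_get_user_ptr(void __user * __user *pages,
                                    unsigned long i, void __user **p)
    {
            if (in_compat_syscall()) {
                    compat_uptr_t cp;

                    if (get_user(cp, (compat_uptr_t __user *)pages + i))
                            return -EFAULT;
                    *p = compat_ptr(cp);
                    return 0;
            }

            return get_user(*p, pages + i) ? -EFAULT : 0;
    }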
--- /dev/null
+From 61e21cf2d2c3cc5e60e8d0a62a77e250fccda62c Mon Sep 17 00:00:00 2001
+From: Kemeng Shi <shikemeng@huaweicloud.com>
+Date: Wed, 27 Sep 2023 17:44:01 +0800
+Subject: mm/page_alloc: correct start page when guard page debug is enabled
+
+From: Kemeng Shi <shikemeng@huaweicloud.com>
+
+commit 61e21cf2d2c3cc5e60e8d0a62a77e250fccda62c upstream.
+
+When guard page debug is enabled and set_page_guard() returns success, we
+fail to advance page to the start of the next range to split, so we
+unexpectedly split a page range that does not contain the target page.
+Move the start page update before set_page_guard() to fix this.
+
+Because we split the wrong range, the split pages cannot merge back to
+the original order when the target page is put back, and the split pages
+other than the target page are unusable. To be specific:
+
+Consider a target page that is the third page in a buddy page of order 2:
+| buddy-2 | Page | Target | Page |
+
+After breaking down to the target page, only the first page is set to
+Guard because of the bug:
+| Guard | Page | Target | Page |
+
+When we call put_page_back_buddy() with the target page, the buddy of the
+target is neither guard nor buddy, so the original order-2 page cannot be
+reconstructed:
+| Guard | Page | buddy-0 | Page |
+
+All pages except the target page are not on the free list and are
+unusable.
+
+Link: https://lkml.kernel.org/r/20230927094401.68205-1-shikemeng@huaweicloud.com
+Fixes: 06be6ff3d2ec ("mm,hwpoison: rework soft offline for free pages")
+Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
+Acked-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_alloc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -9638,6 +9638,7 @@ static void break_down_buddy_pages(struc
+ next_page = page;
+ current_buddy = page + size;
+ }
++ page = next_page;
+
+ if (set_page_guard(zone, current_buddy, high, migratetype))
+ continue;
+@@ -9645,7 +9646,6 @@ static void break_down_buddy_pages(struc
+ if (current_buddy != target) {
+ add_to_free_list(current_buddy, zone, high, migratetype);
+ set_buddy_order(current_buddy, high);
+- page = next_page;
+ }
+ }
+ }
--- /dev/null
+From 1aee9158bc978f91701c5992e395efbc6da2de3c Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Sat, 14 Oct 2023 21:34:40 -0400
+Subject: nfsd: lock_rename() needs both directories to live on the same fs
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 1aee9158bc978f91701c5992e395efbc6da2de3c upstream.
+
+... checking that after lock_rename() is too late. Incidentally,
+NFSv2 had no nfserr_xdev...
+
+Fixes: aa387d6ce153 ("nfsd: fix EXDEV checking in rename")
+Cc: stable@vger.kernel.org # v3.9+
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Acked-by: Chuck Lever <chuck.lever@oracle.com>
+Tested-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/vfs.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1659,6 +1659,12 @@ nfsd_rename(struct svc_rqst *rqstp, stru
+ if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
+ goto out;
+
++ err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
++ if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
++ goto out;
++ if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
++ goto out;
++
+ retry:
+ host_err = fh_want_write(ffhp);
+ if (host_err) {
+@@ -1690,12 +1696,6 @@ retry:
+ if (ndentry == trap)
+ goto out_dput_new;
+
+- host_err = -EXDEV;
+- if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
+- goto out_dput_new;
+- if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
+- goto out_dput_new;
+-
+ if ((ndentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK) &&
+ nfsd_has_cached_files(ndentry)) {
+ close_cached = true;
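
The contract being enforced: lock_rename() serializes on a single
superblock's s_vfs_rename_mutex, so both parent directories must
already be known to live on the same filesystem before it is called.
A generic sketch of the corrected ordering (hypothetical helper,
simplified):

    #include <linux/dcache.h>
    #include <linux/errno.h>
    #include <linux/namei.h>
    #include <linux/path.h>

    /* sketch: check same-mount/same-superblock *before* lock_rename() */
    static int example_rename_prep(const struct path *src_dir,
                                   const struct path *dst_dir,
                                   struct dentry **trap)
    {
            if (src_dir->mnt != dst_dir->mnt)
                    return -EXDEV;
            if (src_dir->dentry->d_sb != dst_dir->dentry->d_sb)
                    return -EXDEV;

            *trap = lock_rename(dst_dir->dentry, src_dir->dentry);
            return 0;
    }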
pinctrl-qcom-lpass-lpi-fix-concurrent-register-updat.patch
mcb-return-actual-parsed-size-when-reading-chameleon.patch
mcb-lpc-reallocate-memory-region-to-avoid-memory-ove.patch
+virtio_balloon-fix-endless-deflation-and-inflation-on-arm64.patch
+virtio-mmio-fix-memory-leak-of-vm_dev.patch
+virtio-crypto-handle-config-changed-by-work-queue.patch
+virtio_pci-fix-the-common-cfg-map-size.patch
+vsock-virtio-initialize-the_virtio_vsock-before-using-vqs.patch
+vhost-allow-null-msg.size-on-vhost_iotlb_invalidate.patch
+arm64-dts-rockchip-add-i2s0-2ch-bus-bclk-off-pins-to-rk3399.patch
+arm64-dts-rockchip-fix-i2s0-pin-conflict-on-rock-pi-4-boards.patch
+mm-fix-vm_brk_flags-to-not-bail-out-while-holding-lock.patch
+hugetlbfs-clear-resv_map-pointer-if-mmap-fails.patch
+mm-page_alloc-correct-start-page-when-guard-page-debug-is-enabled.patch
+mm-migrate-fix-do_pages_move-for-compat-pointers.patch
+hugetlbfs-extend-hugetlb_vma_lock-to-private-vmas.patch
+maple_tree-add-gfp_kernel-to-allocations-in-mas_expected_entries.patch
+nfsd-lock_rename-needs-both-directories-to-live-on-the-same-fs.patch
+drm-i915-pmu-check-if-pmu-is-closed-before-stopping-event.patch
+drm-amd-disable-aspm-for-vi-w-all-intel-systems.patch
+drm-dp_mst-fix-null-deref-in-get_mst_branch_device_by_guid_helper.patch
--- /dev/null
+From ca50ec377c2e94b0a9f8735de2856cd0f13beab4 Mon Sep 17 00:00:00 2001
+From: Eric Auger <eric.auger@redhat.com>
+Date: Wed, 27 Sep 2023 16:05:44 +0200
+Subject: vhost: Allow null msg.size on VHOST_IOTLB_INVALIDATE
+
+From: Eric Auger <eric.auger@redhat.com>
+
+commit ca50ec377c2e94b0a9f8735de2856cd0f13beab4 upstream.
+
+Commit e2ae38cf3d91 ("vhost: fix hung thread due to erroneous iotlb
+entries") forbade vhost iotlb messages with a null size, to prevent
+entries with size = start = 0 and last = ULONG_MAX from ending up in
+the iotlb.
+
+Then commit 95932ab2ea07 ("vhost: allow batching hint without size")
+restricted the check to the VHOST_IOTLB_UPDATE and VHOST_IOTLB_INVALIDATE
+message types, to fix a regression observed with the batching hint.
+
+Still, the introduction of that check caused a regression for some
+users attempting to invalidate the whole ULONG_MAX range by setting
+the size to 0. This is the case with the qemu/smmuv3/vhost integration,
+which does not work anymore. It looks safe to partially revert the
+original commit and allow VHOST_IOTLB_INVALIDATE messages with a null
+size; vhost_iotlb_del_range() will compute a correct end iova. Same for
+vhost_vdpa_iotlb_unmap().
+
+Signed-off-by: Eric Auger <eric.auger@redhat.com>
+Fixes: e2ae38cf3d91 ("vhost: fix hung thread due to erroneous iotlb entries")
+Cc: stable@vger.kernel.org # v5.17+
+Acked-by: Jason Wang <jasowang@redhat.com>
+Message-Id: <20230927140544.205088-1-eric.auger@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vhost/vhost.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -1176,9 +1176,7 @@ ssize_t vhost_chr_write_iter(struct vhos
+ goto done;
+ }
+
+- if ((msg.type == VHOST_IOTLB_UPDATE ||
+- msg.type == VHOST_IOTLB_INVALIDATE) &&
+- msg.size == 0) {
++ if (msg.type == VHOST_IOTLB_UPDATE && msg.size == 0) {
+ ret = -EINVAL;
+ goto done;
+ }
--- /dev/null
+From fa2e6947aa8844f25f5bad0d8cd1a541d9bc83eb Mon Sep 17 00:00:00 2001
+From: zhenwei pi <pizhenwei@bytedance.com>
+Date: Sat, 7 Oct 2023 14:43:09 +0800
+Subject: virtio-crypto: handle config changed by work queue
+
+From: zhenwei pi <pizhenwei@bytedance.com>
+
+commit fa2e6947aa8844f25f5bad0d8cd1a541d9bc83eb upstream.
+
+MST pointed out: the config change callback is also handled incorrectly
+in this driver; it takes a mutex from interrupt context.
+
+Handle config changes from a work queue instead.
+
+Cc: stable@vger.kernel.org
+Cc: Gonglei (Arei) <arei.gonglei@huawei.com>
+Cc: Halil Pasic <pasic@linux.ibm.com>
+Cc: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
+Message-Id: <20231007064309.844889-1-pizhenwei@bytedance.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/virtio/virtio_crypto_common.h | 3 +++
+ drivers/crypto/virtio/virtio_crypto_core.c | 14 +++++++++++++-
+ 2 files changed, 16 insertions(+), 1 deletion(-)
+
+--- a/drivers/crypto/virtio/virtio_crypto_common.h
++++ b/drivers/crypto/virtio/virtio_crypto_common.h
+@@ -35,6 +35,9 @@ struct virtio_crypto {
+ struct virtqueue *ctrl_vq;
+ struct data_queue *data_vq;
+
++ /* Work struct for config space updates */
++ struct work_struct config_work;
++
+ /* To protect the vq operations for the controlq */
+ spinlock_t ctrl_lock;
+
+--- a/drivers/crypto/virtio/virtio_crypto_core.c
++++ b/drivers/crypto/virtio/virtio_crypto_core.c
+@@ -335,6 +335,14 @@ static void virtcrypto_del_vqs(struct vi
+ virtcrypto_free_queues(vcrypto);
+ }
+
++static void vcrypto_config_changed_work(struct work_struct *work)
++{
++ struct virtio_crypto *vcrypto =
++ container_of(work, struct virtio_crypto, config_work);
++
++ virtcrypto_update_status(vcrypto);
++}
++
+ static int virtcrypto_probe(struct virtio_device *vdev)
+ {
+ int err = -EFAULT;
+@@ -454,6 +462,8 @@ static int virtcrypto_probe(struct virti
+ if (err)
+ goto free_engines;
+
++ INIT_WORK(&vcrypto->config_work, vcrypto_config_changed_work);
++
+ return 0;
+
+ free_engines:
+@@ -489,6 +499,7 @@ static void virtcrypto_remove(struct vir
+
+ dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
+
++ flush_work(&vcrypto->config_work);
+ if (virtcrypto_dev_started(vcrypto))
+ virtcrypto_dev_stop(vcrypto);
+ virtio_reset_device(vdev);
+@@ -503,7 +514,7 @@ static void virtcrypto_config_changed(st
+ {
+ struct virtio_crypto *vcrypto = vdev->priv;
+
+- virtcrypto_update_status(vcrypto);
++ schedule_work(&vcrypto->config_work);
+ }
+
+ #ifdef CONFIG_PM_SLEEP
+@@ -511,6 +522,7 @@ static int virtcrypto_freeze(struct virt
+ {
+ struct virtio_crypto *vcrypto = vdev->priv;
+
++ flush_work(&vcrypto->config_work);
+ virtio_reset_device(vdev);
+ virtcrypto_free_unused_reqs(vcrypto);
+ if (virtcrypto_dev_started(vcrypto))
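
The pattern in isolation: a virtio config-change callback runs in a
context that may not sleep, so anything that takes a mutex has to be
bounced to process context through a work item. A hedged sketch with a
hypothetical driver struct:

    #include <linux/mutex.h>
    #include <linux/virtio.h>
    #include <linux/workqueue.h>

    struct example_dev {
            struct mutex lock;
            struct work_struct config_work;
    };

    /* sketch: the deferred half, free to sleep and take mutexes */
    static void example_config_work(struct work_struct *work)
    {
            struct example_dev *d =
                    container_of(work, struct example_dev, config_work);

            mutex_lock(&d->lock);   /* safe: process context */
            /* ... re-read config space, update device status ... */
            mutex_unlock(&d->lock);
    }

    static void example_config_changed(struct virtio_device *vdev)
    {
            struct example_dev *d = vdev->priv;

            schedule_work(&d->config_work); /* never sleep here */
    }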
--- /dev/null
+From fab7f259227b8f70aa6d54e1de1a1f5f4729041c Mon Sep 17 00:00:00 2001
+From: Maximilian Heyne <mheyne@amazon.de>
+Date: Mon, 11 Sep 2023 09:03:29 +0000
+Subject: virtio-mmio: fix memory leak of vm_dev
+
+From: Maximilian Heyne <mheyne@amazon.de>
+
+commit fab7f259227b8f70aa6d54e1de1a1f5f4729041c upstream.
+
+With the recent removal of vm_dev from devres, its memory is only freed
+via the callback virtio_mmio_release_dev. However, this only takes
+effect after device_add is called by register_virtio_device. Until then
+it's an unmanaged resource and must be explicitly freed on error exit.
+
+This bug was discovered and resolved using Coverity Static Analysis
+Security Testing (SAST) by Synopsys, Inc.
+
+Cc: stable@vger.kernel.org
+Fixes: 55c91fedd03d ("virtio-mmio: don't break lifecycle of vm_dev")
+Signed-off-by: Maximilian Heyne <mheyne@amazon.de>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+Message-Id: <20230911090328.40538-1-mheyne@amazon.de>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Reviewed-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+---
+ drivers/virtio/virtio_mmio.c | 19 ++++++++++++++-----
+ 1 file changed, 14 insertions(+), 5 deletions(-)
+
+--- a/drivers/virtio/virtio_mmio.c
++++ b/drivers/virtio/virtio_mmio.c
+@@ -614,14 +614,17 @@ static int virtio_mmio_probe(struct plat
+ spin_lock_init(&vm_dev->lock);
+
+ vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
+- if (IS_ERR(vm_dev->base))
+- return PTR_ERR(vm_dev->base);
++ if (IS_ERR(vm_dev->base)) {
++ rc = PTR_ERR(vm_dev->base);
++ goto free_vm_dev;
++ }
+
+ /* Check magic value */
+ magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
+ if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
+ dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
+- return -ENODEV;
++ rc = -ENODEV;
++ goto free_vm_dev;
+ }
+
+ /* Check device version */
+@@ -629,7 +632,8 @@ static int virtio_mmio_probe(struct plat
+ if (vm_dev->version < 1 || vm_dev->version > 2) {
+ dev_err(&pdev->dev, "Version %ld not supported!\n",
+ vm_dev->version);
+- return -ENXIO;
++ rc = -ENXIO;
++ goto free_vm_dev;
+ }
+
+ vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
+@@ -638,7 +642,8 @@ static int virtio_mmio_probe(struct plat
+ * virtio-mmio device with an ID 0 is a (dummy) placeholder
+ * with no function. End probing now with no error reported.
+ */
+- return -ENODEV;
++ rc = -ENODEV;
++ goto free_vm_dev;
+ }
+ vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
+
+@@ -668,6 +673,10 @@ static int virtio_mmio_probe(struct plat
+ put_device(&vm_dev->vdev.dev);
+
+ return rc;
++
++free_vm_dev:
++ kfree(vm_dev);
++ return rc;
+ }
+
+ static int virtio_mmio_remove(struct platform_device *pdev)
--- /dev/null
+From 07622bd415639e9709579f400afd19e7e9866e5e Mon Sep 17 00:00:00 2001
+From: Gavin Shan <gshan@redhat.com>
+Date: Thu, 31 Aug 2023 11:10:07 +1000
+Subject: virtio_balloon: Fix endless deflation and inflation on arm64
+
+From: Gavin Shan <gshan@redhat.com>
+
+commit 07622bd415639e9709579f400afd19e7e9866e5e upstream.
+
+A deflation request whose target is not aligned to the guest page size
+causes endless deflation and inflation actions. For example, we receive
+a flood of QMP events for changes to the memory balloon's size after a
+deflation request with an unaligned target is sent for an ARM64 guest,
+where we have a 64KB base page size.
+
+ /home/gavin/sandbox/qemu.main/build/qemu-system-aarch64 \
+ -accel kvm -machine virt,gic-version=host -cpu host \
+ -smp maxcpus=8,cpus=8,sockets=2,clusters=2,cores=2,threads=1 \
+ -m 1024M,slots=16,maxmem=64G \
+ -object memory-backend-ram,id=mem0,size=512M \
+ -object memory-backend-ram,id=mem1,size=512M \
+ -numa node,nodeid=0,memdev=mem0,cpus=0-3 \
+ -numa node,nodeid=1,memdev=mem1,cpus=4-7 \
+ : \
+ -device virtio-balloon-pci,id=balloon0,bus=pcie.10
+
+ { "execute" : "balloon", "arguments": { "value" : 1073672192 } }
+ {"return": {}}
+ {"timestamp": {"seconds": 1693272173, "microseconds": 88667}, \
+ "event": "BALLOON_CHANGE", "data": {"actual": 1073610752}}
+ {"timestamp": {"seconds": 1693272174, "microseconds": 89704}, \
+ "event": "BALLOON_CHANGE", "data": {"actual": 1073610752}}
+ {"timestamp": {"seconds": 1693272175, "microseconds": 90819}, \
+ "event": "BALLOON_CHANGE", "data": {"actual": 1073610752}}
+ {"timestamp": {"seconds": 1693272176, "microseconds": 91961}, \
+ "event": "BALLOON_CHANGE", "data": {"actual": 1073610752}}
+ {"timestamp": {"seconds": 1693272177, "microseconds": 93040}, \
+ "event": "BALLOON_CHANGE", "data": {"actual": 1073676288}}
+ {"timestamp": {"seconds": 1693272178, "microseconds": 94117}, \
+ "event": "BALLOON_CHANGE", "data": {"actual": 1073676288}}
+ {"timestamp": {"seconds": 1693272179, "microseconds": 95337}, \
+ "event": "BALLOON_CHANGE", "data": {"actual": 1073610752}}
+ {"timestamp": {"seconds": 1693272180, "microseconds": 96615}, \
+ "event": "BALLOON_CHANGE", "data": {"actual": 1073676288}}
+ {"timestamp": {"seconds": 1693272181, "microseconds": 97626}, \
+ "event": "BALLOON_CHANGE", "data": {"actual": 1073610752}}
+ {"timestamp": {"seconds": 1693272182, "microseconds": 98693}, \
+ "event": "BALLOON_CHANGE", "data": {"actual": 1073676288}}
+ {"timestamp": {"seconds": 1693272183, "microseconds": 99698}, \
+ "event": "BALLOON_CHANGE", "data": {"actual": 1073610752}}
+ {"timestamp": {"seconds": 1693272184, "microseconds": 100727}, \
+ "event": "BALLOON_CHANGE", "data": {"actual": 1073610752}}
+ {"timestamp": {"seconds": 1693272185, "microseconds": 90430}, \
+ "event": "BALLOON_CHANGE", "data": {"actual": 1073610752}}
+ {"timestamp": {"seconds": 1693272186, "microseconds": 102999}, \
+ "event": "BALLOON_CHANGE", "data": {"actual": 1073676288}}
+ :
+ <The similar QMP events repeat>
+
+Fix it by aligning the target up to the guest page size, 64KB in this
+specific case. With this applied, no flooding QMP events are observed
+and the memory balloon's size is stabilized to 0x3ffe0000 soon
+after the deflation request is sent.
+
+ { "execute" : "balloon", "arguments": { "value" : 1073672192 } }
+ {"return": {}}
+ {"timestamp": {"seconds": 1693273328, "microseconds": 793075}, \
+ "event": "BALLOON_CHANGE", "data": {"actual": 1073610752}}
+ { "execute" : "query-balloon" }
+ {"return": {"actual": 1073610752}}
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Gavin Shan <gshan@redhat.com>
+Tested-by: Zhenyu Zhang <zhenyzha@redhat.com>
+Message-Id: <20230831011007.1032822-1-gshan@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/virtio/virtio_balloon.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -395,7 +395,11 @@ static inline s64 towards_target(struct
+ virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
+ &num_pages);
+
+- target = num_pages;
++ /*
++ * Aligned up to guest page size to avoid inflating and deflating
++ * balloon endlessly.
++ */
++ target = ALIGN(num_pages, VIRTIO_BALLOON_PAGES_PER_PAGE);
+ return target - vb->num_pages;
+ }
+
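
Worked numbers for the QMP session above, assuming a 64KB guest page
size (so VIRTIO_BALLOON_PAGES_PER_PAGE = 64K / 4K = 16): the request
for 1073672192 bytes on a 1 GiB guest asks for a balloon of

    num_pages = (1073741824 - 1073672192) / 4096 = 17

4KB balloon pages, which is not a multiple of 16, so the guest (which
inflates and deflates in whole 64KB pages) oscillates around it
forever. With the fix, target = ALIGN(17, 16) = 32 balloon pages, i.e.
two 64KB guest pages, giving the stable actual size
1073741824 - 32 * 4096 = 1073610752 = 0x3ffe0000 reported above.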
--- /dev/null
+From 061b39fdfe7fd98946e67637213bcbb10a318cca Mon Sep 17 00:00:00 2001
+From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+Date: Tue, 10 Oct 2023 11:11:18 +0800
+Subject: virtio_pci: fix the common cfg map size
+
+From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+
+commit 061b39fdfe7fd98946e67637213bcbb10a318cca upstream.
+
+The function vp_modern_map_capability() takes a size parameter, which
+indicates the size of the memory area to map.
+
+Currently that size is the size of virtio_pci_common_cfg, but some
+features (such as _F_RING_RESET) need the fields of
+virtio_pci_modern_common_cfg, so this commit changes the size to the
+size of virtio_pci_modern_common_cfg.
+
+Cc: stable@vger.kernel.org
+Fixes: 0b50cece0b78 ("virtio_pci: introduce helper to get/set queue reset")
+Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+Message-Id: <20231010031120.81272-3-xuanzhuo@linux.alibaba.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/virtio/virtio_pci_modern_dev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/virtio/virtio_pci_modern_dev.c
++++ b/drivers/virtio/virtio_pci_modern_dev.c
+@@ -282,7 +282,7 @@ int vp_modern_probe(struct virtio_pci_mo
+ err = -EINVAL;
+ mdev->common = vp_modern_map_capability(mdev, common,
+ sizeof(struct virtio_pci_common_cfg), 4,
+- 0, sizeof(struct virtio_pci_common_cfg),
++ 0, sizeof(struct virtio_pci_modern_common_cfg),
+ NULL, NULL);
+ if (!mdev->common)
+ goto err_map_common;
--- /dev/null
+From 53b08c4985158430fd6d035fb49443bada535210 Mon Sep 17 00:00:00 2001
+From: Alexandru Matei <alexandru.matei@uipath.com>
+Date: Tue, 24 Oct 2023 22:17:42 +0300
+Subject: vsock/virtio: initialize the_virtio_vsock before using VQs
+
+From: Alexandru Matei <alexandru.matei@uipath.com>
+
+commit 53b08c4985158430fd6d035fb49443bada535210 upstream.
+
+Once VQs are filled with empty buffers and we kick the host, it can send
+connection requests. If the_virtio_vsock is not initialized before that,
+replies are silently dropped and do not reach the host.
+
+virtio_transport_send_pkt() can queue packets once the_virtio_vsock is
+set, but they won't be processed until vsock->tx_run is set to true. We
+queue vsock->send_pkt_work when initialization finishes to send those
+packets queued earlier.
+
+Fixes: 0deab087b16a ("vsock/virtio: use RCU to avoid use-after-free on the_virtio_vsock")
+Signed-off-by: Alexandru Matei <alexandru.matei@uipath.com>
+Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
+Link: https://lore.kernel.org/r/20231024191742.14259-1-alexandru.matei@uipath.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/vmw_vsock/virtio_transport.c | 18 +++++++++++++++++-
+ 1 file changed, 17 insertions(+), 1 deletion(-)
+
+--- a/net/vmw_vsock/virtio_transport.c
++++ b/net/vmw_vsock/virtio_transport.c
+@@ -590,6 +590,11 @@ static int virtio_vsock_vqs_init(struct
+
+ virtio_device_ready(vdev);
+
++ return 0;
++}
++
++static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
++{
+ mutex_lock(&vsock->tx_lock);
+ vsock->tx_run = true;
+ mutex_unlock(&vsock->tx_lock);
+@@ -604,7 +609,16 @@ static int virtio_vsock_vqs_init(struct
+ vsock->event_run = true;
+ mutex_unlock(&vsock->event_lock);
+
+- return 0;
++ /* virtio_transport_send_pkt() can queue packets once
++ * the_virtio_vsock is set, but they won't be processed until
++ * vsock->tx_run is set to true. We queue vsock->send_pkt_work
++ * when initialization finishes to send those packets queued
++ * earlier.
++ * We don't need to queue the other workers (rx, event) because
++ * as long as we don't fill the queues with empty buffers, the
++ * host can't send us any notification.
++ */
++ queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
+ }
+
+ static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
+@@ -707,6 +721,7 @@ static int virtio_vsock_probe(struct vir
+ goto out;
+
+ rcu_assign_pointer(the_virtio_vsock, vsock);
++ virtio_vsock_vqs_start(vsock);
+
+ mutex_unlock(&the_virtio_vsock_mutex);
+
+@@ -779,6 +794,7 @@ static int virtio_vsock_restore(struct v
+ goto out;
+
+ rcu_assign_pointer(the_virtio_vsock, vsock);
++ virtio_vsock_vqs_start(vsock);
+
+ out:
+ mutex_unlock(&the_virtio_vsock_mutex);
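
Condensed, the probe path after this patch performs the bring-up in
three ordered steps (a recap of the diff above, not new code):

    /* 1) fill the virtqueues and mark the device ready */
    ret = virtio_vsock_vqs_init(vsock);
    if (ret < 0)
            goto out;

    /* 2) publish the pointer so virtio_transport_send_pkt() can
     *    start queueing packets */
    rcu_assign_pointer(the_virtio_vsock, vsock);

    /* 3) start the workers and flush any tx work queued between
     *    steps 2 and 3 */
    virtio_vsock_vqs_start(vsock);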