--- /dev/null
+From 81e9d6f8647650a7bead74c5f926e29970e834d1 Mon Sep 17 00:00:00 2001
+From: Seth Jenkins <sethjenkins@google.com>
+Date: Tue, 31 Jan 2023 12:25:55 -0500
+Subject: aio: fix mremap after fork null-deref
+
+From: Seth Jenkins <sethjenkins@google.com>
+
+commit 81e9d6f8647650a7bead74c5f926e29970e834d1 upstream.
+
+Commit e4a0d3e720e7 ("aio: Make it possible to remap aio ring") introduced
+a null-deref if mremap is called on an old aio mapping after fork as
+mm->ioctx_table will be set to NULL.
+
+[jmoyer@redhat.com: fix 80 column issue]
+Link: https://lkml.kernel.org/r/x49sffq4nvg.fsf@segfault.boston.devel.redhat.com
+Fixes: e4a0d3e720e7 ("aio: Make it possible to remap aio ring")
+Signed-off-by: Seth Jenkins <sethjenkins@google.com>
+Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Cc: Benjamin LaHaise <bcrl@kvack.org>
+Cc: Jann Horn <jannh@google.com>
+Cc: Pavel Emelyanov <xemul@parallels.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/aio.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -361,6 +361,9 @@ static int aio_ring_mremap(struct vm_are
+ spin_lock(&mm->ioctx_lock);
+ rcu_read_lock();
+ table = rcu_dereference(mm->ioctx_table);
++ if (!table)
++ goto out_unlock;
++
+ for (i = 0; i < table->nr; i++) {
+ struct kioctx *ctx;
+
+@@ -374,6 +377,7 @@ static int aio_ring_mremap(struct vm_are
+ }
+ }
+
++out_unlock:
+ rcu_read_unlock();
+ spin_unlock(&mm->ioctx_lock);
+ return res;
--- /dev/null
+From 8f32378986218812083b127da5ba42d48297d7c4 Mon Sep 17 00:00:00 2001
+From: Jack Xiao <Jack.Xiao@amd.com>
+Date: Fri, 10 Feb 2023 10:31:32 +0800
+Subject: drm/amd/amdgpu: fix warning during suspend
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jack Xiao <Jack.Xiao@amd.com>
+
+commit 8f32378986218812083b127da5ba42d48297d7c4 upstream.
+
+Freeing memory was warned during suspend.
+Move the self test out of suspend.
+
+Link: https://bugzilla.redhat.com/show_bug.cgi?id=2151825
+Cc: jfalempe@redhat.com
+Signed-off-by: Jack Xiao <Jack.Xiao@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
+Reviewed-and-tested-by: Evan Quan <evan.quan@amd.com>
+Tested-by: Jocelyn Falempe <jfalempe@redhat.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org # 6.1.x
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +++
+ drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 2 +-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4248,6 +4248,9 @@ int amdgpu_device_resume(struct drm_devi
+ #endif
+ adev->in_suspend = false;
+
++ if (adev->enable_mes)
++ amdgpu_mes_self_test(adev);
++
+ if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
+ DRM_WARN("smart shift update failed\n");
+
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+@@ -1339,7 +1339,7 @@ static int mes_v11_0_late_init(void *han
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* it's only intended for use in mes_self_test case, not for s0ix and reset */
+- if (!amdgpu_in_reset(adev) && !adev->in_s0ix &&
++ if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
+ (adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)))
+ amdgpu_mes_self_test(adev);
+
--- /dev/null
+From 2a00299e7447395d0898e7c6214817c06a61a8e8 Mon Sep 17 00:00:00 2001
+From: Leo Li <sunpeng.li@amd.com>
+Date: Thu, 9 Feb 2023 12:15:21 -0500
+Subject: drm/amd/display: Fail atomic_check early on normalize_zpos error
+
+From: Leo Li <sunpeng.li@amd.com>
+
+commit 2a00299e7447395d0898e7c6214817c06a61a8e8 upstream.
+
+[Why]
+
+drm_atomic_normalize_zpos() can return an error code when there's
+modeset lock contention. This was being ignored.
+
+[How]
+
+Bail out of atomic check if normalize_zpos() returns an error.
+
+Fixes: b261509952bc ("drm/amd/display: Fix double cursor on non-video RGB MPO")
+Signed-off-by: Leo Li <sunpeng.li@amd.com>
+Tested-by: Mikhail Gavrilov <mikhail.v.gavrilov@gmail.com>
+Reviewed-by: Hamza Mahfooz <hamza.mahfooz@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -9556,7 +9556,11 @@ static int amdgpu_dm_atomic_check(struct
+ * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
+ * atomic state, so call drm helper to normalize zpos.
+ */
+- drm_atomic_normalize_zpos(dev, state);
++ ret = drm_atomic_normalize_zpos(dev, state);
++ if (ret) {
++ drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
++ goto fail;
++ }
+
+ /* Remove exiting planes if they are modified */
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
--- /dev/null
+From bb2ff6c27bc9e1da4d3ec5e7b1d6b9df1092cb5a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Tue, 7 Feb 2023 16:33:37 +0200
+Subject: drm: Disable dynamic debug as broken
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit bb2ff6c27bc9e1da4d3ec5e7b1d6b9df1092cb5a upstream.
+
+CONFIG_DRM_USE_DYNAMIC_DEBUG breaks debug prints for (at least modular)
+drm drivers. The debug prints can be reinstated by manually frobbing
+/sys/module/drm/parameters/debug after the fact, but at that point the
+damage is done and all debugs from driver probe are lost. This makes
+drivers totally undebuggable.
+
+There's a more complete fix in progress [1], with further details, but
+we need this fixed in stable kernels. Mark the feature as broken and
+disable it by default, with hopes distros follow suit and disable it as
+well.
+
+[1] https://lore.kernel.org/r/20230125203743.564009-1-jim.cromie@gmail.com
+
+Fixes: 84ec67288c10 ("drm_print: wrap drm_*_dbg in dyndbg descriptor factory macro")
+Cc: Jim Cromie <jim.cromie@gmail.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Cc: Maxime Ripard <mripard@kernel.org>
+Cc: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: David Airlie <airlied@gmail.com>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v6.1+
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Acked-by: Jim Cromie <jim.cromie@gmail.com>
+Acked-by: Maxime Ripard <maxime@cerno.tech>
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230207143337.2126678-1-jani.nikula@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/Kconfig | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 315cbdf61979..9abfb482b615 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -53,7 +53,8 @@ config DRM_DEBUG_MM
+
+ config DRM_USE_DYNAMIC_DEBUG
+ bool "use dynamic debug to implement drm.debug"
+- default y
++ default n
++ depends on BROKEN
+ depends on DRM
+ depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
+ depends on JUMP_LABEL
+--
+2.39.1
+
--- /dev/null
+From a950b989ea29ab3b38ea7f6e3d2540700a3c54e8 Mon Sep 17 00:00:00 2001
+From: Zack Rusin <zackr@vmware.com>
+Date: Sat, 11 Feb 2023 00:05:14 -0500
+Subject: drm/vmwgfx: Do not drop the reference to the handle too soon
+
+From: Zack Rusin <zackr@vmware.com>
+
+commit a950b989ea29ab3b38ea7f6e3d2540700a3c54e8 upstream.
+
+v3: Fix vmw_user_bo_lookup which was also dropping the gem reference
+before the kernel was done with buffer depending on userspace doing
+the right thing. Same bug, different spot.
+
+It is possible for userspace to predict the next buffer handle and
+to destroy the buffer while it's still used by the kernel. Delay
+dropping the internal reference on the buffers until kernel is done
+with them.
+
+Instead of immediately dropping the gem reference in vmw_user_bo_lookup
+and vmw_gem_object_create_with_handle let the callers decide when they're
+ready give the control back to userspace.
+
+Also fixes the second usage of vmw_gem_object_create_with_handle in
+vmwgfx_surface.c which wasn't grabbing an explicit reference
+to the gem object which could have been destroyed by the userspace
+on the owning surface at any point.
+
+Signed-off-by: Zack Rusin <zackr@vmware.com>
+Fixes: 8afa13a0583f ("drm/vmwgfx: Implement DRIVER_GEM")
+Reviewed-by: Martin Krastev <krastevm@vmware.com>
+Reviewed-by: Maaz Mombasawala <mombasawalam@vmware.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230211050514.2431155-1-zack@kde.org
+(cherry picked from commit 9ef8d83e8e25d5f1811b3a38eb1484f85f64296c)
+Cc: <stable@vger.kernel.org> # v5.17+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 8 +++++---
+ drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 2 ++
+ drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 4 ++--
+ drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 4 +++-
+ drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 1 +
+ drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 1 +
+ drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 10 ++++++----
+ 7 files changed, 20 insertions(+), 10 deletions(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+@@ -598,6 +598,7 @@ static int vmw_user_bo_synccpu_release(s
+ ttm_bo_put(&vmw_bo->base);
+ }
+
++ drm_gem_object_put(&vmw_bo->base.base);
+ return ret;
+ }
+
+@@ -638,6 +639,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm
+
+ ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
+ vmw_bo_unreference(&vbo);
++ drm_gem_object_put(&vbo->base.base);
+ if (unlikely(ret != 0)) {
+ if (ret == -ERESTARTSYS || ret == -EBUSY)
+ return -EBUSY;
+@@ -695,7 +697,7 @@ int vmw_bo_unref_ioctl(struct drm_device
+ * struct vmw_buffer_object should be placed.
+ * Return: Zero on success, Negative error code on error.
+ *
+- * The vmw buffer object pointer will be refcounted.
++ * The vmw buffer object pointer will be refcounted (both ttm and gem)
+ */
+ int vmw_user_bo_lookup(struct drm_file *filp,
+ uint32_t handle,
+@@ -712,7 +714,6 @@ int vmw_user_bo_lookup(struct drm_file *
+
+ *out = gem_to_vmw_bo(gobj);
+ ttm_bo_get(&(*out)->base);
+- drm_gem_object_put(gobj);
+
+ return 0;
+ }
+@@ -779,7 +780,8 @@ int vmw_dumb_create(struct drm_file *fil
+ ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
+ args->size, &args->handle,
+ &vbo);
+-
++ /* drop reference from allocate - handle holds it now */
++ drm_gem_object_put(&vbo->base.base);
+ return ret;
+ }
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -1160,6 +1160,7 @@ static int vmw_translate_mob_ptr(struct
+ }
+ ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
+ ttm_bo_put(&vmw_bo->base);
++ drm_gem_object_put(&vmw_bo->base.base);
+ if (unlikely(ret != 0))
+ return ret;
+
+@@ -1214,6 +1215,7 @@ static int vmw_translate_guest_ptr(struc
+ }
+ ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
+ ttm_bo_put(&vmw_bo->base);
++ drm_gem_object_put(&vmw_bo->base.base);
+ if (unlikely(ret != 0))
+ return ret;
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+@@ -152,8 +152,6 @@ int vmw_gem_object_create_with_handle(st
+ (*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
+
+ ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
+- /* drop reference from allocate - handle holds it now */
+- drm_gem_object_put(&(*p_vbo)->base.base);
+ out_no_bo:
+ return ret;
+ }
+@@ -180,6 +178,8 @@ int vmw_gem_object_create_ioctl(struct d
+ rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
+ rep->cur_gmr_id = handle;
+ rep->cur_gmr_offset = 0;
++ /* drop reference from allocate - handle holds it now */
++ drm_gem_object_put(&vbo->base.base);
+ out_no_bo:
+ return ret;
+ }
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -1669,8 +1669,10 @@ static struct drm_framebuffer *vmw_kms_f
+
+ err_out:
+ /* vmw_user_lookup_handle takes one ref so does new_fb */
+- if (bo)
++ if (bo) {
+ vmw_bo_unreference(&bo);
++ drm_gem_object_put(&bo->base.base);
++ }
+ if (surface)
+ vmw_surface_unreference(&surface);
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+@@ -458,6 +458,7 @@ int vmw_overlay_ioctl(struct drm_device
+ ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
+
+ vmw_bo_unreference(&buf);
++ drm_gem_object_put(&buf->base.base);
+
+ out_unlock:
+ mutex_unlock(&overlay->mutex);
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+@@ -807,6 +807,7 @@ static int vmw_shader_define(struct drm_
+ num_output_sig, tfile, shader_handle);
+ out_bad_arg:
+ vmw_bo_unreference(&buffer);
++ drm_gem_object_put(&buffer->base.base);
+ return ret;
+ }
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -683,7 +683,7 @@ static void vmw_user_surface_base_releas
+ container_of(base, struct vmw_user_surface, prime.base);
+ struct vmw_resource *res = &user_srf->srf.res;
+
+- if (base->shareable && res && res->backup)
++ if (res && res->backup)
+ drm_gem_object_put(&res->backup->base.base);
+
+ *p_base = NULL;
+@@ -860,7 +860,11 @@ int vmw_surface_define_ioctl(struct drm_
+ goto out_unlock;
+ }
+ vmw_bo_reference(res->backup);
+- drm_gem_object_get(&res->backup->base.base);
++ /*
++ * We don't expose the handle to the userspace and surface
++ * already holds a gem reference
++ */
++ drm_gem_handle_delete(file_priv, backup_handle);
+ }
+
+ tmp = vmw_resource_reference(&srf->res);
+@@ -1564,8 +1568,6 @@ vmw_gb_surface_define_internal(struct dr
+ drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
+ rep->buffer_size = res->backup->base.base.size;
+ rep->buffer_handle = backup_handle;
+- if (user_srf->prime.base.shareable)
+- drm_gem_object_get(&res->backup->base.base);
+ } else {
+ rep->buffer_map_handle = 0;
+ rep->buffer_size = 0;
--- /dev/null
+From 1a6897921f52ceb2c8665ef826e405bd96385159 Mon Sep 17 00:00:00 2001
+From: Zack Rusin <zackr@vmware.com>
+Date: Wed, 8 Feb 2023 13:00:50 -0500
+Subject: drm/vmwgfx: Stop accessing buffer objects which failed init
+
+From: Zack Rusin <zackr@vmware.com>
+
+commit 1a6897921f52ceb2c8665ef826e405bd96385159 upstream.
+
+ttm_bo_init_reserved on failure puts the buffer object back which
+causes it to be deleted, but kfree was still being called on the same
+buffer in vmw_bo_create leading to a double free.
+
+After the double free the vmw_gem_object_create_with_handle was
+setting the gem function objects before checking the return status
+of vmw_bo_create leading to null pointer access.
+
+Fix the entire path by relaying on ttm_bo_init_reserved to delete the
+buffer objects on failure and making sure the return status is checked
+before setting the gem function objects on the buffer object.
+
+Signed-off-by: Zack Rusin <zackr@vmware.com>
+Fixes: 8afa13a0583f ("drm/vmwgfx: Implement DRIVER_GEM")
+Reviewed-by: Maaz Mombasawala <mombasawalam@vmware.com>
+Reviewed-by: Martin Krastev <krastevm@vmware.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230208180050.2093426-1-zack@kde.org
+(cherry picked from commit 36d421e632e9a0e8375eaed0143551a34d81a7e3)
+Cc: <stable@vger.kernel.org> # v5.17+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 4 +++-
+ drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 4 ++--
+ 2 files changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+index aa1cd5126a32..53da183e2bfe 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+@@ -462,6 +462,9 @@ int vmw_bo_create(struct vmw_private *vmw,
+ return -ENOMEM;
+ }
+
++ /*
++ * vmw_bo_init will delete the *p_bo object if it fails
++ */
+ ret = vmw_bo_init(vmw, *p_bo, size,
+ placement, interruptible, pin,
+ bo_free);
+@@ -470,7 +473,6 @@ int vmw_bo_create(struct vmw_private *vmw,
+
+ return ret;
+ out_error:
+- kfree(*p_bo);
+ *p_bo = NULL;
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+index ce609e7d758f..83d8f18cc16f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+@@ -146,11 +146,11 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
+ &vmw_sys_placement :
+ &vmw_vram_sys_placement,
+ true, false, &vmw_gem_destroy, p_vbo);
+-
+- (*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
+ if (ret != 0)
+ goto out_no_bo;
+
++ (*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
++
+ ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put(&(*p_vbo)->base.base);
+--
+2.39.1
+
--- /dev/null
+From 3efc61d95259956db25347e2a9562c3e54546e20 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Sun, 29 Jan 2023 09:28:56 +0100
+Subject: fbdev: Fix invalid page access after closing deferred I/O devices
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 3efc61d95259956db25347e2a9562c3e54546e20 upstream.
+
+When a fbdev with deferred I/O is once opened and closed, the dirty
+pages still remain queued in the pageref list, and eventually later
+those may be processed in the delayed work. This may lead to a
+corruption of pages, hitting an Oops.
+
+This patch makes sure to cancel the delayed work and clean up the
+pageref list at closing the device for addressing the bug. A part of
+the cleanup code is factored out as a new helper function that is
+called from the common fb_release().
+
+Reviewed-by: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Tested-by: Miko Larsson <mikoxyzzz@gmail.com>
+Fixes: 56c134f7f1b5 ("fbdev: Track deferred-I/O pages in pageref struct")
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230129082856.22113-1-tiwai@suse.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/video/fbdev/core/fb_defio.c | 10 +++++++++-
+ drivers/video/fbdev/core/fbmem.c | 4 ++++
+ include/linux/fb.h | 1 +
+ 3 files changed, 14 insertions(+), 1 deletion(-)
+
+--- a/drivers/video/fbdev/core/fb_defio.c
++++ b/drivers/video/fbdev/core/fb_defio.c
+@@ -313,7 +313,7 @@ void fb_deferred_io_open(struct fb_info
+ }
+ EXPORT_SYMBOL_GPL(fb_deferred_io_open);
+
+-void fb_deferred_io_cleanup(struct fb_info *info)
++void fb_deferred_io_release(struct fb_info *info)
+ {
+ struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct page *page;
+@@ -327,6 +327,14 @@ void fb_deferred_io_cleanup(struct fb_in
+ page = fb_deferred_io_page(info, i);
+ page->mapping = NULL;
+ }
++}
++EXPORT_SYMBOL_GPL(fb_deferred_io_release);
++
++void fb_deferred_io_cleanup(struct fb_info *info)
++{
++ struct fb_deferred_io *fbdefio = info->fbdefio;
++
++ fb_deferred_io_release(info);
+
+ kvfree(info->pagerefs);
+ mutex_destroy(&fbdefio->lock);
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -1453,6 +1453,10 @@ __releases(&info->lock)
+ struct fb_info * const info = file->private_data;
+
+ lock_fb_info(info);
++#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
++ if (info->fbdefio)
++ fb_deferred_io_release(info);
++#endif
+ if (info->fbops->fb_release)
+ info->fbops->fb_release(info,1);
+ module_put(info->fbops->owner);
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -662,6 +662,7 @@ extern int fb_deferred_io_init(struct f
+ extern void fb_deferred_io_open(struct fb_info *info,
+ struct inode *inode,
+ struct file *file);
++extern void fb_deferred_io_release(struct fb_info *info);
+ extern void fb_deferred_io_cleanup(struct fb_info *info);
+ extern int fb_deferred_io_fsync(struct file *file, loff_t start,
+ loff_t end, int datasync);
--- /dev/null
+From 55d77bae73426237b3c74c1757a894b056550dff Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Thu, 26 Jan 2023 08:04:47 +0100
+Subject: kasan: fix Oops due to missing calls to kasan_arch_is_ready()
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+commit 55d77bae73426237b3c74c1757a894b056550dff upstream.
+
+On powerpc64, you can build a kernel with KASAN as soon as you build it
+with RADIX MMU support. However if the CPU doesn't have RADIX MMU, KASAN
+isn't enabled at init and the following Oops is encountered.
+
+ [ 0.000000][ T0] KASAN not enabled as it requires radix!
+
+ [ 4.484295][ T26] BUG: Unable to handle kernel data access at 0xc00e000000804a04
+ [ 4.485270][ T26] Faulting instruction address: 0xc00000000062ec6c
+ [ 4.485748][ T26] Oops: Kernel access of bad area, sig: 11 [#1]
+ [ 4.485920][ T26] BE PAGE_SIZE=64K MMU=Hash SMP NR_CPUS=2048 NUMA pSeries
+ [ 4.486259][ T26] Modules linked in:
+ [ 4.486637][ T26] CPU: 0 PID: 26 Comm: kworker/u2:2 Not tainted 6.2.0-rc3-02590-gf8a023b0a805 #249
+ [ 4.486907][ T26] Hardware name: IBM pSeries (emulated by qemu) POWER9 (raw) 0x4e1200 0xf000005 of:SLOF,HEAD pSeries
+ [ 4.487445][ T26] Workqueue: eval_map_wq .tracer_init_tracefs_work_func
+ [ 4.488744][ T26] NIP: c00000000062ec6c LR: c00000000062bb84 CTR: c0000000002ebcd0
+ [ 4.488867][ T26] REGS: c0000000049175c0 TRAP: 0380 Not tainted (6.2.0-rc3-02590-gf8a023b0a805)
+ [ 4.489028][ T26] MSR: 8000000002009032 <SF,VEC,EE,ME,IR,DR,RI> CR: 44002808 XER: 00000000
+ [ 4.489584][ T26] CFAR: c00000000062bb80 IRQMASK: 0
+ [ 4.489584][ T26] GPR00: c0000000005624d4 c000000004917860 c000000001cfc000 1800000000804a04
+ [ 4.489584][ T26] GPR04: c0000000003a2650 0000000000000cc0 c00000000000d3d8 c00000000000d3d8
+ [ 4.489584][ T26] GPR08: c0000000049175b0 a80e000000000000 0000000000000000 0000000017d78400
+ [ 4.489584][ T26] GPR12: 0000000044002204 c000000003790000 c00000000435003c c0000000043f1c40
+ [ 4.489584][ T26] GPR16: c0000000043f1c68 c0000000043501a0 c000000002106138 c0000000043f1c08
+ [ 4.489584][ T26] GPR20: c0000000043f1c10 c0000000043f1c20 c000000004146c40 c000000002fdb7f8
+ [ 4.489584][ T26] GPR24: c000000002fdb834 c000000003685e00 c000000004025030 c000000003522e90
+ [ 4.489584][ T26] GPR28: 0000000000000cc0 c0000000003a2650 c000000004025020 c000000004025020
+ [ 4.491201][ T26] NIP [c00000000062ec6c] .kasan_byte_accessible+0xc/0x20
+ [ 4.491430][ T26] LR [c00000000062bb84] .__kasan_check_byte+0x24/0x90
+ [ 4.491767][ T26] Call Trace:
+ [ 4.491941][ T26] [c000000004917860] [c00000000062ae70] .__kasan_kmalloc+0xc0/0x110 (unreliable)
+ [ 4.492270][ T26] [c0000000049178f0] [c0000000005624d4] .krealloc+0x54/0x1c0
+ [ 4.492453][ T26] [c000000004917990] [c0000000003a2650] .create_trace_option_files+0x280/0x530
+ [ 4.492613][ T26] [c000000004917a90] [c000000002050d90] .tracer_init_tracefs_work_func+0x274/0x2c0
+ [ 4.492771][ T26] [c000000004917b40] [c0000000001f9948] .process_one_work+0x578/0x9f0
+ [ 4.492927][ T26] [c000000004917c30] [c0000000001f9ebc] .worker_thread+0xfc/0x950
+ [ 4.493084][ T26] [c000000004917d60] [c00000000020be84] .kthread+0x1a4/0x1b0
+ [ 4.493232][ T26] [c000000004917e10] [c00000000000d3d8] .ret_from_kernel_thread+0x58/0x60
+ [ 4.495642][ T26] Code: 60000000 7cc802a6 38a00000 4bfffc78 60000000 7cc802a6 38a00001 4bfffc68 60000000 3d20a80e 7863e8c2 792907c6 <7c6348ae> 20630007 78630fe0 68630001
+ [ 4.496704][ T26] ---[ end trace 0000000000000000 ]---
+
+The Oops is due to kasan_byte_accessible() not checking the readiness of
+KASAN. Add missing call to kasan_arch_is_ready() and bail out when not
+ready. The same problem is observed with ____kasan_kfree_large() so fix
+it the same.
+
+Also, as KASAN is not available and no shadow area is allocated for linear
+memory mapping, there is no point in allocating shadow mem for vmalloc
+memory as shown below in /sys/kernel/debug/kernel_page_tables
+
+ ---[ kasan shadow mem start ]---
+ 0xc00f000000000000-0xc00f00000006ffff 0x00000000040f0000 448K r w pte valid present dirty accessed
+ 0xc00f000000860000-0xc00f00000086ffff 0x000000000ac10000 64K r w pte valid present dirty accessed
+ 0xc00f3ffffffe0000-0xc00f3fffffffffff 0x0000000004d10000 128K r w pte valid present dirty accessed
+ ---[ kasan shadow mem end ]---
+
+So, also verify KASAN readiness before allocating and poisoning
+shadow mem for VMAs.
+
+Link: https://lkml.kernel.org/r/150768c55722311699fdcf8f5379e8256749f47d.1674716617.git.christophe.leroy@csgroup.eu
+Fixes: 41b7a347bf14 ("powerpc: Book3S 64-bit outline-only KASAN support")
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Reported-by: Nathan Lynch <nathanl@linux.ibm.com>
+Suggested-by: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Cc: <stable@vger.kernel.org> [5.19+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/kasan/common.c | 3 +++
+ mm/kasan/generic.c | 7 ++++++-
+ mm/kasan/shadow.c | 12 ++++++++++++
+ 3 files changed, 21 insertions(+), 1 deletion(-)
+
+diff --git a/mm/kasan/common.c b/mm/kasan/common.c
+index 833bf2cfd2a3..21e66d7f261d 100644
+--- a/mm/kasan/common.c
++++ b/mm/kasan/common.c
+@@ -246,6 +246,9 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object,
+
+ static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
+ {
++ if (!kasan_arch_is_ready())
++ return false;
++
+ if (ptr != page_address(virt_to_head_page(ptr))) {
+ kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
+ return true;
+diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
+index b076f597a378..cb762982c8ba 100644
+--- a/mm/kasan/generic.c
++++ b/mm/kasan/generic.c
+@@ -191,7 +191,12 @@ bool kasan_check_range(unsigned long addr, size_t size, bool write,
+
+ bool kasan_byte_accessible(const void *addr)
+ {
+- s8 shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
++ s8 shadow_byte;
++
++ if (!kasan_arch_is_ready())
++ return true;
++
++ shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
+
+ return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
+ }
+diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
+index 2fba1f51f042..15cfb34d16a1 100644
+--- a/mm/kasan/shadow.c
++++ b/mm/kasan/shadow.c
+@@ -291,6 +291,9 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
+ unsigned long shadow_start, shadow_end;
+ int ret;
+
++ if (!kasan_arch_is_ready())
++ return 0;
++
+ if (!is_vmalloc_or_module_addr((void *)addr))
+ return 0;
+
+@@ -459,6 +462,9 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
+ unsigned long region_start, region_end;
+ unsigned long size;
+
++ if (!kasan_arch_is_ready())
++ return;
++
+ region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
+ region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);
+
+@@ -502,6 +508,9 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
+ * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
+ */
+
++ if (!kasan_arch_is_ready())
++ return (void *)start;
++
+ if (!is_vmalloc_or_module_addr(start))
+ return (void *)start;
+
+@@ -524,6 +533,9 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
+ */
+ void __kasan_poison_vmalloc(const void *start, unsigned long size)
+ {
++ if (!kasan_arch_is_ready())
++ return;
++
+ if (!is_vmalloc_or_module_addr(start))
+ return;
+
+--
+2.39.1
+
--- /dev/null
+From badc28d4924bfed73efc93f716a0c3aa3afbdf6f Mon Sep 17 00:00:00 2001
+From: Qi Zheng <zhengqi.arch@bytedance.com>
+Date: Thu, 2 Feb 2023 18:56:12 +0800
+Subject: mm: shrinkers: fix deadlock in shrinker debugfs
+
+From: Qi Zheng <zhengqi.arch@bytedance.com>
+
+commit badc28d4924bfed73efc93f716a0c3aa3afbdf6f upstream.
+
+The debugfs_remove_recursive() is invoked by unregister_shrinker(), which
+is holding the write lock of shrinker_rwsem. It will waits for the
+handler of debugfs file complete. The handler also needs to hold the read
+lock of shrinker_rwsem to do something. So it may cause the following
+deadlock:
+
+ CPU0 CPU1
+
+debugfs_file_get()
+shrinker_debugfs_count_show()/shrinker_debugfs_scan_write()
+
+ unregister_shrinker()
+ --> down_write(&shrinker_rwsem);
+ debugfs_remove_recursive()
+ // wait for (A)
+ --> wait_for_completion();
+
+ // wait for (B)
+--> down_read_killable(&shrinker_rwsem)
+debugfs_file_put() -- (A)
+
+ up_write() -- (B)
+
+The down_read_killable() can be killed, so that the above deadlock can be
+recovered. But it still requires an extra kill action, otherwise it will
+block all subsequent shrinker-related operations, so it's better to fix
+it.
+
+[akpm@linux-foundation.org: fix CONFIG_SHRINKER_DEBUG=n stub]
+Link: https://lkml.kernel.org/r/20230202105612.64641-1-zhengqi.arch@bytedance.com
+Fixes: 5035ebc644ae ("mm: shrinkers: introduce debugfs interface for memory shrinkers")
+Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
+Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
+Cc: Kent Overstreet <kent.overstreet@gmail.com>
+Cc: Muchun Song <songmuchun@bytedance.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/shrinker.h | 5 +++--
+ mm/shrinker_debug.c | 13 ++++++++-----
+ mm/vmscan.c | 6 +++++-
+ 3 files changed, 16 insertions(+), 8 deletions(-)
+
+--- a/include/linux/shrinker.h
++++ b/include/linux/shrinker.h
+@@ -104,7 +104,7 @@ extern void synchronize_shrinkers(void);
+
+ #ifdef CONFIG_SHRINKER_DEBUG
+ extern int shrinker_debugfs_add(struct shrinker *shrinker);
+-extern void shrinker_debugfs_remove(struct shrinker *shrinker);
++extern struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker);
+ extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
+ const char *fmt, ...);
+ #else /* CONFIG_SHRINKER_DEBUG */
+@@ -112,8 +112,9 @@ static inline int shrinker_debugfs_add(s
+ {
+ return 0;
+ }
+-static inline void shrinker_debugfs_remove(struct shrinker *shrinker)
++static inline struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
+ {
++ return NULL;
+ }
+ static inline __printf(2, 3)
+ int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
+--- a/mm/shrinker_debug.c
++++ b/mm/shrinker_debug.c
+@@ -246,18 +246,21 @@ int shrinker_debugfs_rename(struct shrin
+ }
+ EXPORT_SYMBOL(shrinker_debugfs_rename);
+
+-void shrinker_debugfs_remove(struct shrinker *shrinker)
++struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
+ {
++ struct dentry *entry = shrinker->debugfs_entry;
++
+ lockdep_assert_held(&shrinker_rwsem);
+
+ kfree_const(shrinker->name);
+ shrinker->name = NULL;
+
+- if (!shrinker->debugfs_entry)
+- return;
++ if (entry) {
++ ida_free(&shrinker_debugfs_ida, shrinker->debugfs_id);
++ shrinker->debugfs_entry = NULL;
++ }
+
+- debugfs_remove_recursive(shrinker->debugfs_entry);
+- ida_free(&shrinker_debugfs_ida, shrinker->debugfs_id);
++ return entry;
+ }
+
+ static int __init shrinker_debugfs_init(void)
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -740,6 +740,8 @@ EXPORT_SYMBOL(register_shrinker);
+ */
+ void unregister_shrinker(struct shrinker *shrinker)
+ {
++ struct dentry *debugfs_entry;
++
+ if (!(shrinker->flags & SHRINKER_REGISTERED))
+ return;
+
+@@ -748,9 +750,11 @@ void unregister_shrinker(struct shrinker
+ shrinker->flags &= ~SHRINKER_REGISTERED;
+ if (shrinker->flags & SHRINKER_MEMCG_AWARE)
+ unregister_memcg_shrinker(shrinker);
+- shrinker_debugfs_remove(shrinker);
++ debugfs_entry = shrinker_debugfs_remove(shrinker);
+ up_write(&shrinker_rwsem);
+
++ debugfs_remove_recursive(debugfs_entry);
++
+ kfree(shrinker->nr_deferred);
+ shrinker->nr_deferred = NULL;
+ }
--- /dev/null
+From ce4d9a1ea35ac5429e822c4106cb2859d5c71f3e Mon Sep 17 00:00:00 2001
+From: "Isaac J. Manjarres" <isaacmanjarres@google.com>
+Date: Wed, 8 Feb 2023 15:20:00 -0800
+Subject: of: reserved_mem: Have kmemleak ignore dynamically allocated reserved mem
+
+From: Isaac J. Manjarres <isaacmanjarres@google.com>
+
+commit ce4d9a1ea35ac5429e822c4106cb2859d5c71f3e upstream.
+
+Patch series "Fix kmemleak crashes when scanning CMA regions", v2.
+
+When trying to boot a device with an ARM64 kernel with the following
+config options enabled:
+
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_DEBUG_KMEMLEAK=y
+
+a crash is encountered when kmemleak starts to scan the list of gray
+or allocated objects that it maintains. Upon closer inspection, it was
+observed that these page-faults always occurred when kmemleak attempted
+to scan a CMA region.
+
+At the moment, kmemleak is made aware of CMA regions that are specified
+through the devicetree to be dynamically allocated within a range of
+addresses. However, kmemleak should not need to scan CMA regions or any
+reserved memory region, as those regions can be used for DMA transfers
+between drivers and peripherals, and thus wouldn't contain anything
+useful for kmemleak.
+
+Additionally, since CMA regions are unmapped from the kernel's address
+space when they are freed to the buddy allocator at boot when
+CONFIG_DEBUG_PAGEALLOC is enabled, kmemleak shouldn't attempt to access
+those memory regions, as that will trigger a crash. Thus, kmemleak
+should ignore all dynamically allocated reserved memory regions.
+
+
+This patch (of 1):
+
+Currently, kmemleak ignores dynamically allocated reserved memory regions
+that don't have a kernel mapping. However, regions that do retain a
+kernel mapping (e.g. CMA regions) do get scanned by kmemleak.
+
+This is not ideal for two reasons:
+
+1 kmemleak works by scanning memory regions for pointers to allocated
+ objects to determine if those objects have been leaked or not.
+ However, reserved memory regions can be used between drivers and
+ peripherals for DMA transfers, and thus, would not contain pointers to
+ allocated objects, making it unnecessary for kmemleak to scan these
+ reserved memory regions.
+
+2 When CONFIG_DEBUG_PAGEALLOC is enabled, along with kmemleak, the
+ CMA reserved memory regions are unmapped from the kernel's address
+ space when they are freed to buddy at boot. These CMA reserved regions
+ are still tracked by kmemleak, however, and when kmemleak attempts to
+ scan them, a crash will happen, as accessing the CMA region will result
+ in a page-fault, since the regions are unmapped.
+
+Thus, use kmemleak_ignore_phys() for all dynamically allocated reserved
+memory regions, instead of those that do not have a kernel mapping
+associated with them.
+
+Link: https://lkml.kernel.org/r/20230208232001.2052777-1-isaacmanjarres@google.com
+Link: https://lkml.kernel.org/r/20230208232001.2052777-2-isaacmanjarres@google.com
+Fixes: a7259df76702 ("memblock: make memblock_find_in_range method private")
+Signed-off-by: Isaac J. Manjarres <isaacmanjarres@google.com>
+Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Frank Rowand <frowand.list@gmail.com>
+Cc: Kirill A. Shutemov <kirill.shtuemov@linux.intel.com>
+Cc: Nick Kossifidis <mick@ics.forth.gr>
+Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Cc: Rob Herring <robh@kernel.org>
+Cc: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Cc: Saravana Kannan <saravanak@google.com>
+Cc: <stable@vger.kernel.org> [5.15+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/of/of_reserved_mem.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/of/of_reserved_mem.c
++++ b/drivers/of/of_reserved_mem.c
+@@ -48,9 +48,10 @@ static int __init early_init_dt_alloc_re
+ err = memblock_mark_nomap(base, size);
+ if (err)
+ memblock_phys_free(base, size);
+- kmemleak_ignore_phys(base);
+ }
+
++ kmemleak_ignore_phys(base);
++
+ return err;
+ }
+
ceph-move-mount-state-enum-to-super.h.patch
ceph-blocklist-the-kclient-when-receiving-corrupted-.patch
selftests-mptcp-userspace-fix-v4-v6-test-in-v6.1.patch
+of-reserved_mem-have-kmemleak-ignore-dynamically-allocated-reserved-mem.patch
+kasan-fix-oops-due-to-missing-calls-to-kasan_arch_is_ready.patch
+mm-shrinkers-fix-deadlock-in-shrinker-debugfs.patch
+aio-fix-mremap-after-fork-null-deref.patch
+vmxnet3-move-rss-code-block-under-eop-descriptor.patch
+fbdev-fix-invalid-page-access-after-closing-deferred-i-o-devices.patch
+drm-disable-dynamic-debug-as-broken.patch
+drm-amd-amdgpu-fix-warning-during-suspend.patch
+drm-amd-display-fail-atomic_check-early-on-normalize_zpos-error.patch
+drm-vmwgfx-stop-accessing-buffer-objects-which-failed-init.patch
+drm-vmwgfx-do-not-drop-the-reference-to-the-handle-too-soon.patch
--- /dev/null
+From ec76d0c2da5c6dfb6a33f1545cc15997013923da Mon Sep 17 00:00:00 2001
+From: Ronak Doshi <doshir@vmware.com>
+Date: Wed, 8 Feb 2023 14:38:59 -0800
+Subject: vmxnet3: move rss code block under eop descriptor
+
+From: Ronak Doshi <doshir@vmware.com>
+
+commit ec76d0c2da5c6dfb6a33f1545cc15997013923da upstream.
+
+Commit b3973bb40041 ("vmxnet3: set correct hash type based on
+rss information") added hashType information into skb. However,
+rssType field is populated for eop descriptor. This can lead
+to incorrectly reporting of hashType for packets which use
+multiple rx descriptors. Multiple rx descriptors are used
+for Jumbo frame or LRO packets, which can hit this issue.
+
+This patch moves the RSS code block under the eop descriptor.
+
+Cc: stable@vger.kernel.org
+Fixes: b3973bb40041 ("vmxnet3: set correct hash type based on rss information")
+Signed-off-by: Ronak Doshi <doshir@vmware.com>
+Acked-by: Peng Li <lpeng@vmware.com>
+Acked-by: Guolin Yang <gyang@vmware.com>
+Link: https://lore.kernel.org/r/20230208223900.5794-1-doshir@vmware.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/vmxnet3/vmxnet3_drv.c | 50 +++++++++++++++++++-------------------
+ 1 file changed, 25 insertions(+), 25 deletions(-)
+
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -1546,31 +1546,6 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx
+ rxd->len = rbi->len;
+ }
+
+-#ifdef VMXNET3_RSS
+- if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
+- (adapter->netdev->features & NETIF_F_RXHASH)) {
+- enum pkt_hash_types hash_type;
+-
+- switch (rcd->rssType) {
+- case VMXNET3_RCD_RSS_TYPE_IPV4:
+- case VMXNET3_RCD_RSS_TYPE_IPV6:
+- hash_type = PKT_HASH_TYPE_L3;
+- break;
+- case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
+- case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
+- case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
+- case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
+- hash_type = PKT_HASH_TYPE_L4;
+- break;
+- default:
+- hash_type = PKT_HASH_TYPE_L3;
+- break;
+- }
+- skb_set_hash(ctx->skb,
+- le32_to_cpu(rcd->rssHash),
+- hash_type);
+- }
+-#endif
+ skb_record_rx_queue(ctx->skb, rq->qid);
+ skb_put(ctx->skb, rcd->len);
+
+@@ -1653,6 +1628,31 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx
+ u32 mtu = adapter->netdev->mtu;
+ skb->len += skb->data_len;
+
++#ifdef VMXNET3_RSS
++ if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
++ (adapter->netdev->features & NETIF_F_RXHASH)) {
++ enum pkt_hash_types hash_type;
++
++ switch (rcd->rssType) {
++ case VMXNET3_RCD_RSS_TYPE_IPV4:
++ case VMXNET3_RCD_RSS_TYPE_IPV6:
++ hash_type = PKT_HASH_TYPE_L3;
++ break;
++ case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
++ case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
++ case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
++ case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
++ hash_type = PKT_HASH_TYPE_L4;
++ break;
++ default:
++ hash_type = PKT_HASH_TYPE_L3;
++ break;
++ }
++ skb_set_hash(skb,
++ le32_to_cpu(rcd->rssHash),
++ hash_type);
++ }
++#endif
+ vmxnet3_rx_csum(adapter, skb,
+ (union Vmxnet3_GenericDesc *)rcd);
+ skb->protocol = eth_type_trans(skb, adapter->netdev);