--- /dev/null
+From 044012b52029204900af9e4230263418427f4ba4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Stefan=20M=C3=A4tje?= <stefan.maetje@esd.eu>
+Date: Wed, 25 Aug 2021 23:52:27 +0200
+Subject: can: usb: esd_usb2: esd_usb2_rx_event(): fix the interchange of the CAN RX and TX error counters
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Stefan Mätje <stefan.maetje@esd.eu>
+
+commit 044012b52029204900af9e4230263418427f4ba4 upstream.
+
+This patch fixes the interchanged fetch of the CAN RX and TX error
+counters from the ESD_EV_CAN_ERROR_EXT message. The RX error counter
+is really in struct rx_msg::data[2] and the TX error counter is in
+struct rx_msg::data[3].
+
+Fixes: 96d8e90382dc ("can: Add driver for esd CAN-USB/2 device")
+Link: https://lore.kernel.org/r/20210825215227.4947-2-stefan.maetje@esd.eu
+Cc: stable@vger.kernel.org
+Signed-off-by: Stefan Mätje <stefan.maetje@esd.eu>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/can/usb/esd_usb2.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/can/usb/esd_usb2.c
++++ b/drivers/net/can/usb/esd_usb2.c
+@@ -224,8 +224,8 @@ static void esd_usb2_rx_event(struct esd
+ if (id == ESD_EV_CAN_ERROR_EXT) {
+ u8 state = msg->msg.rx.data[0];
+ u8 ecc = msg->msg.rx.data[1];
+- u8 txerr = msg->msg.rx.data[2];
+- u8 rxerr = msg->msg.rx.data[3];
++ u8 rxerr = msg->msg.rx.data[2];
++ u8 txerr = msg->msg.rx.data[3];
+
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ if (skb == NULL) {
--- /dev/null
+From b2f9fa1f3bd8846f50b355fc2168236975c4d264 Mon Sep 17 00:00:00 2001
+From: Xiubo Li <xiubli@redhat.com>
+Date: Wed, 18 Aug 2021 21:38:42 +0800
+Subject: ceph: correctly handle releasing an embedded cap flush
+
+From: Xiubo Li <xiubli@redhat.com>
+
+commit b2f9fa1f3bd8846f50b355fc2168236975c4d264 upstream.
+
+The ceph_cap_flush structures are usually dynamically allocated, but
+the ceph_cap_snap has an embedded one.
+
+When force umounting, the client will try to remove all the session
+caps. During this, it will free them, but that should not be done
+with the ones embedded in a capsnap.
+
+Fix this by adding a new boolean that indicates that the cap flush is
+embedded in a capsnap, and skip freeing it if that's set.
+
+At the same time, switch to using list_del_init() when detaching the
+i_list and g_list heads. It's possible for a forced umount to remove
+these objects but then handle_cap_flushsnap_ack() races in and does the
+list_del_init() again, corrupting memory.
+
+Cc: stable@vger.kernel.org
+URL: https://tracker.ceph.com/issues/52283
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ceph/caps.c | 21 +++++++++++++--------
+ fs/ceph/mds_client.c | 7 ++++---
+ fs/ceph/snap.c | 3 +++
+ fs/ceph/super.h | 3 ++-
+ 4 files changed, 22 insertions(+), 12 deletions(-)
+
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -1753,7 +1753,11 @@ int __ceph_mark_dirty_caps(struct ceph_i
+
+ struct ceph_cap_flush *ceph_alloc_cap_flush(void)
+ {
+- return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
++ struct ceph_cap_flush *cf;
++
++ cf = kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
++ cf->is_capsnap = false;
++ return cf;
+ }
+
+ void ceph_free_cap_flush(struct ceph_cap_flush *cf)
+@@ -1788,7 +1792,7 @@ static bool __detach_cap_flush_from_mdsc
+ prev->wake = true;
+ wake = false;
+ }
+- list_del(&cf->g_list);
++ list_del_init(&cf->g_list);
+ return wake;
+ }
+
+@@ -1803,7 +1807,7 @@ static bool __detach_cap_flush_from_ci(s
+ prev->wake = true;
+ wake = false;
+ }
+- list_del(&cf->i_list);
++ list_del_init(&cf->i_list);
+ return wake;
+ }
+
+@@ -2423,7 +2427,7 @@ static void __kick_flushing_caps(struct
+ ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
+
+ list_for_each_entry_reverse(cf, &ci->i_cap_flush_list, i_list) {
+- if (!cf->caps) {
++ if (cf->is_capsnap) {
+ last_snap_flush = cf->tid;
+ break;
+ }
+@@ -2442,7 +2446,7 @@ static void __kick_flushing_caps(struct
+
+ first_tid = cf->tid + 1;
+
+- if (cf->caps) {
++ if (!cf->is_capsnap) {
+ struct cap_msg_args arg;
+
+ dout("kick_flushing_caps %p cap %p tid %llu %s\n",
+@@ -3589,7 +3593,7 @@ static void handle_cap_flush_ack(struct
+ cleaned = cf->caps;
+
+ /* Is this a capsnap? */
+- if (cf->caps == 0)
++ if (cf->is_capsnap)
+ continue;
+
+ if (cf->tid <= flush_tid) {
+@@ -3662,8 +3666,9 @@ out:
+ while (!list_empty(&to_remove)) {
+ cf = list_first_entry(&to_remove,
+ struct ceph_cap_flush, i_list);
+- list_del(&cf->i_list);
+- ceph_free_cap_flush(cf);
++ list_del_init(&cf->i_list);
++ if (!cf->is_capsnap)
++ ceph_free_cap_flush(cf);
+ }
+
+ if (wake_ci)
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -1621,7 +1621,7 @@ static int remove_session_caps_cb(struct
+ spin_lock(&mdsc->cap_dirty_lock);
+
+ list_for_each_entry(cf, &to_remove, i_list)
+- list_del(&cf->g_list);
++ list_del_init(&cf->g_list);
+
+ if (!list_empty(&ci->i_dirty_item)) {
+ pr_warn_ratelimited(
+@@ -1673,8 +1673,9 @@ static int remove_session_caps_cb(struct
+ struct ceph_cap_flush *cf;
+ cf = list_first_entry(&to_remove,
+ struct ceph_cap_flush, i_list);
+- list_del(&cf->i_list);
+- ceph_free_cap_flush(cf);
++ list_del_init(&cf->i_list);
++ if (!cf->is_capsnap)
++ ceph_free_cap_flush(cf);
+ }
+
+ wake_up_all(&ci->i_cap_wq);
+--- a/fs/ceph/snap.c
++++ b/fs/ceph/snap.c
+@@ -487,6 +487,9 @@ void ceph_queue_cap_snap(struct ceph_ino
+ pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode);
+ return;
+ }
++ capsnap->cap_flush.is_capsnap = true;
++ INIT_LIST_HEAD(&capsnap->cap_flush.i_list);
++ INIT_LIST_HEAD(&capsnap->cap_flush.g_list);
+
+ spin_lock(&ci->i_ceph_lock);
+ used = __ceph_caps_used(ci);
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -182,8 +182,9 @@ struct ceph_cap {
+
+ struct ceph_cap_flush {
+ u64 tid;
+- int caps; /* 0 means capsnap */
++ int caps;
+ bool wake; /* wake up flush waiters when finish ? */
++ bool is_capsnap; /* true means capsnap */
+ struct list_head g_list; // global
+ struct list_head i_list; // per inode
+ };
--- /dev/null
+From 32bc8f8373d2d6a681c96e4b25dca60d4d1c6016 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Michel=20D=C3=A4nzer?= <mdaenzer@redhat.com>
+Date: Tue, 17 Aug 2021 10:23:25 +0200
+Subject: drm/amdgpu: Cancel delayed work when GFXOFF is disabled
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michel Dänzer <mdaenzer@redhat.com>
+
+commit 32bc8f8373d2d6a681c96e4b25dca60d4d1c6016 upstream.
+
+schedule_delayed_work does not push back the work if it was already
+scheduled before, so amdgpu_device_delay_enable_gfx_off ran ~100 ms
+after the first time GFXOFF was disabled and re-enabled, even if GFXOFF
+was disabled and re-enabled again during those 100 ms.
+
+This resulted in frame drops / stutter with the upcoming mutter 41
+release on Navi 14, due to constantly enabling GFXOFF in the HW and
+disabling it again (for getting the GPU clock counter).
+
+To fix this, call cancel_delayed_work_sync when the disable count
+transitions from 0 to 1, and only schedule the delayed work on the
+reverse transition, not if the disable count was already 0. This makes
+sure the delayed work doesn't run at unexpected times, and allows it to
+be lock-free.
+
+v2:
+* Use cancel_delayed_work_sync & mutex_trylock instead of
+ mod_delayed_work.
+v3:
+* Make amdgpu_device_delay_enable_gfx_off lock-free (Christian König)
+v4:
+* Fix race condition between amdgpu_gfx_off_ctrl incrementing
+ adev->gfx.gfx_off_req_count and amdgpu_device_delay_enable_gfx_off
+ checking for it to be 0 (Evan Quan)
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Lijo Lazar <lijo.lazar@amd.com> # v3
+Acked-by: Christian König <christian.koenig@amd.com> # v3
+Signed-off-by: Michel Dänzer <mdaenzer@redhat.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 11 +++-----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 38 +++++++++++++++++++----------
+ 2 files changed, 31 insertions(+), 18 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2690,12 +2690,11 @@ static void amdgpu_device_delay_enable_g
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
+
+- mutex_lock(&adev->gfx.gfx_off_mutex);
+- if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
+- if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
+- adev->gfx.gfx_off_state = true;
+- }
+- mutex_unlock(&adev->gfx.gfx_off_mutex);
++ WARN_ON_ONCE(adev->gfx.gfx_off_state);
++ WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
++
++ if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
++ adev->gfx.gfx_off_state = true;
+ }
+
+ /**
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -563,24 +563,38 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_d
+
+ mutex_lock(&adev->gfx.gfx_off_mutex);
+
+- if (!enable)
+- adev->gfx.gfx_off_req_count++;
+- else if (adev->gfx.gfx_off_req_count > 0)
++ if (enable) {
++ /* If the count is already 0, it means there's an imbalance bug somewhere.
++ * Note that the bug may be in a different caller than the one which triggers the
++ * WARN_ON_ONCE.
++ */
++ if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
++ goto unlock;
++
+ adev->gfx.gfx_off_req_count--;
+
+- if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
+- schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
+- } else if (!enable && adev->gfx.gfx_off_state) {
+- if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
+- adev->gfx.gfx_off_state = false;
+-
+- if (adev->gfx.funcs->init_spm_golden) {
+- dev_dbg(adev->dev, "GFXOFF is disabled, re-init SPM golden settings\n");
+- amdgpu_gfx_init_spm_golden(adev);
++ if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state)
++ schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
++ } else {
++ if (adev->gfx.gfx_off_req_count == 0) {
++ cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
++
++ if (adev->gfx.gfx_off_state &&
++ !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
++ adev->gfx.gfx_off_state = false;
++
++ if (adev->gfx.funcs->init_spm_golden) {
++ dev_dbg(adev->dev,
++ "GFXOFF is disabled, re-init SPM golden settings\n");
++ amdgpu_gfx_init_spm_golden(adev);
++ }
+ }
+ }
++
++ adev->gfx.gfx_off_req_count++;
+ }
+
++unlock:
+ mutex_unlock(&adev->gfx.gfx_off_mutex);
+ }
+
--- /dev/null
+From c41a4e877a185241d8e83501453326fb98f67354 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Tue, 24 Aug 2021 11:42:47 +0200
+Subject: drm/amdgpu: Fix build with missing pm_suspend_target_state module export
+
+From: Borislav Petkov <bp@suse.de>
+
+commit c41a4e877a185241d8e83501453326fb98f67354 upstream.
+
+Building a randconfig here triggered:
+
+ ERROR: modpost: "pm_suspend_target_state" [drivers/gpu/drm/amd/amdgpu/amdgpu.ko] undefined!
+
+because the module export of that symbol happens in
+kernel/power/suspend.c which is enabled with CONFIG_SUSPEND.
+
+The ifdef guards in amdgpu_acpi_is_s0ix_supported(), however, test for
+CONFIG_PM_SLEEP which is defined like this:
+
+ config PM_SLEEP
+ def_bool y
+ depends on SUSPEND || HIBERNATE_CALLBACKS
+
+and that randconfig has:
+
+ # CONFIG_SUSPEND is not set
+ CONFIG_HIBERNATE_CALLBACKS=y
+
+leading to the module export missing.
+
+Change the ifdeffery to depend directly on CONFIG_SUSPEND.
+
+Fixes: 5706cb3c910c ("drm/amdgpu: fix checking pmops when PM_SLEEP is not enabled")
+Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Link: https://lkml.kernel.org/r/YSP6Lv53QV0cOAsd@zn.tnic
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -904,7 +904,7 @@ void amdgpu_acpi_fini(struct amdgpu_devi
+ */
+ bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
+ {
+-#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_PM_SLEEP)
++#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
+ if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
+ if (adev->flags & AMD_IS_APU)
+ return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
--- /dev/null
+From 2a7b9a8437130fd328001f4edfac8eec98dfe298 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 18 Aug 2021 14:05:28 +0200
+Subject: drm/amdgpu: use the preferred pin domain after the check
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christian König <christian.koenig@amd.com>
+
+commit 2a7b9a8437130fd328001f4edfac8eec98dfe298 upstream.
+
+For some reason we run into an use case where a BO is already pinned
+into GTT, but should be pinned into VRAM|GTT again.
+
+Handle that case gracefully as well.
+
+Reviewed-by: Shashank Sharma <Shashank.sharma@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -937,11 +937,6 @@ int amdgpu_bo_pin_restricted(struct amdg
+ return -EINVAL;
+ }
+
+- /* This assumes only APU display buffers are pinned with (VRAM|GTT).
+- * See function amdgpu_display_supported_domains()
+- */
+- domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
+-
+ if (bo->tbo.pin_count) {
+ uint32_t mem_type = bo->tbo.mem.mem_type;
+ uint32_t mem_flags = bo->tbo.mem.placement;
+@@ -966,6 +961,11 @@ int amdgpu_bo_pin_restricted(struct amdg
+ return 0;
+ }
+
++ /* This assumes only APU display buffers are pinned with (VRAM|GTT).
++ * See function amdgpu_display_supported_domains()
++ */
++ domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
++
+ if (bo->tbo.base.import_attach)
+ dma_buf_pin(bo->tbo.base.import_attach);
+
--- /dev/null
+From 71de496cc489b6bae2f51f89da7f28849bf2836e Mon Sep 17 00:00:00 2001
+From: Swati Sharma <swati2.sharma@intel.com>
+Date: Thu, 12 Aug 2021 18:41:07 +0530
+Subject: drm/i915/dp: Drop redundant debug print
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Swati Sharma <swati2.sharma@intel.com>
+
+commit 71de496cc489b6bae2f51f89da7f28849bf2836e upstream.
+
+drm_dp_dpcd_read/write already has debug error message.
+Drop redundant error messages which gives false
+status even if correct value is read in drm_dp_dpcd_read().
+
+v2: -Added fixes tag (Ankit)
+v3: -Fixed build error (CI)
+
+Fixes: 9488a030ac91 ("drm/i915: Add support for enabling link status and recovery")
+Cc: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+Cc: Imre Deak <imre.deak@intel.com>
+Cc: Jani Nikula <jani.nikula@intel.com>
+Cc: José Roberto de Souza <jose.souza@intel.com>
+Cc: Manasi Navare <manasi.d.navare@intel.com>
+Cc: Sean Paul <seanpaul@chromium.org>
+Cc: Uma Shankar <uma.shankar@intel.com>
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Cc: <stable@vger.kernel.org> # v5.12+
+Reviewed-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Swati Sharma <swati2.sharma@intel.com>
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20210812131107.5531-1-swati2.sharma@intel.com
+(cherry picked from commit b6dfa416172939edaa46a5a647457b94c6d94119)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_dp.c | 9 ++-------
+ 1 file changed, 2 insertions(+), 7 deletions(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -3833,23 +3833,18 @@ static void intel_dp_check_device_servic
+
+ static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
+ {
+- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 val;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+- DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) {
+- drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n");
++ DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
+ return;
+- }
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+- DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) {
+- drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n");
++ DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
+ return;
+- }
+
+ if (val & HDMI_LINK_STATUS_CHANGED)
+ intel_dp_handle_hdmi_link_status_change(intel_dp);
--- /dev/null
+From a63bcf08f0efb5348105bb8e0e1e8c6671077753 Mon Sep 17 00:00:00 2001
+From: Matthew Brost <matthew.brost@intel.com>
+Date: Fri, 30 Jul 2021 12:53:42 -0700
+Subject: drm/i915: Fix syncmap memory leak
+
+From: Matthew Brost <matthew.brost@intel.com>
+
+commit a63bcf08f0efb5348105bb8e0e1e8c6671077753 upstream.
+
+A small race exists between intel_gt_retire_requests_timeout and
+intel_timeline_exit which could result in the syncmap not getting
+free'd. Rather than work to hard to seal this race, simply cleanup the
+syncmap on fini.
+
+unreferenced object 0xffff88813bc53b18 (size 96):
+ comm "gem_close_race", pid 5410, jiffies 4294917818 (age 1105.600s)
+ hex dump (first 32 bytes):
+ 01 00 00 00 00 00 00 00 00 00 00 00 0a 00 00 00 ................
+ 00 00 00 00 00 00 00 00 6b 6b 6b 6b 06 00 00 00 ........kkkk....
+ backtrace:
+ [<00000000120b863a>] __sync_alloc_leaf+0x1e/0x40 [i915]
+ [<00000000042f6959>] __sync_set+0x1bb/0x240 [i915]
+ [<0000000090f0e90f>] i915_request_await_dma_fence+0x1c7/0x400 [i915]
+ [<0000000056a48219>] i915_request_await_object+0x222/0x360 [i915]
+ [<00000000aaac4ee3>] i915_gem_do_execbuffer+0x1bd0/0x2250 [i915]
+ [<000000003c9d830f>] i915_gem_execbuffer2_ioctl+0x405/0xce0 [i915]
+ [<00000000fd7a8e68>] drm_ioctl_kernel+0xb0/0xf0 [drm]
+ [<00000000e721ee87>] drm_ioctl+0x305/0x3c0 [drm]
+ [<000000008b0d8986>] __x64_sys_ioctl+0x71/0xb0
+ [<0000000076c362a4>] do_syscall_64+0x33/0x80
+ [<00000000eb7a4831>] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Fixes: 531958f6f357 ("drm/i915/gt: Track timeline activeness in enter/exit")
+Cc: <stable@vger.kernel.org>
+Reviewed-by: John Harrison <John.C.Harrison@Intel.com>
+Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20210730195342.110234-1-matthew.brost@intel.com
+(cherry picked from commit faf890985e30d5e88cc3a7c50c1bcad32f89ab7c)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/gt/intel_timeline.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
++++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
+@@ -127,6 +127,15 @@ static void intel_timeline_fini(struct r
+
+ i915_vma_put(timeline->hwsp_ggtt);
+ i915_active_fini(&timeline->active);
++
++ /*
++ * A small race exists between intel_gt_retire_requests_timeout and
++ * intel_timeline_exit which could result in the syncmap not getting
++ * free'd. Rather than work to hard to seal this race, simply cleanup
++ * the syncmap on fini.
++ */
++ i915_syncmap_free(&timeline->sync);
++
+ kfree(timeline);
+ }
+
--- /dev/null
+From 1c8094e394bceb4f1880f9d539bdd255c130826e Mon Sep 17 00:00:00 2001
+From: Rob Herring <robh@kernel.org>
+Date: Tue, 17 Aug 2021 12:47:55 -0500
+Subject: dt-bindings: sifive-l2-cache: Fix 'select' matching
+
+From: Rob Herring <robh@kernel.org>
+
+commit 1c8094e394bceb4f1880f9d539bdd255c130826e upstream.
+
+When the schema fixups are applied to 'select' the result is a single
+entry is required for a match, but that will never match as there should
+be 2 entries. Also, a 'select' schema should have the widest possible
+match, so use 'contains' which matches the compatible string(s) in any
+position and not just the first position.
+
+Fixes: 993dcfac64eb ("dt-bindings: riscv: sifive-l2-cache: convert bindings to json-schema")
+Signed-off-by: Rob Herring <robh@kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
++++ b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
+@@ -24,10 +24,10 @@ allOf:
+ select:
+ properties:
+ compatible:
+- items:
+- - enum:
+- - sifive,fu540-c000-ccache
+- - sifive,fu740-c000-ccache
++ contains:
++ enum:
++ - sifive,fu540-c000-ccache
++ - sifive,fu740-c000-ccache
+
+ required:
+ - compatible
--- /dev/null
+From 946746d1ad921e5f493b536533dda02ea22ca609 Mon Sep 17 00:00:00 2001
+From: Miaohe Lin <linmiaohe@huawei.com>
+Date: Wed, 25 Aug 2021 12:17:55 -0700
+Subject: mm/memory_hotplug: fix potential permanent lru cache disable
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+commit 946746d1ad921e5f493b536533dda02ea22ca609 upstream.
+
+If offline_pages failed after lru_cache_disable(), it forgot to do
+lru_cache_enable() in error path. So we would have lru cache disabled
+permanently in this case.
+
+Link: https://lkml.kernel.org/r/20210821094246.10149-3-linmiaohe@huawei.com
+Fixes: d479960e44f2 ("mm: disable LRU pagevec during the migration temporarily")
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Oscar Salvador <osalvador@suse.de>
+Reviewed-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
+Cc: Chris Goldsworthy <cgoldswo@codeaurora.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory_hotplug.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1854,6 +1854,7 @@ failed_removal_isolated:
+ undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+ memory_notify(MEM_CANCEL_OFFLINE, &arg);
+ failed_removal_pcplists_disabled:
++ lru_cache_enable();
+ zone_pcp_enable(zone);
+ failed_removal:
+ pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
--- /dev/null
+From 2b9fff64f03219d78044d1ab40dde8e3d42e968a Mon Sep 17 00:00:00 2001
+From: Song Yoong Siang <yoong.siang.song@intel.com>
+Date: Wed, 25 Aug 2021 08:57:42 +0800
+Subject: net: stmmac: fix kernel panic due to NULL pointer dereference of buf->xdp
+
+From: Song Yoong Siang <yoong.siang.song@intel.com>
+
+commit 2b9fff64f03219d78044d1ab40dde8e3d42e968a upstream.
+
+Ensure a valid XSK buffer before proceed to free the xdp buffer.
+
+The following kernel panic is observed without this patch:
+
+RIP: 0010:xp_free+0x5/0x40
+Call Trace:
+stmmac_napi_poll_rxtx+0x332/0xb30 [stmmac]
+? stmmac_tx_timer+0x3c/0xb0 [stmmac]
+net_rx_action+0x13d/0x3d0
+__do_softirq+0xfc/0x2fb
+? smpboot_register_percpu_thread+0xe0/0xe0
+run_ksoftirqd+0x32/0x70
+smpboot_thread_fn+0x1d8/0x2c0
+kthread+0x169/0x1a0
+? kthread_park+0x90/0x90
+ret_from_fork+0x1f/0x30
+---[ end trace 0000000000000002 ]---
+
+Fixes: bba2556efad6 ("net: stmmac: Enable RX via AF_XDP zero-copy")
+Cc: <stable@vger.kernel.org> # 5.13.x
+Suggested-by: Ong Boon Leong <boon.leong.ong@intel.com>
+Signed-off-by: Song Yoong Siang <yoong.siang.song@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -4925,6 +4925,10 @@ read_again:
+
+ prefetch(np);
+
++ /* Ensure a valid XSK buffer before proceed */
++ if (!buf->xdp)
++ break;
++
+ if (priv->extend_desc)
+ stmmac_rx_extended_status(priv, &priv->dev->stats,
+ &priv->xstats,
+@@ -4945,10 +4949,6 @@ read_again:
+ continue;
+ }
+
+- /* Ensure a valid XSK buffer before proceed */
+- if (!buf->xdp)
+- break;
+-
+ /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
+ if (likely(status & rx_not_ls)) {
+ xsk_buff_free(buf->xdp);
--- /dev/null
+From a6451192da2691dcf39507bd758dde35d4606ee1 Mon Sep 17 00:00:00 2001
+From: Song Yoong Siang <yoong.siang.song@intel.com>
+Date: Wed, 25 Aug 2021 08:55:29 +0800
+Subject: net: stmmac: fix kernel panic due to NULL pointer dereference of xsk_pool
+
+From: Song Yoong Siang <yoong.siang.song@intel.com>
+
+commit a6451192da2691dcf39507bd758dde35d4606ee1 upstream.
+
+After free xsk_pool, there is possibility that napi polling is still
+running in the middle, thus causes a kernel crash due to kernel NULL
+pointer dereference of rx_q->xsk_pool and tx_q->xsk_pool.
+
+Fix this by changing the XDP pool setup sequence to:
+ 1. disable napi before free xsk_pool
+ 2. enable napi after init xsk_pool
+
+The following kernel panic is observed without this patch:
+
+RIP: 0010:xsk_uses_need_wakeup+0x5/0x10
+Call Trace:
+stmmac_napi_poll_rxtx+0x3a9/0xae0 [stmmac]
+__napi_poll+0x27/0x130
+net_rx_action+0x233/0x280
+__do_softirq+0xe2/0x2b6
+run_ksoftirqd+0x1a/0x20
+smpboot_thread_fn+0xac/0x140
+? sort_range+0x20/0x20
+kthread+0x124/0x150
+? set_kthread_struct+0x40/0x40
+ret_from_fork+0x1f/0x30
+---[ end trace a77c8956b79ac107 ]---
+
+Fixes: bba2556efad6 ("net: stmmac: Enable RX via AF_XDP zero-copy")
+Cc: <stable@vger.kernel.org> # 5.13.x
+Signed-off-by: Song Yoong Siang <yoong.siang.song@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
+@@ -34,18 +34,18 @@ static int stmmac_xdp_enable_pool(struct
+ need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);
+
+ if (need_update) {
+- stmmac_disable_rx_queue(priv, queue);
+- stmmac_disable_tx_queue(priv, queue);
+ napi_disable(&ch->rx_napi);
+ napi_disable(&ch->tx_napi);
++ stmmac_disable_rx_queue(priv, queue);
++ stmmac_disable_tx_queue(priv, queue);
+ }
+
+ set_bit(queue, priv->af_xdp_zc_qps);
+
+ if (need_update) {
+- napi_enable(&ch->rxtx_napi);
+ stmmac_enable_rx_queue(priv, queue);
+ stmmac_enable_tx_queue(priv, queue);
++ napi_enable(&ch->rxtx_napi);
+
+ err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_RX);
+ if (err)
+@@ -72,10 +72,10 @@ static int stmmac_xdp_disable_pool(struc
+ need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);
+
+ if (need_update) {
++ napi_disable(&ch->rxtx_napi);
+ stmmac_disable_rx_queue(priv, queue);
+ stmmac_disable_tx_queue(priv, queue);
+ synchronize_rcu();
+- napi_disable(&ch->rxtx_napi);
+ }
+
+ xsk_pool_dma_unmap(pool, STMMAC_RX_DMA_ATTR);
+@@ -83,10 +83,10 @@ static int stmmac_xdp_disable_pool(struc
+ clear_bit(queue, priv->af_xdp_zc_qps);
+
+ if (need_update) {
+- napi_enable(&ch->rx_napi);
+- napi_enable(&ch->tx_napi);
+ stmmac_enable_rx_queue(priv, queue);
+ stmmac_enable_tx_queue(priv, queue);
++ napi_enable(&ch->rx_napi);
++ napi_enable(&ch->tx_napi);
+ }
+
+ return 0;
--- /dev/null
+From 310d2e83cb9b7f1e7232319880e3fcb57592fa10 Mon Sep 17 00:00:00 2001
+From: Lukas Bulwahn <lukas.bulwahn@gmail.com>
+Date: Thu, 19 Aug 2021 13:39:54 +0200
+Subject: powerpc: Re-enable ARCH_ENABLE_SPLIT_PMD_PTLOCK
+
+From: Lukas Bulwahn <lukas.bulwahn@gmail.com>
+
+commit 310d2e83cb9b7f1e7232319880e3fcb57592fa10 upstream.
+
+Commit 66f24fa766e3 ("mm: drop redundant ARCH_ENABLE_SPLIT_PMD_PTLOCK")
+broke PMD split page table lock for powerpc.
+
+It selects the non-existent config ARCH_ENABLE_PMD_SPLIT_PTLOCK in
+arch/powerpc/platforms/Kconfig.cputype, but clearly intended to
+select ARCH_ENABLE_SPLIT_PMD_PTLOCK (notice the word swapping!), as
+that commit did for all other architectures.
+
+Fix it by selecting the correct symbol ARCH_ENABLE_SPLIT_PMD_PTLOCK.
+
+Fixes: 66f24fa766e3 ("mm: drop redundant ARCH_ENABLE_SPLIT_PMD_PTLOCK")
+Cc: stable@vger.kernel.org # v5.13+
+Signed-off-by: Lukas Bulwahn <lukas.bulwahn@gmail.com>
+Reviewed-by: Daniel Axtens <dja@axtens.net>
+[mpe: Reword change log to make it clear this is a bug fix]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20210819113954.17515-3-lukas.bulwahn@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/platforms/Kconfig.cputype | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/platforms/Kconfig.cputype
++++ b/arch/powerpc/platforms/Kconfig.cputype
+@@ -97,7 +97,7 @@ config PPC_BOOK3S_64
+ select PPC_HAVE_PMU_SUPPORT
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
+- select ARCH_ENABLE_PMD_SPLIT_PTLOCK
++ select ARCH_ENABLE_SPLIT_PMD_PTLOCK
+ select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
+ select ARCH_SUPPORTS_HUGETLBFS
+ select ARCH_SUPPORTS_NUMA_BALANCING
--- /dev/null
+From 4e9655763b82a91e4c341835bb504a2b1590f984 Mon Sep 17 00:00:00 2001
+From: Qu Wenruo <wqu@suse.com>
+Date: Wed, 25 Aug 2021 13:41:42 +0800
+Subject: Revert "btrfs: compression: don't try to compress if we don't have enough pages"
+
+From: Qu Wenruo <wqu@suse.com>
+
+commit 4e9655763b82a91e4c341835bb504a2b1590f984 upstream.
+
+This reverts commit f2165627319ffd33a6217275e5690b1ab5c45763.
+
+[BUG]
+It's no longer possible to create compressed inline extent after commit
+f2165627319f ("btrfs: compression: don't try to compress if we don't
+have enough pages").
+
+[CAUSE]
+For compression code, there are several possible reasons we have a range
+that needs to be compressed while it's no more than one page.
+
+- Compressed inline write
+ The data is always smaller than one sector and the test lacks the
+ condition to properly recognize a non-inline extent.
+
+- Compressed subpage write
+ For the incoming subpage compressed write support, we require page
+ alignment of the delalloc range.
+ And for 64K page size, we can compress just one page into smaller
+ sectors.
+
+For those reasons, the requirement for the data to be more than one page
+is not correct, and is already causing regression for compressed inline
+data writeback. The idea of skipping one page to avoid wasting CPU time
+could be revisited in the future.
+
+[FIX]
+Fix it by reverting the offending commit.
+
+Reported-by: Zygo Blaxell <ce3g8jdj@umail.furryterror.org>
+Link: https://lore.kernel.org/linux-btrfs/afa2742.c084f5d6.17b6b08dffc@tnonline.net
+Fixes: f2165627319f ("btrfs: compression: don't try to compress if we don't have enough pages")
+CC: stable@vger.kernel.org # 4.4+
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/inode.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -603,7 +603,7 @@ again:
+ * inode has not been flagged as nocompress. This flag can
+ * change at any time if we discover bad compression ratios.
+ */
+- if (nr_pages > 1 && inode_need_compress(BTRFS_I(inode), start, end)) {
++ if (inode_need_compress(BTRFS_I(inode), start, end)) {
+ WARN_ON(pages);
+ pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
+ if (!pages) {
--- /dev/null
+From df7b16d1c00ecb3da3a30c999cdb39f273c99a2f Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Tue, 24 Aug 2021 14:19:26 +0200
+Subject: Revert "USB: serial: ch341: fix character loss at high transfer rates"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Johan Hovold <johan@kernel.org>
+
+commit df7b16d1c00ecb3da3a30c999cdb39f273c99a2f upstream.
+
+This reverts commit 3c18e9baee0ef97510dcda78c82285f52626764b.
+
+These devices do not appear to send a zero-length packet when the
+transfer size is a multiple of the bulk-endpoint max-packet size. This
+means that incoming data may not be processed by the driver until a
+short packet is received or the receive buffer is full.
+
+Revert back to using endpoint-sized receive buffers to avoid stalled
+reads.
+
+Reported-by: Paul Größel <pb.g@gmx.de>
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=214131
+Fixes: 3c18e9baee0e ("USB: serial: ch341: fix character loss at high transfer rates")
+Cc: stable@vger.kernel.org
+Cc: Willy Tarreau <w@1wt.eu>
+Link: https://lore.kernel.org/r/20210824121926.19311-1-johan@kernel.org
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/serial/ch341.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/usb/serial/ch341.c
++++ b/drivers/usb/serial/ch341.c
+@@ -851,7 +851,6 @@ static struct usb_serial_driver ch341_de
+ .owner = THIS_MODULE,
+ .name = "ch341-uart",
+ },
+- .bulk_in_size = 512,
+ .id_table = id_table,
+ .num_ports = 1,
+ .open = ch341_open,
--- /dev/null
+From 379eb01c21795edb4ca8d342503bd2183a19ec3a Mon Sep 17 00:00:00 2001
+From: Vincent Chen <vincent.chen@sifive.com>
+Date: Tue, 3 Aug 2021 17:27:51 +0800
+Subject: riscv: Ensure the value of FP registers in the core dump file is up to date
+
+From: Vincent Chen <vincent.chen@sifive.com>
+
+commit 379eb01c21795edb4ca8d342503bd2183a19ec3a upstream.
+
+The value of FP registers in the core dump file comes from the
+thread.fstate. However, kernel saves the FP registers to the thread.fstate
+only before scheduling out the process. If no process switch happens
+during the exception handling process, kernel will not have a chance to
+save the latest value of FP registers to thread.fstate. It will cause the
+value of FP registers in the core dump file may be incorrect. To solve this
+problem, this patch force lets kernel save the FP register into the
+thread.fstate if the target task_struct equals the current.
+
+Signed-off-by: Vincent Chen <vincent.chen@sifive.com>
+Reviewed-by: Jisheng Zhang <jszhang@kernel.org>
+Fixes: b8c8a9590e4f ("RISC-V: Add FP register ptrace support for gdb.")
+Cc: stable@vger.kernel.org
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/kernel/ptrace.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/riscv/kernel/ptrace.c
++++ b/arch/riscv/kernel/ptrace.c
+@@ -10,6 +10,7 @@
+ #include <asm/ptrace.h>
+ #include <asm/syscall.h>
+ #include <asm/thread_info.h>
++#include <asm/switch_to.h>
+ #include <linux/audit.h>
+ #include <linux/ptrace.h>
+ #include <linux/elf.h>
+@@ -56,6 +57,9 @@ static int riscv_fpr_get(struct task_str
+ {
+ struct __riscv_d_ext_state *fstate = &target->thread.fstate;
+
++ if (target == current)
++ fstate_save(current, task_pt_regs(current));
++
+ membuf_write(&to, fstate, offsetof(struct __riscv_d_ext_state, fcsr));
+ membuf_store(&to, fstate->fcsr);
+ return membuf_zero(&to, 4); // explicitly pad
--- /dev/null
+From 02c6dcd543f8f051973ee18bfbc4dc3bd595c558 Mon Sep 17 00:00:00 2001
+From: Li Jinlin <lijinlin3@huawei.com>
+Date: Tue, 24 Aug 2021 10:59:21 +0800
+Subject: scsi: core: Fix hang of freezing queue between blocking and running device
+
+From: Li Jinlin <lijinlin3@huawei.com>
+
+commit 02c6dcd543f8f051973ee18bfbc4dc3bd595c558 upstream.
+
+We found a hang, the steps to reproduce are as follows:
+
+ 1. blocking device via scsi_device_set_state()
+
+ 2. dd if=/dev/sda of=/mnt/t.log bs=1M count=10
+
+ 3. echo none > /sys/block/sda/queue/scheduler
+
+ 4. echo "running" >/sys/block/sda/device/state
+
+Step 3 and 4 should complete after step 2, but they hang.
+
+ CPU#0 CPU#1 CPU#2
+ --------------- ---------------- ----------------
+ Step 1: blocking device
+
+ Step 2: dd xxxx
+ ^^^^^^ get request
+ q_usage_counter++
+
+ Step 3: switching scheduler
+ elv_iosched_store
+ elevator_switch
+ blk_mq_freeze_queue
+ blk_freeze_queue
+ > blk_freeze_queue_start
+ ^^^^^^ mq_freeze_depth++
+
+ > blk_mq_run_hw_queues
+ ^^^^^^ can't run queue when dev blocked
+
+ > blk_mq_freeze_queue_wait
+ ^^^^^^ Hang here!!!
+ wait q_usage_counter==0
+
+ Step 4: running device
+ store_state_field
+ scsi_rescan_device
+ scsi_attach_vpd
+ scsi_vpd_inquiry
+ __scsi_execute
+ blk_get_request
+ blk_mq_alloc_request
+ blk_queue_enter
+ ^^^^^^ Hang here!!!
+ wait mq_freeze_depth==0
+
+ blk_mq_run_hw_queues
+ ^^^^^^ dispatch IO, q_usage_counter will reduce to zero
+
+ blk_mq_unfreeze_queue
+ ^^^^^ mq_freeze_depth--
+
+To fix this, we need to run queue before rescanning device when the device
+state changes to SDEV_RUNNING.
+
+Link: https://lore.kernel.org/r/20210824025921.3277629-1-lijinlin3@huawei.com
+Fixes: f0f82e2476f6 ("scsi: core: Fix capacity set to zero after offlinining device")
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Li Jinlin <lijinlin3@huawei.com>
+Signed-off-by: Qiu Laibin <qiulaibin@huawei.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/scsi_sysfs.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -808,12 +808,15 @@ store_state_field(struct device *dev, st
+ ret = scsi_device_set_state(sdev, state);
+ /*
+ * If the device state changes to SDEV_RUNNING, we need to
+- * rescan the device to revalidate it, and run the queue to
+- * avoid I/O hang.
++ * run the queue to avoid I/O hang, and rescan the device
++ * to revalidate it. Running the queue first is necessary
++ * because another thread may be waiting inside
++ * blk_mq_freeze_queue_wait() and because that call may be
++ * waiting for pending I/O to finish.
+ */
+ if (ret == 0 && state == SDEV_RUNNING) {
+- scsi_rescan_device(dev);
+ blk_mq_run_hw_queues(sdev->request_queue, true);
++ scsi_rescan_device(dev);
+ }
+ mutex_unlock(&sdev->state_mutex);
+
blk-iocost-fix-lockdep-warning-on-blkcg-lock.patch
ovl-fix-uninitialized-pointer-read-in-ovl_lookup_rea.patch
net-mscc-fix-non-gpl-export-of-regmap-apis.patch
+can-usb-esd_usb2-esd_usb2_rx_event-fix-the-interchange-of-the-can-rx-and-tx-error-counters.patch
+ceph-correctly-handle-releasing-an-embedded-cap-flush.patch
+dt-bindings-sifive-l2-cache-fix-select-matching.patch
+riscv-ensure-the-value-of-fp-registers-in-the-core-dump-file-is-up-to-date.patch
+powerpc-re-enable-arch_enable_split_pmd_ptlock.patch
+mm-memory_hotplug-fix-potential-permanent-lru-cache-disable.patch
+revert-btrfs-compression-don-t-try-to-compress-if-we-don-t-have-enough-pages.patch
+net-stmmac-fix-kernel-panic-due-to-null-pointer-dereference-of-xsk_pool.patch
+net-stmmac-fix-kernel-panic-due-to-null-pointer-dereference-of-buf-xdp.patch
+drm-i915-fix-syncmap-memory-leak.patch
+drm-i915-dp-drop-redundant-debug-print.patch
+drm-amdgpu-cancel-delayed-work-when-gfxoff-is-disabled.patch
+drm-amdgpu-use-the-preferred-pin-domain-after-the-check.patch
+drm-amdgpu-fix-build-with-missing-pm_suspend_target_state-module-export.patch
+revert-usb-serial-ch341-fix-character-loss-at-high-transfer-rates.patch
+usb-serial-option-add-new-vid-pid-to-support-fibocom-fg150.patch
+usb-renesas-xhci-prefer-firmware-loading-on-unknown-rom-state.patch
+usb-typec-tcpm-raise-vdm_sm_running-flag-only-when-vdm-sm-is-running.patch
+usb-dwc3-gadget-fix-dwc3_calc_trbs_left.patch
+usb-dwc3-gadget-stop-ep0-transfers-during-pullup-disable.patch
+scsi-core-fix-hang-of-freezing-queue-between-blocking-and-running-device.patch
--- /dev/null
+From 51f1954ad853d01ba4dc2b35dee14d8490ee05a1 Mon Sep 17 00:00:00 2001
+From: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Date: Thu, 19 Aug 2021 03:17:03 +0200
+Subject: usb: dwc3: gadget: Fix dwc3_calc_trbs_left()
+
+From: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+
+commit 51f1954ad853d01ba4dc2b35dee14d8490ee05a1 upstream.
+
+We can't depend on the TRB's HWO bit to determine if the TRB ring is
+"full". A TRB is only available when the driver had processed it, not
+when the controller consumed and relinquished the TRB's ownership to the
+driver. Otherwise, the driver may overwrite unprocessed TRBs. This can
+happen when many transfer events accumulate and the system is slow to
+process them and/or when there are too many small requests.
+
+If a request is in the started_list, that means there is one or more
+unprocessed TRBs remained. Check this instead of the TRB's HWO bit
+whether the TRB ring is full.
+
+Fixes: c4233573f6ee ("usb: dwc3: gadget: prepare TRBs on update transfers too")
+Cc: <stable@vger.kernel.org>
+Acked-by: Felipe Balbi <balbi@kernel.org>
+Signed-off-by: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Link: https://lore.kernel.org/r/e91e975affb0d0d02770686afc3a5b9eb84409f6.1629335416.git.Thinh.Nguyen@synopsys.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/dwc3/gadget.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -940,19 +940,19 @@ static struct dwc3_trb *dwc3_ep_prev_trb
+
+ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
+ {
+- struct dwc3_trb *tmp;
+ u8 trbs_left;
+
+ /*
+- * If enqueue & dequeue are equal than it is either full or empty.
+- *
+- * One way to know for sure is if the TRB right before us has HWO bit
+- * set or not. If it has, then we're definitely full and can't fit any
+- * more transfers in our ring.
++ * If the enqueue & dequeue are equal then the TRB ring is either full
++ * or empty. It's considered full when there are DWC3_TRB_NUM-1 of TRBs
++ * pending to be processed by the driver.
+ */
+ if (dep->trb_enqueue == dep->trb_dequeue) {
+- tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
+- if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
++ /*
++ * If there is any request remained in the started_list at
++ * this point, that means there is no TRB available.
++ */
++ if (!list_empty(&dep->started_list))
+ return 0;
+
+ return DWC3_TRB_NUM - 1;
--- /dev/null
+From 4a1e25c0a029b97ea4a3d423a6392bfacc3b2e39 Mon Sep 17 00:00:00 2001
+From: Wesley Cheng <wcheng@codeaurora.org>
+Date: Tue, 24 Aug 2021 21:28:55 -0700
+Subject: usb: dwc3: gadget: Stop EP0 transfers during pullup disable
+
+From: Wesley Cheng <wcheng@codeaurora.org>
+
+commit 4a1e25c0a029b97ea4a3d423a6392bfacc3b2e39 upstream.
+
+During a USB cable disconnect, or soft disconnect scenario, a pending
+SETUP transaction may not be completed, leading to the following
+error:
+
+ dwc3 a600000.dwc3: timed out waiting for SETUP phase
+
+If this occurs, then the entire pullup disable routine is skipped and
+proper cleanup and halting of the controller does not complete.
+
+Instead of returning an error (which is ignored from the UDC
+perspective), allow the pullup disable routine to continue, which
+will also handle disabling of EP0/1. This will end any active
+transfers as well. Ensure to clear any delayed_status also, as the
+timeout could happen within the STATUS stage.
+
+Fixes: bb0147364850 ("usb: dwc3: gadget: don't clear RUN/STOP when it's invalid to do so")
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Acked-by: Felipe Balbi <balbi@kernel.org>
+Signed-off-by: Wesley Cheng <wcheng@codeaurora.org>
+Link: https://lore.kernel.org/r/20210825042855.7977-1-wcheng@codeaurora.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/dwc3/gadget.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2243,10 +2243,8 @@ static int dwc3_gadget_pullup(struct usb
+
+ ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
+ msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
+- if (ret == 0) {
+- dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
+- return -ETIMEDOUT;
+- }
++ if (ret == 0)
++ dev_warn(dwc->dev, "timed out waiting for SETUP phase\n");
+ }
+
+ /*
+@@ -2458,6 +2456,7 @@ static int __dwc3_gadget_start(struct dw
+ /* begin to receive SETUP packets */
+ dwc->ep0state = EP0_SETUP_PHASE;
+ dwc->link_state = DWC3_LINK_STATE_SS_DIS;
++ dwc->delayed_status = false;
+ dwc3_ep0_out_start(dwc);
+
+ dwc3_gadget_enable_irq(dwc);
--- /dev/null
+From c82cacd2f1e622a461a77d275a75d7e19e7635a3 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Thu, 26 Aug 2021 14:41:27 +0200
+Subject: usb: renesas-xhci: Prefer firmware loading on unknown ROM state
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit c82cacd2f1e622a461a77d275a75d7e19e7635a3 upstream.
+
+The recent attempt to handle an unknown ROM state in the commit
+d143825baf15 ("usb: renesas-xhci: Fix handling of unknown ROM state")
+resulted in a regression and reverted later by the commit 44cf53602f5a
+("Revert "usb: renesas-xhci: Fix handling of unknown ROM state"").
+The problem of the former fix was that it treated the failure of
+firmware loading as a fatal error. Since the firmware files aren't
+included in the standard linux-firmware tree, most users don't have
+them, hence they got the non-working system after that. The revert
+fixed the regression, but also it didn't make the firmware loading
+triggered even on the devices that do need it. So we need still a fix
+for them.
+
+This is another attempt to handle the unknown ROM state. Like the
+previous fix, this also tries to load the firmware when ROM shows
+unknown state. In this patch, however, the failure of a firmware
+loading (such as a missing firmware file) isn't handled as a fatal
+error any longer when ROM has been already detected, but it falls back
+to the ROM mode like before. The error is returned only when no ROM
+is detected and the firmware loading failed.
+
+Along with it, for simplifying the code flow, the detection and the
+check of ROM is factored out from renesas_fw_check_running() and done
+in the caller side, renesas_xhci_check_request_fw(). It avoids the
+redundant ROM checks.
+
+The patch was tested on Lenovo Thinkpad T14 gen (BIOS 1.34). Also it
+was confirmed that no regression is seen on another Thinkpad T14
+machine that has worked without the patch, too.
+
+Fixes: 44cf53602f5a ("Revert "usb: renesas-xhci: Fix handling of unknown ROM state"")
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+BugLink: https://bugzilla.opensuse.org/show_bug.cgi?id=1189207
+Link: https://lore.kernel.org/r/20210826124127.14789-1-tiwai@suse.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/host/xhci-pci-renesas.c | 35 +++++++++++++++++++----------
+ 1 file changed, 23 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c
+index 5923844ed821..ef5e91a5542d 100644
+--- a/drivers/usb/host/xhci-pci-renesas.c
++++ b/drivers/usb/host/xhci-pci-renesas.c
+@@ -207,7 +207,8 @@ static int renesas_check_rom_state(struct pci_dev *pdev)
+ return 0;
+
+ case RENESAS_ROM_STATUS_NO_RESULT: /* No result yet */
+- return 0;
++ dev_dbg(&pdev->dev, "Unknown ROM status ...\n");
++ return -ENOENT;
+
+ case RENESAS_ROM_STATUS_ERROR: /* Error State */
+ default: /* All other states are marked as "Reserved states" */
+@@ -224,14 +225,6 @@ static int renesas_fw_check_running(struct pci_dev *pdev)
+ u8 fw_state;
+ int err;
+
+- /* Check if device has ROM and loaded, if so skip everything */
+- err = renesas_check_rom(pdev);
+- if (err) { /* we have rom */
+- err = renesas_check_rom_state(pdev);
+- if (!err)
+- return err;
+- }
+-
+ /*
+ * Test if the device is actually needing the firmware. As most
+ * BIOSes will initialize the device for us. If the device is
+@@ -591,21 +584,39 @@ int renesas_xhci_check_request_fw(struct pci_dev *pdev,
+ (struct xhci_driver_data *)id->driver_data;
+ const char *fw_name = driver_data->firmware;
+ const struct firmware *fw;
++ bool has_rom;
+ int err;
+
++ /* Check if device has ROM and loaded, if so skip everything */
++ has_rom = renesas_check_rom(pdev);
++ if (has_rom) {
++ err = renesas_check_rom_state(pdev);
++ if (!err)
++ return 0;
++ else if (err != -ENOENT)
++ has_rom = false;
++ }
++
+ err = renesas_fw_check_running(pdev);
+ /* Continue ahead, if the firmware is already running. */
+ if (err == 0)
+ return 0;
+
++ /* no firmware interface available */
+ if (err != 1)
+- return err;
++ return has_rom ? 0 : err;
+
+ pci_dev_get(pdev);
+- err = request_firmware(&fw, fw_name, &pdev->dev);
++ err = firmware_request_nowarn(&fw, fw_name, &pdev->dev);
+ pci_dev_put(pdev);
+ if (err) {
+- dev_err(&pdev->dev, "request_firmware failed: %d\n", err);
++ if (has_rom) {
++ dev_info(&pdev->dev, "failed to load firmware %s, fallback to ROM\n",
++ fw_name);
++ return 0;
++ }
++ dev_err(&pdev->dev, "failed to load firmware %s: %d\n",
++ fw_name, err);
+ return err;
+ }
+
+--
+2.32.0
+
--- /dev/null
+From 2829a4e3cf3a6ac2fa3cdb681b37574630fb9c1a Mon Sep 17 00:00:00 2001
+From: Zhengjun Zhang <zhangzhengjun@aicrobo.com>
+Date: Mon, 9 Aug 2021 21:35:53 +0800
+Subject: USB: serial: option: add new VID/PID to support Fibocom FG150
+
+From: Zhengjun Zhang <zhangzhengjun@aicrobo.com>
+
+commit 2829a4e3cf3a6ac2fa3cdb681b37574630fb9c1a upstream.
+
+Fibocom FG150 is a 5G module based on Qualcomm SDX55 platform,
+support Sub-6G band.
+
+Here are the outputs of lsusb -v and usb-devices:
+
+> T: Bus=02 Lev=01 Prnt=01 Port=01 Cnt=01 Dev#= 2 Spd=5000 MxCh= 0
+> D: Ver= 3.20 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 9 #Cfgs= 1
+> P: Vendor=2cb7 ProdID=010b Rev=04.14
+> S: Manufacturer=Fibocom
+> S: Product=Fibocom Modem_SN:XXXXXXXX
+> S: SerialNumber=XXXXXXXX
+> C: #Ifs= 5 Cfg#= 1 Atr=a0 MxPwr=896mA
+> I: If#=0x0 Alt= 0 #EPs= 1 Cls=ef(misc ) Sub=04 Prot=01 Driver=rndis_host
+> I: If#=0x1 Alt= 0 #EPs= 2 Cls=0a(data ) Sub=00 Prot=00 Driver=rndis_host
+> I: If#=0x2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=(none)
+> I: If#=0x3 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=30 Driver=(none)
+> I: If#=0x4 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=42 Prot=01 Driver=(none)
+
+> Bus 002 Device 002: ID 2cb7:010b Fibocom Fibocom Modem_SN:XXXXXXXX
+> Device Descriptor:
+> bLength 18
+> bDescriptorType 1
+> bcdUSB 3.20
+> bDeviceClass 0
+> bDeviceSubClass 0
+> bDeviceProtocol 0
+> bMaxPacketSize0 9
+> idVendor 0x2cb7 Fibocom
+> idProduct 0x010b
+> bcdDevice 4.14
+> iManufacturer 1 Fibocom
+> iProduct 2 Fibocom Modem_SN:XXXXXXXX
+> iSerial 3 XXXXXXXX
+> bNumConfigurations 1
+> Configuration Descriptor:
+> bLength 9
+> bDescriptorType 2
+> wTotalLength 0x00e6
+> bNumInterfaces 5
+> bConfigurationValue 1
+> iConfiguration 4 RNDIS_DUN_DIAG_ADB
+> bmAttributes 0xa0
+> (Bus Powered)
+> Remote Wakeup
+> MaxPower 896mA
+> Interface Association:
+> bLength 8
+> bDescriptorType 11
+> bFirstInterface 0
+> bInterfaceCount 2
+> bFunctionClass 239 Miscellaneous Device
+> bFunctionSubClass 4
+> bFunctionProtocol 1
+> iFunction 7 RNDIS
+> Interface Descriptor:
+> bLength 9
+> bDescriptorType 4
+> bInterfaceNumber 0
+> bAlternateSetting 0
+> bNumEndpoints 1
+> bInterfaceClass 239 Miscellaneous Device
+> bInterfaceSubClass 4
+> bInterfaceProtocol 1
+> iInterface 0
+> ** UNRECOGNIZED: 05 24 00 10 01
+> ** UNRECOGNIZED: 05 24 01 00 01
+> ** UNRECOGNIZED: 04 24 02 00
+> ** UNRECOGNIZED: 05 24 06 00 01
+> Endpoint Descriptor:
+> bLength 7
+> bDescriptorType 5
+> bEndpointAddress 0x81 EP 1 IN
+> bmAttributes 3
+> Transfer Type Interrupt
+> Synch Type None
+> Usage Type Data
+> wMaxPacketSize 0x0008 1x 8 bytes
+> bInterval 9
+> bMaxBurst 0
+> Interface Descriptor:
+> bLength 9
+> bDescriptorType 4
+> bInterfaceNumber 1
+> bAlternateSetting 0
+> bNumEndpoints 2
+> bInterfaceClass 10 CDC Data
+> bInterfaceSubClass 0
+> bInterfaceProtocol 0
+> iInterface 0
+> Endpoint Descriptor:
+> bLength 7
+> bDescriptorType 5
+> bEndpointAddress 0x8e EP 14 IN
+> bmAttributes 2
+> Transfer Type Bulk
+> Synch Type None
+> Usage Type Data
+> wMaxPacketSize 0x0400 1x 1024 bytes
+> bInterval 0
+> bMaxBurst 6
+> Endpoint Descriptor:
+> bLength 7
+> bDescriptorType 5
+> bEndpointAddress 0x0f EP 15 OUT
+> bmAttributes 2
+> Transfer Type Bulk
+> Synch Type None
+> Usage Type Data
+> wMaxPacketSize 0x0400 1x 1024 bytes
+> bInterval 0
+> bMaxBurst 6
+> Interface Descriptor:
+> bLength 9
+> bDescriptorType 4
+> bInterfaceNumber 2
+> bAlternateSetting 0
+> bNumEndpoints 3
+> bInterfaceClass 255 Vendor Specific Class
+> bInterfaceSubClass 0
+> bInterfaceProtocol 0
+> iInterface 0
+> ** UNRECOGNIZED: 05 24 00 10 01
+> ** UNRECOGNIZED: 05 24 01 00 00
+> ** UNRECOGNIZED: 04 24 02 02
+> ** UNRECOGNIZED: 05 24 06 00 00
+> Endpoint Descriptor:
+> bLength 7
+> bDescriptorType 5
+> bEndpointAddress 0x83 EP 3 IN
+> bmAttributes 3
+> Transfer Type Interrupt
+> Synch Type None
+> Usage Type Data
+> wMaxPacketSize 0x000a 1x 10 bytes
+> bInterval 9
+> bMaxBurst 0
+> Endpoint Descriptor:
+> bLength 7
+> bDescriptorType 5
+> bEndpointAddress 0x82 EP 2 IN
+> bmAttributes 2
+> Transfer Type Bulk
+> Synch Type None
+> Usage Type Data
+> wMaxPacketSize 0x0400 1x 1024 bytes
+> bInterval 0
+> bMaxBurst 0
+> Endpoint Descriptor:
+> bLength 7
+> bDescriptorType 5
+> bEndpointAddress 0x01 EP 1 OUT
+> bmAttributes 2
+> Transfer Type Bulk
+> Synch Type None
+> Usage Type Data
+> wMaxPacketSize 0x0400 1x 1024 bytes
+> bInterval 0
+> bMaxBurst 0
+> Interface Descriptor:
+> bLength 9
+> bDescriptorType 4
+> bInterfaceNumber 3
+> bAlternateSetting 0
+> bNumEndpoints 2
+> bInterfaceClass 255 Vendor Specific Class
+> bInterfaceSubClass 255 Vendor Specific Subclass
+> bInterfaceProtocol 48
+> iInterface 0
+> Endpoint Descriptor:
+> bLength 7
+> bDescriptorType 5
+> bEndpointAddress 0x84 EP 4 IN
+> bmAttributes 2
+> Transfer Type Bulk
+> Synch Type None
+> Usage Type Data
+> wMaxPacketSize 0x0400 1x 1024 bytes
+> bInterval 0
+> bMaxBurst 0
+> Endpoint Descriptor:
+> bLength 7
+> bDescriptorType 5
+> bEndpointAddress 0x02 EP 2 OUT
+> bmAttributes 2
+> Transfer Type Bulk
+> Synch Type None
+> Usage Type Data
+> wMaxPacketSize 0x0400 1x 1024 bytes
+> bInterval 0
+> bMaxBurst 0
+> Interface Descriptor:
+> bLength 9
+> bDescriptorType 4
+> bInterfaceNumber 4
+> bAlternateSetting 0
+> bNumEndpoints 2
+> bInterfaceClass 255 Vendor Specific Class
+> bInterfaceSubClass 66
+> bInterfaceProtocol 1
+> iInterface 0
+> Endpoint Descriptor:
+> bLength 7
+> bDescriptorType 5
+> bEndpointAddress 0x03 EP 3 OUT
+> bmAttributes 2
+> Transfer Type Bulk
+> Synch Type None
+> Usage Type Data
+> wMaxPacketSize 0x0400 1x 1024 bytes
+> bInterval 0
+> bMaxBurst 0
+> Endpoint Descriptor:
+> bLength 7
+> bDescriptorType 5
+> bEndpointAddress 0x85 EP 5 IN
+> bmAttributes 2
+> Transfer Type Bulk
+> Synch Type None
+> Usage Type Data
+> wMaxPacketSize 0x0400 1x 1024 bytes
+> bInterval 0
+> bMaxBurst 0
+> Binary Object Store Descriptor:
+> bLength 5
+> bDescriptorType 15
+> wTotalLength 0x0016
+> bNumDeviceCaps 2
+> USB 2.0 Extension Device Capability:
+> bLength 7
+> bDescriptorType 16
+> bDevCapabilityType 2
+> bmAttributes 0x00000006
+> BESL Link Power Management (LPM) Supported
+> SuperSpeed USB Device Capability:
+> bLength 10
+> bDescriptorType 16
+> bDevCapabilityType 3
+> bmAttributes 0x00
+> wSpeedsSupported 0x000f
+> Device can operate at Low Speed (1Mbps)
+> Device can operate at Full Speed (12Mbps)
+> Device can operate at High Speed (480Mbps)
+> Device can operate at SuperSpeed (5Gbps)
+> bFunctionalitySupport 1
+> Lowest fully-functional device speed is Full Speed (12Mbps)
+> bU1DevExitLat 1 micro seconds
+> bU2DevExitLat 500 micro seconds
+> Device Status: 0x0000
+> (Bus Powered)
+
+Signed-off-by: Zhengjun Zhang <zhangzhengjun@aicrobo.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/serial/option.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2074,6 +2074,8 @@ static const struct usb_device_id option
+ .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
+ .driver_info = RSVD(6) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
+ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
--- /dev/null
+From ef52b4a9fcc24e17e81cc60357e6107ae4e9c48e Mon Sep 17 00:00:00 2001
+From: Kyle Tso <kyletso@google.com>
+Date: Thu, 26 Aug 2021 20:42:01 +0800
+Subject: usb: typec: tcpm: Raise vdm_sm_running flag only when VDM SM is running
+
+From: Kyle Tso <kyletso@google.com>
+
+commit ef52b4a9fcc24e17e81cc60357e6107ae4e9c48e upstream.
+
+If the port is going to send Discover_Identity Message, vdm_sm_running
+flag was intentionally set before entering Ready States in order to
+avoid the conflict because the port and the port partner might start
+AMS at almost the same time after entering Ready States.
+
+However, the original design has a problem. When the port is doing
+DR_SWAP from Device to Host, it raises the flag. Later in the
+tcpm_send_discover_work, the flag blocks the procedure of sending the
+Discover_Identity and it might never be cleared until disconnection.
+
+Since there exists another flag send_discover representing that the port
+is going to send Discover_Identity or not, it is enough to use that flag
+to prevent the conflict. Also change the timing of the set/clear of
+vdm_sm_running to indicate whether the VDM SM is actually running or
+not.
+
+Fixes: c34e85fa69b9 ("usb: typec: tcpm: Send DISCOVER_IDENTITY from dedicated work")
+Cc: stable <stable@vger.kernel.org>
+Cc: Badhri Jagan Sridharan <badhri@google.com>
+Reviewed-by: Guenter Roeck <linux@roeck-us.net>
+Acked-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Signed-off-by: Kyle Tso <kyletso@google.com>
+Link: https://lore.kernel.org/r/20210826124201.1562502-1-kyletso@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/typec/tcpm/tcpm.c | 81 +++++++++++++++++++-----------------------
+ 1 file changed, 38 insertions(+), 43 deletions(-)
+
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -341,6 +341,7 @@ struct tcpm_port {
+ bool vbus_source;
+ bool vbus_charge;
+
++ /* Set to true when Discover_Identity Command is expected to be sent in Ready states. */
+ bool send_discover;
+ bool op_vsafe5v;
+
+@@ -370,6 +371,7 @@ struct tcpm_port {
+ struct hrtimer send_discover_timer;
+ struct kthread_work send_discover_work;
+ bool state_machine_running;
++ /* Set to true when VDM State Machine has following actions. */
+ bool vdm_sm_running;
+
+ struct completion tx_complete;
+@@ -1403,6 +1405,7 @@ static void tcpm_queue_vdm(struct tcpm_p
+ /* Set ready, vdm state machine will actually send */
+ port->vdm_retries = 0;
+ port->vdm_state = VDM_STATE_READY;
++ port->vdm_sm_running = true;
+
+ mod_vdm_delayed_work(port, 0);
+ }
+@@ -1645,7 +1648,6 @@ static int tcpm_pd_svdm(struct tcpm_port
+ rlen = 1;
+ } else {
+ tcpm_register_partner_altmodes(port);
+- port->vdm_sm_running = false;
+ }
+ break;
+ case CMD_ENTER_MODE:
+@@ -1693,14 +1695,12 @@ static int tcpm_pd_svdm(struct tcpm_port
+ (VDO_SVDM_VERS(svdm_version));
+ break;
+ }
+- port->vdm_sm_running = false;
+ break;
+ default:
+ response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
+ rlen = 1;
+ response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
+ (VDO_SVDM_VERS(svdm_version));
+- port->vdm_sm_running = false;
+ break;
+ }
+
+@@ -1741,6 +1741,20 @@ static void tcpm_handle_vdm_request(stru
+ }
+
+ if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) {
++ /*
++ * Here a SVDM is received (INIT or RSP or unknown). Set the vdm_sm_running in
++ * advance because we are dropping the lock but may send VDMs soon.
++ * For the cases of INIT received:
++ * - If no response to send, it will be cleared later in this function.
++ * - If there are responses to send, it will be cleared in the state machine.
++ * For the cases of RSP received:
++ * - If no further INIT to send, it will be cleared later in this function.
++ * - Otherwise, it will be cleared in the state machine if timeout or it will go
++ * back here until no further INIT to send.
++ * For the cases of unknown type received:
++ * - We will send NAK and the flag will be cleared in the state machine.
++ */
++ port->vdm_sm_running = true;
+ rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action);
+ } else {
+ if (port->negotiated_rev >= PD_REV30)
+@@ -1809,6 +1823,8 @@ static void tcpm_handle_vdm_request(stru
+
+ if (rlen > 0)
+ tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
++ else
++ port->vdm_sm_running = false;
+ }
+
+ static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
+@@ -1874,8 +1890,10 @@ static void vdm_run_state_machine(struct
+ * if there's traffic or we're not in PDO ready state don't send
+ * a VDM.
+ */
+- if (port->state != SRC_READY && port->state != SNK_READY)
++ if (port->state != SRC_READY && port->state != SNK_READY) {
++ port->vdm_sm_running = false;
+ break;
++ }
+
+ /* TODO: AMS operation for Unstructured VDM */
+ if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) {
+@@ -2528,10 +2546,6 @@ static void tcpm_pd_ctrl_request(struct
+ TYPEC_PWR_MODE_PD,
+ port->pps_data.active,
+ port->supply_voltage);
+- /* Set VDM running flag ASAP */
+- if (port->data_role == TYPEC_HOST &&
+- port->send_discover)
+- port->vdm_sm_running = true;
+ tcpm_set_state(port, SNK_READY, 0);
+ } else {
+ /*
+@@ -2569,14 +2583,10 @@ static void tcpm_pd_ctrl_request(struct
+ switch (port->state) {
+ case SNK_NEGOTIATE_CAPABILITIES:
+ /* USB PD specification, Figure 8-43 */
+- if (port->explicit_contract) {
++ if (port->explicit_contract)
+ next_state = SNK_READY;
+- if (port->data_role == TYPEC_HOST &&
+- port->send_discover)
+- port->vdm_sm_running = true;
+- } else {
++ else
+ next_state = SNK_WAIT_CAPABILITIES;
+- }
+
+ /* Threshold was relaxed before sending Request. Restore it back. */
+ tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
+@@ -2591,10 +2601,6 @@ static void tcpm_pd_ctrl_request(struct
+ port->pps_status = (type == PD_CTRL_WAIT ?
+ -EAGAIN : -EOPNOTSUPP);
+
+- if (port->data_role == TYPEC_HOST &&
+- port->send_discover)
+- port->vdm_sm_running = true;
+-
+ /* Threshold was relaxed before sending Request. Restore it back. */
+ tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
+ port->pps_data.active,
+@@ -2670,10 +2676,6 @@ static void tcpm_pd_ctrl_request(struct
+ }
+ break;
+ case DR_SWAP_SEND:
+- if (port->data_role == TYPEC_DEVICE &&
+- port->send_discover)
+- port->vdm_sm_running = true;
+-
+ tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
+ break;
+ case PR_SWAP_SEND:
+@@ -2711,7 +2713,7 @@ static void tcpm_pd_ctrl_request(struct
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
+ } else {
+- if (port->vdm_sm_running) {
++ if (port->send_discover) {
+ tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
+ break;
+ }
+@@ -2727,7 +2729,7 @@ static void tcpm_pd_ctrl_request(struct
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
+ } else {
+- if (port->vdm_sm_running) {
++ if (port->send_discover) {
+ tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
+ break;
+ }
+@@ -2736,7 +2738,7 @@ static void tcpm_pd_ctrl_request(struct
+ }
+ break;
+ case PD_CTRL_VCONN_SWAP:
+- if (port->vdm_sm_running) {
++ if (port->send_discover) {
+ tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
+ break;
+ }
+@@ -4470,18 +4472,20 @@ static void run_state_machine(struct tcp
+ /* DR_Swap states */
+ case DR_SWAP_SEND:
+ tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
++ if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
++ port->send_discover = true;
+ tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
+ PD_T_SENDER_RESPONSE);
+ break;
+ case DR_SWAP_ACCEPT:
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+- /* Set VDM state machine running flag ASAP */
+- if (port->data_role == TYPEC_DEVICE && port->send_discover)
+- port->vdm_sm_running = true;
++ if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
++ port->send_discover = true;
+ tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
+ break;
+ case DR_SWAP_SEND_TIMEOUT:
+ tcpm_swap_complete(port, -ETIMEDOUT);
++ port->send_discover = false;
+ tcpm_ams_finish(port);
+ tcpm_set_state(port, ready_state(port), 0);
+ break;
+@@ -4493,7 +4497,6 @@ static void run_state_machine(struct tcp
+ } else {
+ tcpm_set_roles(port, true, port->pwr_role,
+ TYPEC_HOST);
+- port->send_discover = true;
+ }
+ tcpm_ams_finish(port);
+ tcpm_set_state(port, ready_state(port), 0);
+@@ -4633,8 +4636,6 @@ static void run_state_machine(struct tcp
+ break;
+ case VCONN_SWAP_SEND_TIMEOUT:
+ tcpm_swap_complete(port, -ETIMEDOUT);
+- if (port->data_role == TYPEC_HOST && port->send_discover)
+- port->vdm_sm_running = true;
+ tcpm_set_state(port, ready_state(port), 0);
+ break;
+ case VCONN_SWAP_START:
+@@ -4650,14 +4651,10 @@ static void run_state_machine(struct tcp
+ case VCONN_SWAP_TURN_ON_VCONN:
+ tcpm_set_vconn(port, true);
+ tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
+- if (port->data_role == TYPEC_HOST && port->send_discover)
+- port->vdm_sm_running = true;
+ tcpm_set_state(port, ready_state(port), 0);
+ break;
+ case VCONN_SWAP_TURN_OFF_VCONN:
+ tcpm_set_vconn(port, false);
+- if (port->data_role == TYPEC_HOST && port->send_discover)
+- port->vdm_sm_running = true;
+ tcpm_set_state(port, ready_state(port), 0);
+ break;
+
+@@ -4665,8 +4662,6 @@ static void run_state_machine(struct tcp
+ case PR_SWAP_CANCEL:
+ case VCONN_SWAP_CANCEL:
+ tcpm_swap_complete(port, port->swap_status);
+- if (port->data_role == TYPEC_HOST && port->send_discover)
+- port->vdm_sm_running = true;
+ if (port->pwr_role == TYPEC_SOURCE)
+ tcpm_set_state(port, SRC_READY, 0);
+ else
+@@ -5016,9 +5011,6 @@ static void _tcpm_pd_vbus_on(struct tcpm
+ switch (port->state) {
+ case SNK_TRANSITION_SINK_VBUS:
+ port->explicit_contract = true;
+- /* Set the VDM flag ASAP */
+- if (port->data_role == TYPEC_HOST && port->send_discover)
+- port->vdm_sm_running = true;
+ tcpm_set_state(port, SNK_READY, 0);
+ break;
+ case SNK_DISCOVERY:
+@@ -5412,15 +5404,18 @@ static void tcpm_send_discover_work(stru
+ if (!port->send_discover)
+ goto unlock;
+
++ if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
++ port->send_discover = false;
++ goto unlock;
++ }
++
+ /* Retry if the port is not idle */
+ if ((port->state != SRC_READY && port->state != SNK_READY) || port->vdm_sm_running) {
+ mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
+ goto unlock;
+ }
+
+- /* Only send the Message if the port is host for PD rev2.0 */
+- if (port->data_role == TYPEC_HOST || port->negotiated_rev > PD_REV20)
+- tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
++ tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
+
+ unlock:
+ mutex_unlock(&port->lock);