--- /dev/null
+From 4b31b92b143f7d209f3d494c56d4c4673e9fc53d Mon Sep 17 00:00:00 2001
+From: Harsh Jain <harsh.jain@amd.com>
+Date: Wed, 2 Nov 2022 15:23:08 +0530
+Subject: drm/amdgpu: complete gfxoff allow signal during suspend without delay
+
+From: Harsh Jain <harsh.jain@amd.com>
+
+commit 4b31b92b143f7d209f3d494c56d4c4673e9fc53d upstream.
+
+This change guarantees that gfxoff is allowed before moving further in
+the s2idle sequence, making gfxoff handling in the amdgpu IP suspend
+flow more reliable.
+
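+As a rough userspace model of the resulting control flow (field and
+function names only mirror the amdgpu code in the diff below; this is
+an illustration, not kernel code):
+
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    struct gfx_state { int gfx_off_req_count; bool gfx_off_state; bool in_s0ix; };
+
+    /* stands in for amdgpu_dpm_set_powergating_by_smu(); 0 means success */
+    static int powergate_gfx(struct gfx_state *s) { (void)s; return 0; }
+
+    static void gfx_off_allow(struct gfx_state *s)
+    {
+            if (s->gfx_off_req_count || s->gfx_off_state)
+                    return;
+            if (s->in_s0ix) {
+                    /* s2idle: complete the gfxoff allow synchronously */
+                    if (!powergate_gfx(s))
+                            s->gfx_off_state = true;
+            } else {
+                    /* normal path keeps the deferred delayed-work behavior */
+                    printf("schedule_delayed_work(gfx_off_delay_work)\n");
+            }
+    }
+
+    int main(void)
+    {
+            struct gfx_state s = { .in_s0ix = true };
+            gfx_off_allow(&s);
+            printf("gfx_off_state=%d\n", s.gfx_off_state);
+            return 0;
+    }
+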
+Signed-off-by: Harsh Jain <harsh.jain@amd.com>
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: "Limonciello, Mario" <Mario.Limonciello@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -586,10 +586,14 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_d
+ if (adev->gfx.gfx_off_req_count == 0 &&
+ !adev->gfx.gfx_off_state) {
+ /* If going to s2idle, no need to wait */
+- if (adev->in_s0ix)
+- delay = GFX_OFF_NO_DELAY;
+- schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
++ if (adev->in_s0ix) {
++ if (!amdgpu_dpm_set_powergating_by_smu(adev,
++ AMD_IP_BLOCK_TYPE_GFX, true))
++ adev->gfx.gfx_off_state = true;
++ } else {
++ schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+ delay);
++ }
+ }
+ } else {
+ if (adev->gfx.gfx_off_req_count == 0) {
--- /dev/null
+From 8579538c89e33ce78be2feb41e07489c8cbf8f31 Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Fri, 20 Jan 2023 16:38:06 +0000
+Subject: io_uring/msg_ring: fix remote queue to disabled ring
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+commit 8579538c89e33ce78be2feb41e07489c8cbf8f31 upstream.
+
+IORING_SETUP_R_DISABLED rings don't have the submitter task set, so
+it's not always safe to use ->submitter_task. Disallow posting msg_ring
+messages to disabled rings. Also add a task NULL check for the loose
+sync around testing for IORING_SETUP_R_DISABLED.
+
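+A minimal liburing sketch of the case this now rejects (assumes
+liburing >= 2.2 and a kernel with this fix; error handling omitted):
+
+    #include <liburing.h>
+    #include <stdio.h>
+    #include <string.h>
+
+    int main(void)
+    {
+            struct io_uring src, dst;
+            struct io_uring_params p;
+            struct io_uring_sqe *sqe;
+            struct io_uring_cqe *cqe;
+
+            memset(&p, 0, sizeof(p));
+            p.flags = IORING_SETUP_R_DISABLED;   /* target starts disabled */
+
+            io_uring_queue_init(8, &src, 0);
+            io_uring_queue_init_params(8, &dst, &p);
+
+            sqe = io_uring_get_sqe(&src);
+            io_uring_prep_msg_ring(sqe, dst.ring_fd, 0, 0xcafe, 0);
+            io_uring_submit(&src);
+
+            io_uring_wait_cqe(&src, &cqe);
+            printf("msg_ring res=%d (expect -EBADFD)\n", cqe->res);
+            return 0;
+    }
+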
+Cc: stable@vger.kernel.org
+Fixes: 6d043ee1164ca ("io_uring: do msg_ring in target task via tw")
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/msg_ring.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/io_uring/msg_ring.c
++++ b/io_uring/msg_ring.c
+@@ -30,6 +30,8 @@ static int io_msg_ring_data(struct io_ki
+
+ if (msg->src_fd || msg->dst_fd || msg->flags)
+ return -EINVAL;
++ if (target_ctx->flags & IORING_SETUP_R_DISABLED)
++ return -EBADFD;
+
+ if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
+ return 0;
+@@ -84,6 +86,8 @@ static int io_msg_send_fd(struct io_kioc
+
+ if (target_ctx == ctx)
+ return -EINVAL;
++ if (target_ctx->flags & IORING_SETUP_R_DISABLED)
++ return -EBADFD;
+
+ ret = io_double_lock_ctx(ctx, target_ctx, issue_flags);
+ if (unlikely(ret))
bluetooth-hci_sync-cancel-cmd_timer-if-hci_open-fail.patch
drm-i915-allow-panel-fixed-modes-to-have-differing-s.patch
drm-i915-allow-alternate-fixed-modes-always-for-edp.patch
+drm-amdgpu-complete-gfxoff-allow-signal-during-suspend-without-delay.patch
+io_uring-msg_ring-fix-remote-queue-to-disabled-ring.patch
+wifi-mac80211-proper-mark-itxqs-for-resumption.patch
+wifi-mac80211-fix-itxq-ampdu-fragmentation-handling.patch
--- /dev/null
+From 592234e941f1addaa598601c9227e3b72d608625 Mon Sep 17 00:00:00 2001
+From: Alexander Wetzel <alexander@wetzel-home.de>
+Date: Fri, 6 Jan 2023 23:31:41 +0100
+Subject: wifi: mac80211: Fix iTXQ AMPDU fragmentation handling
+
+From: Alexander Wetzel <alexander@wetzel-home.de>
+
+commit 592234e941f1addaa598601c9227e3b72d608625 upstream.
+
+mac80211 must not enable aggregation while transmitting a fragmented
+MPDU. Enforce that for mac80211 internal TX queues (iTXQs).
+
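+A compact, illustrative model of the invariant being enforced (plain
+userspace C, not mac80211 code): a frame that is part of an A-MPDU is
+never fragmented, and with iTXQs that decision is taken at dequeue time:
+
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    /* per-frame decision: aggregation and fragmentation never mix */
+    static bool want_fragmentation(bool in_ampdu, unsigned int len,
+                                   unsigned int frag_threshold)
+    {
+            if (in_ampdu)
+                    return false;   /* never fragment an A-MPDU subframe */
+            return len > frag_threshold;
+    }
+
+    int main(void)
+    {
+            printf("%d\n", want_fragmentation(true, 3000, 2346));  /* 0 */
+            printf("%d\n", want_fragmentation(false, 3000, 2346)); /* 1 */
+            return 0;
+    }
+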
+Reported-by: kernel test robot <oliver.sang@intel.com>
+Link: https://lore.kernel.org/oe-lkp/202301021738.7cd3e6ae-oliver.sang@intel.com
+Signed-off-by: Alexander Wetzel <alexander@wetzel-home.de>
+Link: https://lore.kernel.org/r/20230106223141.98696-1-alexander@wetzel-home.de
+Cc: stable@vger.kernel.org
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mac80211/agg-tx.c | 2 --
+ net/mac80211/ht.c | 37 +++++++++++++++++++++++++++++++++++++
+ net/mac80211/tx.c | 13 +++++++------
+ 3 files changed, 44 insertions(+), 8 deletions(-)
+
+--- a/net/mac80211/agg-tx.c
++++ b/net/mac80211/agg-tx.c
+@@ -511,8 +511,6 @@ void ieee80211_tx_ba_session_handle_star
+ */
+ clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
+
+- ieee80211_agg_stop_txq(sta, tid);
+-
+ /*
+ * Make sure no packets are being processed. This ensures that
+ * we have a valid starting sequence number and that in-flight
+--- a/net/mac80211/ht.c
++++ b/net/mac80211/ht.c
+@@ -391,6 +391,43 @@ void ieee80211_ba_session_work(struct wo
+
+ tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
+ if (!blocked && tid_tx) {
++ struct ieee80211_sub_if_data *sdata = sta->sdata;
++ struct ieee80211_local *local = sdata->local;
++
++ if (local->ops->wake_tx_queue) {
++ struct txq_info *txqi =
++ to_txq_info(sta->sta.txq[tid]);
++ struct fq *fq = &local->fq;
++
++ spin_lock_bh(&fq->lock);
++
++ /* Allow only frags to be dequeued */
++ set_bit(IEEE80211_TXQ_STOP, &txqi->flags);
++
++ if (!skb_queue_empty(&txqi->frags)) {
++ /* Fragmented Tx is ongoing, wait for it
++ * to finish. Reschedule worker to retry
++ * later.
++ */
++
++ spin_unlock_bh(&fq->lock);
++ spin_unlock_bh(&sta->lock);
++
++ /* Give the task working on the txq a
++ * chance to send out the queued frags
++ */
++ synchronize_net();
++
++ mutex_unlock(&sta->ampdu_mlme.mtx);
++
++ ieee80211_queue_work(&sdata->local->hw,
++ work);
++ return;
++ }
++
++ spin_unlock_bh(&fq->lock);
++ }
++
+ /*
+ * Assign it over to the normal tid_tx array
+ * where it "goes live".
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -1295,7 +1295,8 @@ ieee80211_tx_prepare(struct ieee80211_su
+ if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
+ if (!(tx->flags & IEEE80211_TX_UNICAST) ||
+ skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold ||
+- info->flags & IEEE80211_TX_CTL_AMPDU)
++ (info->flags & IEEE80211_TX_CTL_AMPDU &&
++ !local->ops->wake_tx_queue))
+ info->flags |= IEEE80211_TX_CTL_DONTFRAG;
+ }
+
+@@ -3725,7 +3726,6 @@ struct sk_buff *ieee80211_tx_dequeue(str
+ return NULL;
+
+ begin:
+- skb = NULL;
+ spin_lock(&local->queue_stop_reason_lock);
+ q_stopped = local->queue_stop_reasons[q];
+ spin_unlock(&local->queue_stop_reason_lock);
+@@ -3738,9 +3738,6 @@ begin:
+
+ spin_lock_bh(&fq->lock);
+
+- if (unlikely(test_bit(IEEE80211_TXQ_STOP, &txqi->flags)))
+- goto out;
+-
+ /* Make sure fragments stay together. */
+ skb = __skb_dequeue(&txqi->frags);
+ if (unlikely(skb)) {
+@@ -3750,6 +3747,9 @@ begin:
+ IEEE80211_SKB_CB(skb)->control.flags &=
+ ~IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
+ } else {
++ if (unlikely(test_bit(IEEE80211_TXQ_STOP, &txqi->flags)))
++ goto out;
++
+ skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
+ }
+
+@@ -3800,7 +3800,8 @@ begin:
+ }
+
+ if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
+- info->flags |= IEEE80211_TX_CTL_AMPDU;
++ info->flags |= (IEEE80211_TX_CTL_AMPDU |
++ IEEE80211_TX_CTL_DONTFRAG);
+ else
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
--- /dev/null
+From 4444bc2116aecdcde87dce80373540adc8bd478b Mon Sep 17 00:00:00 2001
+From: Alexander Wetzel <alexander@wetzel-home.de>
+Date: Fri, 30 Dec 2022 13:18:49 +0100
+Subject: wifi: mac80211: Proper mark iTXQs for resumption
+
+From: Alexander Wetzel <alexander@wetzel-home.de>
+
+commit 4444bc2116aecdcde87dce80373540adc8bd478b upstream.
+
+When a running wake_tx_queue() call is aborted due to a hw queue stop,
+the corresponding iTXQ is not always correctly marked for resumption:
+wake_tx_push_queue() can stop the queue run without setting
+@IEEE80211_TXQ_STOP_NETIF_TX.
+
+Without the @IEEE80211_TXQ_STOP_NETIF_TX flag, __ieee80211_wake_txqs()
+will not schedule a new queue run, and the remaining frames in the
+queue stay stuck until another frame is queued to it.
+
+Fix the issue for all drivers - also the ones with custom wake_tx_queue
+callbacks - by moving the logic into ieee80211_tx_dequeue() and dropping
+the redundant @txqs_stopped.
+
+@IEEE80211_TXQ_STOP_NETIF_TX is also renamed to @IEEE80211_TXQ_DIRTY to
+better describe the flag.
+
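+A rough userspace model of the DIRTY-flag handshake introduced here
+(illustrative C, not mac80211 code):
+
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    struct txq { bool dirty; int pending; };
+
+    /* dequeue side: a stopped hw queue marks the txq for a later wake */
+    static int txq_dequeue(struct txq *q, bool hw_queue_stopped)
+    {
+            if (hw_queue_stopped) {
+                    q->dirty = true;        /* IEEE80211_TXQ_DIRTY */
+                    return -1;              /* nothing dequeued */
+            }
+            if (q->pending) {
+                    q->pending--;
+                    return 1;               /* frame handed to the driver */
+            }
+            return 0;
+    }
+
+    /* wake side: only dirty txqs need a fresh queue run */
+    static bool txq_wake(struct txq *q)
+    {
+            if (!q->dirty)
+                    return false;
+            q->dirty = false;
+            return true;
+    }
+
+    int main(void)
+    {
+            struct txq q = { .pending = 2 };
+            txq_dequeue(&q, true);          /* stopped: txq marked dirty */
+            printf("rescheduled=%d\n", txq_wake(&q));
+            return 0;
+    }
+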
+Fixes: c850e31f79f0 ("wifi: mac80211: add internal handler for wake_tx_queue")
+Signed-off-by: Alexander Wetzel <alexander@wetzel-home.de>
+Link: https://lore.kernel.org/r/20221230121850.218810-1-alexander@wetzel-home.de
+Cc: stable@vger.kernel.org
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/mac80211.h | 4 ----
+ net/mac80211/debugfs_sta.c | 5 +++--
+ net/mac80211/driver-ops.h | 2 +-
+ net/mac80211/ieee80211_i.h | 2 +-
+ net/mac80211/tx.c | 23 +++++++++++++++--------
+ net/mac80211/util.c | 20 ++++++--------------
+ 6 files changed, 26 insertions(+), 30 deletions(-)
+
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -1827,8 +1827,6 @@ struct ieee80211_vif_cfg {
+ * @drv_priv: data area for driver use, will always be aligned to
+ * sizeof(void \*).
+ * @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
+- * @txqs_stopped: per AC flag to indicate that intermediate TXQs are stopped,
+- * protected by fq->lock.
+ * @offload_flags: 802.3 -> 802.11 enapsulation offload flags, see
+ * &enum ieee80211_offload_flags.
+ * @mbssid_tx_vif: Pointer to the transmitting interface if MBSSID is enabled.
+@@ -1857,8 +1855,6 @@ struct ieee80211_vif {
+ bool probe_req_reg;
+ bool rx_mcast_action_reg;
+
+- bool txqs_stopped[IEEE80211_NUM_ACS];
+-
+ struct ieee80211_vif *mbssid_tx_vif;
+
+ /* must be last */
+--- a/net/mac80211/debugfs_sta.c
++++ b/net/mac80211/debugfs_sta.c
+@@ -167,7 +167,7 @@ static ssize_t sta_aqm_read(struct file
+ continue;
+ txqi = to_txq_info(sta->sta.txq[i]);
+ p += scnprintf(p, bufsz + buf - p,
+- "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s)\n",
++ "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s%s)\n",
+ txqi->txq.tid,
+ txqi->txq.ac,
+ txqi->tin.backlog_bytes,
+@@ -182,7 +182,8 @@ static ssize_t sta_aqm_read(struct file
+ txqi->flags,
+ test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ? "STOP" : "RUN",
+ test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags) ? " AMPDU" : "",
+- test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : "");
++ test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : "",
++ test_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ? " DIRTY" : "");
+ }
+
+ rcu_read_unlock();
+--- a/net/mac80211/driver-ops.h
++++ b/net/mac80211/driver-ops.h
+@@ -1183,7 +1183,7 @@ static inline void drv_wake_tx_queue(str
+
+ /* In reconfig don't transmit now, but mark for waking later */
+ if (local->in_reconfig) {
+- set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txq->flags);
++ set_bit(IEEE80211_TXQ_DIRTY, &txq->flags);
+ return;
+ }
+
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -836,7 +836,7 @@ enum txq_info_flags {
+ IEEE80211_TXQ_STOP,
+ IEEE80211_TXQ_AMPDU,
+ IEEE80211_TXQ_NO_AMSDU,
+- IEEE80211_TXQ_STOP_NETIF_TX,
++ IEEE80211_TXQ_DIRTY,
+ };
+
+ /**
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -3709,13 +3709,15 @@ struct sk_buff *ieee80211_tx_dequeue(str
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct txq_info *txqi = container_of(txq, struct txq_info, txq);
+ struct ieee80211_hdr *hdr;
+- struct sk_buff *skb = NULL;
+ struct fq *fq = &local->fq;
+ struct fq_tin *tin = &txqi->tin;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_tx_data tx;
++ struct sk_buff *skb;
+ ieee80211_tx_result r;
+ struct ieee80211_vif *vif = txq->vif;
++ int q = vif->hw_queue[txq->ac];
++ bool q_stopped;
+
+ WARN_ON_ONCE(softirq_count() == 0);
+
+@@ -3723,16 +3725,21 @@ struct sk_buff *ieee80211_tx_dequeue(str
+ return NULL;
+
+ begin:
+- spin_lock_bh(&fq->lock);
++ skb = NULL;
++ spin_lock(&local->queue_stop_reason_lock);
++ q_stopped = local->queue_stop_reasons[q];
++ spin_unlock(&local->queue_stop_reason_lock);
++
++ if (unlikely(q_stopped)) {
++ /* mark for waking later */
++ set_bit(IEEE80211_TXQ_DIRTY, &txqi->flags);
++ return NULL;
++ }
+
+- if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ||
+- test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
+- goto out;
++ spin_lock_bh(&fq->lock);
+
+- if (vif->txqs_stopped[txq->ac]) {
+- set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
++ if (unlikely(test_bit(IEEE80211_TXQ_STOP, &txqi->flags)))
+ goto out;
+- }
+
+ /* Make sure fragments stay together. */
+ skb = __skb_dequeue(&txqi->frags);
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -301,8 +301,6 @@ static void __ieee80211_wake_txqs(struct
+ local_bh_disable();
+ spin_lock(&fq->lock);
+
+- sdata->vif.txqs_stopped[ac] = false;
+-
+ if (!test_bit(SDATA_STATE_RUNNING, &sdata->state))
+ goto out;
+
+@@ -324,7 +322,7 @@ static void __ieee80211_wake_txqs(struct
+ if (ac != txq->ac)
+ continue;
+
+- if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX,
++ if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY,
+ &txqi->flags))
+ continue;
+
+@@ -339,7 +337,7 @@ static void __ieee80211_wake_txqs(struct
+
+ txqi = to_txq_info(vif->txq);
+
+- if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags) ||
++ if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ||
+ (ps && atomic_read(&ps->num_sta_ps)) || ac != vif->txq->ac)
+ goto out;
+
+@@ -537,16 +535,10 @@ static void __ieee80211_stop_queue(struc
+ continue;
+
+ for (ac = 0; ac < n_acs; ac++) {
+- if (sdata->vif.hw_queue[ac] == queue ||
+- sdata->vif.cab_queue == queue) {
+- if (!local->ops->wake_tx_queue) {
+- netif_stop_subqueue(sdata->dev, ac);
+- continue;
+- }
+- spin_lock(&local->fq.lock);
+- sdata->vif.txqs_stopped[ac] = true;
+- spin_unlock(&local->fq.lock);
+- }
++ if (!local->ops->wake_tx_queue &&
++ (sdata->vif.hw_queue[ac] == queue ||
++ sdata->vif.cab_queue == queue))
++ netif_stop_subqueue(sdata->dev, ac);
+ }
+ }
+ rcu_read_unlock();