--- /dev/null
+From bbdf904b13a62bb8b1272d92a7dde082dff86fbb Mon Sep 17 00:00:00 2001
+From: Bard Liao <yung-chuan.liao@linux.intel.com>
+Date: Mon, 6 Mar 2023 15:41:01 +0800
+Subject: ALSA: hda: intel-dsp-config: add MTL PCI id
+
+From: Bard Liao <yung-chuan.liao@linux.intel.com>
+
+commit bbdf904b13a62bb8b1272d92a7dde082dff86fbb upstream.
+
+Use SOF as the default audio driver.
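+
+As a rough illustration of how this entry is consumed (not part of the
+upstream commit; the flag values and helper names below are made up for
+the sketch), intel-dsp-config walks a table keyed on the PCI device id
+and, for entries flagged FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, selects SOF
+only when digital mics or SoundWire links are present:
+
+  #include <stddef.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define FLAG_SOF                           (1 << 0)
+  #define FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE (1 << 1)
+
+  struct config_entry {
+          unsigned int flags;
+          uint16_t device;
+  };
+
+  static const struct config_entry config_table[] = {
+          /* Meteorlake-P, as added by this patch */
+          { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+            .device = 0x7e28 },
+  };
+
+  static const struct config_entry *find_config(uint16_t device)
+  {
+          for (size_t i = 0; i < sizeof(config_table) / sizeof(config_table[0]); i++)
+                  if (config_table[i].device == device)
+                          return &config_table[i];
+          return NULL; /* no entry: fall back to the legacy HDA driver */
+  }
+
+  int main(void)
+  {
+          printf("0x7e28 -> %s\n", find_config(0x7e28) ? "SOF" : "legacy HDA");
+          return 0;
+  }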
+
+Signed-off-by: Bard Liao <yung-chuan.liao@linux.intel.com>
+Reviewed-by: Gongjun Song <gongjun.song@intel.com>
+Reviewed-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20230306074101.3906707-1-yung-chuan.liao@linux.intel.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/hda/intel-dsp-config.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/sound/hda/intel-dsp-config.c
++++ b/sound/hda/intel-dsp-config.c
+@@ -472,6 +472,15 @@ static const struct config_entry config_
+ },
+ #endif
+
++/* Meteor Lake */
++#if IS_ENABLED(CONFIG_SND_SOC_SOF_METEORLAKE)
++ /* Meteorlake-P */
++ {
++ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
++ .device = 0x7e28,
++ },
++#endif
++
+ };
+
+ static const struct config_entry *snd_intel_dsp_find_config
--- /dev/null
+From 7bb62340951a9af20235a3bde8c98e2e292915df Mon Sep 17 00:00:00 2001
+From: Jeremy Szu <jeremy.szu@canonical.com>
+Date: Tue, 7 Mar 2023 21:53:16 +0800
+Subject: ALSA: hda/realtek: fix speaker, mute/micmute LEDs not work on a HP platform
+
+From: Jeremy Szu <jeremy.szu@canonical.com>
+
+commit 7bb62340951a9af20235a3bde8c98e2e292915df upstream.
+
+There is an HP platform that needs the ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED
+quirk to make the mic-mute/audio-mute LEDs and the speaker work.
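+
+For reference (a paraphrase, not the exact kernel definition), a quirk
+entry of this kind matches on the PCI *subsystem* vendor/device pair, so
+the fixup applies only to this specific HP board rather than to every
+ALC245 codec:
+
+  struct snd_pci_quirk {
+          unsigned short subvendor; /* PCI subsystem vendor: 0x103c = HP */
+          unsigned short subdevice; /* PCI subsystem device: 0x8b8f = this board */
+          const char *name;         /* "HP" */
+          int value;                /* fixup id to apply on match */
+  };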
+
+Signed-off-by: Jeremy Szu <jeremy.szu@canonical.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20230307135317.37621-1-jeremy.szu@canonical.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9446,6 +9446,7 @@ static const struct snd_pci_quirk alc269
+ SND_PCI_QUIRK(0x103c, 0x8b8a, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b8b, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b8d, "HP", ALC236_FIXUP_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8b8f, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b92, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
--- /dev/null
+From a86e79e3015f5dd8e1b01ccfa49bd5c6e41047a1 Mon Sep 17 00:00:00 2001
+From: "Hamidreza H. Fard" <nitocris@posteo.net>
+Date: Tue, 7 Mar 2023 16:37:41 +0000
+Subject: ALSA: hda/realtek: Fix the speaker output on Samsung Galaxy Book2 Pro
+
+From: Hamidreza H. Fard <nitocris@posteo.net>
+
+commit a86e79e3015f5dd8e1b01ccfa49bd5c6e41047a1 upstream.
+
+Samsung Galaxy Book2 Pro (13" 2022 NP930XED-KA1DE) with codec SSID
+144d:c868 requires the same workaround for enabling the speaker amp
+as other Samsung models with the ALC298 codec.
+
+Signed-off-by: Hamidreza H. Fard <nitocris@posteo.net>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20230307163741.3878-1-nitocris@posteo.net
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9538,6 +9538,7 @@ static const struct snd_pci_quirk alc269
+ SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_AMP),
+ SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ SND_PCI_QUIRK(0x144d, 0xca03, "Samsung Galaxy Book2 Pro 360 (NP930QED)", ALC298_FIXUP_SAMSUNG_AMP),
++ SND_PCI_QUIRK(0x144d, 0xc868, "Samsung Galaxy Book2 Pro (NP930XED)", ALC298_FIXUP_SAMSUNG_AMP),
+ SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
--- /dev/null
+From 211baef0eabf4169ce4f73ebd917749d1a7edd74 Mon Sep 17 00:00:00 2001
+From: Volker Lendecke <vl@samba.org>
+Date: Mon, 13 Mar 2023 16:09:54 +0100
+Subject: cifs: Fix smb2_set_path_size()
+
+From: Volker Lendecke <vl@samba.org>
+
+commit 211baef0eabf4169ce4f73ebd917749d1a7edd74 upstream.
+
+If cifs_get_writable_path() finds a writable file, smb2_compound_op()
+must use that file's FID and not the COMPOUND_FID.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Volker Lendecke <vl@samba.org>
+Reviewed-by: Paulo Alcantara (SUSE) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/cifs/smb2inode.c | 31 ++++++++++++++++++++++++-------
+ 1 file changed, 24 insertions(+), 7 deletions(-)
+
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -234,15 +234,32 @@ static int smb2_compound_op(const unsign
+ size[0] = 8; /* sizeof __le64 */
+ data[0] = ptr;
+
+- rc = SMB2_set_info_init(tcon, server,
+- &rqst[num_rqst], COMPOUND_FID,
+- COMPOUND_FID, current->tgid,
+- FILE_END_OF_FILE_INFORMATION,
+- SMB2_O_INFO_FILE, 0, data, size);
++ if (cfile) {
++ rc = SMB2_set_info_init(tcon, server,
++ &rqst[num_rqst],
++ cfile->fid.persistent_fid,
++ cfile->fid.volatile_fid,
++ current->tgid,
++ FILE_END_OF_FILE_INFORMATION,
++ SMB2_O_INFO_FILE, 0,
++ data, size);
++ } else {
++ rc = SMB2_set_info_init(tcon, server,
++ &rqst[num_rqst],
++ COMPOUND_FID,
++ COMPOUND_FID,
++ current->tgid,
++ FILE_END_OF_FILE_INFORMATION,
++ SMB2_O_INFO_FILE, 0,
++ data, size);
++ if (!rc) {
++ smb2_set_next_command(tcon, &rqst[num_rqst]);
++ smb2_set_related(&rqst[num_rqst]);
++ }
++ }
+ if (rc)
+ goto finished;
+- smb2_set_next_command(tcon, &rqst[num_rqst]);
+- smb2_set_related(&rqst[num_rqst++]);
++ num_rqst++;
+ trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path);
+ break;
+ case SMB2_OP_SET_INFO:
--- /dev/null
+From 396935de145589c8bfe552fa03a5e38604071829 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Tue, 14 Mar 2023 20:32:54 -0300
+Subject: cifs: fix use-after-free bug in refresh_cache_worker()
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit 396935de145589c8bfe552fa03a5e38604071829 upstream.
+
+The UAF bug occurred because we were putting DFS root sessions in
+cifs_umount() while the DFS cache refresher was running.
+
+Make DFS root sessions have the same lifetime as DFS tcons so we can
+avoid the use-after-free bug in the DFS cache refresher and in other
+places that rely on IPCs to get new DFS referrals. Also, get rid of
+mount group handling in the DFS cache as we no longer need it.
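+
+A rough userspace model of the new lifetime rule (names are illustrative,
+not the cifs API): root sessions collected during mount are handed over to
+the tcon and are only put when the tcon itself is freed, so the cache
+refresher, which operates on tcons, can no longer race with cifs_umount()
+dropping them early.
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  struct session { int refcount; };
+
+  struct root_ses {
+          struct root_ses *next;
+          struct session *ses;
+  };
+
+  /* tconInfoFree() analogue: drop every root session the tcon owns */
+  static void put_root_sessions(struct root_ses **head)
+  {
+          while (*head) {
+                  struct root_ses *node = *head;
+
+                  *head = node->next;
+                  node->ses->refcount--; /* cifs_put_smb_ses() */
+                  free(node);
+          }
+  }
+
+  int main(void)
+  {
+          struct session ipc = { .refcount = 1 };
+          struct root_ses *list = malloc(sizeof(*list));
+
+          if (!list)
+                  return 1;
+          list->next = NULL;
+          list->ses = &ipc;
+          ipc.refcount++; /* mount takes its own reference */
+
+          put_root_sessions(&list); /* only at tcon teardown */
+          printf("refcount after tcon free: %d\n", ipc.refcount);
+          return 0;
+  }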
+
+This fixes the below use-after-free bug caught by KASAN:
+
+[ 379.946955] BUG: KASAN: use-after-free in __refresh_tcon.isra.0+0x10b/0xc10 [cifs]
+[ 379.947642] Read of size 8 at addr ffff888018f57030 by task kworker/u4:3/56
+[ 379.948096]
+[ 379.948208] CPU: 0 PID: 56 Comm: kworker/u4:3 Not tainted 6.2.0-rc7-lku #23
+[ 379.948661] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS
+rel-1.16.0-0-gd239552-rebuilt.opensuse.org 04/01/2014
+[ 379.949368] Workqueue: cifs-dfscache refresh_cache_worker [cifs]
+[ 379.949942] Call Trace:
+[ 379.950113] <TASK>
+[ 379.950260] dump_stack_lvl+0x50/0x67
+[ 379.950510] print_report+0x16a/0x48e
+[ 379.950759] ? __virt_addr_valid+0xd8/0x160
+[ 379.951040] ? __phys_addr+0x41/0x80
+[ 379.951285] kasan_report+0xdb/0x110
+[ 379.951533] ? __refresh_tcon.isra.0+0x10b/0xc10 [cifs]
+[ 379.952056] ? __refresh_tcon.isra.0+0x10b/0xc10 [cifs]
+[ 379.952585] __refresh_tcon.isra.0+0x10b/0xc10 [cifs]
+[ 379.953096] ? __pfx___refresh_tcon.isra.0+0x10/0x10 [cifs]
+[ 379.953637] ? __pfx___mutex_lock+0x10/0x10
+[ 379.953915] ? lock_release+0xb6/0x720
+[ 379.954167] ? __pfx_lock_acquire+0x10/0x10
+[ 379.954443] ? refresh_cache_worker+0x34e/0x6d0 [cifs]
+[ 379.954960] ? __pfx_wb_workfn+0x10/0x10
+[ 379.955239] refresh_cache_worker+0x4ad/0x6d0 [cifs]
+[ 379.955755] ? __pfx_refresh_cache_worker+0x10/0x10 [cifs]
+[ 379.956323] ? __pfx_lock_acquired+0x10/0x10
+[ 379.956615] ? read_word_at_a_time+0xe/0x20
+[ 379.956898] ? lockdep_hardirqs_on_prepare+0x12/0x220
+[ 379.957235] process_one_work+0x535/0x990
+[ 379.957509] ? __pfx_process_one_work+0x10/0x10
+[ 379.957812] ? lock_acquired+0xb7/0x5f0
+[ 379.958069] ? __list_add_valid+0x37/0xd0
+[ 379.958341] ? __list_add_valid+0x37/0xd0
+[ 379.958611] worker_thread+0x8e/0x630
+[ 379.958861] ? __pfx_worker_thread+0x10/0x10
+[ 379.959148] kthread+0x17d/0x1b0
+[ 379.959369] ? __pfx_kthread+0x10/0x10
+[ 379.959630] ret_from_fork+0x2c/0x50
+[ 379.959879] </TASK>
+
+Signed-off-by: Paulo Alcantara (SUSE) <pc@manguebit.com>
+Cc: stable@vger.kernel.org # 6.2
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/cifs/cifs_fs_sb.h | 2
+ fs/cifs/cifsglob.h | 3 -
+ fs/cifs/connect.c | 9 +--
+ fs/cifs/dfs.c | 52 ++++++++++++++----
+ fs/cifs/dfs.h | 16 +++++
+ fs/cifs/dfs_cache.c | 140 ---------------------------------------------------
+ fs/cifs/dfs_cache.h | 2
+ fs/cifs/misc.c | 7 ++
+ 8 files changed, 67 insertions(+), 164 deletions(-)
+
+--- a/fs/cifs/cifs_fs_sb.h
++++ b/fs/cifs/cifs_fs_sb.h
+@@ -61,8 +61,6 @@ struct cifs_sb_info {
+ /* only used when CIFS_MOUNT_USE_PREFIX_PATH is set */
+ char *prepath;
+
+- /* randomly generated 128-bit number for indexing dfs mount groups in referral cache */
+- uuid_t dfs_mount_id;
+ /*
+ * Indicate whether serverino option was turned off later
+ * (cifs_autodisable_serverino) in order to match new mounts.
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -1239,6 +1239,7 @@ struct cifs_tcon {
+ /* BB add field for back pointer to sb struct(s)? */
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+ struct list_head ulist; /* cache update list */
++ struct list_head dfs_ses_list;
+ #endif
+ struct delayed_work query_interfaces; /* query interfaces workqueue job */
+ };
+@@ -1767,8 +1768,8 @@ struct cifs_mount_ctx {
+ struct TCP_Server_Info *server;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+- uuid_t mount_id;
+ char *origin_fullpath, *leaf_fullpath;
++ struct list_head dfs_ses_list;
+ };
+
+ static inline void free_dfs_info_param(struct dfs_info3_param *param)
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -3457,7 +3457,8 @@ int cifs_mount(struct cifs_sb_info *cifs
+ bool isdfs;
+ int rc;
+
+- uuid_gen(&mnt_ctx.mount_id);
++ INIT_LIST_HEAD(&mnt_ctx.dfs_ses_list);
++
+ rc = dfs_mount_share(&mnt_ctx, &isdfs);
+ if (rc)
+ goto error;
+@@ -3477,7 +3478,6 @@ int cifs_mount(struct cifs_sb_info *cifs
+ kfree(cifs_sb->prepath);
+ cifs_sb->prepath = ctx->prepath;
+ ctx->prepath = NULL;
+- uuid_copy(&cifs_sb->dfs_mount_id, &mnt_ctx.mount_id);
+
+ out:
+ cifs_try_adding_channels(cifs_sb, mnt_ctx.ses);
+@@ -3489,7 +3489,7 @@ out:
+ return rc;
+
+ error:
+- dfs_cache_put_refsrv_sessions(&mnt_ctx.mount_id);
++ dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list);
+ kfree(mnt_ctx.origin_fullpath);
+ kfree(mnt_ctx.leaf_fullpath);
+ cifs_mount_put_conns(&mnt_ctx);
+@@ -3687,9 +3687,6 @@ cifs_umount(struct cifs_sb_info *cifs_sb
+ spin_unlock(&cifs_sb->tlink_tree_lock);
+
+ kfree(cifs_sb->prepath);
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+- dfs_cache_put_refsrv_sessions(&cifs_sb->dfs_mount_id);
+-#endif
+ call_rcu(&cifs_sb->rcu, delayed_free);
+ }
+
+--- a/fs/cifs/dfs.c
++++ b/fs/cifs/dfs.c
+@@ -99,18 +99,27 @@ static int get_session(struct cifs_mount
+ return rc;
+ }
+
+-static void set_root_ses(struct cifs_mount_ctx *mnt_ctx)
++static int get_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
+ {
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
++ struct dfs_root_ses *root_ses;
+ struct cifs_ses *ses = mnt_ctx->ses;
+
+ if (ses) {
++ root_ses = kmalloc(sizeof(*root_ses), GFP_KERNEL);
++ if (!root_ses)
++ return -ENOMEM;
++
++ INIT_LIST_HEAD(&root_ses->list);
++
+ spin_lock(&cifs_tcp_ses_lock);
+ ses->ses_count++;
+ spin_unlock(&cifs_tcp_ses_lock);
+- dfs_cache_add_refsrv_session(&mnt_ctx->mount_id, ses);
++ root_ses->ses = ses;
++ list_add_tail(&root_ses->list, &mnt_ctx->dfs_ses_list);
+ }
+- ctx->dfs_root_ses = mnt_ctx->ses;
++ ctx->dfs_root_ses = ses;
++ return 0;
+ }
+
+ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, const char *full_path,
+@@ -118,7 +127,8 @@ static int get_dfs_conn(struct cifs_moun
+ {
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ struct dfs_info3_param ref = {};
+- int rc;
++ bool is_refsrv = false;
++ int rc, rc2;
+
+ rc = dfs_cache_get_tgt_referral(ref_path + 1, tit, &ref);
+ if (rc)
+@@ -133,8 +143,7 @@ static int get_dfs_conn(struct cifs_moun
+ if (rc)
+ goto out;
+
+- if (ref.flags & DFSREF_REFERRAL_SERVER)
+- set_root_ses(mnt_ctx);
++ is_refsrv = !!(ref.flags & DFSREF_REFERRAL_SERVER);
+
+ rc = -EREMOTE;
+ if (ref.flags & DFSREF_STORAGE_SERVER) {
+@@ -143,13 +152,17 @@ static int get_dfs_conn(struct cifs_moun
+ goto out;
+
+ /* some servers may not advertise referral capability under ref.flags */
+- if (!(ref.flags & DFSREF_REFERRAL_SERVER) &&
+- is_tcon_dfs(mnt_ctx->tcon))
+- set_root_ses(mnt_ctx);
++ is_refsrv |= is_tcon_dfs(mnt_ctx->tcon);
+
+ rc = cifs_is_path_remote(mnt_ctx);
+ }
+
++ if (rc == -EREMOTE && is_refsrv) {
++ rc2 = get_root_smb_session(mnt_ctx);
++ if (rc2)
++ rc = rc2;
++ }
++
+ out:
+ free_dfs_info_param(&ref);
+ return rc;
+@@ -162,6 +175,7 @@ static int __dfs_mount_share(struct cifs
+ char *ref_path = NULL, *full_path = NULL;
+ struct dfs_cache_tgt_iterator *tit;
+ struct TCP_Server_Info *server;
++ struct cifs_tcon *tcon;
+ char *origin_fullpath = NULL;
+ int num_links = 0;
+ int rc;
+@@ -231,12 +245,22 @@ static int __dfs_mount_share(struct cifs
+
+ if (!rc) {
+ server = mnt_ctx->server;
++ tcon = mnt_ctx->tcon;
+
+ mutex_lock(&server->refpath_lock);
+- server->origin_fullpath = origin_fullpath;
+- server->current_fullpath = server->leaf_fullpath;
++ if (!server->origin_fullpath) {
++ server->origin_fullpath = origin_fullpath;
++ server->current_fullpath = server->leaf_fullpath;
++ origin_fullpath = NULL;
++ }
+ mutex_unlock(&server->refpath_lock);
+- origin_fullpath = NULL;
++
++ if (list_empty(&tcon->dfs_ses_list)) {
++ list_replace_init(&mnt_ctx->dfs_ses_list,
++ &tcon->dfs_ses_list);
++ } else {
++ dfs_put_root_smb_sessions(&mnt_ctx->dfs_ses_list);
++ }
+ }
+
+ out:
+@@ -277,7 +301,9 @@ int dfs_mount_share(struct cifs_mount_ct
+ }
+
+ *isdfs = true;
+- set_root_ses(mnt_ctx);
++ rc = get_root_smb_session(mnt_ctx);
++ if (rc)
++ return rc;
+
+ return __dfs_mount_share(mnt_ctx);
+ }
+--- a/fs/cifs/dfs.h
++++ b/fs/cifs/dfs.h
+@@ -10,6 +10,11 @@
+ #include "fs_context.h"
+ #include "cifs_unicode.h"
+
++struct dfs_root_ses {
++ struct list_head list;
++ struct cifs_ses *ses;
++};
++
+ int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_param *ref,
+ struct smb3_fs_context *ctx);
+ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs);
+@@ -44,4 +49,15 @@ static inline char *dfs_get_automount_de
+ true);
+ }
+
++static inline void dfs_put_root_smb_sessions(struct list_head *head)
++{
++ struct dfs_root_ses *root, *tmp;
++
++ list_for_each_entry_safe(root, tmp, head, list) {
++ list_del_init(&root->list);
++ cifs_put_smb_ses(root->ses);
++ kfree(root);
++ }
++}
++
+ #endif /* _CIFS_DFS_H */
+--- a/fs/cifs/dfs_cache.c
++++ b/fs/cifs/dfs_cache.c
+@@ -49,17 +49,6 @@ struct cache_entry {
+ struct cache_dfs_tgt *tgthint;
+ };
+
+-/* List of referral server sessions per dfs mount */
+-struct mount_group {
+- struct list_head list;
+- uuid_t id;
+- struct cifs_ses *sessions[CACHE_MAX_ENTRIES];
+- int num_sessions;
+- spinlock_t lock;
+- struct list_head refresh_list;
+- struct kref refcount;
+-};
+-
+ static struct kmem_cache *cache_slab __read_mostly;
+ static struct workqueue_struct *dfscache_wq __read_mostly;
+
+@@ -76,85 +65,10 @@ static atomic_t cache_count;
+ static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
+ static DECLARE_RWSEM(htable_rw_lock);
+
+-static LIST_HEAD(mount_group_list);
+-static DEFINE_MUTEX(mount_group_list_lock);
+-
+ static void refresh_cache_worker(struct work_struct *work);
+
+ static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
+
+-static void __mount_group_release(struct mount_group *mg)
+-{
+- int i;
+-
+- for (i = 0; i < mg->num_sessions; i++)
+- cifs_put_smb_ses(mg->sessions[i]);
+- kfree(mg);
+-}
+-
+-static void mount_group_release(struct kref *kref)
+-{
+- struct mount_group *mg = container_of(kref, struct mount_group, refcount);
+-
+- mutex_lock(&mount_group_list_lock);
+- list_del(&mg->list);
+- mutex_unlock(&mount_group_list_lock);
+- __mount_group_release(mg);
+-}
+-
+-static struct mount_group *find_mount_group_locked(const uuid_t *id)
+-{
+- struct mount_group *mg;
+-
+- list_for_each_entry(mg, &mount_group_list, list) {
+- if (uuid_equal(&mg->id, id))
+- return mg;
+- }
+- return ERR_PTR(-ENOENT);
+-}
+-
+-static struct mount_group *__get_mount_group_locked(const uuid_t *id)
+-{
+- struct mount_group *mg;
+-
+- mg = find_mount_group_locked(id);
+- if (!IS_ERR(mg))
+- return mg;
+-
+- mg = kmalloc(sizeof(*mg), GFP_KERNEL);
+- if (!mg)
+- return ERR_PTR(-ENOMEM);
+- kref_init(&mg->refcount);
+- uuid_copy(&mg->id, id);
+- mg->num_sessions = 0;
+- spin_lock_init(&mg->lock);
+- list_add(&mg->list, &mount_group_list);
+- return mg;
+-}
+-
+-static struct mount_group *get_mount_group(const uuid_t *id)
+-{
+- struct mount_group *mg;
+-
+- mutex_lock(&mount_group_list_lock);
+- mg = __get_mount_group_locked(id);
+- if (!IS_ERR(mg))
+- kref_get(&mg->refcount);
+- mutex_unlock(&mount_group_list_lock);
+-
+- return mg;
+-}
+-
+-static void free_mount_group_list(void)
+-{
+- struct mount_group *mg, *tmp_mg;
+-
+- list_for_each_entry_safe(mg, tmp_mg, &mount_group_list, list) {
+- list_del_init(&mg->list);
+- __mount_group_release(mg);
+- }
+-}
+-
+ /**
+ * dfs_cache_canonical_path - get a canonical DFS path
+ *
+@@ -704,7 +618,6 @@ void dfs_cache_destroy(void)
+ {
+ cancel_delayed_work_sync(&refresh_task);
+ unload_nls(cache_cp);
+- free_mount_group_list();
+ flush_cache_ents();
+ kmem_cache_destroy(cache_slab);
+ destroy_workqueue(dfscache_wq);
+@@ -1111,54 +1024,6 @@ out_unlock:
+ return rc;
+ }
+
+-/**
+- * dfs_cache_add_refsrv_session - add SMB session of referral server
+- *
+- * @mount_id: mount group uuid to lookup.
+- * @ses: reference counted SMB session of referral server.
+- */
+-void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses)
+-{
+- struct mount_group *mg;
+-
+- if (WARN_ON_ONCE(!mount_id || uuid_is_null(mount_id) || !ses))
+- return;
+-
+- mg = get_mount_group(mount_id);
+- if (WARN_ON_ONCE(IS_ERR(mg)))
+- return;
+-
+- spin_lock(&mg->lock);
+- if (mg->num_sessions < ARRAY_SIZE(mg->sessions))
+- mg->sessions[mg->num_sessions++] = ses;
+- spin_unlock(&mg->lock);
+- kref_put(&mg->refcount, mount_group_release);
+-}
+-
+-/**
+- * dfs_cache_put_refsrv_sessions - put all referral server sessions
+- *
+- * Put all SMB sessions from the given mount group id.
+- *
+- * @mount_id: mount group uuid to lookup.
+- */
+-void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id)
+-{
+- struct mount_group *mg;
+-
+- if (!mount_id || uuid_is_null(mount_id))
+- return;
+-
+- mutex_lock(&mount_group_list_lock);
+- mg = find_mount_group_locked(mount_id);
+- if (IS_ERR(mg)) {
+- mutex_unlock(&mount_group_list_lock);
+- return;
+- }
+- mutex_unlock(&mount_group_list_lock);
+- kref_put(&mg->refcount, mount_group_release);
+-}
+-
+ /* Extract share from DFS target and return a pointer to prefix path or NULL */
+ static const char *parse_target_share(const char *target, char **share)
+ {
+@@ -1384,11 +1249,6 @@ int dfs_cache_remount_fs(struct cifs_sb_
+ cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
+ return 0;
+ }
+-
+- if (uuid_is_null(&cifs_sb->dfs_mount_id)) {
+- cifs_dbg(FYI, "%s: no dfs mount group id\n", __func__);
+- return -EINVAL;
+- }
+ /*
+ * After reconnecting to a different server, unique ids won't match anymore, so we disable
+ * serverino. This prevents dentry revalidation to think the dentry are stale (ESTALE).
+--- a/fs/cifs/dfs_cache.h
++++ b/fs/cifs/dfs_cache.h
+@@ -40,8 +40,6 @@ int dfs_cache_get_tgt_referral(const cha
+ struct dfs_info3_param *ref);
+ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
+ char **prefix);
+-void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id);
+-void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses);
+ char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap);
+ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb);
+
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -22,6 +22,7 @@
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+ #include "dns_resolve.h"
+ #include "dfs_cache.h"
++#include "dfs.h"
+ #endif
+ #include "fs_context.h"
+ #include "cached_dir.h"
+@@ -134,6 +135,9 @@ tconInfoAlloc(void)
+ spin_lock_init(&ret_buf->stat_lock);
+ atomic_set(&ret_buf->num_local_opens, 0);
+ atomic_set(&ret_buf->num_remote_opens, 0);
++#ifdef CONFIG_CIFS_DFS_UPCALL
++ INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
++#endif
+
+ return ret_buf;
+ }
+@@ -149,6 +153,9 @@ tconInfoFree(struct cifs_tcon *tcon)
+ atomic_dec(&tconInfoAllocCount);
+ kfree(tcon->nativeFileSystem);
+ kfree_sensitive(tcon->password);
++#ifdef CONFIG_CIFS_DFS_UPCALL
++ dfs_put_root_smb_sessions(&tcon->dfs_ses_list);
++#endif
+ kfree(tcon);
+ }
+
--- /dev/null
+From f446a630802f154ef0087771683bd4f8e9d08384 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Tue, 14 Mar 2023 20:32:55 -0300
+Subject: cifs: return DFS root session id in DebugData
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit f446a630802f154ef0087771683bd4f8e9d08384 upstream.
+
+Return the DFS root session id in /proc/fs/cifs/DebugData to make it
+easier to track which IPC tcon was used to get new DFS referrals for a
+specific connection, which aids in debugging.
+
+A sample of the output would be
+
+ Sessions:
+ 1) Address: 192.168.1.13 Uses: 1 Capability: 0x300067 Session Status: 1
+ Security type: RawNTLMSSP SessionId: 0xd80000000009
+ User: 0 Cred User: 0
+ DFS root session id: 0x128006c000035
+
+Signed-off-by: Paulo Alcantara (SUSE) <pc@manguebit.com>
+Cc: stable@vger.kernel.org # 6.2
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/cifs/cifs_debug.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -419,6 +419,11 @@ skip_rdma:
+ from_kuid(&init_user_ns, ses->linux_uid),
+ from_kuid(&init_user_ns, ses->cred_uid));
+
++ if (ses->dfs_root_ses) {
++ seq_printf(m, "\n\tDFS root session id: 0x%llx",
++ ses->dfs_root_ses->Suid);
++ }
++
+ spin_lock(&ses->chan_lock);
+ if (CIFS_CHAN_NEEDS_RECONNECT(ses, 0))
+ seq_puts(m, "\tPrimary channel: DISCONNECTED ");
--- /dev/null
+From b56bce502f55505a97e381d546ee881928183126 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Tue, 14 Mar 2023 20:32:53 -0300
+Subject: cifs: set DFS root session in cifs_get_smb_ses()
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit b56bce502f55505a97e381d546ee881928183126 upstream.
+
+Set the DFS root session pointer earlier when creating a new SMB
+session to prevent racing with smb2_reconnect(), cifs_reconnect_tcon()
+and the DFS cache refresher.
+
+Signed-off-by: Paulo Alcantara (SUSE) <pc@manguebit.com>
+Cc: stable@vger.kernel.org # 6.2
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/cifs/cifs_dfs_ref.c | 1 +
+ fs/cifs/cifsglob.h | 1 -
+ fs/cifs/connect.c | 1 +
+ fs/cifs/dfs.c | 19 ++++++++-----------
+ fs/cifs/dfs.h | 3 ++-
+ fs/cifs/fs_context.h | 1 +
+ 6 files changed, 13 insertions(+), 13 deletions(-)
+
+--- a/fs/cifs/cifs_dfs_ref.c
++++ b/fs/cifs/cifs_dfs_ref.c
+@@ -179,6 +179,7 @@ static struct vfsmount *cifs_dfs_do_auto
+ tmp.source = full_path;
+ tmp.leaf_fullpath = NULL;
+ tmp.UNC = tmp.prepath = NULL;
++ tmp.dfs_root_ses = NULL;
+
+ rc = smb3_fs_context_dup(ctx, &tmp);
+ if (rc) {
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -1767,7 +1767,6 @@ struct cifs_mount_ctx {
+ struct TCP_Server_Info *server;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+- struct cifs_ses *root_ses;
+ uuid_t mount_id;
+ char *origin_fullpath, *leaf_fullpath;
+ };
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2278,6 +2278,7 @@ cifs_get_smb_ses(struct TCP_Server_Info
+ * need to lock before changing something in the session.
+ */
+ spin_lock(&cifs_tcp_ses_lock);
++ ses->dfs_root_ses = ctx->dfs_root_ses;
+ list_add(&ses->smb_ses_list, &server->smb_ses_list);
+ spin_unlock(&cifs_tcp_ses_lock);
+
+--- a/fs/cifs/dfs.c
++++ b/fs/cifs/dfs.c
+@@ -95,25 +95,22 @@ static int get_session(struct cifs_mount
+ ctx->leaf_fullpath = (char *)full_path;
+ rc = cifs_mount_get_session(mnt_ctx);
+ ctx->leaf_fullpath = NULL;
+- if (!rc) {
+- struct cifs_ses *ses = mnt_ctx->ses;
+
+- mutex_lock(&ses->session_mutex);
+- ses->dfs_root_ses = mnt_ctx->root_ses;
+- mutex_unlock(&ses->session_mutex);
+- }
+ return rc;
+ }
+
+ static void set_root_ses(struct cifs_mount_ctx *mnt_ctx)
+ {
+- if (mnt_ctx->ses) {
++ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
++ struct cifs_ses *ses = mnt_ctx->ses;
++
++ if (ses) {
+ spin_lock(&cifs_tcp_ses_lock);
+- mnt_ctx->ses->ses_count++;
++ ses->ses_count++;
+ spin_unlock(&cifs_tcp_ses_lock);
+- dfs_cache_add_refsrv_session(&mnt_ctx->mount_id, mnt_ctx->ses);
++ dfs_cache_add_refsrv_session(&mnt_ctx->mount_id, ses);
+ }
+- mnt_ctx->root_ses = mnt_ctx->ses;
++ ctx->dfs_root_ses = mnt_ctx->ses;
+ }
+
+ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, const char *full_path,
+@@ -260,7 +257,7 @@ int dfs_mount_share(struct cifs_mount_ct
+ rc = get_session(mnt_ctx, NULL);
+ if (rc)
+ return rc;
+- mnt_ctx->root_ses = mnt_ctx->ses;
++ ctx->dfs_root_ses = mnt_ctx->ses;
+ /*
+ * If called with 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally
+ * try to get an DFS referral (even cached) to determine whether it is an DFS mount.
+--- a/fs/cifs/dfs.h
++++ b/fs/cifs/dfs.h
+@@ -22,9 +22,10 @@ static inline char *dfs_get_path(struct
+ static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, const char *path,
+ struct dfs_info3_param *ref, struct dfs_cache_tgt_list *tl)
+ {
++ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+
+- return dfs_cache_find(mnt_ctx->xid, mnt_ctx->root_ses, cifs_sb->local_nls,
++ return dfs_cache_find(mnt_ctx->xid, ctx->dfs_root_ses, cifs_sb->local_nls,
+ cifs_remap(cifs_sb), path, ref, tl);
+ }
+
+--- a/fs/cifs/fs_context.h
++++ b/fs/cifs/fs_context.h
+@@ -265,6 +265,7 @@ struct smb3_fs_context {
+ bool rootfs:1; /* if it's a SMB root file system */
+ bool witness:1; /* use witness protocol */
+ char *leaf_fullpath;
++ struct cifs_ses *dfs_root_ses;
+ };
+
+ extern const struct fs_parameter_spec smb3_fs_parameters[];
--- /dev/null
+From 6284e46bdd47743a064fe6ac834a7ac05b1fd206 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Tue, 14 Mar 2023 20:32:56 -0300
+Subject: cifs: use DFS root session instead of tcon ses
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit 6284e46bdd47743a064fe6ac834a7ac05b1fd206 upstream.
+
+Use the DFS root session whenever possible to get new DFS referrals,
+otherwise we might end up with an IPC tcon (tcon->ses->tcon_ipc) that
+doesn't respond to them. It should be safe to access
+@ses->dfs_root_ses directly in cifs_inval_name_dfs_link_error() as it
+has the same lifetime as @tcon.
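+
+For context (an assumption on our part -- the real definition lives in
+fs/cifs/cifsglob.h and may differ), the helper used below presumably
+resolves to the DFS root session when one is set and falls back to the
+session itself otherwise, along the lines of:
+
+  #define CIFS_DFS_ROOT_SES(ses) ((ses)->dfs_root_ses ?: (ses))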
+
+Signed-off-by: Paulo Alcantara (SUSE) <pc@manguebit.com>
+Cc: stable@vger.kernel.org # 6.2
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/cifs/misc.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -1364,6 +1364,7 @@ int cifs_inval_name_dfs_link_error(const
+ * removing cached DFS targets that the client would eventually
+ * need during failover.
+ */
++ ses = CIFS_DFS_ROOT_SES(ses);
+ if (ses->server->ops->get_dfs_refer &&
+ !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs,
+ &num_refs, cifs_sb->local_nls,
--- /dev/null
+From 7304ee979b6b6422f41a1312391a5e505fc29ccd Mon Sep 17 00:00:00 2001
+From: Ayush Gupta <ayugupta@amd.com>
+Date: Thu, 2 Mar 2023 09:58:05 -0500
+Subject: drm/amd/display: disconnect MPCC only on OTG change
+
+From: Ayush Gupta <ayugupta@amd.com>
+
+commit 7304ee979b6b6422f41a1312391a5e505fc29ccd upstream.
+
+[Why]
+Frame drops are observed while playing VP9 and AV1 10-bit video at
+8k resolution using VSR while the playback controls disappear and
+reappear.
+
+[How]
+ODM 2-to-1 combine is now disabled for resolutions of 5k or greater on VSR.
+
+Cc: stable@vger.kernel.org
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Reviewed-by: Alvin Lee <Alvin.Lee2@amd.com>
+Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
+Signed-off-by: Ayush Gupta <ayugupta@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+@@ -1915,6 +1915,7 @@ int dcn32_populate_dml_pipes_from_contex
+ bool subvp_in_use = false;
+ uint8_t is_pipe_split_expected[MAX_PIPES] = {0};
+ struct dc_crtc_timing *timing;
++ bool vsr_odm_support = false;
+
+ dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+
+@@ -1932,12 +1933,15 @@ int dcn32_populate_dml_pipes_from_contex
+ timing = &pipe->stream->timing;
+
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
++ vsr_odm_support = (res_ctx->pipe_ctx[i].stream->src.width >= 5120 &&
++ res_ctx->pipe_ctx[i].stream->src.width > res_ctx->pipe_ctx[i].stream->dst.width);
+ if (context->stream_count == 1 &&
+ context->stream_status[0].plane_count == 1 &&
+ !dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&
+ is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) &&
+ pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ &&
+- dc->debug.enable_single_display_2to1_odm_policy) {
++ dc->debug.enable_single_display_2to1_odm_policy &&
++ !vsr_odm_support) { //excluding 2to1 ODM combine on >= 5k vsr
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+ }
+ pipe_cnt++;
--- /dev/null
+From 56574f89dbd84004c3fd6485bcaafb5aa9b8be14 Mon Sep 17 00:00:00 2001
+From: Wesley Chalmers <Wesley.Chalmers@amd.com>
+Date: Thu, 3 Nov 2022 22:29:31 -0400
+Subject: drm/amd/display: Do not set DRR on pipe Commit
+
+From: Wesley Chalmers <Wesley.Chalmers@amd.com>
+
+commit 56574f89dbd84004c3fd6485bcaafb5aa9b8be14 upstream.
+
+[WHY]
+Writing to DRR registers such as OTG_V_TOTAL_MIN on the same frame as a
+pipe commit can cause underflow.
+
+Cc: stable@vger.kernel.org
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Reviewed-by: Jun Lei <Jun.Lei@amd.com>
+Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
+Signed-off-by: Wesley Chalmers <Wesley.Chalmers@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+@@ -992,8 +992,5 @@ void dcn30_prepare_bandwidth(struct dc *
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
+
+ dcn20_prepare_bandwidth(dc, context);
+-
+- dc_dmub_srv_p_state_delegate(dc,
+- context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context);
+ }
+
--- /dev/null
+From 751281c55579f0cb0e56c9797d4663f689909681 Mon Sep 17 00:00:00 2001
+From: Benjamin Cheng <ben@bcheng.me>
+Date: Sun, 12 Mar 2023 20:47:39 -0400
+Subject: drm/amd/display: Write to correct dirty_rect
+
+From: Benjamin Cheng <ben@bcheng.me>
+
+commit 751281c55579f0cb0e56c9797d4663f689909681 upstream.
+
+When FB_DAMAGE_CLIPS are provided in a non-MPO scenario, the loop does
+not use the counter i. This causes fill_dc_dirty_rect() to always
+fill dirty_rects[0], causing graphical artifacts when a damage-clip
+aware DRM client sends more than one damage clip.
+
+Instead, use flip_addrs->dirty_rect_count, which is incremented by
+fill_dc_dirty_rect() on a successful fill.
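+
+A userspace model of the bug class (illustrative, not the DC API):
+iterating with one variable while writing through another index that
+never advances funnels every clip into slot 0, whereas advancing the
+write index on each fill spreads the clips out as intended.
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          int dirty_rects[4] = { 0 };
+          int count = 0;
+
+          for (int clip = 1; count < 3; clip++) {
+                  dirty_rects[count] = clip; /* buggy form always wrote [0] */
+                  count++;                   /* the fill advances the index */
+          }
+          for (int i = 0; i < 4; i++)
+                  printf("%d ", dirty_rects[i]); /* 1 2 3 0 */
+          printf("\n");
+          return 0;
+  }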
+
+Fixes: 30ebe41582d1 ("drm/amd/display: add FB_DAMAGE_CLIPS support")
+Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/2453
+Signed-off-by: Benjamin Cheng <ben@bcheng.me>
+Signed-off-by: Hamza Mahfooz <hamza.mahfooz@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org # 6.1.x
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4985,9 +4985,9 @@ static void fill_dc_dirty_rects(struct d
+
+ for (; flip_addrs->dirty_rect_count < num_clips; clips++)
+ fill_dc_dirty_rect(new_plane_state->plane,
+- &dirty_rects[i], clips->x1,
+- clips->y1, clips->x2 - clips->x1,
+- clips->y2 - clips->y1,
++ &dirty_rects[flip_addrs->dirty_rect_count],
++ clips->x1, clips->y1,
++ clips->x2 - clips->x1, clips->y2 - clips->y1,
+ &flip_addrs->dirty_rect_count,
+ false);
+ return;
--- /dev/null
+From ab9bdb1213b4b40942af6a383f555d0c14874c1b Mon Sep 17 00:00:00 2001
+From: Tim Huang <tim.huang@amd.com>
+Date: Wed, 1 Mar 2023 10:53:03 +0800
+Subject: drm/amd/pm: bump SMU 13.0.4 driver_if header version
+
+From: Tim Huang <tim.huang@amd.com>
+
+commit ab9bdb1213b4b40942af6a383f555d0c14874c1b upstream.
+
+Align the SMU driver interface version with PMFW to
+suppress the version mismatch message on driver loading.
+
+Signed-off-by: Tim Huang <tim.huang@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org # 6.1.x
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h | 4 ++--
+ drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
+@@ -27,7 +27,7 @@
+ // *** IMPORTANT ***
+ // SMU TEAM: Always increment the interface version if
+ // any structure is changed in this file
+-#define PMFW_DRIVER_IF_VERSION 7
++#define PMFW_DRIVER_IF_VERSION 8
+
+ typedef struct {
+ int32_t value;
+@@ -198,7 +198,7 @@ typedef struct {
+ uint16_t SkinTemp;
+ uint16_t DeviceState;
+ uint16_t CurTemp; //[centi-Celsius]
+- uint16_t spare2;
++ uint16_t FilterAlphaValue;
+
+ uint16_t AverageGfxclkFrequency;
+ uint16_t AverageFclkFrequency;
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+@@ -29,7 +29,7 @@
+ #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
+ #define SMU13_DRIVER_IF_VERSION_ALDE 0x08
+ #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37
+-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
++#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x08
+ #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
+ #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
+ #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37
--- /dev/null
+From a9386ee9681585794dbab95d4ce6826f73d19af6 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?B=C5=82a=C5=BCej=20Szczygie=C5=82?= <mumei6102@gmail.com>
+Date: Sun, 5 Mar 2023 00:44:31 +0100
+Subject: drm/amd/pm: Fix sienna cichlid incorrect OD voltage after resume
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Błażej Szczygieł <mumei6102@gmail.com>
+
+commit a9386ee9681585794dbab95d4ce6826f73d19af6 upstream.
+
+Always setup overdrive tables after resume. Preserve only some
+user-defined settings in user_overdrive_table if they're set.
+
+Copy restored user_overdrive_table into od_table to get correct
+values.
+
+On cold boot, BTC is triggered and GfxVfCurve is calibrated, yielding
+VfCurve settings (a). On resume, BTC is triggered again and GfxVfCurve
+is recalibrated, and the VfCurve settings (b) obtained may differ from
+those of the cold boot. So if, after suspend, we reuse the VfCurve
+settings (a) obtained on cold boot, we can run into discrepancies.
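+
+A userspace model of the resume path (field names are illustrative, not
+the SMU interface): re-read the freshly calibrated defaults, then
+re-apply only the fields the user had tuned before suspend.
+
+  #include <stdio.h>
+  #include <string.h>
+
+  struct od_table { int gfx_fmin, gfx_fmax, vddgfx_offset; };
+
+  int main(void)
+  {
+          struct od_table boot = { 500, 2500, 0 };   /* post-BTC defaults */
+          struct od_table user = { 500, 2100, -25 }; /* user-customized */
+          struct od_table bak, od;
+
+          /* resume: preserve user values across the re-initialization */
+          memcpy(&bak, &user, sizeof(user));
+          memcpy(&user, &boot, sizeof(boot));
+          user.gfx_fmax = bak.gfx_fmax;
+          user.vddgfx_offset = bak.vddgfx_offset;
+
+          memcpy(&od, &user, sizeof(user)); /* od_table mirrors the result */
+          printf("fmax=%d offset=%d\n", od.gfx_fmax, od.vddgfx_offset);
+          return 0;
+  }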
+
+Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1897
+Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/2276
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Błażej Szczygieł <mumei6102@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 43 ++++++++++++----
+ 1 file changed, 33 insertions(+), 10 deletions(-)
+
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -2143,16 +2143,9 @@ static int sienna_cichlid_set_default_od
+ (OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
+ OverDriveTable_t *user_od_table =
+ (OverDriveTable_t *)smu->smu_table.user_overdrive_table;
++ OverDriveTable_t user_od_table_bak;
+ int ret = 0;
+
+- /*
+- * For S3/S4/Runpm resume, no need to setup those overdrive tables again as
+- * - either they already have the default OD settings got during cold bootup
+- * - or they have some user customized OD settings which cannot be overwritten
+- */
+- if (smu->adev->in_suspend)
+- return 0;
+-
+ ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE,
+ 0, (void *)boot_od_table, false);
+ if (ret) {
+@@ -2163,7 +2156,23 @@ static int sienna_cichlid_set_default_od
+ sienna_cichlid_dump_od_table(smu, boot_od_table);
+
+ memcpy(od_table, boot_od_table, sizeof(OverDriveTable_t));
+- memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
++
++ /*
++ * For S3/S4/Runpm resume, we need to setup those overdrive tables again,
++ * but we have to preserve user defined values in "user_od_table".
++ */
++ if (!smu->adev->in_suspend) {
++ memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
++ smu->user_dpm_profile.user_od = false;
++ } else if (smu->user_dpm_profile.user_od) {
++ memcpy(&user_od_table_bak, user_od_table, sizeof(OverDriveTable_t));
++ memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
++ user_od_table->GfxclkFmin = user_od_table_bak.GfxclkFmin;
++ user_od_table->GfxclkFmax = user_od_table_bak.GfxclkFmax;
++ user_od_table->UclkFmin = user_od_table_bak.UclkFmin;
++ user_od_table->UclkFmax = user_od_table_bak.UclkFmax;
++ user_od_table->VddGfxOffset = user_od_table_bak.VddGfxOffset;
++ }
+
+ return 0;
+ }
+@@ -2373,6 +2382,20 @@ static int sienna_cichlid_od_edit_dpm_ta
+ return ret;
+ }
+
++static int sienna_cichlid_restore_user_od_settings(struct smu_context *smu)
++{
++ struct smu_table_context *table_context = &smu->smu_table;
++ OverDriveTable_t *od_table = table_context->overdrive_table;
++ OverDriveTable_t *user_od_table = table_context->user_overdrive_table;
++ int res;
++
++ res = smu_v11_0_restore_user_od_settings(smu);
++ if (res == 0)
++ memcpy(od_table, user_od_table, sizeof(OverDriveTable_t));
++
++ return res;
++}
++
+ static int sienna_cichlid_run_btc(struct smu_context *smu)
+ {
+ int res;
+@@ -4400,7 +4423,7 @@ static const struct pptable_funcs sienna
+ .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .set_default_od_settings = sienna_cichlid_set_default_od_settings,
+ .od_edit_dpm_table = sienna_cichlid_od_edit_dpm_table,
+- .restore_user_od_settings = smu_v11_0_restore_user_od_settings,
++ .restore_user_od_settings = sienna_cichlid_restore_user_od_settings,
+ .run_btc = sienna_cichlid_run_btc,
+ .set_power_source = smu_v11_0_set_power_source,
+ .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
--- /dev/null
+From f3921a9a641483784448fb982b2eb738b383d9b9 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Mon, 13 Mar 2023 20:03:08 -0400
+Subject: drm/amdgpu: Don't resume IOMMU after incomplete init
+
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+
+commit f3921a9a641483784448fb982b2eb738b383d9b9 upstream.
+
+Check kfd->init_complete in kgd2kfd_iommu_resume, consistent with other
+kgd2kfd calls. This should fix IOMMU errors on resume from suspend when
+KFD IOMMU initialization failed.
+
+Reported-by: Matt Fagnani <matt.fagnani@bell.net>
+Link: https://lore.kernel.org/r/4a3b225c-2ffd-e758-4de1-447375e34cad@bell.net/
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=217170
+Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2454
+Cc: Vasant Hegde <vasant.hegde@amd.com>
+Cc: Linux regression tracking (Thorsten Leemhuis) <regressions@leemhuis.info>
+Cc: stable@vger.kernel.org
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Tested-by: Matt Fagnani <matt.fagnani@bell.net>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -59,6 +59,7 @@ static int kfd_gtt_sa_init(struct kfd_de
+ unsigned int chunk_size);
+ static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
+
++static int kfd_resume_iommu(struct kfd_dev *kfd);
+ static int kfd_resume(struct kfd_dev *kfd);
+
+ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
+@@ -635,7 +636,7 @@ bool kgd2kfd_device_init(struct kfd_dev
+
+ svm_migrate_init(kfd->adev);
+
+- if (kgd2kfd_resume_iommu(kfd))
++ if (kfd_resume_iommu(kfd))
+ goto device_iommu_error;
+
+ if (kfd_resume(kfd))
+@@ -784,6 +785,14 @@ int kgd2kfd_resume(struct kfd_dev *kfd,
+
+ int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
+ {
++ if (!kfd->init_complete)
++ return 0;
++
++ return kfd_resume_iommu(kfd);
++}
++
++static int kfd_resume_iommu(struct kfd_dev *kfd)
++{
+ int err = 0;
+
+ err = kfd_iommu_resume(kfd);
--- /dev/null
+From 542a56e8eb4467ae654eefab31ff194569db39cd Mon Sep 17 00:00:00 2001
+From: "Guilherme G. Piccoli" <gpiccoli@igalia.com>
+Date: Sun, 12 Mar 2023 13:51:00 -0300
+Subject: drm/amdgpu/vcn: Disable indirect SRAM on Vangogh broken BIOSes
+
+From: Guilherme G. Piccoli <gpiccoli@igalia.com>
+
+commit 542a56e8eb4467ae654eefab31ff194569db39cd upstream.
+
+The VCN firmware loading path enables the indirect SRAM mode if it's
+advertised as supported. We might have some cases of FW issues that
+prevent this mode from working properly though, ending up in a failed
+probe. An example, observed on the Steam Deck, is below:
+
+[...]
+[drm] failed to load ucode VCN0_RAM(0x3A)
+[drm] psp gfx command LOAD_IP_FW(0x6) failed and response status is (0xFFFF0000)
+amdgpu 0000:04:00.0: [drm:amdgpu_ring_test_helper [amdgpu]] *ERROR* ring vcn_dec_0 test failed (-110)
+[drm:amdgpu_device_init.cold [amdgpu]] *ERROR* hw_init of IP block <vcn_v3_0> failed -110
+amdgpu 0000:04:00.0: amdgpu: amdgpu_device_ip_init failed
+amdgpu 0000:04:00.0: amdgpu: Fatal error during GPU init
+[...]
+
+Disabling the VCN block circumvents this, but it's a very invasive
+workaround that turns off the entire feature. So, let's add a quirk
+on VCN loading that checks for known problematic BIOSes on Vangogh,
+so we can proactively disable the indirect SRAM mode, allowing the
+HW to probe properly and the VCN IP block to work fine.
+
+Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/2385
+Fixes: 82132ecc5432 ("drm/amdgpu: enable Vangogh VCN indirect sram mode")
+Cc: stable@vger.kernel.org
+Cc: James Zhu <James.Zhu@amd.com>
+Cc: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Guilherme G. Piccoli <gpiccoli@igalia.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -26,6 +26,7 @@
+
+ #include <linux/firmware.h>
+ #include <linux/module.h>
++#include <linux/dmi.h>
+ #include <linux/pci.h>
+ #include <linux/debugfs.h>
+ #include <drm/drm_drv.h>
+@@ -222,6 +223,24 @@ int amdgpu_vcn_sw_init(struct amdgpu_dev
+ return r;
+ }
+
++ /*
++ * Some Steam Deck's BIOS versions are incompatible with the
++ * indirect SRAM mode, leading to amdgpu being unable to get
++ * properly probed (and even potentially crashing the kernel).
++ * Hence, check for these versions here - notice this is
++ * restricted to Vangogh (Deck's APU).
++ */
++ if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 2)) {
++ const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
++
++ if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
++ !strncmp("F7A0114", bios_ver, 7))) {
++ adev->vcn.indirect_sram = false;
++ dev_info(adev->dev,
++ "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
++ }
++ }
++
+ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
+
--- /dev/null
+From 4d8457fe0eb9c80ff7795cf8a30962128b71d853 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan+linaro@kernel.org>
+Date: Thu, 2 Mar 2023 08:47:04 +0100
+Subject: drm/edid: fix info leak when failing to get panel id
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+commit 4d8457fe0eb9c80ff7795cf8a30962128b71d853 upstream.
+
+Make sure to clear the transfer buffer before fetching the EDID to
+avoid leaking slab data to the logs on errors that leave the buffer
+unchanged.
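+
+A userspace analogue of the leak (illustrative): dumping a buffer after
+a failed fill exposes whatever the allocator left there, while zeroing
+it up front (kzalloc in the kernel, calloc here) bounds what the error
+path can print.
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  #define EDID_LENGTH 128
+
+  static int fetch_base_block(unsigned char *buf)
+  {
+          (void)buf; /* simulate a transfer error: buf left untouched */
+          return -1;
+  }
+
+  int main(void)
+  {
+          unsigned char *base_block = calloc(1, EDID_LENGTH);
+
+          if (!base_block)
+                  return 1;
+          if (fetch_base_block(base_block) < 0) {
+                  /* the error dump now prints zeros, not stale heap data */
+                  for (int i = 0; i < 16; i++)
+                          printf("%02x ", base_block[i]);
+                  printf("\n");
+          }
+          free(base_block);
+          return 0;
+  }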
+
+Fixes: 69c7717c20cc ("drm/edid: Dump the EDID when drm_edid_get_panel_id() has an error")
+Cc: stable@vger.kernel.org # 6.2
+Cc: Douglas Anderson <dianders@chromium.org>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Reviewed-by: Jani Nikula <jani.nikula@intel.com>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230302074704.11371-1-johan+linaro@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/drm_edid.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 3841aba17abd..8707fe72a028 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -2797,7 +2797,7 @@ u32 drm_edid_get_panel_id(struct i2c_adapter *adapter)
+ * the EDID then we'll just return 0.
+ */
+
+- base_block = kmalloc(EDID_LENGTH, GFP_KERNEL);
++ base_block = kzalloc(EDID_LENGTH, GFP_KERNEL);
+ if (!base_block)
+ return 0;
+
+--
+2.40.0
+
--- /dev/null
+From e0e6b416b25ee14716f3549e0cbec1011b193809 Mon Sep 17 00:00:00 2001
+From: Janusz Krzysztofik <janusz.krzysztofik@linux.intel.com>
+Date: Thu, 2 Mar 2023 13:08:20 +0100
+Subject: drm/i915/active: Fix misuse of non-idle barriers as fence trackers
+
+From: Janusz Krzysztofik <janusz.krzysztofik@linux.intel.com>
+
+commit e0e6b416b25ee14716f3549e0cbec1011b193809 upstream.
+
+Users reported oopses on list corruptions when using i915 perf with a
+number of concurrently running graphics applications. Root cause analysis
+pointed at an issue in barrier processing code -- a race among perf open /
+close replacing active barriers with perf requests on kernel context and
+concurrent barrier preallocate / acquire operations performed during user
+context first pin / last unpin.
+
+When adding a request to a composite tracker, we try to reuse an existing
+fence tracker, already allocated and registered with that composite. The
+tracker we obtain may already track another fence, may be an idle barrier,
+or an active barrier.
+
+If the tracker we get happens to be a non-idle barrier then we try to
+delete that barrier from the list of barrier tasks it belongs to. However,
+while doing that we don't respect the return value of the function that
+performs the barrier deletion. Should the deletion ever fail, we would end
+up reusing the tracker still registered as a barrier task. Since the same
+structure field is reused for both fence callback lists and the barrier
+tasks list, list corruptions would likely occur.
+
+Barriers are now deleted from a barrier tasks list by temporarily removing
+the list content, traversing that content while skipping over the node to
+be deleted, then populating the list back with the modified content. Should
+those intentionally racy concurrent deletion attempts not be serialized,
+one or more of them may fail because the list is temporarily empty.
+
+Related code that ignores the results of barrier deletion was initially
+introduced in v5.4 by commit d8af05ff38ae ("drm/i915: Allow sharing the
+idle-barrier from other kernel requests"). However, all users of the
+barrier deletion routine were apparently serialized at that time, so the
+issue didn't exhibit itself. Results of git bisect with the help of a newly
+developed igt@gem_barrier_race@remote-request IGT test indicate that list
+corruptions might start to appear after commit 311770173fac ("drm/i915/gt:
+Schedule request retirement when timeline idles"), introduced in v5.5.
+
+Respect results of barrier deletion attempts -- mark the barrier as idle
+only if successfully deleted from the list. Then, before proceeding with
+setting our fence as the one currently tracked, make sure that the tracker
+we've got is not a non-idle barrier. If that check fails then don't use
+that tracker but go back and try to acquire a new, usable one.
+
+v3: use unlikely() to document what outcome we expect (Andi),
+ - fix bad grammar in commit description.
+v2: no code changes,
+ - blame commit 311770173fac ("drm/i915/gt: Schedule request retirement
+ when timeline idles"), v5.5, not commit d8af05ff38ae ("drm/i915: Allow
+ sharing the idle-barrier from other kernel requests"), v5.4,
+ - reword commit description.
+
+Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/6333
+Fixes: 311770173fac ("drm/i915/gt: Schedule request retirement when timeline idles")
+Cc: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: stable@vger.kernel.org # v5.5
+Cc: Andi Shyti <andi.shyti@linux.intel.com>
+Signed-off-by: Janusz Krzysztofik <janusz.krzysztofik@linux.intel.com>
+Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
+Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230302120820.48740-1-janusz.krzysztofik@linux.intel.com
+(cherry picked from commit 506006055769b10d1b2b4e22f636f3b45e0e9fc7)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_active.c | 27 +++++++++++++++------------
+ 1 file changed, 15 insertions(+), 12 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_active.c
++++ b/drivers/gpu/drm/i915/i915_active.c
+@@ -422,12 +422,12 @@ replace_barrier(struct i915_active *ref,
+ * we can use it to substitute for the pending idle-barrer
+ * request that we want to emit on the kernel_context.
+ */
+- __active_del_barrier(ref, node_from_active(active));
+- return true;
++ return __active_del_barrier(ref, node_from_active(active));
+ }
+
+ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
+ {
++ u64 idx = i915_request_timeline(rq)->fence_context;
+ struct dma_fence *fence = &rq->fence;
+ struct i915_active_fence *active;
+ int err;
+@@ -437,16 +437,19 @@ int i915_active_add_request(struct i915_
+ if (err)
+ return err;
+
+- active = active_instance(ref, i915_request_timeline(rq)->fence_context);
+- if (!active) {
+- err = -ENOMEM;
+- goto out;
+- }
+-
+- if (replace_barrier(ref, active)) {
+- RCU_INIT_POINTER(active->fence, NULL);
+- atomic_dec(&ref->count);
+- }
++ do {
++ active = active_instance(ref, idx);
++ if (!active) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ if (replace_barrier(ref, active)) {
++ RCU_INIT_POINTER(active->fence, NULL);
++ atomic_dec(&ref->count);
++ }
++ } while (unlikely(is_barrier(active)));
++
+ if (!__i915_active_fence_set(active, fence))
+ __i915_active_acquire(ref);
+
--- /dev/null
+From 46bc23dcd94569270d02c4c1f7e62ae01ebd53bb Mon Sep 17 00:00:00 2001
+From: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+Date: Thu, 23 Feb 2023 10:06:19 +0530
+Subject: drm/i915/dg2: Add HDMI pixel clock frequencies 267.30 and 319.89 MHz
+
+From: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+
+commit 46bc23dcd94569270d02c4c1f7e62ae01ebd53bb upstream.
+
+Add SNPS PHY table values for the HDMI pixel clocks 267.30 MHz and
+319.89 MHz. The values are based on the Bspec algorithm for HDMI
+PLL programming.
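+
+As a sanity check (our paraphrase of the driver's port-clock math in
+intel_snps_phy.c, not part of this patch): with a 100 MHz reference
+clock, pixel clock = refclk * (MULTIPLIER/2 + 16 + FRACN/2^16) /
+(10 << TX_CLK_DIV), where FRACN = QUOT + REM/DEN. Under that reading,
+the new table values reproduce the two target clocks exactly:
+
+  #include <stdio.h>
+
+  static long pixel_khz(int mult, int quot, int rem, int den, int tx_clk_div)
+  {
+          long long num = 100000LL * ((mult / 2 + 16) * 65536LL + quot)
+                        + (100000LL * rem + den / 2) / den;
+          long long div = 10LL << (tx_clk_div + 16);
+
+          return (long)((num + div / 2) / div);
+  }
+
+  int main(void)
+  {
+          printf("%ld kHz\n", pixel_khz(74, 30146, 36699, 65535, 1)); /* 267300 */
+          printf("%ld kHz\n", pixel_khz(94, 64094, 13631, 65535, 1)); /* 319890 */
+          return 0;
+  }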
+
+Cc: stable@vger.kernel.org
+Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/8008
+Signed-off-by: Ankit Nautiyal <ankit.k.nautiyal@intel.com>
+Reviewed-by: Uma Shankar <uma.shankar@intel.com>
+Signed-off-by: Uma Shankar <uma.shankar@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230223043619.3941382-1-ankit.k.nautiyal@intel.com
+(cherry picked from commit d46746b8b13cbd377ffc733e465d25800459a31b)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_snps_phy.c | 62 ++++++++++++++++++++++++++
+ 1 file changed, 62 insertions(+)
+
+--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
++++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
+@@ -1419,6 +1419,36 @@ static const struct intel_mpllb_state dg
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+ };
+
++static const struct intel_mpllb_state dg2_hdmi_267300 = {
++ .clock = 267300,
++ .ref_control =
++ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
++ .mpllb_cp =
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
++ .mpllb_div =
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
++ .mpllb_div2 =
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
++ .mpllb_fracn1 =
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
++ .mpllb_fracn2 =
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 30146) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36699),
++ .mpllb_sscen =
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
++};
++
+ static const struct intel_mpllb_state dg2_hdmi_268500 = {
+ .clock = 268500,
+ .ref_control =
+@@ -1509,6 +1539,36 @@ static const struct intel_mpllb_state dg
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+ };
+
++static const struct intel_mpllb_state dg2_hdmi_319890 = {
++ .clock = 319890,
++ .ref_control =
++ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
++ .mpllb_cp =
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
++ .mpllb_div =
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
++ .mpllb_div2 =
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
++ .mpllb_fracn1 =
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
++ .mpllb_fracn2 =
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 64094) |
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13631),
++ .mpllb_sscen =
++ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
++};
++
+ static const struct intel_mpllb_state dg2_hdmi_497750 = {
+ .clock = 497750,
+ .ref_control =
+@@ -1696,8 +1756,10 @@ static const struct intel_mpllb_state *
+ &dg2_hdmi_209800,
+ &dg2_hdmi_241500,
+ &dg2_hdmi_262750,
++ &dg2_hdmi_267300,
+ &dg2_hdmi_268500,
+ &dg2_hdmi_296703,
++ &dg2_hdmi_319890,
+ &dg2_hdmi_497750,
+ &dg2_hdmi_592000,
+ &dg2_hdmi_593407,
--- /dev/null
+From ee9adb7a45516cfa536ca92253d7ae59d56db9e4 Mon Sep 17 00:00:00 2001
+From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Date: Mon, 9 Jan 2023 00:13:11 +0300
+Subject: drm/shmem-helper: Remove another errant put in error path
+
+From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+
+commit ee9adb7a45516cfa536ca92253d7ae59d56db9e4 upstream.
+
+drm_gem_shmem_mmap() doesn't own a reference in the error code path,
+resulting in the dma-buf shmem GEM object getting prematurely freed and
+leading to a later use-after-free.
+
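+For context, a simplified caller-side sketch (abbreviated from
+drm_gem_mmap_obj(); treat the exact shape as an assumption) of why the
+reference may only be dropped on success:
+
+  drm_gem_object_get(obj);            /* ref for the new mapping */
+  ret = obj->funcs->mmap(obj, vma);   /* e.g. drm_gem_shmem_mmap() */
+  if (ret) {
+          drm_gem_object_put(obj);    /* caller's own error-path put */
+          return ret;
+  }
+
+If the handler also put the reference before dma_buf_mmap() failed, the
+object would be over-released and later freed while still in use.
+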
+Fixes: f49a51bfdc8e ("drm/shme-helpers: Fix dma_buf_mmap forwarding bug")
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Reviewed-by: Rob Clark <robdclark@gmail.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230108211311.3950107-1-dmitry.osipenko@collabora.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/drm_gem_shmem_helper.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
++++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
+@@ -622,11 +622,14 @@ int drm_gem_shmem_mmap(struct drm_gem_sh
+ int ret;
+
+ if (obj->import_attach) {
+- /* Drop the reference drm_gem_mmap_obj() acquired.*/
+- drm_gem_object_put(obj);
+ vma->vm_private_data = NULL;
++ ret = dma_buf_mmap(obj->dma_buf, vma, 0);
++
++ /* Drop the reference drm_gem_mmap_obj() acquired.*/
++ if (!ret)
++ drm_gem_object_put(obj);
+
+- return dma_buf_mmap(obj->dma_buf, vma, 0);
++ return ret;
+ }
+
+ ret = drm_gem_shmem_get_pages(shmem);
--- /dev/null
+From c22f2ff8724b49dce2ae797e9fbf4bc0fa91112f Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan+linaro@kernel.org>
+Date: Mon, 6 Mar 2023 11:32:42 +0100
+Subject: drm/sun4i: fix missing component unbind on bind errors
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+commit c22f2ff8724b49dce2ae797e9fbf4bc0fa91112f upstream.
+
+Make sure to unbind all subcomponents when binding the aggregate device
+fails.
+
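+For reference, the shape of the fix: once component_bind_all() has
+succeeded, every later error in the bind callback must unwind through
+component_unbind_all(). A skeleton (illustrative, not the full sun4i
+bind function):
+
+  ret = component_bind_all(dev, drm);
+  if (ret)
+          goto cleanup_mode_config;
+
+  ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+  if (ret)
+          goto unbind_all;            /* not cleanup_mode_config */
+
+  /* ... remaining setup ... */
+
+  unbind_all:
+          component_unbind_all(dev, NULL);
+  cleanup_mode_config:
+          drm_mode_config_cleanup(drm);
+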
+Fixes: 9026e0d122ac ("drm: Add Allwinner A10 Display Engine support")
+Cc: stable@vger.kernel.org # 4.7
+Cc: Maxime Ripard <mripard@kernel.org>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Signed-off-by: Maxime Ripard <maxime@cerno.tech>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230306103242.4775-1-johan+linaro@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/sun4i/sun4i_drv.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
++++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
+@@ -95,12 +95,12 @@ static int sun4i_drv_bind(struct device
+ /* drm_vblank_init calls kcalloc, which can fail */
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret)
+- goto cleanup_mode_config;
++ goto unbind_all;
+
+ /* Remove early framebuffers (ie. simplefb) */
+ ret = drm_aperture_remove_framebuffers(false, &sun4i_drv_driver);
+ if (ret)
+- goto cleanup_mode_config;
++ goto unbind_all;
+
+ sun4i_framebuffer_init(drm);
+
+@@ -119,6 +119,8 @@ static int sun4i_drv_bind(struct device
+
+ finish_poll:
+ drm_kms_helper_poll_fini(drm);
++unbind_all:
++ component_unbind_all(dev, NULL);
+ cleanup_mode_config:
+ drm_mode_config_cleanup(drm);
+ of_reserved_mem_device_release(dev);
--- /dev/null
+From 112e66017bff7f2837030f34c2bc19501e9212d5 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Fri, 10 Mar 2023 11:10:56 -0500
+Subject: KVM: nVMX: add missing consistency checks for CR0 and CR4
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 112e66017bff7f2837030f34c2bc19501e9212d5 upstream.
+
+The effective values of the guest CR0 and CR4 registers may differ from
+those included in the VMCS12. In particular, disabling EPT forces
+CR4.PAE=1 and disabling unrestricted guest mode forces CR0.PG=CR0.PE=1.
+
+Therefore, checks on these bits cannot be delegated to the processor
+and must be performed by KVM.
+
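+Concretely: if L1 sets vmcs12.guest_cr0 with PG=1 and PE=0 while
+unrestricted guest mode is disabled, the hardware only sees the forced
+PG=PE=1 combination and reports no error, so KVM has to validate the
+vmcs12 values itself. A sketch of the added checks (mirroring the hunk
+below, with illustrative variable names):
+
+  /* paging requires protected mode: PG=1 with PE=0 is invalid */
+  if ((guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG)
+          return -EINVAL;
+
+  /* an IA-32e guest additionally needs CR4.PAE=1 and CR0.PG=1 */
+  if (ia32e && (!(guest_cr4 & X86_CR4_PAE) || !(guest_cr0 & X86_CR0_PG)))
+          return -EINVAL;
+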
+Reported-by: Reima ISHII <ishiir@g.ecc.u-tokyo.ac.jp>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/nested.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -3021,7 +3021,7 @@ static int nested_vmx_check_guest_state(
+ struct vmcs12 *vmcs12,
+ enum vm_entry_failure_code *entry_failure_code)
+ {
+- bool ia32e;
++ bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE);
+
+ *entry_failure_code = ENTRY_FAIL_DEFAULT;
+
+@@ -3047,6 +3047,13 @@ static int nested_vmx_check_guest_state(
+ vmcs12->guest_ia32_perf_global_ctrl)))
+ return -EINVAL;
+
++ if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG))
++ return -EINVAL;
++
++ if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) ||
++ CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG)))
++ return -EINVAL;
++
+ /*
+ * If the load IA32_EFER VM-entry control is 1, the following checks
+ * are performed on the field for the IA32_EFER MSR:
+@@ -3058,7 +3065,6 @@ static int nested_vmx_check_guest_state(
+ */
+ if (to_vmx(vcpu)->nested.nested_run_pending &&
+ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
+- ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
+ if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
+ CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
+ CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
--- /dev/null
+From 3ec7a1b2743c07c45f4a0c508114f6cb410ddef3 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Tue, 7 Feb 2023 00:21:54 +0000
+Subject: KVM: SVM: Fix a benign off-by-one bug in AVIC physical table mask
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 3ec7a1b2743c07c45f4a0c508114f6cb410ddef3 upstream.
+
+Define the "physical table max index mask" as bits 8:0, not 9:0. x2AVIC
+currently supports a max of 512 entries, i.e. the max index is 511, and
+the inputs to GENMASK_ULL() are inclusive. The bug is benign as bit 9 is
+reserved and never set by KVM, i.e. KVM is just clearing bits that are
+guaranteed to be zero.
+
+Note, as of this writing, APM "Rev. 3.39-October 2022" incorrectly states
+that bits 11:8 are reserved in Table B-1. VMCB Layout, Control Area. I.e.
+that table wasn't updated when x2AVIC support was added.
+
+Opportunistically fix the comment for the max AVIC ID to align with the
+code, and clean up comment formatting too.
+
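+The arithmetic, for reference (GENMASK_ULL() bounds are inclusive):
+
+  GENMASK_ULL(9, 0) = 0x3ff  /* 10 bits: indices 0..1023, too wide */
+  GENMASK_ULL(8, 0) = 0x1ff  /*  9 bits: indices 0..511, exact fit */
+
+The new static_asserts in the hunk below verify that both
+AVIC_MAX_PHYSICAL_ID and X2AVIC_MAX_PHYSICAL_ID fit within the mask.
+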
+Fixes: 4d1d7942e36a ("KVM: SVM: Introduce logic to (de)activate x2AVIC mode")
+Cc: stable@vger.kernel.org
+Cc: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
+Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Reviewed-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+Tested-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+Message-Id: <20230207002156.521736-2-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/svm.h | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/include/asm/svm.h
++++ b/arch/x86/include/asm/svm.h
+@@ -261,20 +261,22 @@ enum avic_ipi_failure_cause {
+ AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
+ };
+
+-#define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(9, 0)
++#define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(8, 0)
+
+ /*
+- * For AVIC, the max index allowed for physical APIC ID
+- * table is 0xff (255).
++ * For AVIC, the max index allowed for physical APIC ID table is 0xfe (254), as
++ * 0xff is a broadcast to all CPUs, i.e. can't be targeted individually.
+ */
+ #define AVIC_MAX_PHYSICAL_ID 0XFEULL
+
+ /*
+- * For x2AVIC, the max index allowed for physical APIC ID
+- * table is 0x1ff (511).
++ * For x2AVIC, the max index allowed for physical APIC ID table is 0x1ff (511).
+ */
+ #define X2AVIC_MAX_PHYSICAL_ID 0x1FFUL
+
++static_assert((AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == AVIC_MAX_PHYSICAL_ID);
++static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_MAX_PHYSICAL_ID);
++
+ #define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
+ #define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
+
--- /dev/null
+From 5999715922c5a3ede5d8fe2a6b17aba58a157d41 Mon Sep 17 00:00:00 2001
+From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+Date: Tue, 7 Feb 2023 00:21:55 +0000
+Subject: KVM: SVM: Modify AVIC GATag to support max number of 512 vCPUs
+
+From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+
+commit 5999715922c5a3ede5d8fe2a6b17aba58a157d41 upstream.
+
+Define AVIC_VCPU_ID_MASK based on AVIC_PHYSICAL_MAX_INDEX, i.e. the mask
+that effectively controls the largest guest physical APIC ID supported by
+x2AVIC, instead of hardcoding the number of bits to 8 (and the number of
+VM bits to 24).
+
+The AVIC GATag is programmed into the AMD IOMMU IRTE to provide a
+reference back to KVM in case the IOMMU cannot inject an interrupt into a
+non-running vCPU. In such a case, the IOMMU notifies software by creating
+a GALog entry with the corresponding GATag, and KVM then uses the GATag to
+find the correct VM+vCPU to kick. Dropping bit 8 from the GATag results
+in kicking the wrong vCPU when targeting vCPUs with x2APIC ID > 255.
+
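+A worked example of the new encoding (a sketch; macro names shortened
+from the patch): with AVIC_PHYSICAL_MAX_INDEX_MASK = GENMASK(8, 0) the
+vCPU field is 9 bits wide, so the VM ID is shifted by 9 and gets the
+remaining 23 bits of the 32-bit GATag:
+
+  #define VCPU_MASK 0x1ff                     /* 9 bits, 512 vCPUs */
+  #define VM_SHIFT  9
+  #define GATAG(vm, vcpu) (((vm) << VM_SHIFT) | ((vcpu) & VCPU_MASK))
+
+  unsigned int tag  = GATAG(5, 300);          /* 0xb2c */
+  unsigned int vcpu = tag & VCPU_MASK;        /* 300, round-trips intact */
+
+With the old 8-bit vCPU field, vCPU ID 300 would have been truncated to
+300 & 0xff = 44, kicking the wrong vCPU.
+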
+Fixes: 4d1d7942e36a ("KVM: SVM: Introduce logic to (de)activate x2AVIC mode")
+Cc: stable@vger.kernel.org
+Reported-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
+Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+Co-developed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Reviewed-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+Tested-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+Message-Id: <20230207002156.521736-3-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/avic.c | 26 ++++++++++++++++++--------
+ 1 file changed, 18 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kvm/svm/avic.c
++++ b/arch/x86/kvm/svm/avic.c
+@@ -27,19 +27,29 @@
+ #include "irq.h"
+ #include "svm.h"
+
+-/* AVIC GATAG is encoded using VM and VCPU IDs */
+-#define AVIC_VCPU_ID_BITS 8
+-#define AVIC_VCPU_ID_MASK ((1 << AVIC_VCPU_ID_BITS) - 1)
++/*
++ * Encode the arbitrary VM ID and the vCPU's default APIC ID, i.e the vCPU ID,
++ * into the GATag so that KVM can retrieve the correct vCPU from a GALog entry
++ * if an interrupt can't be delivered, e.g. because the vCPU isn't running.
++ *
++ * For the vCPU ID, use however many bits are currently allowed for the max
++ * guest physical APIC ID (limited by the size of the physical ID table), and
++ * use whatever bits remain to assign arbitrary AVIC IDs to VMs. Note, the
++ * size of the GATag is defined by hardware (32 bits), but is an opaque value
++ * as far as hardware is concerned.
++ */
++#define AVIC_VCPU_ID_MASK AVIC_PHYSICAL_MAX_INDEX_MASK
+
+-#define AVIC_VM_ID_BITS 24
+-#define AVIC_VM_ID_NR (1 << AVIC_VM_ID_BITS)
+-#define AVIC_VM_ID_MASK ((1 << AVIC_VM_ID_BITS) - 1)
++#define AVIC_VM_ID_SHIFT HWEIGHT32(AVIC_PHYSICAL_MAX_INDEX_MASK)
++#define AVIC_VM_ID_MASK (GENMASK(31, AVIC_VM_ID_SHIFT) >> AVIC_VM_ID_SHIFT)
+
+-#define AVIC_GATAG(x, y) (((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
++#define AVIC_GATAG(x, y) (((x & AVIC_VM_ID_MASK) << AVIC_VM_ID_SHIFT) | \
+ (y & AVIC_VCPU_ID_MASK))
+-#define AVIC_GATAG_TO_VMID(x) ((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
++#define AVIC_GATAG_TO_VMID(x) ((x >> AVIC_VM_ID_SHIFT) & AVIC_VM_ID_MASK)
+ #define AVIC_GATAG_TO_VCPUID(x) (x & AVIC_VCPU_ID_MASK)
+
++static_assert(AVIC_GATAG(AVIC_VM_ID_MASK, AVIC_VCPU_ID_MASK) == -1u);
++
+ static bool force_avic;
+ module_param_unsafe(force_avic, bool, 0444);
+
--- /dev/null
+From 822467a48e938e661965d09df5fcac66f7291050 Mon Sep 17 00:00:00 2001
+From: Geliang Tang <geliang.tang@suse.com>
+Date: Thu, 9 Mar 2023 15:50:02 +0100
+Subject: mptcp: add ro_after_init for tcp{,v6}_prot_override
+
+From: Geliang Tang <geliang.tang@suse.com>
+
+commit 822467a48e938e661965d09df5fcac66f7291050 upstream.
+
+Add __ro_after_init labels for the variables tcp_prot_override and
+tcpv6_prot_override, just like other variables adjacent to them, to
+indicate that they are initialised from the init hooks and no writes
+occur afterwards.
+
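+For context, a minimal sketch of the pattern (the member updated below
+is illustrative; see mptcp_subflow_init() for the real code): the only
+writes happen while __init code runs, after which the section is mapped
+read-only and any later store would fault.
+
+  static struct proto tcp_prot_override __ro_after_init;
+
+  void __init mptcp_subflow_init(void)
+  {
+          tcp_prot_override = tcp_prot;       /* last legal write */
+          tcp_prot_override.release_cb = override_release_cb; /* hypothetical */
+  }
+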
+Fixes: b19bc2945b40 ("mptcp: implement delegated actions")
+Cc: stable@vger.kernel.org
+Fixes: 51fa7f8ebf0e ("mptcp: mark ops structures as ro_after_init")
+Signed-off-by: Geliang Tang <geliang.tang@suse.com>
+Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/subflow.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -627,7 +627,7 @@ static struct request_sock_ops mptcp_sub
+ static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
+ static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
+ static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
+-static struct proto tcpv6_prot_override;
++static struct proto tcpv6_prot_override __ro_after_init;
+
+ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ {
+@@ -925,7 +925,7 @@ dispose_child:
+ }
+
+ static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
+-static struct proto tcp_prot_override;
++static struct proto tcp_prot_override __ro_after_init;
+
+ enum mapping_status {
+ MAPPING_OK,
--- /dev/null
+From 3ba14528684f528566fb7d956bfbfb958b591d86 Mon Sep 17 00:00:00 2001
+From: Matthieu Baerts <matthieu.baerts@tessares.net>
+Date: Thu, 9 Mar 2023 15:50:03 +0100
+Subject: mptcp: avoid setting TCP_CLOSE state twice
+
+From: Matthieu Baerts <matthieu.baerts@tessares.net>
+
+commit 3ba14528684f528566fb7d956bfbfb958b591d86 upstream.
+
+tcp_set_state() is called from tcp_done() already.
+
+There is thus no need to first set the state to TCP_CLOSE and then
+call tcp_done().
+
+Fixes: d582484726c4 ("mptcp: fix fallback for MP_JOIN subflows")
+Cc: stable@vger.kernel.org
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/362
+Acked-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/subflow.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -405,7 +405,6 @@ void mptcp_subflow_reset(struct sock *ss
+ /* must hold: tcp_done() could drop last reference on parent */
+ sock_hold(sk);
+
+- tcp_set_state(ssk, TCP_CLOSE);
+ tcp_send_active_reset(ssk, GFP_ATOMIC);
+ tcp_done(ssk);
+ if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
--- /dev/null
+From cee4034a3db1d30c3243dd51506a9d4ab1a849fa Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Thu, 9 Mar 2023 15:50:04 +0100
+Subject: mptcp: fix lockdep false positive in mptcp_pm_nl_create_listen_socket()
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit cee4034a3db1d30c3243dd51506a9d4ab1a849fa upstream.
+
+Christoph reports a lockdep splat in the mptcp_subflow_create_socket()
+error path when that function is invoked by
+mptcp_pm_nl_create_listen_socket().
+
+Such a code path acquires two separate, nested socket locks, with the
+internal lock operation lacking the "nested" annotation. Adding that
+in sock_release() for mptcp's sake only could be confusing.
+
+Instead, just add a new lock class to the in-kernel msk socket,
+re-initializing the lockdep infrastructure after the socket creation.
+
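+For background, the false positive in miniature (a sketch, not the
+actual call chain): both sockets share one per-family lock class, so
+lockdep models the nested acquisition as recursive locking.
+
+  lock_sock(msk_sk);       /* class "sk_lock-AF_INET" */
+  lock_sock(subflow_sk);   /* same class while already held -> splat */
+
+Re-initializing the msk lock with its own class and name (the
+"msk_lock-AF_INET*" entries in the hunk below) places the two
+acquisitions in different classes, so no false cycle is reported.
+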
+Fixes: ad2171009d96 ("mptcp: fix locking for in-kernel listener creation")
+Cc: stable@vger.kernel.org
+Reported-by: Christoph Paasch <cpaasch@apple.com>
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/354
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Tested-by: Christoph Paasch <cpaasch@apple.com>
+Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -993,9 +993,13 @@ out:
+ return ret;
+ }
+
++static struct lock_class_key mptcp_slock_keys[2];
++static struct lock_class_key mptcp_keys[2];
++
+ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
+ struct mptcp_pm_addr_entry *entry)
+ {
++ bool is_ipv6 = sk->sk_family == AF_INET6;
+ int addrlen = sizeof(struct sockaddr_in);
+ struct sockaddr_storage addr;
+ struct socket *ssock;
+@@ -1012,6 +1016,18 @@ static int mptcp_pm_nl_create_listen_soc
+ if (!newsk)
+ return -EINVAL;
+
++ /* The subflow socket lock is acquired in a nested to the msk one
++ * in several places, even by the TCP stack, and this msk is a kernel
++ * socket: lockdep complains. Instead of propagating the _nested
++ * modifiers in several places, re-init the lock class for the msk
++ * socket to an mptcp specific one.
++ */
++ sock_lock_init_class_and_name(newsk,
++ is_ipv6 ? "mlock-AF_INET6" : "mlock-AF_INET",
++ &mptcp_slock_keys[is_ipv6],
++ is_ipv6 ? "msk_lock-AF_INET6" : "msk_lock-AF_INET",
++ &mptcp_keys[is_ipv6]);
++
+ lock_sock(newsk);
+ ssock = __mptcp_nmpc_socket(mptcp_sk(newsk));
+ release_sock(newsk);
--- /dev/null
+From b7a679ba7c652587b85294f4953f33ac0b756d40 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Thu, 9 Mar 2023 15:49:57 +0100
+Subject: mptcp: fix possible deadlock in subflow_error_report
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit b7a679ba7c652587b85294f4953f33ac0b756d40 upstream.
+
+Christoph reported a possible deadlock while the TCP stack
+destroys an unaccepted subflow due to an incoming reset: the
+MPTCP socket error path tries to acquire the msk-level socket
+lock while TCP still owns the listener socket accept queue
+spinlock, and the reverse dependency already exists in the
+TCP stack.
+
+Note that the above is actually a lockdep false positive, as
+the chain involves two separate sockets. A different per-socket
+lockdep key will address the issue, but such a change will be
+quite invasive.
+
+Instead, we can simply stop the socket error handling earlier for
+orphaned or unaccepted subflows, breaking the critical lockdep chain.
+Error handling in such a scenario is a no-op.
+
+Reported-and-tested-by: Christoph Paasch <cpaasch@apple.com>
+Fixes: 15cc10453398 ("mptcp: deliver ssk errors to msk")
+Cc: stable@vger.kernel.org
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/355
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/subflow.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1431,6 +1431,13 @@ static void subflow_error_report(struct
+ {
+ struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
+
++ /* bail early if this is a no-op, so that we avoid introducing a
++ * problematic lockdep dependency between TCP accept queue lock
++ * and msk socket spinlock
++ */
++ if (!sk->sk_socket)
++ return;
++
+ mptcp_data_lock(sk);
+ if (!sock_owned_by_user(sk))
+ __mptcp_error_report(sk);
--- /dev/null
+From 0a3f4f1f9c27215e4ddcd312558342e57b93e518 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Thu, 9 Mar 2023 15:50:00 +0100
+Subject: mptcp: fix UaF in listener shutdown
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit 0a3f4f1f9c27215e4ddcd312558342e57b93e518 upstream.
+
+As reported by Christoph after the refactoring of the passive socket
+initialization, the mptcp listener shutdown path is prone to a UaF
+issue.
+
+ BUG: KASAN: use-after-free in _raw_spin_lock_bh+0x73/0xe0
+ Write of size 4 at addr ffff88810cb23098 by task syz-executor731/1266
+
+ CPU: 1 PID: 1266 Comm: syz-executor731 Not tainted 6.2.0-rc59af4eaa31c1f6c00c8f1e448ed99a45c66340dd5 #6
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
+ Call Trace:
+ <TASK>
+ dump_stack_lvl+0x6e/0x91
+ print_report+0x16a/0x46f
+ kasan_report+0xad/0x130
+ kasan_check_range+0x14a/0x1a0
+ _raw_spin_lock_bh+0x73/0xe0
+ subflow_error_report+0x6d/0x110
+ sk_error_report+0x3b/0x190
+ tcp_disconnect+0x138c/0x1aa0
+ inet_child_forget+0x6f/0x2e0
+ inet_csk_listen_stop+0x209/0x1060
+ __mptcp_close_ssk+0x52d/0x610
+ mptcp_destroy_common+0x165/0x640
+ mptcp_destroy+0x13/0x80
+ __mptcp_destroy_sock+0xe7/0x270
+ __mptcp_close+0x70e/0x9b0
+ mptcp_close+0x2b/0x150
+ inet_release+0xe9/0x1f0
+ __sock_release+0xd2/0x280
+ sock_close+0x15/0x20
+ __fput+0x252/0xa20
+ task_work_run+0x169/0x250
+ exit_to_user_mode_prepare+0x113/0x120
+ syscall_exit_to_user_mode+0x1d/0x40
+ do_syscall_64+0x48/0x90
+ entry_SYSCALL_64_after_hwframe+0x72/0xdc
+
+The msk grace period can legitimately expire between the last reference
+count drop in mptcp_subflow_queue_clean() and the later access in
+inet_csk_listen_stop().
+
+After the previous patch we no longer need to special-case the msk
+listener socket cleanup: the mptcp worker will process each of the
+unaccepted msk sockets.
+
+Just drop the now unnecessary code.
+
+Please note this commit depends on the two parent ones:
+
+ mptcp: refactor passive socket initialization
+ mptcp: use the workqueue to destroy unaccepted sockets
+
+Fixes: 6aeed9045071 ("mptcp: fix race on unaccepted mptcp sockets")
+Cc: stable@vger.kernel.org
+Reported-and-tested-by: Christoph Paasch <cpaasch@apple.com>
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/346
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c | 7 +---
+ net/mptcp/protocol.h | 1 -
+ net/mptcp/subflow.c | 72 ---------------------------------------------------
+ 3 files changed, 2 insertions(+), 78 deletions(-)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2366,12 +2366,9 @@ static void __mptcp_close_ssk(struct soc
+ mptcp_subflow_drop_ctx(ssk);
+ } else {
+ /* otherwise tcp will dispose of the ssk and subflow ctx */
+- if (ssk->sk_state == TCP_LISTEN) {
+- tcp_set_state(ssk, TCP_CLOSE);
+- mptcp_subflow_queue_clean(sk, ssk);
+- inet_csk_listen_stop(ssk);
++ if (ssk->sk_state == TCP_LISTEN)
+ mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
+- }
++
+ __tcp_close(ssk, 0);
+
+ /* close acquired an extra ref */
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -629,7 +629,6 @@ void mptcp_close_ssk(struct sock *sk, st
+ struct mptcp_subflow_context *subflow);
+ void __mptcp_subflow_send_ack(struct sock *ssk);
+ void mptcp_subflow_reset(struct sock *ssk);
+-void mptcp_subflow_queue_clean(struct sock *sk, struct sock *ssk);
+ void mptcp_sock_graft(struct sock *sk, struct socket *parent);
+ struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
+ bool __mptcp_close(struct sock *sk, long timeout);
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1823,78 +1823,6 @@ static void subflow_state_change(struct
+ }
+ }
+
+-void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
+-{
+- struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
+- struct mptcp_sock *msk, *next, *head = NULL;
+- struct request_sock *req;
+-
+- /* build a list of all unaccepted mptcp sockets */
+- spin_lock_bh(&queue->rskq_lock);
+- for (req = queue->rskq_accept_head; req; req = req->dl_next) {
+- struct mptcp_subflow_context *subflow;
+- struct sock *ssk = req->sk;
+- struct mptcp_sock *msk;
+-
+- if (!sk_is_mptcp(ssk))
+- continue;
+-
+- subflow = mptcp_subflow_ctx(ssk);
+- if (!subflow || !subflow->conn)
+- continue;
+-
+- /* skip if already in list */
+- msk = mptcp_sk(subflow->conn);
+- if (msk->dl_next || msk == head)
+- continue;
+-
+- msk->dl_next = head;
+- head = msk;
+- }
+- spin_unlock_bh(&queue->rskq_lock);
+- if (!head)
+- return;
+-
+- /* can't acquire the msk socket lock under the subflow one,
+- * or will cause ABBA deadlock
+- */
+- release_sock(listener_ssk);
+-
+- for (msk = head; msk; msk = next) {
+- struct sock *sk = (struct sock *)msk;
+- bool do_cancel_work;
+-
+- lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+- next = msk->dl_next;
+- msk->first = NULL;
+- msk->dl_next = NULL;
+-
+- do_cancel_work = __mptcp_close(sk, 0);
+- release_sock(sk);
+- if (do_cancel_work) {
+- /* lockdep will report a false positive ABBA deadlock
+- * between cancel_work_sync and the listener socket.
+- * The involved locks belong to different sockets WRT
+- * the existing AB chain.
+- * Using a per socket key is problematic as key
+- * deregistration requires process context and must be
+- * performed at socket disposal time, in atomic
+- * context.
+- * Just tell lockdep to consider the listener socket
+- * released here.
+- */
+- mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
+- mptcp_cancel_work(sk);
+- mutex_acquire(&listener_sk->sk_lock.dep_map,
+- SINGLE_DEPTH_NESTING, 0, _RET_IP_);
+- }
+- sock_put(sk);
+- }
+-
+- /* we are still under the listener msk socket lock */
+- lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
+-}
+-
+ static int subflow_ulp_init(struct sock *sk)
+ {
+ struct inet_connection_sock *icsk = inet_csk(sk);
--- /dev/null
+From 3a236aef280ed5122b2d47087eb514d0921ae033 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Thu, 9 Mar 2023 15:49:58 +0100
+Subject: mptcp: refactor passive socket initialization
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit 3a236aef280ed5122b2d47087eb514d0921ae033 upstream.
+
+After commit 30e51b923e43 ("mptcp: fix unreleased socket in accept queue")
+unaccepted msk sockets go through a complete shutdown, so we no longer
+need to delay inserting the first subflow into the subflow lists.
+
+The reference counting deserves some extra care, as __mptcp_close() is
+unaware of the request socket linkage to the first subflow.
+
+Please note that this is more a refactoring than a fix, but because this
+modification is needed by other corrections (see the following commits),
+a Fixes tag has been added here to help the stable team.
+
+Fixes: 30e51b923e43 ("mptcp: fix unreleased socket in accept queue")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Tested-by: Christoph Paasch <cpaasch@apple.com>
+Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c | 17 -----------------
+ net/mptcp/subflow.c | 27 +++++++++++++++++++++------
+ 2 files changed, 21 insertions(+), 23 deletions(-)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -825,7 +825,6 @@ static bool __mptcp_finish_join(struct m
+ if (sk->sk_socket && !ssk->sk_socket)
+ mptcp_sock_graft(ssk, sk->sk_socket);
+
+- mptcp_propagate_sndbuf((struct sock *)msk, ssk);
+ mptcp_sockopt_sync_locked(msk, ssk);
+ return true;
+ }
+@@ -3699,22 +3698,6 @@ static int mptcp_stream_accept(struct so
+
+ lock_sock(newsk);
+
+- /* PM/worker can now acquire the first subflow socket
+- * lock without racing with listener queue cleanup,
+- * we can notify it, if needed.
+- *
+- * Even if remote has reset the initial subflow by now
+- * the refcnt is still at least one.
+- */
+- subflow = mptcp_subflow_ctx(msk->first);
+- list_add(&subflow->node, &msk->conn_list);
+- sock_hold(msk->first);
+- if (mptcp_is_fully_established(newsk))
+- mptcp_pm_fully_established(msk, msk->first, GFP_KERNEL);
+-
+- mptcp_rcv_space_init(msk, msk->first);
+- mptcp_propagate_sndbuf(newsk, msk->first);
+-
+ /* set ssk->sk_socket of accept()ed flows to mptcp socket.
+ * This is needed so NOSPACE flag can be set from tcp stack.
+ */
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -396,6 +396,12 @@ void mptcp_subflow_reset(struct sock *ss
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ struct sock *sk = subflow->conn;
+
++ /* mptcp_mp_fail_no_response() can reach here on an already closed
++ * socket
++ */
++ if (ssk->sk_state == TCP_CLOSE)
++ return;
++
+ /* must hold: tcp_done() could drop last reference on parent */
+ sock_hold(sk);
+
+@@ -749,6 +755,7 @@ static struct sock *subflow_syn_recv_soc
+ struct mptcp_options_received mp_opt;
+ bool fallback, fallback_is_fatal;
+ struct sock *new_msk = NULL;
++ struct mptcp_sock *owner;
+ struct sock *child;
+
+ pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
+@@ -823,6 +830,8 @@ create_child:
+ ctx->setsockopt_seq = listener->setsockopt_seq;
+
+ if (ctx->mp_capable) {
++ owner = mptcp_sk(new_msk);
++
+ /* this can't race with mptcp_close(), as the msk is
+ * not yet exposted to user-space
+ */
+@@ -831,14 +840,14 @@ create_child:
+ /* record the newly created socket as the first msk
+ * subflow, but don't link it yet into conn_list
+ */
+- WRITE_ONCE(mptcp_sk(new_msk)->first, child);
++ WRITE_ONCE(owner->first, child);
+
+ /* new mpc subflow takes ownership of the newly
+ * created mptcp socket
+ */
+ mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
+- mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
+- mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
++ mptcp_pm_new_connection(owner, child, 1);
++ mptcp_token_accept(subflow_req, owner);
+ ctx->conn = new_msk;
+ new_msk = NULL;
+
+@@ -846,15 +855,21 @@ create_child:
+ * uses the correct data
+ */
+ mptcp_copy_inaddrs(ctx->conn, child);
++ mptcp_propagate_sndbuf(ctx->conn, child);
++
++ mptcp_rcv_space_init(owner, child);
++ list_add(&ctx->node, &owner->conn_list);
++ sock_hold(child);
+
+ /* with OoO packets we can reach here without ingress
+ * mpc option
+ */
+- if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK)
++ if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
+ mptcp_subflow_fully_established(ctx, &mp_opt);
++ mptcp_pm_fully_established(owner, child, GFP_ATOMIC);
++ ctx->pm_notified = 1;
++ }
+ } else if (ctx->mp_join) {
+- struct mptcp_sock *owner;
+-
+ owner = subflow_req->msk;
+ if (!owner) {
+ subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
--- /dev/null
+From b6985b9b82954caa53f862d6059d06c0526254f0 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Thu, 9 Mar 2023 15:49:59 +0100
+Subject: mptcp: use the workqueue to destroy unaccepted sockets
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit b6985b9b82954caa53f862d6059d06c0526254f0 upstream.
+
+Christoph reported a UaF at token lookup time after the refactoring of
+the passive socket initialization:
+
+ BUG: KASAN: use-after-free in __token_bucket_busy+0x253/0x260
+ Read of size 4 at addr ffff88810698d5b0 by task syz-executor653/3198
+
+ CPU: 1 PID: 3198 Comm: syz-executor653 Not tainted 6.2.0-rc59af4eaa31c1f6c00c8f1e448ed99a45c66340dd5 #6
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
+ Call Trace:
+ <TASK>
+ dump_stack_lvl+0x6e/0x91
+ print_report+0x16a/0x46f
+ kasan_report+0xad/0x130
+ __token_bucket_busy+0x253/0x260
+ mptcp_token_new_connect+0x13d/0x490
+ mptcp_connect+0x4ed/0x860
+ __inet_stream_connect+0x80e/0xd90
+ tcp_sendmsg_fastopen+0x3ce/0x710
+ mptcp_sendmsg+0xff1/0x1a20
+ inet_sendmsg+0x11d/0x140
+ __sys_sendto+0x405/0x490
+ __x64_sys_sendto+0xdc/0x1b0
+ do_syscall_64+0x3b/0x90
+ entry_SYSCALL_64_after_hwframe+0x72/0xdc
+
+We need to properly clean up all the paired MPTCP-level resources and
+be sure to release the msk last, even when the unaccepted subflow is
+destroyed by the TCP internals via inet_child_forget().
+
+We can re-use the existing MPTCP_WORK_CLOSE_SUBFLOW infrastructure,
+explicitly checking for the critical scenario: the closed subflow is
+the MPC one and the msk has not been accepted, so it eventually goes
+through a full cleanup.
+
+With this change, __mptcp_destroy_sock() is always called on msk
+sockets, even on accepted ones. We no longer need to transiently drop
+one sk reference at msk clone time.
+
+Please note this commit depends on the parent one:
+
+ mptcp: refactor passive socket initialization
+
+Fixes: 58b09919626b ("mptcp: create msk early")
+Cc: stable@vger.kernel.org
+Reported-and-tested-by: Christoph Paasch <cpaasch@apple.com>
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/347
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c | 40 ++++++++++++++++++++++++++++++----------
+ net/mptcp/protocol.h | 5 ++++-
+ net/mptcp/subflow.c | 17 ++++++++++++-----
+ 3 files changed, 46 insertions(+), 16 deletions(-)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2343,7 +2343,6 @@ static void __mptcp_close_ssk(struct soc
+ goto out;
+ }
+
+- sock_orphan(ssk);
+ subflow->disposable = 1;
+
+ /* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops
+@@ -2351,7 +2350,20 @@ static void __mptcp_close_ssk(struct soc
+ * reference owned by msk;
+ */
+ if (!inet_csk(ssk)->icsk_ulp_ops) {
++ WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD));
+ kfree_rcu(subflow, rcu);
++ } else if (msk->in_accept_queue && msk->first == ssk) {
++ /* if the first subflow moved to a close state, e.g. due to
++ * incoming reset and we reach here before inet_child_forget()
++ * the TCP stack could later try to close it via
++ * inet_csk_listen_stop(), or deliver it to the user space via
++ * accept().
++ * We can't delete the subflow - or risk a double free - nor let
++ * the msk survive - or will be leaked in the non accept scenario:
++ * fallback and let TCP cope with the subflow cleanup.
++ */
++ WARN_ON_ONCE(sock_flag(ssk, SOCK_DEAD));
++ mptcp_subflow_drop_ctx(ssk);
+ } else {
+ /* otherwise tcp will dispose of the ssk and subflow ctx */
+ if (ssk->sk_state == TCP_LISTEN) {
+@@ -2399,9 +2411,10 @@ static unsigned int mptcp_sync_mss(struc
+ return 0;
+ }
+
+-static void __mptcp_close_subflow(struct mptcp_sock *msk)
++static void __mptcp_close_subflow(struct sock *sk)
+ {
+ struct mptcp_subflow_context *subflow, *tmp;
++ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ might_sleep();
+
+@@ -2415,7 +2428,15 @@ static void __mptcp_close_subflow(struct
+ if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
+ continue;
+
+- mptcp_close_ssk((struct sock *)msk, ssk, subflow);
++ mptcp_close_ssk(sk, ssk, subflow);
++ }
++
++ /* if the MPC subflow has been closed before the msk is accepted,
++ * msk will never be accept-ed, close it now
++ */
++ if (!msk->first && msk->in_accept_queue) {
++ sock_set_flag(sk, SOCK_DEAD);
++ inet_sk_state_store(sk, TCP_CLOSE);
+ }
+ }
+
+@@ -2624,6 +2645,9 @@ static void mptcp_worker(struct work_str
+ __mptcp_check_send_data_fin(sk);
+ mptcp_check_data_fin(sk);
+
++ if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
++ __mptcp_close_subflow(sk);
++
+ /* There is no point in keeping around an orphaned sk timedout or
+ * closed, but we need the msk around to reply to incoming DATA_FIN,
+ * even if it is orphaned and in FIN_WAIT2 state
+@@ -2639,9 +2663,6 @@ static void mptcp_worker(struct work_str
+ }
+ }
+
+- if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+- __mptcp_close_subflow(msk);
+-
+ if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
+ __mptcp_retrans(sk);
+
+@@ -3072,6 +3093,7 @@ struct sock *mptcp_sk_clone(const struct
+ msk->local_key = subflow_req->local_key;
+ msk->token = subflow_req->token;
+ msk->subflow = NULL;
++ msk->in_accept_queue = 1;
+ WRITE_ONCE(msk->fully_established, false);
+ if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
+ WRITE_ONCE(msk->csum_enabled, true);
+@@ -3089,8 +3111,7 @@ struct sock *mptcp_sk_clone(const struct
+ security_inet_csk_clone(nsk, req);
+ bh_unlock_sock(nsk);
+
+- /* keep a single reference */
+- __sock_put(nsk);
++ /* note: the newly allocated socket refcount is 2 now */
+ return nsk;
+ }
+
+@@ -3146,8 +3167,6 @@ static struct sock *mptcp_accept(struct
+ goto out;
+ }
+
+- /* acquire the 2nd reference for the owning socket */
+- sock_hold(new_mptcp_sock);
+ newsk = new_mptcp_sock;
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
+ } else {
+@@ -3695,6 +3714,7 @@ static int mptcp_stream_accept(struct so
+ struct sock *newsk = newsock->sk;
+
+ set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags);
++ msk->in_accept_queue = 0;
+
+ lock_sock(newsk);
+
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -295,7 +295,8 @@ struct mptcp_sock {
+ u8 recvmsg_inq:1,
+ cork:1,
+ nodelay:1,
+- fastopening:1;
++ fastopening:1,
++ in_accept_queue:1;
+ int connect_flags;
+ struct work_struct work;
+ struct sk_buff *ooo_last_skb;
+@@ -666,6 +667,8 @@ void mptcp_subflow_set_active(struct mpt
+
+ bool mptcp_subflow_active(struct mptcp_subflow_context *subflow);
+
++void mptcp_subflow_drop_ctx(struct sock *ssk);
++
+ static inline void mptcp_subflow_tcp_fallback(struct sock *sk,
+ struct mptcp_subflow_context *ctx)
+ {
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -698,9 +698,10 @@ static bool subflow_hmac_valid(const str
+
+ static void mptcp_force_close(struct sock *sk)
+ {
+- /* the msk is not yet exposed to user-space */
++ /* the msk is not yet exposed to user-space, and refcount is 2 */
+ inet_sk_state_store(sk, TCP_CLOSE);
+ sk_common_release(sk);
++ sock_put(sk);
+ }
+
+ static void subflow_ulp_fallback(struct sock *sk,
+@@ -716,7 +717,7 @@ static void subflow_ulp_fallback(struct
+ mptcp_subflow_ops_undo_override(sk);
+ }
+
+-static void subflow_drop_ctx(struct sock *ssk)
++void mptcp_subflow_drop_ctx(struct sock *ssk)
+ {
+ struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
+
+@@ -822,7 +823,7 @@ create_child:
+
+ if (new_msk)
+ mptcp_copy_inaddrs(new_msk, child);
+- subflow_drop_ctx(child);
++ mptcp_subflow_drop_ctx(child);
+ goto out;
+ }
+
+@@ -913,7 +914,7 @@ out:
+ return child;
+
+ dispose_child:
+- subflow_drop_ctx(child);
++ mptcp_subflow_drop_ctx(child);
+ tcp_rsk(req)->drop_req = true;
+ inet_csk_prepare_for_destroy_sock(child);
+ tcp_done(child);
+@@ -1863,7 +1864,6 @@ void mptcp_subflow_queue_clean(struct so
+ struct sock *sk = (struct sock *)msk;
+ bool do_cancel_work;
+
+- sock_hold(sk);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ next = msk->dl_next;
+ msk->first = NULL;
+@@ -1951,6 +1951,13 @@ static void subflow_ulp_release(struct s
+ * when the subflow is still unaccepted
+ */
+ release = ctx->disposable || list_empty(&ctx->node);
++
++ /* inet_child_forget() does not call sk_state_change(),
++ * explicitly trigger the socket close machinery
++ */
++ if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
++ &mptcp_sk(sk)->flags))
++ mptcp_schedule_work(sk);
+ sock_put(sk);
+ }
+
--- /dev/null
+From e921050022f1f12d5029d1487a7dfc46cde15523 Mon Sep 17 00:00:00 2001
+From: Sergey Matyukevich <sergey.matyukevich@syntacore.com>
+Date: Sun, 26 Feb 2023 18:01:36 +0300
+Subject: Revert "riscv: mm: notify remote harts about mmu cache updates"
+
+From: Sergey Matyukevich <sergey.matyukevich@syntacore.com>
+
+commit e921050022f1f12d5029d1487a7dfc46cde15523 upstream.
+
+This reverts the remaining bits of commit 4bd1d80efb5a ("riscv: mm:
+notify remote harts about mmu cache updates").
+
+According to bug reports, the suggested approach to fix stale TLB
+entries is not sufficient. It needs to be replaced by a more robust
+solution.
+
+Fixes: 4bd1d80efb5a ("riscv: mm: notify remote harts about mmu cache updates")
+Reported-by: Zong Li <zong.li@sifive.com>
+Reported-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+Signed-off-by: Sergey Matyukevich <sergey.matyukevich@syntacore.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Guo Ren <guoren@kernel.org>
+Link: https://lore.kernel.org/r/20230226150137.1919750-2-geomatsi@gmail.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/include/asm/mmu.h | 2 --
+ arch/riscv/include/asm/tlbflush.h | 18 ------------------
+ arch/riscv/mm/context.c | 10 ----------
+ arch/riscv/mm/tlbflush.c | 28 +++++++++++++++++-----------
+ 4 files changed, 17 insertions(+), 41 deletions(-)
+
+--- a/arch/riscv/include/asm/mmu.h
++++ b/arch/riscv/include/asm/mmu.h
+@@ -19,8 +19,6 @@ typedef struct {
+ #ifdef CONFIG_SMP
+ /* A local icache flush is needed before user execution can resume. */
+ cpumask_t icache_stale_mask;
+- /* A local tlb flush is needed before user execution can resume. */
+- cpumask_t tlb_stale_mask;
+ #endif
+ } mm_context_t;
+
+--- a/arch/riscv/include/asm/tlbflush.h
++++ b/arch/riscv/include/asm/tlbflush.h
+@@ -22,24 +22,6 @@ static inline void local_flush_tlb_page(
+ {
+ ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
+ }
+-
+-static inline void local_flush_tlb_all_asid(unsigned long asid)
+-{
+- __asm__ __volatile__ ("sfence.vma x0, %0"
+- :
+- : "r" (asid)
+- : "memory");
+-}
+-
+-static inline void local_flush_tlb_page_asid(unsigned long addr,
+- unsigned long asid)
+-{
+- __asm__ __volatile__ ("sfence.vma %0, %1"
+- :
+- : "r" (addr), "r" (asid)
+- : "memory");
+-}
+-
+ #else /* CONFIG_MMU */
+ #define local_flush_tlb_all() do { } while (0)
+ #define local_flush_tlb_page(addr) do { } while (0)
+--- a/arch/riscv/mm/context.c
++++ b/arch/riscv/mm/context.c
+@@ -196,16 +196,6 @@ switch_mm_fast:
+
+ if (need_flush_tlb)
+ local_flush_tlb_all();
+-#ifdef CONFIG_SMP
+- else {
+- cpumask_t *mask = &mm->context.tlb_stale_mask;
+-
+- if (cpumask_test_cpu(cpu, mask)) {
+- cpumask_clear_cpu(cpu, mask);
+- local_flush_tlb_all_asid(cntx & asid_mask);
+- }
+- }
+-#endif
+ }
+
+ static void set_mm_noasid(struct mm_struct *mm)
+--- a/arch/riscv/mm/tlbflush.c
++++ b/arch/riscv/mm/tlbflush.c
+@@ -5,7 +5,23 @@
+ #include <linux/sched.h>
+ #include <asm/sbi.h>
+ #include <asm/mmu_context.h>
+-#include <asm/tlbflush.h>
++
++static inline void local_flush_tlb_all_asid(unsigned long asid)
++{
++ __asm__ __volatile__ ("sfence.vma x0, %0"
++ :
++ : "r" (asid)
++ : "memory");
++}
++
++static inline void local_flush_tlb_page_asid(unsigned long addr,
++ unsigned long asid)
++{
++ __asm__ __volatile__ ("sfence.vma %0, %1"
++ :
++ : "r" (addr), "r" (asid)
++ : "memory");
++}
+
+ void flush_tlb_all(void)
+ {
+@@ -15,7 +31,6 @@ void flush_tlb_all(void)
+ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
+ unsigned long size, unsigned long stride)
+ {
+- struct cpumask *pmask = &mm->context.tlb_stale_mask;
+ struct cpumask *cmask = mm_cpumask(mm);
+ unsigned int cpuid;
+ bool broadcast;
+@@ -29,15 +44,6 @@ static void __sbi_tlb_flush_range(struct
+ if (static_branch_unlikely(&use_asid_allocator)) {
+ unsigned long asid = atomic_long_read(&mm->context.id);
+
+- /*
+- * TLB will be immediately flushed on harts concurrently
+- * executing this MM context. TLB flush on other harts
+- * is deferred until this MM context migrates there.
+- */
+- cpumask_setall(pmask);
+- cpumask_clear_cpu(cpuid, pmask);
+- cpumask_andnot(pmask, pmask, cmask);
+-
+ if (broadcast) {
+ sbi_remote_sfence_vma_asid(cmask, start, size, asid);
+ } else if (size <= stride) {
--- /dev/null
+From 82dd33fde0268cc622d3d1ac64971f3f61634142 Mon Sep 17 00:00:00 2001
+From: Guo Ren <guoren@linux.alibaba.com>
+Date: Sun, 26 Feb 2023 18:01:37 +0300
+Subject: riscv: asid: Fixup stale TLB entry cause application crash
+
+From: Guo Ren <guoren@linux.alibaba.com>
+
+commit 82dd33fde0268cc622d3d1ac64971f3f61634142 upstream.
+
+After use_asid_allocator is enabled, the userspace application can
+crash due to stale TLB entries, because using cpumask_clear_cpu alone
+without local_flush_tlb_all cannot guarantee that a CPU's TLB entries
+are fresh. set_mm_asid can then let the userspace application read a
+stale value through a stale TLB entry, while set_mm_noasid is okay.
+
+Here is the symptom of the bug:
+unhandled signal 11 code 0x1 (coredump)
+ 0x0000003fd6d22524 <+4>: auipc s0,0x70
+ 0x0000003fd6d22528 <+8>: ld s0,-148(s0) # 0x3fd6d92490
+=> 0x0000003fd6d2252c <+12>: ld a5,0(s0)
+(gdb) i r s0
+s0 0x8082ed1cc3198b21 0x8082ed1cc3198b21
+(gdb) x /2x 0x3fd6d92490
+0x3fd6d92490: 0xd80ac8a8 0x0000003f
+The core dump file shows that register s0 is wrong while the value in
+memory is correct, because 'ld s0, -148(s0)' used a stale TLB mapping
+entry and got a wrong result from an incorrect physical address.
+
+When the task ran on CPU0 and loaded/speculatively loaded the value at
+address 0x3fd6d92490, the first version of the mapping entry was PTWed
+into CPU0's TLB.
+When the task switched from CPU0 to CPU1 (no local_tlb_flush_all here
+because of the asid), it happened to write a value to the address
+(0x3fd6d92490). This caused do_page_fault -> wp_page_copy ->
+ptep_clear_flush -> ptep_get_and_clear & flush_tlb_page.
+flush_tlb_page used mm_cpumask(mm) to determine which CPUs need a TLB
+flush, but CPU0 had cleared its bit from mm_cpumask in the previous
+switch_mm. So we only flushed the CPU1 TLB and installed the second
+version of the PTE mapping. When the task switched from CPU1 back to
+CPU0, CPU0 still used the stale TLB mapping entry which contained the
+wrong target physical address. It raised a bug when the task happened
+to read that value.
+
+ CPU0                             CPU1
+ - switch 'task' in
+ - read addr (Fill stale mapping
+   entry into TLB)
+ - switch 'task' out (no tlb_flush)
+                                  - switch 'task' in (no tlb_flush)
+                                  - write addr cause pagefault
+                                    do_page_fault() (change to
+                                    new addr mapping)
+                                    wp_page_copy()
+                                     ptep_clear_flush()
+                                      ptep_get_and_clear()
+                                      & flush_tlb_page()
+                                    write new value into addr
+                                  - switch 'task' out (no tlb_flush)
+ - switch 'task' in (no tlb_flush)
+ - read addr again (Use stale
+   mapping entry in TLB)
+   get wrong value from old physical
+   addr, BUG!
+
+The solution is to keep all CPUs' footmarks in mm_cpumask(mm) across
+switch_mm, which guarantees that all stale TLB entries are invalidated
+during a TLB flush.
+
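+A sketch of why the footmarks matter (simplified from
+__sbi_tlb_flush_range() in arch/riscv/mm/tlbflush.c): remote fences
+only reach harts still present in the mask.
+
+  struct cpumask *cmask = mm_cpumask(mm);
+  ...
+  sbi_remote_sfence_vma_asid(cmask, start, size, asid);
+
+With ASIDs, switch-in does not flush the TLB, so a hart whose bit was
+cleared on switch-out silently keeps its stale entries; keeping the bit
+set lets the remote fence reach it.
+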
+Fixes: 65d4b9c53017 ("RISC-V: Implement ASID allocator")
+Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
+Signed-off-by: Guo Ren <guoren@kernel.org>
+Tested-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+Tested-by: Zong Li <zong.li@sifive.com>
+Tested-by: Sergey Matyukevich <sergey.matyukevich@syntacore.com>
+Cc: Anup Patel <apatel@ventanamicro.com>
+Cc: Palmer Dabbelt <palmer@rivosinc.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
+Link: https://lore.kernel.org/r/20230226150137.1919750-3-geomatsi@gmail.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/mm/context.c | 30 ++++++++++++++++++++----------
+ 1 file changed, 20 insertions(+), 10 deletions(-)
+
+--- a/arch/riscv/mm/context.c
++++ b/arch/riscv/mm/context.c
+@@ -205,12 +205,24 @@ static void set_mm_noasid(struct mm_stru
+ local_flush_tlb_all();
+ }
+
+-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
++static inline void set_mm(struct mm_struct *prev,
++ struct mm_struct *next, unsigned int cpu)
+ {
+- if (static_branch_unlikely(&use_asid_allocator))
+- set_mm_asid(mm, cpu);
+- else
+- set_mm_noasid(mm);
++ /*
++ * The mm_cpumask indicates which harts' TLBs contain the virtual
++ * address mapping of the mm. Compared to noasid, using asid
++ * can't guarantee that stale TLB entries are invalidated because
++ * the asid mechanism wouldn't flush TLB for every switch_mm for
++ * performance. So when using asid, keep all CPUs footmarks in
++ * cpumask() until mm reset.
++ */
++ cpumask_set_cpu(cpu, mm_cpumask(next));
++ if (static_branch_unlikely(&use_asid_allocator)) {
++ set_mm_asid(next, cpu);
++ } else {
++ cpumask_clear_cpu(cpu, mm_cpumask(prev));
++ set_mm_noasid(next);
++ }
+ }
+
+ static int __init asids_init(void)
+@@ -264,7 +276,8 @@ static int __init asids_init(void)
+ }
+ early_initcall(asids_init);
+ #else
+-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
++static inline void set_mm(struct mm_struct *prev,
++ struct mm_struct *next, unsigned int cpu)
+ {
+ /* Nothing to do here when there is no MMU */
+ }
+@@ -317,10 +330,7 @@ void switch_mm(struct mm_struct *prev, s
+ */
+ cpu = smp_processor_id();
+
+- cpumask_clear_cpu(cpu, mm_cpumask(prev));
+- cpumask_set_cpu(cpu, mm_cpumask(next));
+-
+- set_mm(next, cpu);
++ set_mm(prev, next, cpu);
+
+ flush_icache_deferred(next, cpu);
+ }
interconnect-exynos-fix-registration-race.patch
md-select-block_legacy_autoload.patch
cifs-generate-signkey-for-the-channel-that-s-reconnecting.patch
+tracing-make-splice_read-available-again.patch
+tracing-do-not-let-histogram-values-have-some-modifiers.patch
+tracing-check-field-value-in-hist_field_name.patch
+tracing-make-tracepoint-lockdep-check-actually-test-something.patch
+cifs-fix-smb2_set_path_size.patch
+cifs-set-dfs-root-session-in-cifs_get_smb_ses.patch
+cifs-fix-use-after-free-bug-in-refresh_cache_worker.patch
+cifs-return-dfs-root-session-id-in-debugdata.patch
+cifs-use-dfs-root-session-instead-of-tcon-ses.patch
+kvm-svm-fix-a-benign-off-by-one-bug-in-avic-physical-table-mask.patch
+kvm-svm-modify-avic-gatag-to-support-max-number-of-512-vcpus.patch
+kvm-nvmx-add-missing-consistency-checks-for-cr0-and-cr4.patch
+alsa-hda-intel-dsp-config-add-mtl-pci-id.patch
+alsa-hda-realtek-fix-the-speaker-output-on-samsung-galaxy-book2-pro.patch
+alsa-hda-realtek-fix-speaker-mute-micmute-leds-not-work-on-a-hp-platform.patch
+revert-riscv-mm-notify-remote-harts-about-mmu-cache-updates.patch
+riscv-asid-fixup-stale-tlb-entry-cause-application-crash.patch
+drm-edid-fix-info-leak-when-failing-to-get-panel-id.patch
+drm-shmem-helper-remove-another-errant-put-in-error-path.patch
+drm-sun4i-fix-missing-component-unbind-on-bind-errors.patch
+drm-i915-active-fix-misuse-of-non-idle-barriers-as-fence-trackers.patch
+drm-i915-dg2-add-hdmi-pixel-clock-frequencies-267.30-and-319.89-mhz.patch
+drm-amdgpu-don-t-resume-iommu-after-incomplete-init.patch
+drm-amd-pm-fix-sienna-cichlid-incorrect-od-volage-after-resume.patch
+drm-amdgpu-vcn-disable-indirect-sram-on-vangogh-broken-bioses.patch
+drm-amd-pm-bump-smu-13.0.4-driver_if-header-version.patch
+drm-amd-display-do-not-set-drr-on-pipe-commit.patch
+drm-amd-display-disconnect-mpcc-only-on-otg-change.patch
+drm-amd-display-write-to-correct-dirty_rect.patch
+mptcp-fix-possible-deadlock-in-subflow_error_report.patch
+mptcp-refactor-passive-socket-initialization.patch
+mptcp-use-the-workqueue-to-destroy-unaccepted-sockets.patch
+mptcp-fix-uaf-in-listener-shutdown.patch
+mptcp-add-ro_after_init-for-tcp-v6-_prot_override.patch
+mptcp-avoid-setting-tcp_close-state-twice.patch
+mptcp-fix-lockdep-false-positive-in-mptcp_pm_nl_create_listen_socket.patch
--- /dev/null
+From 9f116f76fa8c04c81aef33ad870dbf9a158e5b70 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Wed, 1 Mar 2023 20:00:53 -0500
+Subject: tracing: Check field value in hist_field_name()
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit 9f116f76fa8c04c81aef33ad870dbf9a158e5b70 upstream.
+
+The function hist_field_name() cannot handle being passed a NULL field
+parameter. It should never be NULL, but due to a previous bug, NULL was
+passed to the function and the kernel crashed due to a NULL dereference.
+Mark Rutland reported this to me on IRC.
+
+The bug was fixed, but to prevent future bugs from crashing the kernel,
+check the field and add a WARN_ON() if it is NULL.
+
+Link: https://lkml.kernel.org/r/20230302020810.762384440@goodmis.org
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Reported-by: Mark Rutland <mark.rutland@arm.com>
+Fixes: c6afad49d127f ("tracing: Add hist trigger 'sym' and 'sym-offset' modifiers")
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_events_hist.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -1334,6 +1334,9 @@ static const char *hist_field_name(struc
+ {
+ const char *field_name = "";
+
++ if (WARN_ON_ONCE(!field))
++ return field_name;
++
+ if (level > 1)
+ return field_name;
+
--- /dev/null
+From e0213434fe3e4a0d118923dc98d31e7ff1cd9e45 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Wed, 1 Mar 2023 20:00:52 -0500
+Subject: tracing: Do not let histogram values have some modifiers
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit e0213434fe3e4a0d118923dc98d31e7ff1cd9e45 upstream.
+
+Histogram values cannot be strings, stacktraces, graphs, symbols, or
+syscalls, nor can they be grouped in buckets or log2. Return an error
+if a value is set to one of these.
+
+Note, the histogram code was not prepared to handle these modifiers on
+values, and using them caused a bug.
+
+Mark Rutland reported:
+
+ # echo 'p:copy_to_user __arch_copy_to_user n=$arg2' >> /sys/kernel/tracing/kprobe_events
+ # echo 'hist:keys=n:vals=hitcount.buckets=8:sort=hitcount' > /sys/kernel/tracing/events/kprobes/copy_to_user/trigger
+ # cat /sys/kernel/tracing/events/kprobes/copy_to_user/hist
+[ 143.694628] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000000
+[ 143.695190] Mem abort info:
+[ 143.695362] ESR = 0x0000000096000004
+[ 143.695604] EC = 0x25: DABT (current EL), IL = 32 bits
+[ 143.695889] SET = 0, FnV = 0
+[ 143.696077] EA = 0, S1PTW = 0
+[ 143.696302] FSC = 0x04: level 0 translation fault
+[ 143.702381] Data abort info:
+[ 143.702614] ISV = 0, ISS = 0x00000004
+[ 143.702832] CM = 0, WnR = 0
+[ 143.703087] user pgtable: 4k pages, 48-bit VAs, pgdp=00000000448f9000
+[ 143.703407] [0000000000000000] pgd=0000000000000000, p4d=0000000000000000
+[ 143.704137] Internal error: Oops: 0000000096000004 [#1] PREEMPT SMP
+[ 143.704714] Modules linked in:
+[ 143.705273] CPU: 0 PID: 133 Comm: cat Not tainted 6.2.0-00003-g6fc512c10a7c #3
+[ 143.706138] Hardware name: linux,dummy-virt (DT)
+[ 143.706723] pstate: 80000005 (Nzcv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+[ 143.707120] pc : hist_field_name.part.0+0x14/0x140
+[ 143.707504] lr : hist_field_name.part.0+0x104/0x140
+[ 143.707774] sp : ffff800008333a30
+[ 143.707952] x29: ffff800008333a30 x28: 0000000000000001 x27: 0000000000400cc0
+[ 143.708429] x26: ffffd7a653b20260 x25: 0000000000000000 x24: ffff10d303ee5800
+[ 143.708776] x23: ffffd7a6539b27b0 x22: ffff10d303fb8c00 x21: 0000000000000001
+[ 143.709127] x20: ffff10d303ec2000 x19: 0000000000000000 x18: 0000000000000000
+[ 143.709478] x17: 0000000000000000 x16: 0000000000000000 x15: 0000000000000000
+[ 143.709824] x14: 0000000000000000 x13: 203a6f666e692072 x12: 6567676972742023
+[ 143.710179] x11: 0a230a6d6172676f x10: 000000000000002c x9 : ffffd7a6521e018c
+[ 143.710584] x8 : 000000000000002c x7 : 7f7f7f7f7f7f7f7f x6 : 000000000000002c
+[ 143.710915] x5 : ffff10d303b0103e x4 : ffffd7a653b20261 x3 : 000000000000003d
+[ 143.711239] x2 : 0000000000020001 x1 : 0000000000000001 x0 : 0000000000000000
+[ 143.711746] Call trace:
+[ 143.712115] hist_field_name.part.0+0x14/0x140
+[ 143.712642] hist_field_name.part.0+0x104/0x140
+[ 143.712925] hist_field_print+0x28/0x140
+[ 143.713125] event_hist_trigger_print+0x174/0x4d0
+[ 143.713348] hist_show+0xf8/0x980
+[ 143.713521] seq_read_iter+0x1bc/0x4b0
+[ 143.713711] seq_read+0x8c/0xc4
+[ 143.713876] vfs_read+0xc8/0x2a4
+[ 143.714043] ksys_read+0x70/0xfc
+[ 143.714218] __arm64_sys_read+0x24/0x30
+[ 143.714400] invoke_syscall+0x50/0x120
+[ 143.714587] el0_svc_common.constprop.0+0x4c/0x100
+[ 143.714807] do_el0_svc+0x44/0xd0
+[ 143.714970] el0_svc+0x2c/0x84
+[ 143.715134] el0t_64_sync_handler+0xbc/0x140
+[ 143.715334] el0t_64_sync+0x190/0x194
+[ 143.715742] Code: a9bd7bfd 910003fd a90153f3 aa0003f3 (f9400000)
+[ 143.716510] ---[ end trace 0000000000000000 ]---
+Segmentation fault
+
+Link: https://lkml.kernel.org/r/20230302020810.559462599@goodmis.org
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Fixes: c6afad49d127f ("tracing: Add hist trigger 'sym' and 'sym-offset' modifiers")
+Reported-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_events_hist.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -4199,6 +4199,15 @@ static int __create_val_field(struct his
+ goto out;
+ }
+
++ /* Some types cannot be a value */
++ if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
++ HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 |
++ HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET |
++ HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE)) {
++ hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str));
++ ret = -EINVAL;
++ }
++
+ hist_data->fields[val_idx] = hist_field;
+
+ ++hist_data->n_vals;
--- /dev/null
+From e400be674a1a40e9dcb2e95f84d6c1fd2d88f31d Mon Sep 17 00:00:00 2001
+From: Sung-hun Kim <sfoon.kim@samsung.com>
+Date: Tue, 14 Mar 2023 10:37:07 +0900
+Subject: tracing: Make splice_read available again
+
+From: Sung-hun Kim <sfoon.kim@samsung.com>
+
+commit e400be674a1a40e9dcb2e95f84d6c1fd2d88f31d upstream.
+
+Since commit 36e2c7421f02 ("fs: don't allow splice read/write
+without explicit ops") was applied to the kernel, splice() and
+sendfile() calls on the trace file
+(/sys/kernel/debug/tracing/trace) return EINVAL.
+
+This patch restores those system calls by wiring up .read_iter and
+.splice_read in the trace file's file_operations. Only the read side
+is enabled; splicing into the trace file remains unsupported.
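+
+As a usage sketch (userspace, assuming the fix is applied, that the
+traditional debugfs path below is mounted, and with error handling
+trimmed), trace data can now be spliced into a pipe without a copy
+through a userspace buffer:
+
+	#define _GNU_SOURCE
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		int pipefd[2];
+		ssize_t n;
+		int fd = open("/sys/kernel/debug/tracing/trace", O_RDONLY);
+
+		if (fd < 0 || pipe(pipefd) < 0) {
+			perror("setup");
+			return 1;
+		}
+
+		/* Before the fix this returned -1 with errno == EINVAL;
+		 * with .splice_read wired up it moves data in-kernel. */
+		n = splice(fd, NULL, pipefd[1], NULL, 4096, 0);
+		if (n < 0)
+			perror("splice");
+		else
+			fprintf(stderr, "spliced %zd bytes\n", n);
+
+		close(fd);
+		close(pipefd[0]);
+		close(pipefd[1]);
+		return 0;
+	}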
+
+Link: https://lore.kernel.org/linux-trace-kernel/20230314013707.28814-1-sfoon.kim@samsung.com
+
+Cc: stable@vger.kernel.org
+Fixes: 36e2c7421f02 ("fs: don't allow splice read/write without explicit ops")
+Signed-off-by: Sung-hun Kim <sfoon.kim@samsung.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5119,6 +5119,8 @@ loff_t tracing_lseek(struct file *file,
+ static const struct file_operations tracing_fops = {
+ .open = tracing_open,
+ .read = seq_read,
++ .read_iter = seq_read_iter,
++ .splice_read = generic_file_splice_read,
+ .write = tracing_write_stub,
+ .llseek = tracing_lseek,
+ .release = tracing_release,
--- /dev/null
+From c2679254b9c9980d9045f0f722cf093a2b1f7590 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Fri, 10 Mar 2023 17:28:56 -0500
+Subject: tracing: Make tracepoint lockdep check actually test something
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit c2679254b9c9980d9045f0f722cf093a2b1f7590 upstream.
+
+A while ago, the trace events had the following:
+
+ rcu_read_lock_sched_notrace();
+ rcu_dereference_sched(...);
+ rcu_read_unlock_sched_notrace();
+
+If the tracepoint was enabled, it could trigger RCU issues when called
+in the wrong place, and that warning was only triggered if lockdep was
+enabled. If the tracepoint was never enabled while lockdep was on, the
+bug would not be caught. To handle this, the above sequence was executed
+whenever lockdep was enabled, regardless of whether the tracepoint was
+enabled or not (although the always-executed code did no real work, it
+would still trigger the warning).
+
+But a lot has changed since that lockdep code was added. For one, that
+sequence no longer triggers any warning. For another, an enabled
+tracepoint no longer executes that sequence at all.
+
+The main check we care about today is whether RCU is "watching" or not.
+So if lockdep is enabled, always check rcu_is_watching() and warn if it
+returns false (tracepoints require RCU to be watching).
+
+Note, the old sequence did add a bit of overhead when lockdep was
+enabled, and with the latest kernel updates it would slow the system
+down enough to trigger kernel "stalled" warnings.
+
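+As a minimal userspace analogue of the new check (all names here are
+stand-ins, not kernel APIs): the assertion runs whenever the debug
+configuration is built in, even when the event itself is disabled, and
+it fires at most once:
+
+	#include <stdio.h>
+	#include <stdbool.h>
+
+	#define LOCKDEP_BUILT_IN 1	/* stand-in for IS_ENABLED(CONFIG_LOCKDEP) */
+
+	static bool rcu_watching(void)	/* stand-in for rcu_is_watching() */
+	{
+		return false;		/* simulate tracing from an idle context */
+	}
+
+	/* Stand-in for WARN_ON_ONCE(): report only the first violation */
+	#define warn_on_once(cond) do {					\
+		static int warned;					\
+		if ((cond) && !warned) {				\
+			warned = 1;					\
+			fprintf(stderr, "WARN: %s\n", #cond);		\
+		}							\
+	} while (0)
+
+	static void trace_example(int enabled)
+	{
+		/* Runs even when the event is off, so a misplaced call
+		 * site is caught without ever enabling tracing. */
+		if (LOCKDEP_BUILT_IN)
+			warn_on_once(!rcu_watching());
+
+		if (enabled)
+			puts("tracepoint fired");
+	}
+
+	int main(void)
+	{
+		trace_example(0);	/* event off: still warns once */
+		trace_example(0);	/* no second warning */
+		return 0;
+	}
+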
+Link: http://lore.kernel.org/lkml/20140806181801.GA4605@redhat.com
+Link: http://lore.kernel.org/lkml/20140807175204.C257CAC5@viggo.jf.intel.com
+Link: https://lore.kernel.org/lkml/20230307184645.521db5c9@gandalf.local.home/
+Link: https://lore.kernel.org/linux-trace-kernel/20230310172856.77406446@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: "Paul E. McKenney" <paulmck@kernel.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Joel Fernandes <joel@joelfernandes.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Paul E. McKenney <paulmck@kernel.org>
+Fixes: e6753f23d961 ("tracepoint: Make rcuidle tracepoint callers use SRCU")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/tracepoint.h | 15 ++++++---------
+ 1 file changed, 6 insertions(+), 9 deletions(-)
+
+--- a/include/linux/tracepoint.h
++++ b/include/linux/tracepoint.h
+@@ -231,12 +231,11 @@ static inline struct tracepoint *tracepo
+ * not add unwanted padding between the beginning of the section and the
+ * structure. Force alignment to the same alignment as the section start.
+ *
+- * When lockdep is enabled, we make sure to always do the RCU portions of
+- * the tracepoint code, regardless of whether tracing is on. However,
+- * don't check if the condition is false, due to interaction with idle
+- * instrumentation. This lets us find RCU issues triggered with tracepoints
+- * even when this tracepoint is off. This code has no purpose other than
+- * poking RCU a bit.
++ * When lockdep is enabled, we make sure to always test if RCU is
++ * "watching" regardless if the tracepoint is enabled or not. Tracepoints
++ * require RCU to be active, and it should always warn at the tracepoint
++ * site if it is not watching, as it will need to be active when the
++ * tracepoint is enabled.
+ */
+ #define __DECLARE_TRACE(name, proto, args, cond, data_proto) \
+ extern int __traceiter_##name(data_proto); \
+@@ -249,9 +248,7 @@ static inline struct tracepoint *tracepo
+ TP_ARGS(args), \
+ TP_CONDITION(cond), 0); \
+ if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \
+- rcu_read_lock_sched_notrace(); \
+- rcu_dereference_sched(__tracepoint_##name.funcs);\
+- rcu_read_unlock_sched_notrace(); \
++ WARN_ON_ONCE(!rcu_is_watching()); \
+ } \
+ } \
+ __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \