--- /dev/null
+From 1ee5aa765c22a0577ec552d460bf2035300b4b51 Mon Sep 17 00:00:00 2001
+From: Hoku Ishibe <me@hokuishi.be>
+Date: Sun, 23 Feb 2025 21:05:17 -0500
+Subject: ALSA: hda: intel: Add Dell ALC3271 to power_save denylist
+
+From: Hoku Ishibe <me@hokuishi.be>
+
+commit 1ee5aa765c22a0577ec552d460bf2035300b4b51 upstream.
+
+Dell XPS 13 7390 with the Realtek ALC3271 codec experiences
+persistent humming noise when the power_save mode is enabled.
+This issue occurs when the codec enters power saving mode,
+leading to unwanted noise from the speakers.
+
+This patch adds the affected model (PCI ID 0x1028:0x0962) to the
+power_save denylist to ensure power_save is disabled by default,
+preventing power-off related noise issues.
+
+Steps to Reproduce
+1. Boot the system with `snd_hda_intel` loaded.
+2. Verify that `power_save` mode is enabled:
+```sh
+cat /sys/module/snd_hda_intel/parameters/power_save
+```
+output: 10 (default power save timeout)
+3. Wait for the power save timeout
+4. Observe a persistent humming noise from the speakers
+5. Disable `power_save` manually:
+```sh
+echo 0 | sudo tee /sys/module/snd_hda_intel/parameters/power_save
+```
+6. Confirm that the noise disappears immediately.
+
+This issue has been observed on my system, and this patch
+successfully eliminates the unwanted noise. If other users
+experience similar issues, additional reports would be helpful.
+
+Signed-off-by: Hoku Ishibe <me@hokuishi.be>
+Cc: <stable@vger.kernel.org>
+Link: https://patch.msgid.link/20250224020517.51035-1-me@hokuishi.be
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/hda_intel.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2232,6 +2232,8 @@ static const struct snd_pci_quirk power_
+ SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
+ /* KONTRON SinglePC may cause a stall at runtime resume */
+ SND_PCI_QUIRK(0x1734, 0x1232, "KONTRON SinglePC", 0),
++ /* Dell ALC3271 */
++ SND_PCI_QUIRK(0x1028, 0x0962, "Dell ALC3271", 0),
+ {}
+ };
+
--- /dev/null
+From f603b159231b0c58f0c27ab39348534063d38223 Mon Sep 17 00:00:00 2001
+From: Kailang Yang <kailang@realtek.com>
+Date: Mon, 3 Mar 2025 14:56:10 +0800
+Subject: ALSA: hda/realtek - add supported Mic Mute LED for Lenovo platform
+
+From: Kailang Yang <kailang@realtek.com>
+
+commit f603b159231b0c58f0c27ab39348534063d38223 upstream.
+
+Support Mic Mute LED for ThinkCentre M series.
+
+Signed-off-by: Kailang Yang <kailang@realtek.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/c211a2702f1f411e86bd7420d7eebc03@realtek.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/patch_realtek.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5055,6 +5055,16 @@ static void alc269_fixup_hp_line1_mic1_l
+ }
+ }
+
++static void alc233_fixup_lenovo_low_en_micmute_led(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ struct alc_spec *spec = codec->spec;
++
++ if (action == HDA_FIXUP_ACT_PRE_PROBE)
++ spec->micmute_led_polarity = 1;
++ alc233_fixup_lenovo_line2_mic_hotkey(codec, fix, action);
++}
++
+ static void alc_hp_mute_disable(struct hda_codec *codec, unsigned int delay)
+ {
+ if (delay <= 0)
+@@ -7608,6 +7618,7 @@ enum {
+ ALC275_FIXUP_DELL_XPS,
+ ALC293_FIXUP_LENOVO_SPK_NOISE,
+ ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
++ ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED,
+ ALC255_FIXUP_DELL_SPK_NOISE,
+ ALC225_FIXUP_DISABLE_MIC_VREF,
+ ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+@@ -8596,6 +8607,10 @@ static const struct hda_fixup alc269_fix
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc233_fixup_lenovo_line2_mic_hotkey,
+ },
++ [ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc233_fixup_lenovo_low_en_micmute_led,
++ },
+ [ALC233_FIXUP_INTEL_NUC8_DMIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_inv_dmic,
+@@ -10884,6 +10899,9 @@ static const struct hda_quirk alc269_fix
+ SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
+ SND_PCI_QUIRK(0x17aa, 0x334b, "Lenovo ThinkCentre M70 Gen5", ALC283_FIXUP_HEADSET_MIC),
++ SND_PCI_QUIRK(0x17aa, 0x3384, "ThinkCentre M90a PRO", ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED),
++ SND_PCI_QUIRK(0x17aa, 0x3386, "ThinkCentre M90a Gen6", ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED),
++ SND_PCI_QUIRK(0x17aa, 0x3387, "ThinkCentre M70a Gen6", ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED),
+ SND_PCI_QUIRK(0x17aa, 0x3801, "Lenovo Yoga9 14IAP7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
+ HDA_CODEC_QUIRK(0x17aa, 0x3802, "DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga Pro 9 14IRP8", ALC287_FIXUP_TAS2781_I2C),
--- /dev/null
+From ca0dedaff92307591f66c9206933fbdfe87add10 Mon Sep 17 00:00:00 2001
+From: Kailang Yang <kailang@realtek.com>
+Date: Wed, 5 Mar 2025 13:54:34 +0800
+Subject: ALSA: hda/realtek: update ALC222 depop optimize
+
+From: Kailang Yang <kailang@realtek.com>
+
+commit ca0dedaff92307591f66c9206933fbdfe87add10 upstream.
+
+Add ALC222 its own depop functions for alc_init and alc_shutup.
+
+[note: this fixes pop noise issues on the models with two headphone
+ jacks -- tiwai ]
+
+Signed-off-by: Kailang Yang <kailang@realtek.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/patch_realtek.c | 76 ++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 76 insertions(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3845,6 +3845,79 @@ static void alc225_shutup(struct hda_cod
+ }
+ }
+
++static void alc222_init(struct hda_codec *codec)
++{
++ struct alc_spec *spec = codec->spec;
++ hda_nid_t hp_pin = alc_get_hp_pin(spec);
++ bool hp1_pin_sense, hp2_pin_sense;
++
++ if (!hp_pin)
++ return;
++
++ msleep(30);
++
++ hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin);
++ hp2_pin_sense = snd_hda_jack_detect(codec, 0x14);
++
++ if (hp1_pin_sense || hp2_pin_sense) {
++ msleep(2);
++
++ if (hp1_pin_sense)
++ snd_hda_codec_write(codec, hp_pin, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
++ if (hp2_pin_sense)
++ snd_hda_codec_write(codec, 0x14, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
++ msleep(75);
++
++ if (hp1_pin_sense)
++ snd_hda_codec_write(codec, hp_pin, 0,
++ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
++ if (hp2_pin_sense)
++ snd_hda_codec_write(codec, 0x14, 0,
++ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
++
++ msleep(75);
++ }
++}
++
++static void alc222_shutup(struct hda_codec *codec)
++{
++ struct alc_spec *spec = codec->spec;
++ hda_nid_t hp_pin = alc_get_hp_pin(spec);
++ bool hp1_pin_sense, hp2_pin_sense;
++
++ if (!hp_pin)
++ hp_pin = 0x21;
++
++ hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin);
++ hp2_pin_sense = snd_hda_jack_detect(codec, 0x14);
++
++ if (hp1_pin_sense || hp2_pin_sense) {
++ msleep(2);
++
++ if (hp1_pin_sense)
++ snd_hda_codec_write(codec, hp_pin, 0,
++ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
++ if (hp2_pin_sense)
++ snd_hda_codec_write(codec, 0x14, 0,
++ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
++
++ msleep(75);
++
++ if (hp1_pin_sense)
++ snd_hda_codec_write(codec, hp_pin, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
++ if (hp2_pin_sense)
++ snd_hda_codec_write(codec, 0x14, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
++
++ msleep(75);
++ }
++ alc_auto_setup_eapd(codec, false);
++ alc_shutup_pins(codec);
++}
++
+ static void alc_default_init(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+@@ -11888,8 +11961,11 @@ static int patch_alc269(struct hda_codec
+ spec->codec_variant = ALC269_TYPE_ALC300;
+ spec->gen.mixer_nid = 0; /* no loopback on ALC300 */
+ break;
++ case 0x10ec0222:
+ case 0x10ec0623:
+ spec->codec_variant = ALC269_TYPE_ALC623;
++ spec->shutup = alc222_shutup;
++ spec->init_hook = alc222_init;
+ break;
+ case 0x10ec0700:
+ case 0x10ec0701:
--- /dev/null
+From c9ce148ea753bef66686460fa3cec6641cdfbb9f Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Sat, 1 Mar 2025 12:45:29 +0100
+Subject: ALSA: seq: Avoid module auto-load handling at event delivery
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit c9ce148ea753bef66686460fa3cec6641cdfbb9f upstream.
+
+snd_seq_client_use_ptr() is supposed to return the snd_seq_client
+object for the given client ID, and it tries to handle the module
+auto-loading when no matching object is found. Although the module
+handling is performed only conditionally with "!in_interrupt()", this
+condition may be fragile, e.g. when the code is called from the ALSA
+timer callback where the spinlock is temporarily disabled while the
+irq is disabled. Then this doesn't fit well and spews the error about
+sleep from invalid context, as complained recently by syzbot.
+
+Also, in general, handling the module-loading at each time if no
+matching object is found is really an overkill. It can be still
+useful when performed at the top-level ioctl or proc reads, but it
+shouldn't be done at event delivery at all.
+
+For addressing the issues above, this patch disables the module
+handling in snd_seq_client_use_ptr() in normal cases like event
+deliveries, but allow only in limited and safe situations.
+A new function client_load_and_use_ptr() is used for the cases where
+the module loading can be done safely, instead.
+
+Reported-by: syzbot+4cb9fad083898f54c517@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/67c272e5.050a0220.dc10f.0159.GAE@google.com
+Cc: <stable@vger.kernel.org>
+Link: https://patch.msgid.link/20250301114530.8975-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/core/seq/seq_clientmgr.c | 46 ++++++++++++++++++++++++++---------------
+ 1 file changed, 30 insertions(+), 16 deletions(-)
+
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -106,7 +106,7 @@ static struct snd_seq_client *clientptr(
+ return clienttab[clientid];
+ }
+
+-struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
++static struct snd_seq_client *client_use_ptr(int clientid, bool load_module)
+ {
+ unsigned long flags;
+ struct snd_seq_client *client;
+@@ -126,7 +126,7 @@ struct snd_seq_client *snd_seq_client_us
+ }
+ spin_unlock_irqrestore(&clients_lock, flags);
+ #ifdef CONFIG_MODULES
+- if (!in_interrupt()) {
++ if (load_module) {
+ static DECLARE_BITMAP(client_requested, SNDRV_SEQ_GLOBAL_CLIENTS);
+ static DECLARE_BITMAP(card_requested, SNDRV_CARDS);
+
+@@ -168,6 +168,20 @@ struct snd_seq_client *snd_seq_client_us
+ return client;
+ }
+
++/* get snd_seq_client object for the given id quickly */
++struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
++{
++ return client_use_ptr(clientid, false);
++}
++
++/* get snd_seq_client object for the given id;
++ * if not found, retry after loading the modules
++ */
++static struct snd_seq_client *client_load_and_use_ptr(int clientid)
++{
++ return client_use_ptr(clientid, IS_ENABLED(CONFIG_MODULES));
++}
++
+ /* Take refcount and perform ioctl_mutex lock on the given client;
+ * used only for OSS sequencer
+ * Unlock via snd_seq_client_ioctl_unlock() below
+@@ -176,7 +190,7 @@ bool snd_seq_client_ioctl_lock(int clien
+ {
+ struct snd_seq_client *client;
+
+- client = snd_seq_client_use_ptr(clientid);
++ client = client_load_and_use_ptr(clientid);
+ if (!client)
+ return false;
+ mutex_lock(&client->ioctl_mutex);
+@@ -1195,7 +1209,7 @@ static int snd_seq_ioctl_running_mode(st
+ int err = 0;
+
+ /* requested client number */
+- cptr = snd_seq_client_use_ptr(info->client);
++ cptr = client_load_and_use_ptr(info->client);
+ if (cptr == NULL)
+ return -ENOENT; /* don't change !!! */
+
+@@ -1257,7 +1271,7 @@ static int snd_seq_ioctl_get_client_info
+ struct snd_seq_client *cptr;
+
+ /* requested client number */
+- cptr = snd_seq_client_use_ptr(client_info->client);
++ cptr = client_load_and_use_ptr(client_info->client);
+ if (cptr == NULL)
+ return -ENOENT; /* don't change !!! */
+
+@@ -1392,7 +1406,7 @@ static int snd_seq_ioctl_get_port_info(s
+ struct snd_seq_client *cptr;
+ struct snd_seq_client_port *port;
+
+- cptr = snd_seq_client_use_ptr(info->addr.client);
++ cptr = client_load_and_use_ptr(info->addr.client);
+ if (cptr == NULL)
+ return -ENXIO;
+
+@@ -1496,10 +1510,10 @@ static int snd_seq_ioctl_subscribe_port(
+ struct snd_seq_client *receiver = NULL, *sender = NULL;
+ struct snd_seq_client_port *sport = NULL, *dport = NULL;
+
+- receiver = snd_seq_client_use_ptr(subs->dest.client);
++ receiver = client_load_and_use_ptr(subs->dest.client);
+ if (!receiver)
+ goto __end;
+- sender = snd_seq_client_use_ptr(subs->sender.client);
++ sender = client_load_and_use_ptr(subs->sender.client);
+ if (!sender)
+ goto __end;
+ sport = snd_seq_port_use_ptr(sender, subs->sender.port);
+@@ -1864,7 +1878,7 @@ static int snd_seq_ioctl_get_client_pool
+ struct snd_seq_client_pool *info = arg;
+ struct snd_seq_client *cptr;
+
+- cptr = snd_seq_client_use_ptr(info->client);
++ cptr = client_load_and_use_ptr(info->client);
+ if (cptr == NULL)
+ return -ENOENT;
+ memset(info, 0, sizeof(*info));
+@@ -1968,7 +1982,7 @@ static int snd_seq_ioctl_get_subscriptio
+ struct snd_seq_client_port *sport = NULL;
+
+ result = -EINVAL;
+- sender = snd_seq_client_use_ptr(subs->sender.client);
++ sender = client_load_and_use_ptr(subs->sender.client);
+ if (!sender)
+ goto __end;
+ sport = snd_seq_port_use_ptr(sender, subs->sender.port);
+@@ -1999,7 +2013,7 @@ static int snd_seq_ioctl_query_subs(stru
+ struct list_head *p;
+ int i;
+
+- cptr = snd_seq_client_use_ptr(subs->root.client);
++ cptr = client_load_and_use_ptr(subs->root.client);
+ if (!cptr)
+ goto __end;
+ port = snd_seq_port_use_ptr(cptr, subs->root.port);
+@@ -2066,7 +2080,7 @@ static int snd_seq_ioctl_query_next_clie
+ if (info->client < 0)
+ info->client = 0;
+ for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) {
+- cptr = snd_seq_client_use_ptr(info->client);
++ cptr = client_load_and_use_ptr(info->client);
+ if (cptr)
+ break; /* found */
+ }
+@@ -2089,7 +2103,7 @@ static int snd_seq_ioctl_query_next_port
+ struct snd_seq_client *cptr;
+ struct snd_seq_client_port *port = NULL;
+
+- cptr = snd_seq_client_use_ptr(info->addr.client);
++ cptr = client_load_and_use_ptr(info->addr.client);
+ if (cptr == NULL)
+ return -ENXIO;
+
+@@ -2186,7 +2200,7 @@ static int snd_seq_ioctl_client_ump_info
+ size = sizeof(struct snd_ump_endpoint_info);
+ else
+ size = sizeof(struct snd_ump_block_info);
+- cptr = snd_seq_client_use_ptr(client);
++ cptr = client_load_and_use_ptr(client);
+ if (!cptr)
+ return -ENOENT;
+
+@@ -2458,7 +2472,7 @@ int snd_seq_kernel_client_enqueue(int cl
+ if (check_event_type_and_length(ev))
+ return -EINVAL;
+
+- cptr = snd_seq_client_use_ptr(client);
++ cptr = client_load_and_use_ptr(client);
+ if (cptr == NULL)
+ return -EINVAL;
+
+@@ -2690,7 +2704,7 @@ void snd_seq_info_clients_read(struct sn
+
+ /* list the client table */
+ for (c = 0; c < SNDRV_SEQ_MAX_CLIENTS; c++) {
+- client = snd_seq_client_use_ptr(c);
++ client = client_load_and_use_ptr(c);
+ if (client == NULL)
+ continue;
+ if (client->type == NO_CLIENT) {
--- /dev/null
+From 35d99c68af40a8ca175babc5a89ef7e2226fb3ca Mon Sep 17 00:00:00 2001
+From: Haoxiang Li <haoxiang_li2024@163.com>
+Date: Mon, 3 Mar 2025 10:42:33 +0800
+Subject: btrfs: fix a leaked chunk map issue in read_one_chunk()
+
+From: Haoxiang Li <haoxiang_li2024@163.com>
+
+commit 35d99c68af40a8ca175babc5a89ef7e2226fb3ca upstream.
+
+Add btrfs_free_chunk_map() to free the memory allocated
+by btrfs_alloc_chunk_map() if btrfs_add_chunk_map() fails.
+
+Fixes: 7dc66abb5a47 ("btrfs: use a dedicated data structure for chunk maps")
+CC: stable@vger.kernel.org
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Haoxiang Li <haoxiang_li2024@163.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/volumes.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -7076,6 +7076,7 @@ static int read_one_chunk(struct btrfs_k
+ btrfs_err(fs_info,
+ "failed to add chunk map, start=%llu len=%llu: %d",
+ map->start, map->chunk_len, ret);
++ btrfs_free_chunk_map(map);
+ }
+
+ return ret;
--- /dev/null
+From 5a4041f2c47247575a6c2e53ce14f7b0ac946c33 Mon Sep 17 00:00:00 2001
+From: Naohiro Aota <naohiro.aota@wdc.com>
+Date: Wed, 19 Feb 2025 16:02:11 +0900
+Subject: btrfs: zoned: fix extent range end unlock in cow_file_range()
+
+From: Naohiro Aota <naohiro.aota@wdc.com>
+
+commit 5a4041f2c47247575a6c2e53ce14f7b0ac946c33 upstream.
+
+Running generic/751 on the for-next branch often results in a hang like
+below. They are both stuck by locking an extent. This suggests someone
+forgot to unlock an extent.
+
+ INFO: task kworker/u128:1:12 blocked for more than 323 seconds.
+ Not tainted 6.13.0-BTRFS-ZNS+ #503
+ "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+ task:kworker/u128:1 state:D stack:0 pid:12 tgid:12 ppid:2 flags:0x00004000
+ Workqueue: btrfs-fixup btrfs_work_helper [btrfs]
+ Call Trace:
+ <TASK>
+ __schedule+0x534/0xdd0
+ schedule+0x39/0x140
+ __lock_extent+0x31b/0x380 [btrfs]
+ ? __pfx_autoremove_wake_function+0x10/0x10
+ btrfs_writepage_fixup_worker+0xf1/0x3a0 [btrfs]
+ btrfs_work_helper+0xff/0x480 [btrfs]
+ ? lock_release+0x178/0x2c0
+ process_one_work+0x1ee/0x570
+ ? srso_return_thunk+0x5/0x5f
+ worker_thread+0x1d1/0x3b0
+ ? __pfx_worker_thread+0x10/0x10
+ kthread+0x10b/0x230
+ ? __pfx_kthread+0x10/0x10
+ ret_from_fork+0x30/0x50
+ ? __pfx_kthread+0x10/0x10
+ ret_from_fork_asm+0x1a/0x30
+ </TASK>
+ INFO: task kworker/u134:0:184 blocked for more than 323 seconds.
+ Not tainted 6.13.0-BTRFS-ZNS+ #503
+ "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+ task:kworker/u134:0 state:D stack:0 pid:184 tgid:184 ppid:2 flags:0x00004000
+ Workqueue: writeback wb_workfn (flush-btrfs-4)
+ Call Trace:
+ <TASK>
+ __schedule+0x534/0xdd0
+ schedule+0x39/0x140
+ __lock_extent+0x31b/0x380 [btrfs]
+ ? __pfx_autoremove_wake_function+0x10/0x10
+ find_lock_delalloc_range+0xdb/0x260 [btrfs]
+ writepage_delalloc+0x12f/0x500 [btrfs]
+ ? srso_return_thunk+0x5/0x5f
+ extent_write_cache_pages+0x232/0x840 [btrfs]
+ btrfs_writepages+0x72/0x130 [btrfs]
+ do_writepages+0xe7/0x260
+ ? srso_return_thunk+0x5/0x5f
+ ? lock_acquire+0xd2/0x300
+ ? srso_return_thunk+0x5/0x5f
+ ? find_held_lock+0x2b/0x80
+ ? wbc_attach_and_unlock_inode.part.0+0x102/0x250
+ ? wbc_attach_and_unlock_inode.part.0+0x102/0x250
+ __writeback_single_inode+0x5c/0x4b0
+ writeback_sb_inodes+0x22d/0x550
+ __writeback_inodes_wb+0x4c/0xe0
+ wb_writeback+0x2f6/0x3f0
+ wb_workfn+0x32a/0x510
+ process_one_work+0x1ee/0x570
+ ? srso_return_thunk+0x5/0x5f
+ worker_thread+0x1d1/0x3b0
+ ? __pfx_worker_thread+0x10/0x10
+ kthread+0x10b/0x230
+ ? __pfx_kthread+0x10/0x10
+ ret_from_fork+0x30/0x50
+ ? __pfx_kthread+0x10/0x10
+ ret_from_fork_asm+0x1a/0x30
+ </TASK>
+
+This happens because we have another success path for the zoned mode. When
+there is no active zone available, btrfs_reserve_extent() returns
+-EAGAIN. In this case, we have two reactions.
+
+(1) If the given range is never allocated, we can only wait for someone
+ to finish a zone, so wait on BTRFS_FS_NEED_ZONE_FINISH bit and retry
+ afterward.
+
+(2) Or, if some allocations are already done, we must bail out and let
+ the caller send IOs for the allocation. This is because these IOs
+ may be necessary to finish a zone.
+
+The commit 06f364284794 ("btrfs: do proper folio cleanup when
+cow_file_range() failed") moved the unlock code from the inside of the
+loop to the outside. So, previously, the allocated extents are unlocked
+just after the allocation and so before returning from the function.
+However, they are no longer unlocked on the case (2) above. That caused
+the hang issue.
+
+Fix the issue by modifying the 'end' to the end of the allocated
+range. Then, we can exit the loop and the same unlock code can properly
+handle the case.
+
+Reported-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Tested-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Fixes: 06f364284794 ("btrfs: do proper folio cleanup when cow_file_range() failed")
+CC: stable@vger.kernel.org
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/inode.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1426,8 +1426,13 @@ static noinline int cow_file_range(struc
+ continue;
+ }
+ if (done_offset) {
+- *done_offset = start - 1;
+- return 0;
++ /*
++ * Move @end to the end of the processed range,
++ * and exit the loop to unlock the processed extents.
++ */
++ end = start - 1;
++ ret = 0;
++ break;
+ }
+ ret = -ENOSPC;
+ }
--- /dev/null
+From 374c9faac5a763a05bc3f68ad9f73dab3c6aec90 Mon Sep 17 00:00:00 2001
+From: Ma Ke <make24@iscas.ac.cn>
+Date: Wed, 26 Feb 2025 16:37:31 +0800
+Subject: drm/amd/display: Fix null check for pipe_ctx->plane_state in resource_build_scaling_params
+
+From: Ma Ke <make24@iscas.ac.cn>
+
+commit 374c9faac5a763a05bc3f68ad9f73dab3c6aec90 upstream.
+
+Null pointer dereference issue could occur when pipe_ctx->plane_state
+is null. The fix adds a check to ensure 'pipe_ctx->plane_state' is not
+null before accessing. This prevents a null pointer dereference.
+
+Found by code review.
+
+Fixes: 3be5262e353b ("drm/amd/display: Rename more dc_surface stuff to plane_state")
+Reviewed-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Ma Ke <make24@iscas.ac.cn>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 63e6a77ccf239337baa9b1e7787cde9fa0462092)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1455,7 +1455,8 @@ bool resource_build_scaling_params(struc
+ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+ /* Invalid input */
+- if (!plane_state->dst_rect.width ||
++ if (!plane_state ||
++ !plane_state->dst_rect.width ||
+ !plane_state->dst_rect.height ||
+ !plane_state->src_rect.width ||
+ !plane_state->src_rect.height) {
--- /dev/null
+From da552bda987420e877500fdd90bd0172e3bf412b Mon Sep 17 00:00:00 2001
+From: Kenneth Feng <kenneth.feng@amd.com>
+Date: Fri, 28 Feb 2025 17:02:11 +0800
+Subject: drm/amd/pm: always allow ih interrupt from fw
+
+From: Kenneth Feng <kenneth.feng@amd.com>
+
+commit da552bda987420e877500fdd90bd0172e3bf412b upstream.
+
+always allow ih interrupt from fw on smu v14 based on
+the interface requirement
+
+Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
+Reviewed-by: Yang Wang <kevinyang.wang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit a3199eba46c54324193607d9114a1e321292d7a1)
+Cc: stable@vger.kernel.org # 6.12.x
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 12 +-----------
+ 1 file changed, 1 insertion(+), 11 deletions(-)
+
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+@@ -1899,16 +1899,6 @@ static int smu_v14_0_allow_ih_interrupt(
+ NULL);
+ }
+
+-static int smu_v14_0_process_pending_interrupt(struct smu_context *smu)
+-{
+- int ret = 0;
+-
+- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
+- ret = smu_v14_0_allow_ih_interrupt(smu);
+-
+- return ret;
+-}
+-
+ int smu_v14_0_enable_thermal_alert(struct smu_context *smu)
+ {
+ int ret = 0;
+@@ -1920,7 +1910,7 @@ int smu_v14_0_enable_thermal_alert(struc
+ if (ret)
+ return ret;
+
+- return smu_v14_0_process_pending_interrupt(smu);
++ return smu_v14_0_allow_ih_interrupt(smu);
+ }
+
+ int smu_v14_0_disable_thermal_alert(struct smu_context *smu)
--- /dev/null
+From fd617ea3b79d2116d53f76cdb5a3601c0ba6e42f Mon Sep 17 00:00:00 2001
+From: Andrew Martin <Andrew.Martin@amd.com>
+Date: Fri, 28 Feb 2025 11:26:48 -0500
+Subject: drm/amdkfd: Fix NULL Pointer Dereference in KFD queue
+
+From: Andrew Martin <Andrew.Martin@amd.com>
+
+commit fd617ea3b79d2116d53f76cdb5a3601c0ba6e42f upstream.
+
+Through KFD IOCTL Fuzzing we encountered a NULL pointer dereference
+when calling kfd_queue_acquire_buffers.
+
+Fixes: 629568d25fea ("drm/amdkfd: Validate queue cwsr area and eop buffer size")
+Signed-off-by: Andrew Martin <Andrew.Martin@amd.com>
+Reviewed-by: Philip Yang <Philip.Yang@amd.com>
+Signed-off-by: Andrew Martin <Andrew.Martin@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 049e5bf3c8406f87c3d8e1958e0a16804fa1d530)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+@@ -266,8 +266,8 @@ int kfd_queue_acquire_buffers(struct kfd
+ /* EOP buffer is not required for all ASICs */
+ if (properties->eop_ring_buffer_address) {
+ if (properties->eop_ring_buffer_size != topo_dev->node_props.eop_buffer_size) {
+- pr_debug("queue eop bo size 0x%lx not equal to node eop buf size 0x%x\n",
+- properties->eop_buf_bo->tbo.base.size,
++ pr_debug("queue eop bo size 0x%x not equal to node eop buf size 0x%x\n",
++ properties->eop_ring_buffer_size,
+ topo_dev->node_props.eop_buffer_size);
+ err = -EINVAL;
+ goto out_err_unreserve;
--- /dev/null
+From df1a1ed5e1bdd9cc13148e0e5549f5ebcf76cf13 Mon Sep 17 00:00:00 2001
+From: Brendan King <Brendan.King@imgtec.com>
+Date: Wed, 26 Feb 2025 15:42:19 +0000
+Subject: drm/imagination: avoid deadlock on fence release
+
+From: Brendan King <Brendan.King@imgtec.com>
+
+commit df1a1ed5e1bdd9cc13148e0e5549f5ebcf76cf13 upstream.
+
+Do scheduler queue fence release processing on a workqueue, rather
+than in the release function itself.
+
+Fixes deadlock issues such as the following:
+
+[ 607.400437] ============================================
+[ 607.405755] WARNING: possible recursive locking detected
+[ 607.415500] --------------------------------------------
+[ 607.420817] weston:zfq0/24149 is trying to acquire lock:
+[ 607.426131] ffff000017d041a0 (reservation_ww_class_mutex){+.+.}-{3:3}, at: pvr_gem_object_vunmap+0x40/0xc0 [powervr]
+[ 607.436728]
+ but task is already holding lock:
+[ 607.442554] ffff000017d105a0 (reservation_ww_class_mutex){+.+.}-{3:3}, at: dma_buf_ioctl+0x250/0x554
+[ 607.451727]
+ other info that might help us debug this:
+[ 607.458245] Possible unsafe locking scenario:
+
+[ 607.464155] CPU0
+[ 607.466601] ----
+[ 607.469044] lock(reservation_ww_class_mutex);
+[ 607.473584] lock(reservation_ww_class_mutex);
+[ 607.478114]
+ *** DEADLOCK ***
+
+Cc: stable@vger.kernel.org
+Fixes: eaf01ee5ba28 ("drm/imagination: Implement job submission and scheduling")
+Signed-off-by: Brendan King <brendan.king@imgtec.com>
+Reviewed-by: Matt Coster <matt.coster@imgtec.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250226-fence-release-deadlock-v2-1-6fed2fc1fe88@imgtec.com
+Signed-off-by: Matt Coster <matt.coster@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/imagination/pvr_queue.c | 13 +++++++++++--
+ drivers/gpu/drm/imagination/pvr_queue.h | 4 ++++
+ 2 files changed, 15 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/imagination/pvr_queue.c
++++ b/drivers/gpu/drm/imagination/pvr_queue.c
+@@ -109,12 +109,20 @@ pvr_queue_fence_get_driver_name(struct d
+ return PVR_DRIVER_NAME;
+ }
+
++static void pvr_queue_fence_release_work(struct work_struct *w)
++{
++ struct pvr_queue_fence *fence = container_of(w, struct pvr_queue_fence, release_work);
++
++ pvr_context_put(fence->queue->ctx);
++ dma_fence_free(&fence->base);
++}
++
+ static void pvr_queue_fence_release(struct dma_fence *f)
+ {
+ struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base);
++ struct pvr_device *pvr_dev = fence->queue->ctx->pvr_dev;
+
+- pvr_context_put(fence->queue->ctx);
+- dma_fence_free(f);
++ queue_work(pvr_dev->sched_wq, &fence->release_work);
+ }
+
+ static const char *
+@@ -268,6 +276,7 @@ pvr_queue_fence_init(struct dma_fence *f
+
+ pvr_context_get(queue->ctx);
+ fence->queue = queue;
++ INIT_WORK(&fence->release_work, pvr_queue_fence_release_work);
+ dma_fence_init(&fence->base, fence_ops,
+ &fence_ctx->lock, fence_ctx->id,
+ atomic_inc_return(&fence_ctx->seqno));
+--- a/drivers/gpu/drm/imagination/pvr_queue.h
++++ b/drivers/gpu/drm/imagination/pvr_queue.h
+@@ -5,6 +5,7 @@
+ #define PVR_QUEUE_H
+
+ #include <drm/gpu_scheduler.h>
++#include <linux/workqueue.h>
+
+ #include "pvr_cccb.h"
+ #include "pvr_device.h"
+@@ -63,6 +64,9 @@ struct pvr_queue_fence {
+
+ /** @queue: Queue that created this fence. */
+ struct pvr_queue *queue;
++
++ /** @release_work: Fence release work structure. */
++ struct work_struct release_work;
+ };
+
+ /**
--- /dev/null
+From a5c4c3ba95a52d66315acdfbaba9bd82ed39c250 Mon Sep 17 00:00:00 2001
+From: Brendan King <Brendan.King@imgtec.com>
+Date: Wed, 26 Feb 2025 15:43:06 +0000
+Subject: drm/imagination: Hold drm_gem_gpuva lock for unmap
+
+From: Brendan King <Brendan.King@imgtec.com>
+
+commit a5c4c3ba95a52d66315acdfbaba9bd82ed39c250 upstream.
+
+Avoid a warning from drm_gem_gpuva_assert_lock_held in drm_gpuva_unlink.
+
+The Imagination driver uses the GEM object reservation lock to protect
+the gpuva list, but the GEM object was not always known in the code
+paths that ended up calling drm_gpuva_unlink. When the GEM object isn't
+known, it is found by calling drm_gpuva_find to lookup the object
+associated with a given virtual address range, or by calling
+drm_gpuva_find_first when removing all mappings.
+
+Cc: stable@vger.kernel.org
+Fixes: 4bc736f890ce ("drm/imagination: vm: make use of GPUVM's drm_exec helper")
+Signed-off-by: Brendan King <brendan.king@imgtec.com>
+Reviewed-by: Matt Coster <matt.coster@imgtec.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250226-hold-drm_gem_gpuva-lock-for-unmap-v2-1-3fdacded227f@imgtec.com
+Signed-off-by: Matt Coster <matt.coster@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/imagination/pvr_fw_meta.c | 6 -
+ drivers/gpu/drm/imagination/pvr_vm.c | 134 ++++++++++++++++++++++++------
+ drivers/gpu/drm/imagination/pvr_vm.h | 3
+ 3 files changed, 115 insertions(+), 28 deletions(-)
+
+--- a/drivers/gpu/drm/imagination/pvr_fw_meta.c
++++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c
+@@ -527,8 +527,10 @@ pvr_meta_vm_map(struct pvr_device *pvr_d
+ static void
+ pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+ {
+- pvr_vm_unmap(pvr_dev->kernel_vm_ctx, fw_obj->fw_mm_node.start,
+- fw_obj->fw_mm_node.size);
++ struct pvr_gem_object *pvr_obj = fw_obj->gem;
++
++ pvr_vm_unmap_obj(pvr_dev->kernel_vm_ctx, pvr_obj,
++ fw_obj->fw_mm_node.start, fw_obj->fw_mm_node.size);
+ }
+
+ static bool
+--- a/drivers/gpu/drm/imagination/pvr_vm.c
++++ b/drivers/gpu/drm/imagination/pvr_vm.c
+@@ -293,8 +293,9 @@ err_bind_op_fini:
+
+ static int
+ pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
+- struct pvr_vm_context *vm_ctx, u64 device_addr,
+- u64 size)
++ struct pvr_vm_context *vm_ctx,
++ struct pvr_gem_object *pvr_obj,
++ u64 device_addr, u64 size)
+ {
+ int err;
+
+@@ -318,6 +319,7 @@ pvr_vm_bind_op_unmap_init(struct pvr_vm_
+ goto err_bind_op_fini;
+ }
+
++ bind_op->pvr_obj = pvr_obj;
+ bind_op->vm_ctx = vm_ctx;
+ bind_op->device_addr = device_addr;
+ bind_op->size = size;
+@@ -598,20 +600,6 @@ err_free:
+ }
+
+ /**
+- * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
+- * @vm_ctx: Target VM context.
+- *
+- * This function ensures that no mappings are left dangling by unmapping them
+- * all in order of ascending device-virtual address.
+- */
+-void
+-pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
+-{
+- WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
+- vm_ctx->gpuvm_mgr.mm_range));
+-}
+-
+-/**
+ * pvr_vm_context_release() - Teardown a VM context.
+ * @ref_count: Pointer to reference counter of the VM context.
+ *
+@@ -703,11 +691,7 @@ pvr_vm_lock_extra(struct drm_gpuvm_exec
+ struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
+ struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
+
+- /* Unmap operations don't have an object to lock. */
+- if (!pvr_obj)
+- return 0;
+-
+- /* Acquire lock on the GEM being mapped. */
++ /* Acquire lock on the GEM object being mapped/unmapped. */
+ return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
+ }
+
+@@ -772,8 +756,10 @@ err_cleanup:
+ }
+
+ /**
+- * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
++ * pvr_vm_unmap_obj_locked() - Unmap an already mapped section of device-virtual
++ * memory.
+ * @vm_ctx: Target VM context.
++ * @pvr_obj: Target PowerVR memory object.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+@@ -784,9 +770,13 @@ err_cleanup:
+ * * Any error encountered while performing internal operations required to
+ * destroy the mapping (returned from pvr_vm_gpuva_unmap or
+ * pvr_vm_gpuva_remap).
++ *
++ * The vm_ctx->lock must be held when calling this function.
+ */
+-int
+-pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
++static int
++pvr_vm_unmap_obj_locked(struct pvr_vm_context *vm_ctx,
++ struct pvr_gem_object *pvr_obj,
++ u64 device_addr, u64 size)
+ {
+ struct pvr_vm_bind_op bind_op = {0};
+ struct drm_gpuvm_exec vm_exec = {
+@@ -799,11 +789,13 @@ pvr_vm_unmap(struct pvr_vm_context *vm_c
+ },
+ };
+
+- int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
+- size);
++ int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, pvr_obj,
++ device_addr, size);
+ if (err)
+ return err;
+
++ pvr_gem_object_get(pvr_obj);
++
+ err = drm_gpuvm_exec_lock(&vm_exec);
+ if (err)
+ goto err_cleanup;
+@@ -818,6 +810,96 @@ err_cleanup:
+ return err;
+ }
+
++/**
++ * pvr_vm_unmap_obj() - Unmap an already mapped section of device-virtual
++ * memory.
++ * @vm_ctx: Target VM context.
++ * @pvr_obj: Target PowerVR memory object.
++ * @device_addr: Virtual device address at the start of the target mapping.
++ * @size: Size of the target mapping.
++ *
++ * Return:
++ * * 0 on success,
++ * * Any error encountered by pvr_vm_unmap_obj_locked.
++ */
++int
++pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
++ u64 device_addr, u64 size)
++{
++ int err;
++
++ mutex_lock(&vm_ctx->lock);
++ err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, device_addr, size);
++ mutex_unlock(&vm_ctx->lock);
++
++ return err;
++}
++
++/**
++ * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
++ * @vm_ctx: Target VM context.
++ * @device_addr: Virtual device address at the start of the target mapping.
++ * @size: Size of the target mapping.
++ *
++ * Return:
++ * * 0 on success,
++ * * Any error encountered by drm_gpuva_find,
++ * * Any error encountered by pvr_vm_unmap_obj_locked.
++ */
++int
++pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
++{
++ struct pvr_gem_object *pvr_obj;
++ struct drm_gpuva *va;
++ int err;
++
++ mutex_lock(&vm_ctx->lock);
++
++ va = drm_gpuva_find(&vm_ctx->gpuvm_mgr, device_addr, size);
++ if (va) {
++ pvr_obj = gem_to_pvr_gem(va->gem.obj);
++ err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
++ va->va.addr, va->va.range);
++ } else {
++ err = -ENOENT;
++ }
++
++ mutex_unlock(&vm_ctx->lock);
++
++ return err;
++}
++
++/**
++ * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
++ * @vm_ctx: Target VM context.
++ *
++ * This function ensures that no mappings are left dangling by unmapping them
++ * all in order of ascending device-virtual address.
++ */
++void
++pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
++{
++ mutex_lock(&vm_ctx->lock);
++
++ for (;;) {
++ struct pvr_gem_object *pvr_obj;
++ struct drm_gpuva *va;
++
++ va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr,
++ vm_ctx->gpuvm_mgr.mm_start,
++ vm_ctx->gpuvm_mgr.mm_range);
++ if (!va)
++ break;
++
++ pvr_obj = gem_to_pvr_gem(va->gem.obj);
++
++ WARN_ON(pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
++ va->va.addr, va->va.range));
++ }
++
++ mutex_unlock(&vm_ctx->lock);
++}
++
+ /* Static data areas are determined by firmware. */
+ static const struct drm_pvr_static_data_area static_data_areas[] = {
+ {
+--- a/drivers/gpu/drm/imagination/pvr_vm.h
++++ b/drivers/gpu/drm/imagination/pvr_vm.h
+@@ -38,6 +38,9 @@ struct pvr_vm_context *pvr_vm_create_con
+ int pvr_vm_map(struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset,
+ u64 device_addr, u64 size);
++int pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx,
++ struct pvr_gem_object *pvr_obj,
++ u64 device_addr, u64 size);
+ int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size);
+ void pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx);
+
--- /dev/null
+From 68c3de7f707e8a70e0a6d8087cf0fe4a3d5dbfb0 Mon Sep 17 00:00:00 2001
+From: Brendan King <Brendan.King@imgtec.com>
+Date: Wed, 26 Feb 2025 15:43:54 +0000
+Subject: drm/imagination: only init job done fences once
+
+From: Brendan King <Brendan.King@imgtec.com>
+
+commit 68c3de7f707e8a70e0a6d8087cf0fe4a3d5dbfb0 upstream.
+
+Ensure job done fences are only initialised once.
+
+This fixes a memory manager not clean warning from drm_mm_takedown
+on module unload.
+
+Cc: stable@vger.kernel.org
+Fixes: eaf01ee5ba28 ("drm/imagination: Implement job submission and scheduling")
+Signed-off-by: Brendan King <brendan.king@imgtec.com>
+Reviewed-by: Matt Coster <matt.coster@imgtec.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250226-init-done-fences-once-v2-1-c1b2f556b329@imgtec.com
+Signed-off-by: Matt Coster <matt.coster@imgtec.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/imagination/pvr_queue.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/imagination/pvr_queue.c
++++ b/drivers/gpu/drm/imagination/pvr_queue.c
+@@ -313,8 +313,9 @@ pvr_queue_cccb_fence_init(struct dma_fen
+ static void
+ pvr_queue_job_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
+ {
+- pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
+- &queue->job_fence_ctx);
++ if (!fence->ops)
++ pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
++ &queue->job_fence_ctx);
+ }
+
+ /**
--- /dev/null
+From 29ffeb73b216ce3eff10229eb077cf9b7812119d Mon Sep 17 00:00:00 2001
+From: Richard Thier <u9vata@gmail.com>
+Date: Mon, 17 Jun 2019 23:46:27 +0200
+Subject: drm/radeon: Fix rs400_gpu_init for ATI mobility radeon Xpress 200M
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Richard Thier <u9vata@gmail.com>
+
+commit 29ffeb73b216ce3eff10229eb077cf9b7812119d upstream.
+
+num_gb_pipes was set to a wrong value using r420_pipe_config
+
+This has led to HyperZ glitches on fast Z clearing.
+
+Closes: https://bugs.freedesktop.org/show_bug.cgi?id=110897
+Reviewed-by: Marek Olšák <marek.olsak@amd.com>
+Signed-off-by: Richard Thier <u9vata@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 044e59a85c4d84e3c8d004c486e5c479640563a6)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/radeon/r300.c | 3 ++-
+ drivers/gpu/drm/radeon/radeon_asic.h | 1 +
+ drivers/gpu/drm/radeon/rs400.c | 18 ++++++++++++++++--
+ 3 files changed, 19 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/r300.c
++++ b/drivers/gpu/drm/radeon/r300.c
+@@ -359,7 +359,8 @@ int r300_mc_wait_for_idle(struct radeon_
+ return -1;
+ }
+
+-static void r300_gpu_init(struct radeon_device *rdev)
++/* rs400_gpu_init also calls this! */
++void r300_gpu_init(struct radeon_device *rdev)
+ {
+ uint32_t gb_tile_config, tmp;
+
+--- a/drivers/gpu/drm/radeon/radeon_asic.h
++++ b/drivers/gpu/drm/radeon/radeon_asic.h
+@@ -165,6 +165,7 @@ void r200_set_safe_registers(struct rade
+ */
+ extern int r300_init(struct radeon_device *rdev);
+ extern void r300_fini(struct radeon_device *rdev);
++extern void r300_gpu_init(struct radeon_device *rdev);
+ extern int r300_suspend(struct radeon_device *rdev);
+ extern int r300_resume(struct radeon_device *rdev);
+ extern int r300_asic_reset(struct radeon_device *rdev, bool hard);
+--- a/drivers/gpu/drm/radeon/rs400.c
++++ b/drivers/gpu/drm/radeon/rs400.c
+@@ -256,8 +256,22 @@ int rs400_mc_wait_for_idle(struct radeon
+
+ static void rs400_gpu_init(struct radeon_device *rdev)
+ {
+- /* FIXME: is this correct ? */
+- r420_pipes_init(rdev);
++ /* Earlier code was calling r420_pipes_init and then
++ * rs400_mc_wait_for_idle(rdev). The problem is that
++ * at least on my Mobility Radeon Xpress 200M RC410 card
++ * that ends up in this code path ends up num_gb_pipes == 3
++ * while the card seems to have only one pipe. With the
++ * r420 pipe initialization method.
++ *
++ * Problems shown up as HyperZ glitches, see:
++ * https://bugs.freedesktop.org/show_bug.cgi?id=110897
++ *
++ * Delegating initialization to r300 code seems to work
++ * and results in proper pipe numbers. The rs400 cards
++ * are said to be not r400, but r300 kind of cards.
++ */
++ r300_gpu_init(rdev);
++
+ if (rs400_mc_wait_for_idle(rdev)) {
+ pr_warn("rs400: Failed to wait MC idle while programming pipes. Bad things might happen. %08x\n",
+ RREG32(RADEON_MC_STATUS));
--- /dev/null
+From ae482ec8cd1a85bde3307f71921a7780086fbec0 Mon Sep 17 00:00:00 2001
+From: Matthew Brost <matthew.brost@intel.com>
+Date: Fri, 28 Feb 2025 08:30:58 +0100
+Subject: drm/xe: Add staging tree for VM binds
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Matthew Brost <matthew.brost@intel.com>
+
+commit ae482ec8cd1a85bde3307f71921a7780086fbec0 upstream.
+
+Concurrent VM bind staging and zapping of PTEs from a userptr notifier
+do not work because the view of PTEs is not stable. VM binds cannot
+acquire the notifier lock during staging, as memory allocations are
+required. To resolve this race condition, use a staging tree for VM
+binds that is committed only under the userptr notifier lock during the
+final step of the bind. This ensures a consistent view of the PTEs in
+the userptr notifier.
+
+A follow up may only use staging for VM in fault mode as this is the
+only mode in which the above race exists.
+
+v3:
+ - Drop zap PTE change (Thomas)
+ - s/xe_pt_entry/xe_pt_entry_staging (Thomas)
+
+Suggested-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Cc: <stable@vger.kernel.org>
+Fixes: e8babb280b5e ("drm/xe: Convert multiple bind ops into single job")
+Fixes: a708f6501c69 ("drm/xe: Update PT layer with better error handling")
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250228073058.59510-5-thomas.hellstrom@linux.intel.com
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+(cherry picked from commit 6f39b0c5ef0385eae586760d10b9767168037aa5)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_pt.c | 58 +++++++++++++++++++++++++++-------------
+ drivers/gpu/drm/xe/xe_pt_walk.c | 3 +-
+ drivers/gpu/drm/xe/xe_pt_walk.h | 4 ++
+ 3 files changed, 46 insertions(+), 19 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_pt.c
++++ b/drivers/gpu/drm/xe/xe_pt.c
+@@ -28,6 +28,8 @@ struct xe_pt_dir {
+ struct xe_pt pt;
+ /** @children: Array of page-table child nodes */
+ struct xe_ptw *children[XE_PDES];
++ /** @staging: Array of page-table staging nodes */
++ struct xe_ptw *staging[XE_PDES];
+ };
+
+ #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
+@@ -48,9 +50,10 @@ static struct xe_pt_dir *as_xe_pt_dir(st
+ return container_of(pt, struct xe_pt_dir, pt);
+ }
+
+-static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index)
++static struct xe_pt *
++xe_pt_entry_staging(struct xe_pt_dir *pt_dir, unsigned int index)
+ {
+- return container_of(pt_dir->children[index], struct xe_pt, base);
++ return container_of(pt_dir->staging[index], struct xe_pt, base);
+ }
+
+ static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
+@@ -125,6 +128,7 @@ struct xe_pt *xe_pt_create(struct xe_vm
+ }
+ pt->bo = bo;
+ pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL;
++ pt->base.staging = level ? as_xe_pt_dir(pt)->staging : NULL;
+
+ if (vm->xef)
+ xe_drm_client_add_bo(vm->xef->client, pt->bo);
+@@ -205,8 +209,8 @@ void xe_pt_destroy(struct xe_pt *pt, u32
+ struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
+
+ for (i = 0; i < XE_PDES; i++) {
+- if (xe_pt_entry(pt_dir, i))
+- xe_pt_destroy(xe_pt_entry(pt_dir, i), flags,
++ if (xe_pt_entry_staging(pt_dir, i))
++ xe_pt_destroy(xe_pt_entry_staging(pt_dir, i), flags,
+ deferred);
+ }
+ }
+@@ -375,8 +379,10 @@ xe_pt_insert_entry(struct xe_pt_stage_bi
+ /* Continue building a non-connected subtree. */
+ struct iosys_map *map = &parent->bo->vmap;
+
+- if (unlikely(xe_child))
++ if (unlikely(xe_child)) {
+ parent->base.children[offset] = &xe_child->base;
++ parent->base.staging[offset] = &xe_child->base;
++ }
+
+ xe_pt_write(xe_walk->vm->xe, map, offset, pte);
+ parent->num_live++;
+@@ -613,6 +619,7 @@ xe_pt_stage_bind(struct xe_tile *tile, s
+ .ops = &xe_pt_stage_bind_ops,
+ .shifts = xe_normal_pt_shifts,
+ .max_level = XE_PT_HIGHEST_LEVEL,
++ .staging = true,
+ },
+ .vm = xe_vma_vm(vma),
+ .tile = tile,
+@@ -872,7 +879,7 @@ static void xe_pt_cancel_bind(struct xe_
+ }
+ }
+
+-static void xe_pt_commit_locks_assert(struct xe_vma *vma)
++static void xe_pt_commit_prepare_locks_assert(struct xe_vma *vma)
+ {
+ struct xe_vm *vm = xe_vma_vm(vma);
+
+@@ -884,6 +891,16 @@ static void xe_pt_commit_locks_assert(st
+ xe_vm_assert_held(vm);
+ }
+
++static void xe_pt_commit_locks_assert(struct xe_vma *vma)
++{
++ struct xe_vm *vm = xe_vma_vm(vma);
++
++ xe_pt_commit_prepare_locks_assert(vma);
++
++ if (xe_vma_is_userptr(vma))
++ lockdep_assert_held_read(&vm->userptr.notifier_lock);
++}
++
+ static void xe_pt_commit(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries, struct llist_head *deferred)
+@@ -894,13 +911,17 @@ static void xe_pt_commit(struct xe_vma *
+
+ for (i = 0; i < num_entries; i++) {
+ struct xe_pt *pt = entries[i].pt;
++ struct xe_pt_dir *pt_dir;
+
+ if (!pt->level)
+ continue;
+
++ pt_dir = as_xe_pt_dir(pt);
+ for (j = 0; j < entries[i].qwords; j++) {
+ struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
++ int j_ = j + entries[i].ofs;
+
++ pt_dir->children[j_] = pt_dir->staging[j_];
+ xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred);
+ }
+ }
+@@ -912,7 +933,7 @@ static void xe_pt_abort_bind(struct xe_v
+ {
+ int i, j;
+
+- xe_pt_commit_locks_assert(vma);
++ xe_pt_commit_prepare_locks_assert(vma);
+
+ for (i = num_entries - 1; i >= 0; --i) {
+ struct xe_pt *pt = entries[i].pt;
+@@ -927,10 +948,10 @@ static void xe_pt_abort_bind(struct xe_v
+ pt_dir = as_xe_pt_dir(pt);
+ for (j = 0; j < entries[i].qwords; j++) {
+ u32 j_ = j + entries[i].ofs;
+- struct xe_pt *newpte = xe_pt_entry(pt_dir, j_);
++ struct xe_pt *newpte = xe_pt_entry_staging(pt_dir, j_);
+ struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
+
+- pt_dir->children[j_] = oldpte ? &oldpte->base : 0;
++ pt_dir->staging[j_] = oldpte ? &oldpte->base : 0;
+ xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL);
+ }
+ }
+@@ -942,7 +963,7 @@ static void xe_pt_commit_prepare_bind(st
+ {
+ u32 i, j;
+
+- xe_pt_commit_locks_assert(vma);
++ xe_pt_commit_prepare_locks_assert(vma);
+
+ for (i = 0; i < num_entries; i++) {
+ struct xe_pt *pt = entries[i].pt;
+@@ -960,10 +981,10 @@ static void xe_pt_commit_prepare_bind(st
+ struct xe_pt *newpte = entries[i].pt_entries[j].pt;
+ struct xe_pt *oldpte = NULL;
+
+- if (xe_pt_entry(pt_dir, j_))
+- oldpte = xe_pt_entry(pt_dir, j_);
++ if (xe_pt_entry_staging(pt_dir, j_))
++ oldpte = xe_pt_entry_staging(pt_dir, j_);
+
+- pt_dir->children[j_] = &newpte->base;
++ pt_dir->staging[j_] = &newpte->base;
+ entries[i].pt_entries[j].pt = oldpte;
+ }
+ }
+@@ -1513,6 +1534,7 @@ static unsigned int xe_pt_stage_unbind(s
+ .ops = &xe_pt_stage_unbind_ops,
+ .shifts = xe_normal_pt_shifts,
+ .max_level = XE_PT_HIGHEST_LEVEL,
++ .staging = true,
+ },
+ .tile = tile,
+ .modified_start = xe_vma_start(vma),
+@@ -1554,7 +1576,7 @@ static void xe_pt_abort_unbind(struct xe
+ {
+ int i, j;
+
+- xe_pt_commit_locks_assert(vma);
++ xe_pt_commit_prepare_locks_assert(vma);
+
+ for (i = num_entries - 1; i >= 0; --i) {
+ struct xe_vm_pgtable_update *entry = &entries[i];
+@@ -1567,7 +1589,7 @@ static void xe_pt_abort_unbind(struct xe
+ continue;
+
+ for (j = entry->ofs; j < entry->ofs + entry->qwords; j++)
+- pt_dir->children[j] =
++ pt_dir->staging[j] =
+ entries[i].pt_entries[j - entry->ofs].pt ?
+ &entries[i].pt_entries[j - entry->ofs].pt->base : NULL;
+ }
+@@ -1580,7 +1602,7 @@ xe_pt_commit_prepare_unbind(struct xe_vm
+ {
+ int i, j;
+
+- xe_pt_commit_locks_assert(vma);
++ xe_pt_commit_prepare_locks_assert(vma);
+
+ for (i = 0; i < num_entries; ++i) {
+ struct xe_vm_pgtable_update *entry = &entries[i];
+@@ -1594,8 +1616,8 @@ xe_pt_commit_prepare_unbind(struct xe_vm
+ pt_dir = as_xe_pt_dir(pt);
+ for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) {
+ entry->pt_entries[j - entry->ofs].pt =
+- xe_pt_entry(pt_dir, j);
+- pt_dir->children[j] = NULL;
++ xe_pt_entry_staging(pt_dir, j);
++ pt_dir->staging[j] = NULL;
+ }
+ }
+ }
+--- a/drivers/gpu/drm/xe/xe_pt_walk.c
++++ b/drivers/gpu/drm/xe/xe_pt_walk.c
+@@ -74,7 +74,8 @@ int xe_pt_walk_range(struct xe_ptw *pare
+ u64 addr, u64 end, struct xe_pt_walk *walk)
+ {
+ pgoff_t offset = xe_pt_offset(addr, level, walk);
+- struct xe_ptw **entries = parent->children ? parent->children : NULL;
++ struct xe_ptw **entries = walk->staging ? (parent->staging ?: NULL) :
++ (parent->children ?: NULL);
+ const struct xe_pt_walk_ops *ops = walk->ops;
+ enum page_walk_action action;
+ struct xe_ptw *child;
+--- a/drivers/gpu/drm/xe/xe_pt_walk.h
++++ b/drivers/gpu/drm/xe/xe_pt_walk.h
+@@ -11,12 +11,14 @@
+ /**
+ * struct xe_ptw - base class for driver pagetable subclassing.
+ * @children: Pointer to an array of children if any.
++ * @staging: Pointer to an array of staging if any.
+ *
+ * Drivers could subclass this, and if it's a page-directory, typically
+ * embed an array of xe_ptw pointers.
+ */
+ struct xe_ptw {
+ struct xe_ptw **children;
++ struct xe_ptw **staging;
+ };
+
+ /**
+@@ -41,6 +43,8 @@ struct xe_pt_walk {
+ * as shared pagetables.
+ */
+ bool shared_pt_mode;
++ /** @staging: Walk staging PT structure */
++ bool staging;
+ };
+
+ /**
--- /dev/null
+From 84211b1c0db6b9dbe0020fa97192fb9661617f24 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= <thomas.hellstrom@linux.intel.com>
+Date: Fri, 28 Feb 2025 08:30:57 +0100
+Subject: drm/xe: Fix fault mode invalidation with unbind
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+
+commit 84211b1c0db6b9dbe0020fa97192fb9661617f24 upstream.
+
+Fix fault mode invalidation racing with unbind leading to the
+PTE zapping potentially traversing an invalid page-table tree.
+Do this by holding the notifier lock across PTE zapping. This
+might transfer any contention waiting on the notifier seqlock
+read side to the notifier lock read side, but that shouldn't be
+a major problem.
+
+At the same time get rid of the open-coded invalidation in the bind
+code by relying on the notifier even when the vma bind is not
+yet committed.
+
+Finally let userptr invalidation call a dedicated xe_vm function
+performing a full invalidation.
+
+Fixes: e8babb280b5e ("drm/xe: Convert multiple bind ops into single job")
+Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Matthew Auld <matthew.auld@intel.com>
+Cc: <stable@vger.kernel.org> # v6.12+
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250228073058.59510-4-thomas.hellstrom@linux.intel.com
+(cherry picked from commit 100a5b8dadfca50d91d9a4c9fc01431b42a25cab)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_pt.c | 38 ++++-------------
+ drivers/gpu/drm/xe/xe_vm.c | 85 +++++++++++++++++++++++++--------------
+ drivers/gpu/drm/xe/xe_vm.h | 8 +++
+ drivers/gpu/drm/xe/xe_vm_types.h | 4 -
+ 4 files changed, 75 insertions(+), 60 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_pt.c
++++ b/drivers/gpu/drm/xe/xe_pt.c
+@@ -1233,42 +1233,22 @@ static int vma_check_userptr(struct xe_v
+ return 0;
+
+ uvma = to_userptr_vma(vma);
+- notifier_seq = uvma->userptr.notifier_seq;
++ if (xe_pt_userptr_inject_eagain(uvma))
++ xe_vma_userptr_force_invalidate(uvma);
+
+- if (uvma->userptr.initial_bind && !xe_vm_in_fault_mode(vm))
+- return 0;
++ notifier_seq = uvma->userptr.notifier_seq;
+
+ if (!mmu_interval_read_retry(&uvma->userptr.notifier,
+- notifier_seq) &&
+- !xe_pt_userptr_inject_eagain(uvma))
++ notifier_seq))
+ return 0;
+
+- if (xe_vm_in_fault_mode(vm)) {
++ if (xe_vm_in_fault_mode(vm))
+ return -EAGAIN;
+- } else {
+- spin_lock(&vm->userptr.invalidated_lock);
+- list_move_tail(&uvma->userptr.invalidate_link,
+- &vm->userptr.invalidated);
+- spin_unlock(&vm->userptr.invalidated_lock);
+-
+- if (xe_vm_in_preempt_fence_mode(vm)) {
+- struct dma_resv_iter cursor;
+- struct dma_fence *fence;
+- long err;
+-
+- dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
+- DMA_RESV_USAGE_BOOKKEEP);
+- dma_resv_for_each_fence_unlocked(&cursor, fence)
+- dma_fence_enable_sw_signaling(fence);
+- dma_resv_iter_end(&cursor);
+-
+- err = dma_resv_wait_timeout(xe_vm_resv(vm),
+- DMA_RESV_USAGE_BOOKKEEP,
+- false, MAX_SCHEDULE_TIMEOUT);
+- XE_WARN_ON(err <= 0);
+- }
+- }
+
++ /*
++ * Just continue the operation since exec or rebind worker
++ * will take care of rebinding.
++ */
+ return 0;
+ }
+
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -580,51 +580,26 @@ out_unlock_outer:
+ trace_xe_vm_rebind_worker_exit(vm);
+ }
+
+-static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
+- const struct mmu_notifier_range *range,
+- unsigned long cur_seq)
++static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
+ {
+- struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
+- struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
++ struct xe_userptr *userptr = &uvma->userptr;
+ struct xe_vma *vma = &uvma->vma;
+- struct xe_vm *vm = xe_vma_vm(vma);
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+ long err;
+
+- xe_assert(vm->xe, xe_vma_is_userptr(vma));
+- trace_xe_vma_userptr_invalidate(vma);
+-
+- if (!mmu_notifier_range_blockable(range))
+- return false;
+-
+- vm_dbg(&xe_vma_vm(vma)->xe->drm,
+- "NOTIFIER: addr=0x%016llx, range=0x%016llx",
+- xe_vma_start(vma), xe_vma_size(vma));
+-
+- down_write(&vm->userptr.notifier_lock);
+- mmu_interval_set_seq(mni, cur_seq);
+-
+- /* No need to stop gpu access if the userptr is not yet bound. */
+- if (!userptr->initial_bind) {
+- up_write(&vm->userptr.notifier_lock);
+- return true;
+- }
+-
+ /*
+ * Tell exec and rebind worker they need to repin and rebind this
+ * userptr.
+ */
+ if (!xe_vm_in_fault_mode(vm) &&
+- !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
++ !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_move_tail(&userptr->invalidate_link,
+ &vm->userptr.invalidated);
+ spin_unlock(&vm->userptr.invalidated_lock);
+ }
+
+- up_write(&vm->userptr.notifier_lock);
+-
+ /*
+ * Preempt fences turn into schedule disables, pipeline these.
+ * Note that even in fault mode, we need to wait for binds and
+@@ -642,11 +617,35 @@ static bool vma_userptr_invalidate(struc
+ false, MAX_SCHEDULE_TIMEOUT);
+ XE_WARN_ON(err <= 0);
+
+- if (xe_vm_in_fault_mode(vm)) {
++ if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) {
+ err = xe_vm_invalidate_vma(vma);
+ XE_WARN_ON(err);
+ }
++}
++
++static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
++ const struct mmu_notifier_range *range,
++ unsigned long cur_seq)
++{
++ struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier);
++ struct xe_vma *vma = &uvma->vma;
++ struct xe_vm *vm = xe_vma_vm(vma);
++
++ xe_assert(vm->xe, xe_vma_is_userptr(vma));
++ trace_xe_vma_userptr_invalidate(vma);
+
++ if (!mmu_notifier_range_blockable(range))
++ return false;
++
++ vm_dbg(&xe_vma_vm(vma)->xe->drm,
++ "NOTIFIER: addr=0x%016llx, range=0x%016llx",
++ xe_vma_start(vma), xe_vma_size(vma));
++
++ down_write(&vm->userptr.notifier_lock);
++ mmu_interval_set_seq(mni, cur_seq);
++
++ __vma_userptr_invalidate(vm, uvma);
++ up_write(&vm->userptr.notifier_lock);
+ trace_xe_vma_userptr_invalidate_complete(vma);
+
+ return true;
+@@ -656,6 +655,34 @@ static const struct mmu_interval_notifie
+ .invalidate = vma_userptr_invalidate,
+ };
+
++#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
++/**
++ * xe_vma_userptr_force_invalidate() - force invalidate a userptr
++ * @uvma: The userptr vma to invalidate
++ *
++ * Perform a forced userptr invalidation for testing purposes.
++ */
++void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
++{
++ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
++
++ /* Protect against concurrent userptr pinning */
++ lockdep_assert_held(&vm->lock);
++ /* Protect against concurrent notifiers */
++ lockdep_assert_held(&vm->userptr.notifier_lock);
++ /*
++ * Protect against concurrent instances of this function and
++ * the critical exec sections
++ */
++ xe_vm_assert_held(vm);
++
++ if (!mmu_interval_read_retry(&uvma->userptr.notifier,
++ uvma->userptr.notifier_seq))
++ uvma->userptr.notifier_seq -= 2;
++ __vma_userptr_invalidate(vm, uvma);
++}
++#endif
++
+ int xe_vm_userptr_pin(struct xe_vm *vm)
+ {
+ struct xe_userptr_vma *uvma, *next;
+--- a/drivers/gpu/drm/xe/xe_vm.h
++++ b/drivers/gpu/drm/xe/xe_vm.h
+@@ -280,4 +280,12 @@ struct xe_vm_snapshot *xe_vm_snapshot_ca
+ void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
+ void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
+ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
++
++#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
++void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
++#else
++static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
++{
++}
++#endif
+ #endif
+--- a/drivers/gpu/drm/xe/xe_vm_types.h
++++ b/drivers/gpu/drm/xe/xe_vm_types.h
+@@ -227,8 +227,8 @@ struct xe_vm {
+ * up for revalidation. Protected from access with the
+ * @invalidated_lock. Removing items from the list
+ * additionally requires @lock in write mode, and adding
+- * items to the list requires the @userptr.notifer_lock in
+- * write mode.
++ * items to the list requires either the @userptr.notifer_lock in
++ * write mode, OR @lock in write mode.
+ */
+ struct list_head invalidated;
+ } userptr;
--- /dev/null
+From 54f94dc7f6b4db45dbc23b4db3d20c7194e2c54f Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Date: Thu, 27 Feb 2025 10:13:00 +0000
+Subject: drm/xe: Fix GT "for each engine" workarounds
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+commit 54f94dc7f6b4db45dbc23b4db3d20c7194e2c54f upstream.
+
+Any rules using engine matching are currently broken due to RTP processing
+happening too early in init, before the list of hardware engines has been
+initialised.
+
+Fix this by moving workaround processing to later in the driver probe
+sequence, to just before the processed list is used for the first time.
+
+Looking at the debugfs gt0/workarounds on ADL-P we notice 14011060649
+should be present while we see, before:
+
+ GT Workarounds
+ 14011059788
+ 14015795083
+
+And with the patch:
+
+ GT Workarounds
+ 14011060649
+ 14011059788
+ 14015795083
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Cc: Lucas De Marchi <lucas.demarchi@intel.com>
+Cc: Matt Roper <matthew.d.roper@intel.com>
+Cc: stable@vger.kernel.org # v6.11+
+Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250227101304.46660-2-tvrtko.ursulin@igalia.com
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+(cherry picked from commit 25d434cef791e03cf40680f5441b576c639bfa84)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_gt.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_gt.c
++++ b/drivers/gpu/drm/xe/xe_gt.c
+@@ -380,9 +380,7 @@ int xe_gt_init_early(struct xe_gt *gt)
+ if (err)
+ return err;
+
+- xe_wa_process_gt(gt);
+ xe_wa_process_oob(gt);
+- xe_tuning_process_gt(gt);
+
+ xe_force_wake_init_gt(gt, gt_to_fw(gt));
+ spin_lock_init(>->global_invl_lock);
+@@ -474,6 +472,8 @@ static int all_fw_domain_init(struct xe_
+ }
+
+ xe_gt_mcr_set_implicit_defaults(gt);
++ xe_wa_process_gt(gt);
++ xe_tuning_process_gt(gt);
+ xe_reg_sr_apply_mmio(>->reg_sr, gt);
+
+ err = xe_gt_clock_init(gt);
--- /dev/null
+From 0a98219bcc961edd3388960576e4353e123b4a51 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= <thomas.hellstrom@linux.intel.com>
+Date: Tue, 4 Mar 2025 18:33:41 +0100
+Subject: drm/xe/hmm: Don't dereference struct page pointers without notifier lock
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+
+commit 0a98219bcc961edd3388960576e4353e123b4a51 upstream.
+
+The pfns that we obtain from hmm_range_fault() point to pages that
+we don't have a reference on, and the guarantee that they are still
+in the cpu page-tables is that the notifier lock must be held and the
+notifier seqno is still valid.
+
+So while building the sg table and marking the pages accesses / dirty
+we need to hold this lock with a validated seqno.
+
+However, the lock is reclaim tainted which makes
+sg_alloc_table_from_pages_segment() unusable, since it internally
+allocates memory.
+
+Instead build the sg-table manually. For the non-iommu case
+this might lead to fewer coalesces, but if that's a problem it can
+be fixed up later in the resource cursor code. For the iommu case,
+the whole sg-table may still be coalesced to a single contiguous
+device va region.
+
+This avoids marking pages that we don't own dirty and accessed, and
+it also avoids dereferencing struct pages that we don't own.
+
+v2:
+- Use assert to check whether hmm pfns are valid (Matthew Auld)
+- Take into account that large pages may cross range boundaries
+ (Matthew Auld)
+
+v3:
+- Don't unnecessarily check for a non-freed sg-table. (Matthew Auld)
+- Add a missing up_read() in an error path. (Matthew Auld)
+
+Fixes: 81e058a3e7fd ("drm/xe: Introduce helper to populate userptr")
+Cc: Oak Zeng <oak.zeng@intel.com>
+Cc: <stable@vger.kernel.org> # v6.10+
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Acked-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250304173342.22009-3-thomas.hellstrom@linux.intel.com
+(cherry picked from commit ea3e66d280ce2576664a862693d1da8fd324c317)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_hmm.c | 120 +++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 90 insertions(+), 30 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_hmm.c
++++ b/drivers/gpu/drm/xe/xe_hmm.c
+@@ -42,6 +42,42 @@ static void xe_mark_range_accessed(struc
+ }
+ }
+
++static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st,
++ struct hmm_range *range, struct rw_semaphore *notifier_sem)
++{
++ unsigned long i, npages, hmm_pfn;
++ unsigned long num_chunks = 0;
++ int ret;
++
++ /* HMM docs says this is needed. */
++ ret = down_read_interruptible(notifier_sem);
++ if (ret)
++ return ret;
++
++ if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
++ up_read(notifier_sem);
++ return -EAGAIN;
++ }
++
++ npages = xe_npages_in_range(range->start, range->end);
++ for (i = 0; i < npages;) {
++ unsigned long len;
++
++ hmm_pfn = range->hmm_pfns[i];
++ xe_assert(xe, hmm_pfn & HMM_PFN_VALID);
++
++ len = 1UL << hmm_pfn_to_map_order(hmm_pfn);
++
++ /* If order > 0 the page may extend beyond range->start */
++ len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1);
++ i += len;
++ num_chunks++;
++ }
++ up_read(notifier_sem);
++
++ return sg_alloc_table(st, num_chunks, GFP_KERNEL);
++}
++
+ /**
+ * xe_build_sg() - build a scatter gather table for all the physical pages/pfn
+ * in a hmm_range. dma-map pages if necessary. dma-address is save in sg table
+@@ -50,6 +86,7 @@ static void xe_mark_range_accessed(struc
+ * @range: the hmm range that we build the sg table from. range->hmm_pfns[]
+ * has the pfn numbers of pages that back up this hmm address range.
+ * @st: pointer to the sg table.
++ * @notifier_sem: The xe notifier lock.
+ * @write: whether we write to this range. This decides dma map direction
+ * for system pages. If write we map it bi-diretional; otherwise
+ * DMA_TO_DEVICE
+@@ -76,38 +113,41 @@ static void xe_mark_range_accessed(struc
+ * Returns 0 if successful; -ENOMEM if fails to allocate memory
+ */
+ static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
+- struct sg_table *st, bool write)
++ struct sg_table *st,
++ struct rw_semaphore *notifier_sem,
++ bool write)
+ {
++ unsigned long npages = xe_npages_in_range(range->start, range->end);
+ struct device *dev = xe->drm.dev;
+- struct page **pages;
+- u64 i, npages;
+- int ret;
+-
+- npages = xe_npages_in_range(range->start, range->end);
+- pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
+- if (!pages)
+- return -ENOMEM;
+-
+- for (i = 0; i < npages; i++) {
+- pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
+- xe_assert(xe, !is_device_private_page(pages[i]));
+- }
+-
+- ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0, npages << PAGE_SHIFT,
+- xe_sg_segment_size(dev), GFP_KERNEL);
+- if (ret)
+- goto free_pages;
+-
+- ret = dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
+- DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
+- if (ret) {
+- sg_free_table(st);
+- st = NULL;
++ struct scatterlist *sgl;
++ struct page *page;
++ unsigned long i, j;
++
++ lockdep_assert_held(notifier_sem);
++
++ i = 0;
++ for_each_sg(st->sgl, sgl, st->nents, j) {
++ unsigned long hmm_pfn, size;
++
++ hmm_pfn = range->hmm_pfns[i];
++ page = hmm_pfn_to_page(hmm_pfn);
++ xe_assert(xe, !is_device_private_page(page));
++
++ size = 1UL << hmm_pfn_to_map_order(hmm_pfn);
++ size -= page_to_pfn(page) & (size - 1);
++ i += size;
++
++ if (unlikely(j == st->nents - 1)) {
++ if (i > npages)
++ size -= (i - npages);
++ sg_mark_end(sgl);
++ }
++ sg_set_page(sgl, page, size << PAGE_SHIFT, 0);
+ }
++ xe_assert(xe, i == npages);
+
+-free_pages:
+- kvfree(pages);
+- return ret;
++ return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
++ DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
+ }
+
+ /**
+@@ -235,16 +275,36 @@ int xe_hmm_userptr_populate_range(struct
+ if (ret)
+ goto free_pfns;
+
+- ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt, write);
++ ret = xe_alloc_sg(vm->xe, &userptr->sgt, &hmm_range, &vm->userptr.notifier_lock);
+ if (ret)
+ goto free_pfns;
+
++ ret = down_read_interruptible(&vm->userptr.notifier_lock);
++ if (ret)
++ goto free_st;
++
++ if (mmu_interval_read_retry(hmm_range.notifier, hmm_range.notifier_seq)) {
++ ret = -EAGAIN;
++ goto out_unlock;
++ }
++
++ ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt,
++ &vm->userptr.notifier_lock, write);
++ if (ret)
++ goto out_unlock;
++
+ xe_mark_range_accessed(&hmm_range, write);
+ userptr->sg = &userptr->sgt;
+ userptr->notifier_seq = hmm_range.notifier_seq;
++ up_read(&vm->userptr.notifier_lock);
++ kvfree(pfns);
++ return 0;
+
++out_unlock:
++ up_read(&vm->userptr.notifier_lock);
++free_st:
++ sg_free_table(&userptr->sgt);
+ free_pfns:
+ kvfree(pfns);
+ return ret;
+ }
+-
--- /dev/null
+From e3e2e7fc4cd8414c9a966ef1b344db543f8614f4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= <thomas.hellstrom@linux.intel.com>
+Date: Tue, 4 Mar 2025 18:33:40 +0100
+Subject: drm/xe/hmm: Style- and include fixes
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+
+commit e3e2e7fc4cd8414c9a966ef1b344db543f8614f4 upstream.
+
+Add proper #ifndef around the xe_hmm.h header, proper spacing
+and since the documentation mostly follows kerneldoc format,
+make it kerneldoc. Also prepare for upcoming -stable fixes.
+
+Fixes: 81e058a3e7fd ("drm/xe: Introduce helper to populate userptr")
+Cc: Oak Zeng <oak.zeng@intel.com>
+Cc: <stable@vger.kernel.org> # v6.10+
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Acked-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250304173342.22009-2-thomas.hellstrom@linux.intel.com
+(cherry picked from commit bbe2b06b55bc061c8fcec034ed26e88287f39143)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_hmm.c | 9 +++------
+ drivers/gpu/drm/xe/xe_hmm.h | 5 +++++
+ 2 files changed, 8 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_hmm.c
++++ b/drivers/gpu/drm/xe/xe_hmm.c
+@@ -19,11 +19,10 @@ static u64 xe_npages_in_range(unsigned l
+ return (end - start) >> PAGE_SHIFT;
+ }
+
+-/*
++/**
+ * xe_mark_range_accessed() - mark a range is accessed, so core mm
+ * have such information for memory eviction or write back to
+ * hard disk
+- *
+ * @range: the range to mark
+ * @write: if write to this range, we mark pages in this range
+ * as dirty
+@@ -43,11 +42,10 @@ static void xe_mark_range_accessed(struc
+ }
+ }
+
+-/*
++/**
+ * xe_build_sg() - build a scatter gather table for all the physical pages/pfn
+ * in a hmm_range. dma-map pages if necessary. dma-address is save in sg table
+ * and will be used to program GPU page table later.
+- *
+ * @xe: the xe device who will access the dma-address in sg table
+ * @range: the hmm range that we build the sg table from. range->hmm_pfns[]
+ * has the pfn numbers of pages that back up this hmm address range.
+@@ -112,9 +110,8 @@ free_pages:
+ return ret;
+ }
+
+-/*
++/**
+ * xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr
+- *
+ * @uvma: the userptr vma which hold the scatter gather table
+ *
+ * With function xe_userptr_populate_range, we allocate storage of
+--- a/drivers/gpu/drm/xe/xe_hmm.h
++++ b/drivers/gpu/drm/xe/xe_hmm.h
+@@ -3,9 +3,14 @@
+ * Copyright © 2024 Intel Corporation
+ */
+
++#ifndef _XE_HMM_H_
++#define _XE_HMM_H_
++
+ #include <linux/types.h>
+
+ struct xe_userptr_vma;
+
+ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked);
++
+ void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma);
++#endif
--- /dev/null
+From 475d06e00b7496c7915d87f7ae67af26738e4649 Mon Sep 17 00:00:00 2001
+From: Matthew Auld <matthew.auld@intel.com>
+Date: Wed, 26 Feb 2025 17:47:49 +0000
+Subject: drm/xe/userptr: properly setup pfn_flags_mask
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Matthew Auld <matthew.auld@intel.com>
+
+commit 475d06e00b7496c7915d87f7ae67af26738e4649 upstream.
+
+Currently we just leave it uninitialised, which at first looks harmless,
+however we also don't zero out the pfn array, and with pfn_flags_mask
+the idea is to be able set individual flags for a given range of pfn or
+completely ignore them, outside of default_flags. So here we end up with
+pfn[i] & pfn_flags_mask, and if both are uninitialised we might get back
+an unexpected flags value, like asking for read only with default_flags,
+but getting back write on top, leading to potentially bogus behaviour.
+
+To fix this ensure we zero the pfn_flags_mask, such that hmm only
+considers the default_flags and not also the initial pfn[i] value.
+
+v2 (Thomas):
+ - Prefer proper initializer.
+
+Fixes: 81e058a3e7fd ("drm/xe: Introduce helper to populate userptr")
+Signed-off-by: Matthew Auld <matthew.auld@intel.com>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Thomas Hellström <thomas.hellstrom@intel.com>
+Cc: <stable@vger.kernel.org> # v6.10+
+Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Reviewed-by: Tejas Upadhyay <tejas.upadhyay@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250226174748.294285-2-matthew.auld@intel.com
+(cherry picked from commit dd8c01e42f4c5c1eaf02f003d7d588ba6706aa71)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_hmm.c | 18 ++++++++++--------
+ 1 file changed, 10 insertions(+), 8 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_hmm.c
++++ b/drivers/gpu/drm/xe/xe_hmm.c
+@@ -203,13 +203,20 @@ int xe_hmm_userptr_populate_range(struct
+ {
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+- unsigned long *pfns, flags = HMM_PFN_REQ_FAULT;
++ unsigned long *pfns;
+ struct xe_userptr *userptr;
+ struct xe_vma *vma = &uvma->vma;
+ u64 userptr_start = xe_vma_userptr(vma);
+ u64 userptr_end = userptr_start + xe_vma_size(vma);
+ struct xe_vm *vm = xe_vma_vm(vma);
+- struct hmm_range hmm_range;
++ struct hmm_range hmm_range = {
++ .pfn_flags_mask = 0, /* ignore pfns */
++ .default_flags = HMM_PFN_REQ_FAULT,
++ .start = userptr_start,
++ .end = userptr_end,
++ .notifier = &uvma->userptr.notifier,
++ .dev_private_owner = vm->xe,
++ };
+ bool write = !xe_vma_read_only(vma);
+ unsigned long notifier_seq;
+ u64 npages;
+@@ -236,19 +243,14 @@ int xe_hmm_userptr_populate_range(struct
+ return -ENOMEM;
+
+ if (write)
+- flags |= HMM_PFN_REQ_WRITE;
++ hmm_range.default_flags |= HMM_PFN_REQ_WRITE;
+
+ if (!mmget_not_zero(userptr->notifier.mm)) {
+ ret = -EFAULT;
+ goto free_pfns;
+ }
+
+- hmm_range.default_flags = flags;
+ hmm_range.hmm_pfns = pfns;
+- hmm_range.notifier = &userptr->notifier;
+- hmm_range.start = userptr_start;
+- hmm_range.end = userptr_end;
+- hmm_range.dev_private_owner = vm->xe;
+
+ while (true) {
+ hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier);
--- /dev/null
+From 333b8906336174478efbbfc1e24a89e3397ffe65 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= <thomas.hellstrom@linux.intel.com>
+Date: Tue, 4 Mar 2025 18:33:42 +0100
+Subject: drm/xe/userptr: Unmap userptrs in the mmu notifier
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+
+commit 333b8906336174478efbbfc1e24a89e3397ffe65 upstream.
+
+If userptr pages are freed after a call to the xe mmu notifier,
+the device will not be blocked out from theoretically accessing
+these pages unless they are also unmapped from the iommu, and
+this violates some aspects of the iommu-imposed security.
+
+Ensure that userptrs are unmapped in the mmu notifier to
+mitigate this. A naive attempt would try to free the sg table, but
+the sg table itself may be accessed by a concurrent bind
+operation, so settle for only unmapping.
+
+v3:
+- Update lockdep asserts.
+- Fix a typo (Matthew Auld)
+
+Fixes: 81e058a3e7fd ("drm/xe: Introduce helper to populate userptr")
+Cc: Oak Zeng <oak.zeng@intel.com>
+Cc: Matthew Auld <matthew.auld@intel.com>
+Cc: <stable@vger.kernel.org> # v6.10+
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Acked-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250304173342.22009-4-thomas.hellstrom@linux.intel.com
+(cherry picked from commit ba767b9d01a2c552d76cf6f46b125d50ec4147a6)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_hmm.c | 51 ++++++++++++++++++++++++++++++++-------
+ drivers/gpu/drm/xe/xe_hmm.h | 2 +
+ drivers/gpu/drm/xe/xe_vm.c | 4 +++
+ drivers/gpu/drm/xe/xe_vm_types.h | 4 +++
+ 4 files changed, 52 insertions(+), 9 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_hmm.c
++++ b/drivers/gpu/drm/xe/xe_hmm.c
+@@ -150,6 +150,45 @@ static int xe_build_sg(struct xe_device
+ DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
+ }
+
++static void xe_hmm_userptr_set_mapped(struct xe_userptr_vma *uvma)
++{
++ struct xe_userptr *userptr = &uvma->userptr;
++ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
++
++ lockdep_assert_held_write(&vm->lock);
++ lockdep_assert_held(&vm->userptr.notifier_lock);
++
++ mutex_lock(&userptr->unmap_mutex);
++ xe_assert(vm->xe, !userptr->mapped);
++ userptr->mapped = true;
++ mutex_unlock(&userptr->unmap_mutex);
++}
++
++void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma)
++{
++ struct xe_userptr *userptr = &uvma->userptr;
++ struct xe_vma *vma = &uvma->vma;
++ bool write = !xe_vma_read_only(vma);
++ struct xe_vm *vm = xe_vma_vm(vma);
++ struct xe_device *xe = vm->xe;
++
++ if (!lockdep_is_held_type(&vm->userptr.notifier_lock, 0) &&
++ !lockdep_is_held_type(&vm->lock, 0) &&
++ !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
++ /* Don't unmap in exec critical section. */
++ xe_vm_assert_held(vm);
++ /* Don't unmap while mapping the sg. */
++ lockdep_assert_held(&vm->lock);
++ }
++
++ mutex_lock(&userptr->unmap_mutex);
++ if (userptr->sg && userptr->mapped)
++ dma_unmap_sgtable(xe->drm.dev, userptr->sg,
++ write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
++ userptr->mapped = false;
++ mutex_unlock(&userptr->unmap_mutex);
++}
++
+ /**
+ * xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr
+ * @uvma: the userptr vma which hold the scatter gather table
+@@ -161,16 +200,9 @@ static int xe_build_sg(struct xe_device
+ void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
+ {
+ struct xe_userptr *userptr = &uvma->userptr;
+- struct xe_vma *vma = &uvma->vma;
+- bool write = !xe_vma_read_only(vma);
+- struct xe_vm *vm = xe_vma_vm(vma);
+- struct xe_device *xe = vm->xe;
+- struct device *dev = xe->drm.dev;
+-
+- xe_assert(xe, userptr->sg);
+- dma_unmap_sgtable(dev, userptr->sg,
+- write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
+
++ xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg);
++ xe_hmm_userptr_unmap(uvma);
+ sg_free_table(userptr->sg);
+ userptr->sg = NULL;
+ }
+@@ -297,6 +329,7 @@ int xe_hmm_userptr_populate_range(struct
+
+ xe_mark_range_accessed(&hmm_range, write);
+ userptr->sg = &userptr->sgt;
++ xe_hmm_userptr_set_mapped(uvma);
+ userptr->notifier_seq = hmm_range.notifier_seq;
+ up_read(&vm->userptr.notifier_lock);
+ kvfree(pfns);
+--- a/drivers/gpu/drm/xe/xe_hmm.h
++++ b/drivers/gpu/drm/xe/xe_hmm.h
+@@ -13,4 +13,6 @@ struct xe_userptr_vma;
+ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked);
+
+ void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma);
++
++void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma);
+ #endif
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -621,6 +621,8 @@ static void __vma_userptr_invalidate(str
+ err = xe_vm_invalidate_vma(vma);
+ XE_WARN_ON(err);
+ }
++
++ xe_hmm_userptr_unmap(uvma);
+ }
+
+ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
+@@ -1039,6 +1041,7 @@ static struct xe_vma *xe_vma_create(stru
+ INIT_LIST_HEAD(&userptr->invalidate_link);
+ INIT_LIST_HEAD(&userptr->repin_link);
+ vma->gpuva.gem.offset = bo_offset_or_userptr;
++ mutex_init(&userptr->unmap_mutex);
+
+ err = mmu_interval_notifier_insert(&userptr->notifier,
+ current->mm,
+@@ -1080,6 +1083,7 @@ static void xe_vma_destroy_late(struct x
+ * them anymore
+ */
+ mmu_interval_notifier_remove(&userptr->notifier);
++ mutex_destroy(&userptr->unmap_mutex);
+ xe_vm_put(vm);
+ } else if (xe_vma_is_null(vma)) {
+ xe_vm_put(vm);
+--- a/drivers/gpu/drm/xe/xe_vm_types.h
++++ b/drivers/gpu/drm/xe/xe_vm_types.h
+@@ -59,12 +59,16 @@ struct xe_userptr {
+ struct sg_table *sg;
+ /** @notifier_seq: notifier sequence number */
+ unsigned long notifier_seq;
++ /** @unmap_mutex: Mutex protecting dma-unmapping */
++ struct mutex unmap_mutex;
+ /**
+ * @initial_bind: user pointer has been bound at least once.
+ * write: vm->userptr.notifier_lock in read mode and vm->resv held.
+ * read: vm->userptr.notifier_lock in write mode or vm->resv held.
+ */
+ bool initial_bind;
++ /** @mapped: Whether the @sgt sg-table is dma-mapped. Protected by @unmap_mutex. */
++ bool mapped;
+ #if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+ u32 divisor;
+ #endif
--- /dev/null
+From 1414d95d5805b1dc221d22db9b8dc5287ef083bc Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= <thomas.hellstrom@linux.intel.com>
+Date: Fri, 28 Feb 2025 08:30:56 +0100
+Subject: drm/xe/vm: Fix a misplaced #endif
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+
+commit 1414d95d5805b1dc221d22db9b8dc5287ef083bc upstream.
+
+Fix a (harmless) misplaced #endif leading to declarations
+appearing multiple times.
+
+Fixes: 0eb2a18a8fad ("drm/xe: Implement VM snapshot support for BO's and userptr")
+Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Cc: José Roberto de Souza <jose.souza@intel.com>
+Cc: <stable@vger.kernel.org> # v6.12+
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Reviewed-by: Tejas Upadhyay <tejas.upadhyay@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250228073058.59510-3-thomas.hellstrom@linux.intel.com
+(cherry picked from commit fcc20a4c752214b3e25632021c57d7d1d71ee1dd)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_vm.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/xe/xe_vm.h
++++ b/drivers/gpu/drm/xe/xe_vm.h
+@@ -275,9 +275,9 @@ static inline void vm_dbg(const struct d
+ const char *format, ...)
+ { /* noop */ }
+ #endif
+-#endif
+
+ struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
+ void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
+ void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
+ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
++#endif
--- /dev/null
+From e775e2a060d99180edc5366fb9f4299d0f07b66c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= <thomas.hellstrom@linux.intel.com>
+Date: Fri, 28 Feb 2025 08:30:55 +0100
+Subject: drm/xe/vm: Validate userptr during gpu vma prefetching
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+
+commit e775e2a060d99180edc5366fb9f4299d0f07b66c upstream.
+
+If a userptr vma subject to prefetching was already invalidated
+or invalidated during the prefetch operation, the operation would
+repeatedly return -EAGAIN which would typically cause an infinite
+loop.
+
+Validate the userptr to ensure this doesn't happen.
+
+v2:
+- Don't fallthrough from UNMAP to PREFETCH (Matthew Brost)
+
+Fixes: 5bd24e78829a ("drm/xe/vm: Subclass userptr vmas")
+Fixes: 617eebb9c480 ("drm/xe: Fix array of binds")
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: <stable@vger.kernel.org> # v6.9+
+Suggested-by: Matthew Brost <matthew.brost@intel.com>
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250228073058.59510-2-thomas.hellstrom@linux.intel.com
+(cherry picked from commit 03c346d4d0d85d210d549d43c8cfb3dfb7f20e0a)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_vm.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -2284,8 +2284,17 @@ static int vm_bind_ioctl_ops_parse(struc
+ break;
+ }
+ case DRM_GPUVA_OP_UNMAP:
++ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
++ break;
+ case DRM_GPUVA_OP_PREFETCH:
+- /* FIXME: Need to skip some prefetch ops */
++ vma = gpuva_to_vma(op->base.prefetch.va);
++
++ if (xe_vma_is_userptr(vma)) {
++ err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
++ if (err)
++ return err;
++ }
++
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ break;
+ default:
--- /dev/null
+From 12f65d1203507f7db3ba59930fe29a3b8eee9945 Mon Sep 17 00:00:00 2001
+From: Koichiro Den <koichiro.den@canonical.com>
+Date: Mon, 24 Feb 2025 23:31:26 +0900
+Subject: gpio: aggregator: protect driver attr handlers against module unload
+
+From: Koichiro Den <koichiro.den@canonical.com>
+
+commit 12f65d1203507f7db3ba59930fe29a3b8eee9945 upstream.
+
+Both new_device_store and delete_device_store touch module global
+resources (e.g. gpio_aggregator_lock). To prevent race conditions with
+module unload, a reference needs to be held.
+
+Add try_module_get() in these handlers.
+
+For new_device_store, this eliminates what appears to be the most dangerous
+scenario: if an id is allocated from gpio_aggregator_idr but
+platform_device_register has not yet been called or completed, a concurrent
+module unload could fail to unregister/delete the device, leaving behind a
+dangling platform device/GPIO forwarder. This can result in various issues.
+The following simple reproducer demonstrates these problems:
+
+ #!/bin/bash
+ while :; do
+ # note: whether 'gpiochip0 0' exists or not does not matter.
+ echo 'gpiochip0 0' > /sys/bus/platform/drivers/gpio-aggregator/new_device
+ done &
+ while :; do
+ modprobe gpio-aggregator
+ modprobe -r gpio-aggregator
+ done &
+ wait
+
+ Starting with the following warning, several kinds of warnings will appear
+ and the system may become unstable:
+
+ ------------[ cut here ]------------
+ list_del corruption, ffff888103e2e980->next is LIST_POISON1 (dead000000000100)
+ WARNING: CPU: 1 PID: 1327 at lib/list_debug.c:56 __list_del_entry_valid_or_report+0xa3/0x120
+ [...]
+ RIP: 0010:__list_del_entry_valid_or_report+0xa3/0x120
+ [...]
+ Call Trace:
+ <TASK>
+ ? __list_del_entry_valid_or_report+0xa3/0x120
+ ? __warn.cold+0x93/0xf2
+ ? __list_del_entry_valid_or_report+0xa3/0x120
+ ? report_bug+0xe6/0x170
+ ? __irq_work_queue_local+0x39/0xe0
+ ? handle_bug+0x58/0x90
+ ? exc_invalid_op+0x13/0x60
+ ? asm_exc_invalid_op+0x16/0x20
+ ? __list_del_entry_valid_or_report+0xa3/0x120
+ gpiod_remove_lookup_table+0x22/0x60
+ new_device_store+0x315/0x350 [gpio_aggregator]
+ kernfs_fop_write_iter+0x137/0x1f0
+ vfs_write+0x262/0x430
+ ksys_write+0x60/0xd0
+ do_syscall_64+0x6c/0x180
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+ [...]
+ </TASK>
+ ---[ end trace 0000000000000000 ]---
+
+Fixes: 828546e24280 ("gpio: Add GPIO Aggregator")
+Cc: stable@vger.kernel.org
+Signed-off-by: Koichiro Den <koichiro.den@canonical.com>
+Link: https://lore.kernel.org/r/20250224143134.3024598-2-koichiro.den@canonical.com
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpio-aggregator.c | 20 +++++++++++++++++---
+ 1 file changed, 17 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpio/gpio-aggregator.c
++++ b/drivers/gpio/gpio-aggregator.c
+@@ -119,10 +119,15 @@ static ssize_t new_device_store(struct d
+ struct platform_device *pdev;
+ int res, id;
+
++ if (!try_module_get(THIS_MODULE))
++ return -ENOENT;
++
+ /* kernfs guarantees string termination, so count + 1 is safe */
+ aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL);
+- if (!aggr)
+- return -ENOMEM;
++ if (!aggr) {
++ res = -ENOMEM;
++ goto put_module;
++ }
+
+ memcpy(aggr->args, buf, count + 1);
+
+@@ -161,6 +166,7 @@ static ssize_t new_device_store(struct d
+ }
+
+ aggr->pdev = pdev;
++ module_put(THIS_MODULE);
+ return count;
+
+ remove_table:
+@@ -175,6 +181,8 @@ free_table:
+ kfree(aggr->lookups);
+ free_ga:
+ kfree(aggr);
++put_module:
++ module_put(THIS_MODULE);
+ return res;
+ }
+
+@@ -203,13 +211,19 @@ static ssize_t delete_device_store(struc
+ if (error)
+ return error;
+
++ if (!try_module_get(THIS_MODULE))
++ return -ENOENT;
++
+ mutex_lock(&gpio_aggregator_lock);
+ aggr = idr_remove(&gpio_aggregator_idr, id);
+ mutex_unlock(&gpio_aggregator_lock);
+- if (!aggr)
++ if (!aggr) {
++ module_put(THIS_MODULE);
+ return -ENOENT;
++ }
+
+ gpio_aggregator_free(aggr);
++ module_put(THIS_MODULE);
+ return count;
+ }
+ static DRIVER_ATTR_WO(delete_device);
--- /dev/null
+From f02c41f87cfe61440c18bf77d1ef0a884b9ee2b5 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Niklas=20S=C3=B6derlund?=
+ <niklas.soderlund+renesas@ragnatech.se>
+Date: Tue, 21 Jan 2025 14:58:33 +0100
+Subject: gpio: rcar: Use raw_spinlock to protect register access
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
+
+commit f02c41f87cfe61440c18bf77d1ef0a884b9ee2b5 upstream.
+
+Use raw_spinlock in order to fix spurious messages about invalid context
+when spinlock debugging is enabled. The lock is only used to serialize
+register access.
+
+ [ 4.239592] =============================
+ [ 4.239595] [ BUG: Invalid wait context ]
+ [ 4.239599] 6.13.0-rc7-arm64-renesas-05496-gd088502a519f #35 Not tainted
+ [ 4.239603] -----------------------------
+ [ 4.239606] kworker/u8:5/76 is trying to lock:
+ [ 4.239609] ffff0000091898a0 (&p->lock){....}-{3:3}, at: gpio_rcar_config_interrupt_input_mode+0x34/0x164
+ [ 4.239641] other info that might help us debug this:
+ [ 4.239643] context-{5:5}
+ [ 4.239646] 5 locks held by kworker/u8:5/76:
+ [ 4.239651] #0: ffff0000080fb148 ((wq_completion)async){+.+.}-{0:0}, at: process_one_work+0x190/0x62c
+ [ 4.250180] OF: /soc/sound@ec500000/ports/port@0/endpoint: Read of boolean property 'frame-master' with a value.
+ [ 4.254094] #1: ffff80008299bd80 ((work_completion)(&entry->work)){+.+.}-{0:0}, at: process_one_work+0x1b8/0x62c
+ [ 4.254109] #2: ffff00000920c8f8
+ [ 4.258345] OF: /soc/sound@ec500000/ports/port@1/endpoint: Read of boolean property 'bitclock-master' with a value.
+ [ 4.264803] (&dev->mutex){....}-{4:4}, at: __device_attach_async_helper+0x3c/0xdc
+ [ 4.264820] #3: ffff00000a50ca40 (request_class#2){+.+.}-{4:4}, at: __setup_irq+0xa0/0x690
+ [ 4.264840] #4:
+ [ 4.268872] OF: /soc/sound@ec500000/ports/port@1/endpoint: Read of boolean property 'frame-master' with a value.
+ [ 4.273275] ffff00000a50c8c8 (lock_class){....}-{2:2}, at: __setup_irq+0xc4/0x690
+ [ 4.296130] renesas_sdhi_internal_dmac ee100000.mmc: mmc1 base at 0x00000000ee100000, max clock rate 200 MHz
+ [ 4.304082] stack backtrace:
+ [ 4.304086] CPU: 1 UID: 0 PID: 76 Comm: kworker/u8:5 Not tainted 6.13.0-rc7-arm64-renesas-05496-gd088502a519f #35
+ [ 4.304092] Hardware name: Renesas Salvator-X 2nd version board based on r8a77965 (DT)
+ [ 4.304097] Workqueue: async async_run_entry_fn
+ [ 4.304106] Call trace:
+ [ 4.304110] show_stack+0x14/0x20 (C)
+ [ 4.304122] dump_stack_lvl+0x6c/0x90
+ [ 4.304131] dump_stack+0x14/0x1c
+ [ 4.304138] __lock_acquire+0xdfc/0x1584
+ [ 4.426274] lock_acquire+0x1c4/0x33c
+ [ 4.429942] _raw_spin_lock_irqsave+0x5c/0x80
+ [ 4.434307] gpio_rcar_config_interrupt_input_mode+0x34/0x164
+ [ 4.440061] gpio_rcar_irq_set_type+0xd4/0xd8
+ [ 4.444422] __irq_set_trigger+0x5c/0x178
+ [ 4.448435] __setup_irq+0x2e4/0x690
+ [ 4.452012] request_threaded_irq+0xc4/0x190
+ [ 4.456285] devm_request_threaded_irq+0x7c/0xf4
+ [ 4.459398] ata1: link resume succeeded after 1 retries
+ [ 4.460902] mmc_gpiod_request_cd_irq+0x68/0xe0
+ [ 4.470660] mmc_start_host+0x50/0xac
+ [ 4.474327] mmc_add_host+0x80/0xe4
+ [ 4.477817] tmio_mmc_host_probe+0x2b0/0x440
+ [ 4.482094] renesas_sdhi_probe+0x488/0x6f4
+ [ 4.486281] renesas_sdhi_internal_dmac_probe+0x60/0x78
+ [ 4.491509] platform_probe+0x64/0xd8
+ [ 4.495178] really_probe+0xb8/0x2a8
+ [ 4.498756] __driver_probe_device+0x74/0x118
+ [ 4.503116] driver_probe_device+0x3c/0x154
+ [ 4.507303] __device_attach_driver+0xd4/0x160
+ [ 4.511750] bus_for_each_drv+0x84/0xe0
+ [ 4.515588] __device_attach_async_helper+0xb0/0xdc
+ [ 4.520470] async_run_entry_fn+0x30/0xd8
+ [ 4.524481] process_one_work+0x210/0x62c
+ [ 4.528494] worker_thread+0x1ac/0x340
+ [ 4.532245] kthread+0x10c/0x110
+ [ 4.535476] ret_from_fork+0x10/0x20
+
+Signed-off-by: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
+Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20250121135833.3769310-1-niklas.soderlund+renesas@ragnatech.se
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpio-rcar.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/drivers/gpio/gpio-rcar.c
++++ b/drivers/gpio/gpio-rcar.c
+@@ -40,7 +40,7 @@ struct gpio_rcar_info {
+
+ struct gpio_rcar_priv {
+ void __iomem *base;
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ struct device *dev;
+ struct gpio_chip gpio_chip;
+ unsigned int irq_parent;
+@@ -123,7 +123,7 @@ static void gpio_rcar_config_interrupt_i
+ * "Setting Level-Sensitive Interrupt Input Mode"
+ */
+
+- spin_lock_irqsave(&p->lock, flags);
++ raw_spin_lock_irqsave(&p->lock, flags);
+
+ /* Configure positive or negative logic in POSNEG */
+ gpio_rcar_modify_bit(p, POSNEG, hwirq, !active_high_rising_edge);
+@@ -142,7 +142,7 @@ static void gpio_rcar_config_interrupt_i
+ if (!level_trigger)
+ gpio_rcar_write(p, INTCLR, BIT(hwirq));
+
+- spin_unlock_irqrestore(&p->lock, flags);
++ raw_spin_unlock_irqrestore(&p->lock, flags);
+ }
+
+ static int gpio_rcar_irq_set_type(struct irq_data *d, unsigned int type)
+@@ -246,7 +246,7 @@ static void gpio_rcar_config_general_inp
+ * "Setting General Input Mode"
+ */
+
+- spin_lock_irqsave(&p->lock, flags);
++ raw_spin_lock_irqsave(&p->lock, flags);
+
+ /* Configure positive logic in POSNEG */
+ gpio_rcar_modify_bit(p, POSNEG, gpio, false);
+@@ -261,7 +261,7 @@ static void gpio_rcar_config_general_inp
+ if (p->info.has_outdtsel && output)
+ gpio_rcar_modify_bit(p, OUTDTSEL, gpio, false);
+
+- spin_unlock_irqrestore(&p->lock, flags);
++ raw_spin_unlock_irqrestore(&p->lock, flags);
+ }
+
+ static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
+@@ -347,7 +347,7 @@ static int gpio_rcar_get_multiple(struct
+ return 0;
+ }
+
+- spin_lock_irqsave(&p->lock, flags);
++ raw_spin_lock_irqsave(&p->lock, flags);
+ outputs = gpio_rcar_read(p, INOUTSEL);
+ m = outputs & bankmask;
+ if (m)
+@@ -356,7 +356,7 @@ static int gpio_rcar_get_multiple(struct
+ m = ~outputs & bankmask;
+ if (m)
+ val |= gpio_rcar_read(p, INDT) & m;
+- spin_unlock_irqrestore(&p->lock, flags);
++ raw_spin_unlock_irqrestore(&p->lock, flags);
+
+ bits[0] = val;
+ return 0;
+@@ -367,9 +367,9 @@ static void gpio_rcar_set(struct gpio_ch
+ struct gpio_rcar_priv *p = gpiochip_get_data(chip);
+ unsigned long flags;
+
+- spin_lock_irqsave(&p->lock, flags);
++ raw_spin_lock_irqsave(&p->lock, flags);
+ gpio_rcar_modify_bit(p, OUTDT, offset, value);
+- spin_unlock_irqrestore(&p->lock, flags);
++ raw_spin_unlock_irqrestore(&p->lock, flags);
+ }
+
+ static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+@@ -386,12 +386,12 @@ static void gpio_rcar_set_multiple(struc
+ if (!bankmask)
+ return;
+
+- spin_lock_irqsave(&p->lock, flags);
++ raw_spin_lock_irqsave(&p->lock, flags);
+ val = gpio_rcar_read(p, OUTDT);
+ val &= ~bankmask;
+ val |= (bankmask & bits[0]);
+ gpio_rcar_write(p, OUTDT, val);
+- spin_unlock_irqrestore(&p->lock, flags);
++ raw_spin_unlock_irqrestore(&p->lock, flags);
+ }
+
+ static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
+@@ -505,7 +505,7 @@ static int gpio_rcar_probe(struct platfo
+ return -ENOMEM;
+
+ p->dev = dev;
+- spin_lock_init(&p->lock);
++ raw_spin_lock_init(&p->lock);
+
+ /* Get device configuration from DT node */
+ ret = gpio_rcar_parse_dt(p, &npins);
--- /dev/null
+From 2ff5baa9b5275e3acafdf7f2089f74cccb2f38d1 Mon Sep 17 00:00:00 2001
+From: Daniil Dulov <d.dulov@aladdin.ru>
+Date: Mon, 24 Feb 2025 20:30:30 +0300
+Subject: HID: appleir: Fix potential NULL dereference at raw event handle
+
+From: Daniil Dulov <d.dulov@aladdin.ru>
+
+commit 2ff5baa9b5275e3acafdf7f2089f74cccb2f38d1 upstream.
+
+Syzkaller reports a NULL pointer dereference issue in input_event().
+
+BUG: KASAN: null-ptr-deref in instrument_atomic_read include/linux/instrumented.h:68 [inline]
+BUG: KASAN: null-ptr-deref in _test_bit include/asm-generic/bitops/instrumented-non-atomic.h:141 [inline]
+BUG: KASAN: null-ptr-deref in is_event_supported drivers/input/input.c:67 [inline]
+BUG: KASAN: null-ptr-deref in input_event+0x42/0xa0 drivers/input/input.c:395
+Read of size 8 at addr 0000000000000028 by task syz-executor199/2949
+
+CPU: 0 UID: 0 PID: 2949 Comm: syz-executor199 Not tainted 6.13.0-rc4-syzkaller-00076-gf097a36ef88d #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 09/13/2024
+Call Trace:
+ <IRQ>
+ __dump_stack lib/dump_stack.c:94 [inline]
+ dump_stack_lvl+0x116/0x1f0 lib/dump_stack.c:120
+ kasan_report+0xd9/0x110 mm/kasan/report.c:602
+ check_region_inline mm/kasan/generic.c:183 [inline]
+ kasan_check_range+0xef/0x1a0 mm/kasan/generic.c:189
+ instrument_atomic_read include/linux/instrumented.h:68 [inline]
+ _test_bit include/asm-generic/bitops/instrumented-non-atomic.h:141 [inline]
+ is_event_supported drivers/input/input.c:67 [inline]
+ input_event+0x42/0xa0 drivers/input/input.c:395
+ input_report_key include/linux/input.h:439 [inline]
+ key_down drivers/hid/hid-appleir.c:159 [inline]
+ appleir_raw_event+0x3e5/0x5e0 drivers/hid/hid-appleir.c:232
+ __hid_input_report.constprop.0+0x312/0x440 drivers/hid/hid-core.c:2111
+ hid_ctrl+0x49f/0x550 drivers/hid/usbhid/hid-core.c:484
+ __usb_hcd_giveback_urb+0x389/0x6e0 drivers/usb/core/hcd.c:1650
+ usb_hcd_giveback_urb+0x396/0x450 drivers/usb/core/hcd.c:1734
+ dummy_timer+0x17f7/0x3960 drivers/usb/gadget/udc/dummy_hcd.c:1993
+ __run_hrtimer kernel/time/hrtimer.c:1739 [inline]
+ __hrtimer_run_queues+0x20a/0xae0 kernel/time/hrtimer.c:1803
+ hrtimer_run_softirq+0x17d/0x350 kernel/time/hrtimer.c:1820
+ handle_softirqs+0x206/0x8d0 kernel/softirq.c:561
+ __do_softirq kernel/softirq.c:595 [inline]
+ invoke_softirq kernel/softirq.c:435 [inline]
+ __irq_exit_rcu+0xfa/0x160 kernel/softirq.c:662
+ irq_exit_rcu+0x9/0x30 kernel/softirq.c:678
+ instr_sysvec_apic_timer_interrupt arch/x86/kernel/apic/apic.c:1049 [inline]
+ sysvec_apic_timer_interrupt+0x90/0xb0 arch/x86/kernel/apic/apic.c:1049
+ </IRQ>
+ <TASK>
+ asm_sysvec_apic_timer_interrupt+0x1a/0x20 arch/x86/include/asm/idtentry.h:702
+ __mod_timer+0x8f6/0xdc0 kernel/time/timer.c:1185
+ add_timer+0x62/0x90 kernel/time/timer.c:1295
+ schedule_timeout+0x11f/0x280 kernel/time/sleep_timeout.c:98
+ usbhid_wait_io+0x1c7/0x380 drivers/hid/usbhid/hid-core.c:645
+ usbhid_init_reports+0x19f/0x390 drivers/hid/usbhid/hid-core.c:784
+ hiddev_ioctl+0x1133/0x15b0 drivers/hid/usbhid/hiddev.c:794
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:906 [inline]
+ __se_sys_ioctl fs/ioctl.c:892 [inline]
+ __x64_sys_ioctl+0x190/0x200 fs/ioctl.c:892
+ do_syscall_x64 arch/x86/entry/common.c:52 [inline]
+ do_syscall_64+0xcd/0x250 arch/x86/entry/common.c:83
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+ </TASK>
+
+This happens due to the malformed report items sent by the emulated device
+which results in a report, that has no fields, being added to the report list.
+Due to this appleir_input_configured() is never called, hidinput_connect()
+fails, which results in the HID_CLAIMED_INPUT flag not being set. However,
+this does not make appleir_probe() fail, and the event callback can still be
+called without the associated input device.
+
+Thus, add a check for the HID_CLAIMED_INPUT flag and leave the event hook
+early if the driver didn't claim any input_dev for some reason. Moreover,
+some other hid drivers accessing input_dev in their event callbacks do have
+similar checks, too.
+
+Found by Linux Verification Center (linuxtesting.org) with Syzkaller.
+
+Fixes: 9a4a5574ce42 ("HID: appleir: add support for Apple ir devices")
+Cc: stable@vger.kernel.org
+Signed-off-by: Daniil Dulov <d.dulov@aladdin.ru>
+Signed-off-by: Jiri Kosina <jkosina@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hid/hid-appleir.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/hid/hid-appleir.c
++++ b/drivers/hid/hid-appleir.c
+@@ -188,7 +188,7 @@ static int appleir_raw_event(struct hid_
+ static const u8 flatbattery[] = { 0x25, 0x87, 0xe0 };
+ unsigned long flags;
+
+- if (len != 5)
++ if (len != 5 || !(hid->claimed & HID_CLAIMED_INPUT))
+ goto out;
+
+ if (!memcmp(data, keydown, sizeof(keydown))) {
--- /dev/null
+From 0c28e4d1e10d2aae608094620bb386e6fd73d55e Mon Sep 17 00:00:00 2001
+From: Stuart Hayhurst <stuart.a.hayhurst@gmail.com>
+Date: Thu, 13 Feb 2025 13:38:49 +0000
+Subject: HID: corsair-void: Update power supply values with a unified work handler
+
+From: Stuart Hayhurst <stuart.a.hayhurst@gmail.com>
+
+commit 0c28e4d1e10d2aae608094620bb386e6fd73d55e upstream.
+
+corsair_void_process_receiver can be called from an interrupt context,
+locking battery_mutex in it was causing a kernel panic.
+Fix it by moving the critical section into its own work, sharing this
+work with battery_add_work and battery_remove_work to remove the need
+for any locking
+
+Closes: https://bugzilla.suse.com/show_bug.cgi?id=1236843
+Fixes: 6ea2a6fd3872 ("HID: corsair-void: Add Corsair Void headset family driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Stuart Hayhurst <stuart.a.hayhurst@gmail.com>
+Reviewed-by: Jiri Slaby <jirislaby@kernel.org>
+Signed-off-by: Jiri Kosina <jkosina@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hid/hid-corsair-void.c | 83 ++++++++++++++++++----------------
+ 1 file changed, 43 insertions(+), 40 deletions(-)
+
+diff --git a/drivers/hid/hid-corsair-void.c b/drivers/hid/hid-corsair-void.c
+index 56e858066c3c..afbd67aa9719 100644
+--- a/drivers/hid/hid-corsair-void.c
++++ b/drivers/hid/hid-corsair-void.c
+@@ -71,11 +71,9 @@
+
+ #include <linux/bitfield.h>
+ #include <linux/bitops.h>
+-#include <linux/cleanup.h>
+ #include <linux/device.h>
+ #include <linux/hid.h>
+ #include <linux/module.h>
+-#include <linux/mutex.h>
+ #include <linux/power_supply.h>
+ #include <linux/usb.h>
+ #include <linux/workqueue.h>
+@@ -120,6 +118,12 @@ enum {
+ CORSAIR_VOID_BATTERY_CHARGING = 5,
+ };
+
++enum {
++ CORSAIR_VOID_ADD_BATTERY = 0,
++ CORSAIR_VOID_REMOVE_BATTERY = 1,
++ CORSAIR_VOID_UPDATE_BATTERY = 2,
++};
++
+ static enum power_supply_property corsair_void_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+@@ -155,12 +159,12 @@ struct corsair_void_drvdata {
+
+ struct power_supply *battery;
+ struct power_supply_desc battery_desc;
+- struct mutex battery_mutex;
+
+ struct delayed_work delayed_status_work;
+ struct delayed_work delayed_firmware_work;
+- struct work_struct battery_remove_work;
+- struct work_struct battery_add_work;
++
++ unsigned long battery_work_flags;
++ struct work_struct battery_work;
+ };
+
+ /*
+@@ -260,11 +264,9 @@ static void corsair_void_process_receiver(struct corsair_void_drvdata *drvdata,
+
+ /* Inform power supply if battery values changed */
+ if (memcmp(&orig_battery_data, battery_data, sizeof(*battery_data))) {
+- scoped_guard(mutex, &drvdata->battery_mutex) {
+- if (drvdata->battery) {
+- power_supply_changed(drvdata->battery);
+- }
+- }
++ set_bit(CORSAIR_VOID_UPDATE_BATTERY,
++ &drvdata->battery_work_flags);
++ schedule_work(&drvdata->battery_work);
+ }
+ }
+
+@@ -536,29 +538,11 @@ static void corsair_void_firmware_work_handler(struct work_struct *work)
+
+ }
+
+-static void corsair_void_battery_remove_work_handler(struct work_struct *work)
++static void corsair_void_add_battery(struct corsair_void_drvdata *drvdata)
+ {
+- struct corsair_void_drvdata *drvdata;
+-
+- drvdata = container_of(work, struct corsair_void_drvdata,
+- battery_remove_work);
+- scoped_guard(mutex, &drvdata->battery_mutex) {
+- if (drvdata->battery) {
+- power_supply_unregister(drvdata->battery);
+- drvdata->battery = NULL;
+- }
+- }
+-}
+-
+-static void corsair_void_battery_add_work_handler(struct work_struct *work)
+-{
+- struct corsair_void_drvdata *drvdata;
+ struct power_supply_config psy_cfg = {};
+ struct power_supply *new_supply;
+
+- drvdata = container_of(work, struct corsair_void_drvdata,
+- battery_add_work);
+- guard(mutex)(&drvdata->battery_mutex);
+ if (drvdata->battery)
+ return;
+
+@@ -583,16 +567,42 @@ static void corsair_void_battery_add_work_handler(struct work_struct *work)
+ drvdata->battery = new_supply;
+ }
+
++static void corsair_void_battery_work_handler(struct work_struct *work)
++{
++ struct corsair_void_drvdata *drvdata = container_of(work,
++ struct corsair_void_drvdata, battery_work);
++
++ bool add_battery = test_and_clear_bit(CORSAIR_VOID_ADD_BATTERY,
++ &drvdata->battery_work_flags);
++ bool remove_battery = test_and_clear_bit(CORSAIR_VOID_REMOVE_BATTERY,
++ &drvdata->battery_work_flags);
++ bool update_battery = test_and_clear_bit(CORSAIR_VOID_UPDATE_BATTERY,
++ &drvdata->battery_work_flags);
++
++ if (add_battery && !remove_battery) {
++ corsair_void_add_battery(drvdata);
++ } else if (remove_battery && !add_battery && drvdata->battery) {
++ power_supply_unregister(drvdata->battery);
++ drvdata->battery = NULL;
++ }
++
++ if (update_battery && drvdata->battery)
++ power_supply_changed(drvdata->battery);
++
++}
++
+ static void corsair_void_headset_connected(struct corsair_void_drvdata *drvdata)
+ {
+- schedule_work(&drvdata->battery_add_work);
++ set_bit(CORSAIR_VOID_ADD_BATTERY, &drvdata->battery_work_flags);
++ schedule_work(&drvdata->battery_work);
+ schedule_delayed_work(&drvdata->delayed_firmware_work,
+ msecs_to_jiffies(100));
+ }
+
+ static void corsair_void_headset_disconnected(struct corsair_void_drvdata *drvdata)
+ {
+- schedule_work(&drvdata->battery_remove_work);
++ set_bit(CORSAIR_VOID_REMOVE_BATTERY, &drvdata->battery_work_flags);
++ schedule_work(&drvdata->battery_work);
+
+ corsair_void_set_unknown_wireless_data(drvdata);
+ corsair_void_set_unknown_batt(drvdata);
+@@ -678,13 +688,7 @@ static int corsair_void_probe(struct hid_device *hid_dev,
+ drvdata->battery_desc.get_property = corsair_void_battery_get_property;
+
+ drvdata->battery = NULL;
+- INIT_WORK(&drvdata->battery_remove_work,
+- corsair_void_battery_remove_work_handler);
+- INIT_WORK(&drvdata->battery_add_work,
+- corsair_void_battery_add_work_handler);
+- ret = devm_mutex_init(drvdata->dev, &drvdata->battery_mutex);
+- if (ret)
+- return ret;
++ INIT_WORK(&drvdata->battery_work, corsair_void_battery_work_handler);
+
+ ret = sysfs_create_group(&hid_dev->dev.kobj, &corsair_void_attr_group);
+ if (ret)
+@@ -721,8 +725,7 @@ static void corsair_void_remove(struct hid_device *hid_dev)
+ struct corsair_void_drvdata *drvdata = hid_get_drvdata(hid_dev);
+
+ hid_hw_stop(hid_dev);
+- cancel_work_sync(&drvdata->battery_remove_work);
+- cancel_work_sync(&drvdata->battery_add_work);
++ cancel_work_sync(&drvdata->battery_work);
+ if (drvdata->battery)
+ power_supply_unregister(drvdata->battery);
+
+--
+2.48.1
+
--- /dev/null
+From 5797c04400ee117bfe459ff1e468d0ea38054ab4 Mon Sep 17 00:00:00 2001
+From: Paul Fertser <fercerpav@gmail.com>
+Date: Thu, 23 Jan 2025 15:20:02 +0300
+Subject: hwmon: (peci/dimmtemp) Do not provide fake thresholds data
+
+From: Paul Fertser <fercerpav@gmail.com>
+
+commit 5797c04400ee117bfe459ff1e468d0ea38054ab4 upstream.
+
+When an Icelake or Sapphire Rapids CPU isn't providing the maximum and
+critical thresholds for particular DIMM the driver should return an
+error to the userspace instead of giving it stale (best case) or wrong
+(the structure contains all zeros after kzalloc() call) data.
+
+The issue can be reproduced by binding the peci driver while the host is
+fully booted and idle, this makes PECI interaction unreliable enough.
+
+Fixes: 73bc1b885dae ("hwmon: peci: Add dimmtemp driver")
+Fixes: 621995b6d795 ("hwmon: (peci/dimmtemp) Add Sapphire Rapids support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paul Fertser <fercerpav@gmail.com>
+Reviewed-by: Iwona Winiarska <iwona.winiarska@intel.com>
+Link: https://lore.kernel.org/r/20250123122003.6010-1-fercerpav@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hwmon/peci/dimmtemp.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/drivers/hwmon/peci/dimmtemp.c
++++ b/drivers/hwmon/peci/dimmtemp.c
+@@ -127,8 +127,6 @@ static int update_thresholds(struct peci
+ return 0;
+
+ ret = priv->gen_info->read_thresholds(priv, dimm_order, chan_rank, &data);
+- if (ret == -ENODATA) /* Use default or previous value */
+- return 0;
+ if (ret)
+ return ret;
+
+@@ -509,11 +507,11 @@ read_thresholds_icx(struct peci_dimmtemp
+
+ ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd4, ®_val);
+ if (ret || !(reg_val & BIT(31)))
+- return -ENODATA; /* Use default or previous value */
++ return -ENODATA;
+
+ ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd0, ®_val);
+ if (ret)
+- return -ENODATA; /* Use default or previous value */
++ return -ENODATA;
+
+ /*
+ * Device 26, Offset 224e0: IMC 0 channel 0 -> rank 0
+@@ -546,11 +544,11 @@ read_thresholds_spr(struct peci_dimmtemp
+
+ ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd4, ®_val);
+ if (ret || !(reg_val & BIT(31)))
+- return -ENODATA; /* Use default or previous value */
++ return -ENODATA;
+
+ ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd0, ®_val);
+ if (ret)
+- return -ENODATA; /* Use default or previous value */
++ return -ENODATA;
+
+ /*
+ * Device 26, Offset 219a8: IMC 0 channel 0 -> rank 0
--- /dev/null
+From e26e2d2e15daf1ab33e0135caf2304a0cfa2744b Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Thu, 27 Feb 2025 15:49:10 +0900
+Subject: ksmbd: fix bug on trap in smb2_lock
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit e26e2d2e15daf1ab33e0135caf2304a0cfa2744b upstream.
+
+If the lock count is greater than 1, "flags" could hold an old value.
+The check should use the flags of smb_lock, not "flags".
+Otherwise it will cause a bug-on trap from locks_free_lock in the error
+handling routine.
+
+Cc: stable@vger.kernel.org
+Reported-by: Norbert Szetei <norbert@doyensec.com>
+Tested-by: Norbert Szetei <norbert@doyensec.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/smb2pdu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -7467,7 +7467,7 @@ no_check_cl:
+ retry:
+ rc = vfs_lock_file(filp, smb_lock->cmd, flock, NULL);
+ skip:
+- if (flags & SMB2_LOCKFLAG_UNLOCK) {
++ if (smb_lock->flags & SMB2_LOCKFLAG_UNLOCK) {
+ if (!rc) {
+ ksmbd_debug(SMB, "File unlocked\n");
+ } else if (rc == -ENOENT) {
--- /dev/null
+From d6e13e19063db24f94b690159d0633aaf72a0f03 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Tue, 18 Feb 2025 22:49:50 +0900
+Subject: ksmbd: fix out-of-bounds in parse_sec_desc()
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit d6e13e19063db24f94b690159d0633aaf72a0f03 upstream.
+
+osidoffset, gsidoffset and dacloffset should not be smaller than the
+smb_ntsd struct size; if one of them is, it could cause a
+slab-out-of-bounds access. And when validating a SID, the check needs to
+include the size of the subauth array.
+
+Cc: stable@vger.kernel.org
+Reported-by: Norbert Szetei <norbert@doyensec.com>
+Tested-by: Norbert Szetei <norbert@doyensec.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/smbacl.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/fs/smb/server/smbacl.c
++++ b/fs/smb/server/smbacl.c
+@@ -807,6 +807,13 @@ static int parse_sid(struct smb_sid *psi
+ return -EINVAL;
+ }
+
++ if (!psid->num_subauth)
++ return 0;
++
++ if (psid->num_subauth > SID_MAX_SUB_AUTHORITIES ||
++ end_of_acl < (char *)psid + 8 + sizeof(__le32) * psid->num_subauth)
++ return -EINVAL;
++
+ return 0;
+ }
+
+@@ -848,6 +855,9 @@ int parse_sec_desc(struct mnt_idmap *idm
+ pntsd->type = cpu_to_le16(DACL_PRESENT);
+
+ if (pntsd->osidoffset) {
++ if (le32_to_cpu(pntsd->osidoffset) < sizeof(struct smb_ntsd))
++ return -EINVAL;
++
+ rc = parse_sid(owner_sid_ptr, end_of_acl);
+ if (rc) {
+ pr_err("%s: Error %d parsing Owner SID\n", __func__, rc);
+@@ -863,6 +873,9 @@ int parse_sec_desc(struct mnt_idmap *idm
+ }
+
+ if (pntsd->gsidoffset) {
++ if (le32_to_cpu(pntsd->gsidoffset) < sizeof(struct smb_ntsd))
++ return -EINVAL;
++
+ rc = parse_sid(group_sid_ptr, end_of_acl);
+ if (rc) {
+ pr_err("%s: Error %d mapping Owner SID to gid\n",
+@@ -884,6 +897,9 @@ int parse_sec_desc(struct mnt_idmap *idm
+ pntsd->type |= cpu_to_le16(DACL_PROTECTED);
+
+ if (dacloffset) {
++ if (dacloffset < sizeof(struct smb_ntsd))
++ return -EINVAL;
++
+ parse_dacl(idmap, dacl_ptr, end_of_acl,
+ owner_sid_ptr, group_sid_ptr, fattr);
+ }
--- /dev/null
+From e2ff19f0b7a30e03516e6eb73b948e27a55bc9d2 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Fri, 21 Feb 2025 14:16:23 +0900
+Subject: ksmbd: fix type confusion via race condition when using ipc_msg_send_request
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit e2ff19f0b7a30e03516e6eb73b948e27a55bc9d2 upstream.
+
+req->handle is allocated using ksmbd_acquire_id(&ipc_ida), based on
+ida_alloc. req->handle from ksmbd_ipc_login_request and
+FSCTL_PIPE_TRANSCEIVE ioctl can be same and it could lead to type confusion
+between messages, resulting in access to unexpected parts of memory after
+an incorrect delivery. ksmbd checks the type of an ipc response, but was
+missing a "continue" to keep checking the next ipc response.
+
+Cc: stable@vger.kernel.org
+Reported-by: Norbert Szetei <norbert@doyensec.com>
+Tested-by: Norbert Szetei <norbert@doyensec.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/transport_ipc.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/smb/server/transport_ipc.c
++++ b/fs/smb/server/transport_ipc.c
+@@ -281,6 +281,7 @@ static int handle_response(int type, voi
+ if (entry->type + 1 != type) {
+ pr_err("Waiting for IPC type %d, got %d. Ignore.\n",
+ entry->type + 1, type);
++ continue;
+ }
+
+ entry->response = kvzalloc(sz, KSMBD_DEFAULT_GFP);
--- /dev/null
+From 84d2d1641b71dec326e8736a749b7ee76a9599fc Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 26 Feb 2025 15:44:02 +0900
+Subject: ksmbd: fix use-after-free in smb2_lock
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 84d2d1641b71dec326e8736a749b7ee76a9599fc upstream.
+
+If smb_lock->zero_len has a value, the ->llist of smb_lock is not deleted
+and flock is the old one. This will cause a use-after-free in the error
+handling routine.
+
+Cc: stable@vger.kernel.org
+Reported-by: Norbert Szetei <norbert@doyensec.com>
+Tested-by: Norbert Szetei <norbert@doyensec.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/smb2pdu.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -7457,13 +7457,13 @@ out_check_cl:
+ }
+
+ no_check_cl:
++ flock = smb_lock->fl;
++ list_del(&smb_lock->llist);
++
+ if (smb_lock->zero_len) {
+ err = 0;
+ goto skip;
+ }
+-
+- flock = smb_lock->fl;
+- list_del(&smb_lock->llist);
+ retry:
+ rc = vfs_lock_file(filp, smb_lock->cmd, flock, NULL);
+ skip:
--- /dev/null
+From da64a2359092ceec4f9dea5b329d0aef20104217 Mon Sep 17 00:00:00 2001
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+Date: Sat, 8 Mar 2025 13:50:45 +0800
+Subject: LoongArch: Convert unreachable() to BUG()
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+commit da64a2359092ceec4f9dea5b329d0aef20104217 upstream.
+
+When compiling on LoongArch, there exists the following objtool warning
+in arch/loongarch/kernel/machine_kexec.o:
+
+ kexec_reboot() falls through to next function crash_shutdown_secondary()
+
+Avoid using unreachable() as it can (and will in the absence of UBSAN)
+generate fall-through code. Use BUG() so we get a "break BRK_BUG" trap
+(with unreachable annotation).
+
+Cc: stable@vger.kernel.org # 6.12+
+Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/kernel/machine_kexec.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/loongarch/kernel/machine_kexec.c
++++ b/arch/loongarch/kernel/machine_kexec.c
+@@ -126,14 +126,14 @@ void kexec_reboot(void)
+ /* All secondary cpus go to kexec_smp_wait */
+ if (smp_processor_id() > 0) {
+ relocated_kexec_smp_wait(NULL);
+- unreachable();
++ BUG();
+ }
+ #endif
+
+ do_kexec = (void *)reboot_code_buffer;
+ do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry);
+
+- unreachable();
++ BUG();
+ }
+
+
--- /dev/null
+From 6fb1867d5a44b0a061cf39d2492d23d314bcb8ce Mon Sep 17 00:00:00 2001
+From: Bibo Mao <maobibo@loongson.cn>
+Date: Sat, 8 Mar 2025 13:51:59 +0800
+Subject: LoongArch: KVM: Add interrupt checking for AVEC
+
+From: Bibo Mao <maobibo@loongson.cn>
+
+commit 6fb1867d5a44b0a061cf39d2492d23d314bcb8ce upstream.
+
+There is a newly added macro INT_AVEC with CSR ESTAT register, which is
+bit 14 used for LoongArch AVEC support. AVEC interrupt status bit 14 is
+supported with macro CSR_ESTAT_IS, so here replace the hard-coded value
+0x1fff with macro CSR_ESTAT_IS so that the AVEC interrupt status is also
+supported by KVM.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Bibo Mao <maobibo@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/kvm/vcpu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/loongarch/kvm/vcpu.c
++++ b/arch/loongarch/kvm/vcpu.c
+@@ -311,7 +311,7 @@ static int kvm_handle_exit(struct kvm_ru
+ {
+ int ret = RESUME_GUEST;
+ unsigned long estat = vcpu->arch.host_estat;
+- u32 intr = estat & 0x1fff; /* Ignore NMI */
++ u32 intr = estat & CSR_ESTAT_IS;
+ u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
+
+ vcpu->mode = OUTSIDE_GUEST_MODE;
--- /dev/null
+From 6bdbb73dc8d99fbb77f5db79dbb6f108708090b4 Mon Sep 17 00:00:00 2001
+From: Bibo Mao <maobibo@loongson.cn>
+Date: Sat, 8 Mar 2025 13:52:04 +0800
+Subject: LoongArch: KVM: Fix GPA size issue about VM
+
+From: Bibo Mao <maobibo@loongson.cn>
+
+commit 6bdbb73dc8d99fbb77f5db79dbb6f108708090b4 upstream.
+
+Physical address space is 48 bit on Loongson-3A5000 physical machine,
+however it is 47 bit for VM on Loongson-3A5000 system. Size of physical
+address space of VM is the same with the size of virtual user space (a
+half) of physical machine.
+
+Variable cpu_vabits represents user address space, kernel address space
+is not included (user space and kernel space are both a half of total).
+Here cpu_vabits, rather than cpu_vabits - 1, is to represent the size of
+guest physical address space.
+
+Also there is strict checking about page fault GPA address, inject error
+if it is larger than maximum GPA address of VM.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Bibo Mao <maobibo@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/kvm/exit.c | 6 ++++++
+ arch/loongarch/kvm/vm.c | 6 +++++-
+ 2 files changed, 11 insertions(+), 1 deletion(-)
+
+--- a/arch/loongarch/kvm/exit.c
++++ b/arch/loongarch/kvm/exit.c
+@@ -669,6 +669,12 @@ static int kvm_handle_rdwr_fault(struct
+ struct kvm_run *run = vcpu->run;
+ unsigned long badv = vcpu->arch.badv;
+
++ /* Inject ADE exception if exceed max GPA size */
++ if (unlikely(badv >= vcpu->kvm->arch.gpa_size)) {
++ kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
++ return RESUME_GUEST;
++ }
++
+ ret = kvm_handle_mm_fault(vcpu, badv, write);
+ if (ret) {
+ /* Treat as MMIO */
+--- a/arch/loongarch/kvm/vm.c
++++ b/arch/loongarch/kvm/vm.c
+@@ -48,7 +48,11 @@ int kvm_arch_init_vm(struct kvm *kvm, un
+ if (kvm_pvtime_supported())
+ kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
+
+- kvm->arch.gpa_size = BIT(cpu_vabits - 1);
++ /*
++ * cpu_vabits means user address space only (a half of total).
++ * GPA size of VM is the same with the size of user address space.
++ */
++ kvm->arch.gpa_size = BIT(cpu_vabits);
+ kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1;
+ kvm->arch.invalid_ptes[0] = 0;
+ kvm->arch.invalid_ptes[1] = (unsigned long)invalid_pte_table;
--- /dev/null
+From 78d7bc5a02e1468df53896df354fa80727f35b7d Mon Sep 17 00:00:00 2001
+From: Bibo Mao <maobibo@loongson.cn>
+Date: Sat, 8 Mar 2025 13:52:01 +0800
+Subject: LoongArch: KVM: Reload guest CSR registers after sleep
+
+From: Bibo Mao <maobibo@loongson.cn>
+
+commit 78d7bc5a02e1468df53896df354fa80727f35b7d upstream.
+
+On the host, the HW guest CSR registers are lost after a suspend and
+resume operation. Since last_vcpu of the boot CPU still records the
+latest vCPU pointer, reloading of the guest CSR registers is skipped when
+the boot CPU resumes and a vCPU is scheduled.
+
+Here last_vcpu is cleared so that guest CSR registers will reload from
+scheduled vCPU context after suspend and resume.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Bibo Mao <maobibo@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/kvm/main.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/loongarch/kvm/main.c
++++ b/arch/loongarch/kvm/main.c
+@@ -299,6 +299,13 @@ int kvm_arch_enable_virtualization_cpu(v
+ kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx",
+ read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc());
+
++ /*
++ * HW Guest CSR registers are lost after CPU suspend and resume.
++ * Clear last_vcpu so that Guest CSR registers forced to reload
++ * from vCPU SW state.
++ */
++ this_cpu_ptr(vmcs)->last_vcpu = NULL;
++
+ return 0;
+ }
+
--- /dev/null
+From 3109d5ff484b7bc7b955f166974c6776d91f247b Mon Sep 17 00:00:00 2001
+From: Bibo Mao <maobibo@loongson.cn>
+Date: Sat, 8 Mar 2025 13:51:32 +0800
+Subject: LoongArch: Set hugetlb mmap base address aligned with pmd size
+
+From: Bibo Mao <maobibo@loongson.cn>
+
+commit 3109d5ff484b7bc7b955f166974c6776d91f247b upstream.
+
+With ltp test case "testcases/bin/hugefork02", there is a dmesg error
+report message such as:
+
+ kernel BUG at mm/hugetlb.c:5550!
+ Oops - BUG[#1]:
+ CPU: 0 UID: 0 PID: 1517 Comm: hugefork02 Not tainted 6.14.0-rc2+ #241
+ Hardware name: QEMU QEMU Virtual Machine, BIOS unknown 2/2/2022
+ pc 90000000004eaf1c ra 9000000000485538 tp 900000010edbc000 sp 900000010edbf940
+ a0 900000010edbfb00 a1 9000000108d20280 a2 00007fffe9474000 a3 00007ffff3474000
+ a4 0000000000000000 a5 0000000000000003 a6 00000000003cadd3 a7 0000000000000000
+ t0 0000000001ffffff t1 0000000001474000 t2 900000010ecd7900 t3 00007fffe9474000
+ t4 00007fffe9474000 t5 0000000000000040 t6 900000010edbfb00 t7 0000000000000001
+ t8 0000000000000005 u0 90000000004849d0 s9 900000010edbfa00 s0 9000000108d20280
+ s1 00007fffe9474000 s2 0000000002000000 s3 9000000108d20280 s4 9000000002b38b10
+ s5 900000010edbfb00 s6 00007ffff3474000 s7 0000000000000406 s8 900000010edbfa08
+ ra: 9000000000485538 unmap_vmas+0x130/0x218
+ ERA: 90000000004eaf1c __unmap_hugepage_range+0x6f4/0x7d0
+ PRMD: 00000004 (PPLV0 +PIE -PWE)
+ EUEN: 00000007 (+FPE +SXE +ASXE -BTE)
+ ECFG: 00071c1d (LIE=0,2-4,10-12 VS=7)
+ ESTAT: 000c0000 [BRK] (IS= ECode=12 EsubCode=0)
+ PRID: 0014c010 (Loongson-64bit, Loongson-3A5000)
+ Process hugefork02 (pid: 1517, threadinfo=00000000a670eaf4, task=000000007a95fc64)
+ Call Trace:
+ [<90000000004eaf1c>] __unmap_hugepage_range+0x6f4/0x7d0
+ [<9000000000485534>] unmap_vmas+0x12c/0x218
+ [<9000000000494068>] exit_mmap+0xe0/0x308
+ [<900000000025fdc4>] mmput+0x74/0x180
+ [<900000000026a284>] do_exit+0x294/0x898
+ [<900000000026aa30>] do_group_exit+0x30/0x98
+ [<900000000027bed4>] get_signal+0x83c/0x868
+ [<90000000002457b4>] arch_do_signal_or_restart+0x54/0xfa0
+ [<90000000015795e8>] irqentry_exit_to_user_mode+0xb8/0x138
+ [<90000000002572d0>] tlb_do_page_fault_1+0x114/0x1b4
+
+The problem is that base address allocated from hugetlbfs is not aligned
+with pmd size. Here add a checking for hugetlbfs and align base address
+with pmd size. After this patch the test case "testcases/bin/hugefork02"
+passes to run.
+
+This is similar to the commit 7f24cbc9c4d42db8a3c8484d1 ("mm/mmap: teach
+generic_get_unmapped_area{_topdown} to handle hugetlb mappings").
+
+Cc: stable@vger.kernel.org # 6.13+
+Signed-off-by: Bibo Mao <maobibo@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/mm/mmap.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/arch/loongarch/mm/mmap.c
++++ b/arch/loongarch/mm/mmap.c
+@@ -3,6 +3,7 @@
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+ #include <linux/export.h>
++#include <linux/hugetlb.h>
+ #include <linux/io.h>
+ #include <linux/kfence.h>
+ #include <linux/memblock.h>
+@@ -63,8 +64,11 @@ static unsigned long arch_get_unmapped_a
+ }
+
+ info.length = len;
+- info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
++ if (filp && is_file_hugepages(filp))
++ info.align_mask = huge_page_mask_align(filp);
++ else
++ info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
+
+ if (dir == DOWN) {
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
--- /dev/null
+From c8477bb0a8e7f6b2e47952b403c5cb67a6929e55 Mon Sep 17 00:00:00 2001
+From: Bibo Mao <maobibo@loongson.cn>
+Date: Sat, 8 Mar 2025 13:51:32 +0800
+Subject: LoongArch: Set max_pfn with the PFN of the last page
+
+From: Bibo Mao <maobibo@loongson.cn>
+
+commit c8477bb0a8e7f6b2e47952b403c5cb67a6929e55 upstream.
+
+The current max_pfn equals to zero. In this case, it causes user cannot
+get some page information through /proc filesystem such as kpagecount.
+The following message is displayed by stress-ng test suite with command
+"stress-ng --verbose --physpage 1 -t 1".
+
+ # stress-ng --verbose --physpage 1 -t 1
+ stress-ng: error: [1691] physpage: cannot read page count for address 0x134ac000 in /proc/kpagecount, errno=22 (Invalid argument)
+ stress-ng: error: [1691] physpage: cannot read page count for address 0x7ffff207c3a8 in /proc/kpagecount, errno=22 (Invalid argument)
+ stress-ng: error: [1691] physpage: cannot read page count for address 0x134b0000 in /proc/kpagecount, errno=22 (Invalid argument)
+ ...
+
+After applying this patch, the kernel can pass the test.
+
+ # stress-ng --verbose --physpage 1 -t 1
+ stress-ng: debug: [1701] physpage: [1701] started (instance 0 on CPU 3)
+ stress-ng: debug: [1701] physpage: [1701] exited (instance 0 on CPU 3)
+ stress-ng: debug: [1700] physpage: [1701] terminated (success)
+
+Cc: stable@vger.kernel.org # 6.8+
+Fixes: ff6c3d81f2e8 ("NUMA: optimize detection of memory with no node id assigned by firmware")
+Signed-off-by: Bibo Mao <maobibo@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/kernel/setup.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/loongarch/kernel/setup.c
++++ b/arch/loongarch/kernel/setup.c
+@@ -387,6 +387,9 @@ static void __init check_kernel_sections
+ */
+ static void __init arch_mem_init(char **cmdline_p)
+ {
++ /* Recalculate max_low_pfn for "mem=xxx" */
++ max_pfn = max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
++
+ if (usermem)
+ pr_info("User-defined physical RAM map overwrite\n");
+
--- /dev/null
+From c9117434c8f7523f0b77db4c5766f5011cc94677 Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Sat, 8 Mar 2025 13:51:32 +0800
+Subject: LoongArch: Use polling play_dead() when resuming from hibernation
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+commit c9117434c8f7523f0b77db4c5766f5011cc94677 upstream.
+
+When CONFIG_RANDOM_KMALLOC_CACHES or other randomization infrastructure is
+enabled, the idle_task's stack may differ between the booting kernel
+and target kernel. So when resuming from hibernation, an ACTION_BOOT_CPU
+IPI wakes up the idle instruction in arch_cpu_idle_dead() and jumps to the
+interrupt handler. But since the stack pointer is changed, the interrupt
+handler cannot restore correct context.
+
+So rename the current arch_cpu_idle_dead() to idle_play_dead(), make it
+as the default version of play_dead(), and the new arch_cpu_idle_dead()
+call play_dead() directly. For hibernation, implement an arch-specific
+hibernate_resume_nonboot_cpu_disable() to use the polling version (idle
+instruction is replaced by nop, and irq is disabled) of play_dead(), i.e.
+poll_play_dead(), to avoid IPI handler corrupting the idle_task's stack
+when resuming from hibernation.
+
+This solution is a little similar to commit 406f992e4a372dafbe3c ("x86 /
+hibernate: Use hlt_play_dead() when resuming from hibernation").
+
+Cc: stable@vger.kernel.org
+Tested-by: Erpeng Xu <xuerpeng@uniontech.com>
+Tested-by: Yuli Wang <wangyuli@uniontech.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/kernel/smp.c | 47 +++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 46 insertions(+), 1 deletion(-)
+
+--- a/arch/loongarch/kernel/smp.c
++++ b/arch/loongarch/kernel/smp.c
+@@ -19,6 +19,7 @@
+ #include <linux/smp.h>
+ #include <linux/threads.h>
+ #include <linux/export.h>
++#include <linux/suspend.h>
+ #include <linux/syscore_ops.h>
+ #include <linux/time.h>
+ #include <linux/tracepoint.h>
+@@ -423,7 +424,7 @@ void loongson_cpu_die(unsigned int cpu)
+ mb();
+ }
+
+-void __noreturn arch_cpu_idle_dead(void)
++static void __noreturn idle_play_dead(void)
+ {
+ register uint64_t addr;
+ register void (*init_fn)(void);
+@@ -447,6 +448,50 @@ void __noreturn arch_cpu_idle_dead(void)
+ BUG();
+ }
+
++#ifdef CONFIG_HIBERNATION
++static void __noreturn poll_play_dead(void)
++{
++ register uint64_t addr;
++ register void (*init_fn)(void);
++
++ idle_task_exit();
++ __this_cpu_write(cpu_state, CPU_DEAD);
++
++ __smp_mb();
++ do {
++ __asm__ __volatile__("nop\n\t");
++ addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
++ } while (addr == 0);
++
++ init_fn = (void *)TO_CACHE(addr);
++ iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
++
++ init_fn();
++ BUG();
++}
++#endif
++
++static void (*play_dead)(void) = idle_play_dead;
++
++void __noreturn arch_cpu_idle_dead(void)
++{
++ play_dead();
++ BUG(); /* play_dead() doesn't return */
++}
++
++#ifdef CONFIG_HIBERNATION
++int hibernate_resume_nonboot_cpu_disable(void)
++{
++ int ret;
++
++ play_dead = poll_play_dead;
++ ret = suspend_disable_secondary_cpus();
++ play_dead = idle_play_dead;
++
++ return ret;
++}
++#endif
++
+ #endif
+
+ /*
--- /dev/null
+From 022bfe24aad8937705704ff2e414b100cf0f2e1a Mon Sep 17 00:00:00 2001
+From: Krister Johansen <kjlx@templeofstupid.com>
+Date: Mon, 3 Mar 2025 18:10:13 +0100
+Subject: mptcp: fix 'scheduling while atomic' in mptcp_pm_nl_append_new_local_addr
+
+From: Krister Johansen <kjlx@templeofstupid.com>
+
+commit 022bfe24aad8937705704ff2e414b100cf0f2e1a upstream.
+
+If multiple connection requests attempt to create an implicit mptcp
+endpoint in parallel, more than one caller may end up in
+mptcp_pm_nl_append_new_local_addr because none found the address in
+local_addr_list during their call to mptcp_pm_nl_get_local_id. In this
+case, the concurrent new_local_addr calls may delete the address entry
+created by the previous caller. These deletes use synchronize_rcu, but
+this is not permitted in some of the contexts where this function may be
+called. During packet recv, the caller may be in a rcu read critical
+section and have preemption disabled.
+
+An example stack:
+
+ BUG: scheduling while atomic: swapper/2/0/0x00000302
+
+ Call Trace:
+ <IRQ>
+ dump_stack_lvl (lib/dump_stack.c:117 (discriminator 1))
+ dump_stack (lib/dump_stack.c:124)
+ __schedule_bug (kernel/sched/core.c:5943)
+ schedule_debug.constprop.0 (arch/x86/include/asm/preempt.h:33 kernel/sched/core.c:5970)
+ __schedule (arch/x86/include/asm/jump_label.h:27 include/linux/jump_label.h:207 kernel/sched/features.h:29 kernel/sched/core.c:6621)
+ schedule (arch/x86/include/asm/preempt.h:84 kernel/sched/core.c:6804 kernel/sched/core.c:6818)
+ schedule_timeout (kernel/time/timer.c:2160)
+ wait_for_completion (kernel/sched/completion.c:96 kernel/sched/completion.c:116 kernel/sched/completion.c:127 kernel/sched/completion.c:148)
+ __wait_rcu_gp (include/linux/rcupdate.h:311 kernel/rcu/update.c:444)
+ synchronize_rcu (kernel/rcu/tree.c:3609)
+ mptcp_pm_nl_append_new_local_addr (net/mptcp/pm_netlink.c:966 net/mptcp/pm_netlink.c:1061)
+ mptcp_pm_nl_get_local_id (net/mptcp/pm_netlink.c:1164)
+ mptcp_pm_get_local_id (net/mptcp/pm.c:420)
+ subflow_check_req (net/mptcp/subflow.c:98 net/mptcp/subflow.c:213)
+ subflow_v4_route_req (net/mptcp/subflow.c:305)
+ tcp_conn_request (net/ipv4/tcp_input.c:7216)
+ subflow_v4_conn_request (net/mptcp/subflow.c:651)
+ tcp_rcv_state_process (net/ipv4/tcp_input.c:6709)
+ tcp_v4_do_rcv (net/ipv4/tcp_ipv4.c:1934)
+ tcp_v4_rcv (net/ipv4/tcp_ipv4.c:2334)
+ ip_protocol_deliver_rcu (net/ipv4/ip_input.c:205 (discriminator 1))
+ ip_local_deliver_finish (include/linux/rcupdate.h:813 net/ipv4/ip_input.c:234)
+ ip_local_deliver (include/linux/netfilter.h:314 include/linux/netfilter.h:308 net/ipv4/ip_input.c:254)
+ ip_sublist_rcv_finish (include/net/dst.h:461 net/ipv4/ip_input.c:580)
+ ip_sublist_rcv (net/ipv4/ip_input.c:640)
+ ip_list_rcv (net/ipv4/ip_input.c:675)
+ __netif_receive_skb_list_core (net/core/dev.c:5583 net/core/dev.c:5631)
+ netif_receive_skb_list_internal (net/core/dev.c:5685 net/core/dev.c:5774)
+ napi_complete_done (include/linux/list.h:37 include/net/gro.h:449 include/net/gro.h:444 net/core/dev.c:6114)
+ igb_poll (drivers/net/ethernet/intel/igb/igb_main.c:8244) igb
+ __napi_poll (net/core/dev.c:6582)
+ net_rx_action (net/core/dev.c:6653 net/core/dev.c:6787)
+ handle_softirqs (kernel/softirq.c:553)
+ __irq_exit_rcu (kernel/softirq.c:588 kernel/softirq.c:427 kernel/softirq.c:636)
+ irq_exit_rcu (kernel/softirq.c:651)
+ common_interrupt (arch/x86/kernel/irq.c:247 (discriminator 14))
+ </IRQ>
+
+This problem seems particularly prevalent if the user advertises an
+endpoint that has a different external vs internal address. In the case
+where the external address is advertised and multiple connections
+already exist, multiple subflow SYNs arrive in parallel which tends to
+trigger the race during creation of the first local_addr_list entries
+which have the internal address instead.
+
+Fix by skipping the replacement of an existing implicit local address if
+called via mptcp_pm_nl_get_local_id.
+
+Fixes: d045b9eb95a9 ("mptcp: introduce implicit endpoints")
+Cc: stable@vger.kernel.org
+Suggested-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Krister Johansen <kjlx@templeofstupid.com>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250303-net-mptcp-fix-sched-while-atomic-v1-1-f6a216c5a74c@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c | 18 +++++++++++++++---
+ 1 file changed, 15 insertions(+), 3 deletions(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -977,7 +977,7 @@ static void __mptcp_pm_release_addr_entr
+
+ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
+ struct mptcp_pm_addr_entry *entry,
+- bool needs_id)
++ bool needs_id, bool replace)
+ {
+ struct mptcp_pm_addr_entry *cur, *del_entry = NULL;
+ unsigned int addr_max;
+@@ -1017,6 +1017,17 @@ static int mptcp_pm_nl_append_new_local_
+ if (entry->addr.id)
+ goto out;
+
++ /* allow callers that only need to look up the local
++ * addr's id to skip replacement. This allows them to
++ * avoid calling synchronize_rcu in the packet recv
++ * path.
++ */
++ if (!replace) {
++ kfree(entry);
++ ret = cur->addr.id;
++ goto out;
++ }
++
+ pernet->addrs--;
+ entry->addr.id = cur->addr.id;
+ list_del_rcu(&cur->list);
+@@ -1165,7 +1176,7 @@ int mptcp_pm_nl_get_local_id(struct mptc
+ entry->ifindex = 0;
+ entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT;
+ entry->lsk = NULL;
+- ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true);
++ ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true, false);
+ if (ret < 0)
+ kfree(entry);
+
+@@ -1433,7 +1444,8 @@ int mptcp_pm_nl_add_addr_doit(struct sk_
+ }
+ }
+ ret = mptcp_pm_nl_append_new_local_addr(pernet, entry,
+- !mptcp_pm_has_addr_attr_id(attr, info));
++ !mptcp_pm_has_addr_attr_id(attr, info),
++ true);
+ if (ret < 0) {
+ GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret);
+ goto out_free;
--- /dev/null
+From d0d10eaedcb53740883d7e5d53c5e15c879b48fb Mon Sep 17 00:00:00 2001
+From: Mingcong Bai <jeffbai@aosc.io>
+Date: Sat, 22 Feb 2025 00:48:24 +0800
+Subject: platform/x86: thinkpad_acpi: Add battery quirk for ThinkPad X131e
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mingcong Bai <jeffbai@aosc.io>
+
+commit d0d10eaedcb53740883d7e5d53c5e15c879b48fb upstream.
+
+Based on the dmesg messages from the original reporter:
+
+[ 4.964073] ACPI: \_SB_.PCI0.LPCB.EC__.HKEY: BCTG evaluated but flagged as error
+[ 4.964083] thinkpad_acpi: Error probing battery 2
+
+Lenovo ThinkPad X131e also needs this battery quirk.
+
+Reported-by: Fan Yang <804284660@qq.com>
+Tested-by: Fan Yang <804284660@qq.com>
+Co-developed-by: Xi Ruoyao <xry111@xry111.site>
+Signed-off-by: Xi Ruoyao <xry111@xry111.site>
+Signed-off-by: Mingcong Bai <jeffbai@aosc.io>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20250221164825.77315-1-jeffbai@aosc.io
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/platform/x86/thinkpad_acpi.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -9958,6 +9958,7 @@ static const struct tpacpi_quirk battery
+ * Individual addressing is broken on models that expose the
+ * primary battery as BAT1.
+ */
++ TPACPI_Q_LNV('G', '8', true), /* ThinkPad X131e */
+ TPACPI_Q_LNV('8', 'F', true), /* Thinkpad X120e */
+ TPACPI_Q_LNV('J', '7', true), /* B5400 */
+ TPACPI_Q_LNV('J', 'I', true), /* Thinkpad 11e */
--- /dev/null
+From eae116d1f0449ade3269ca47a67432622f5c6438 Mon Sep 17 00:00:00 2001
+From: Gabriel Krisman Bertazi <krisman@suse.de>
+Date: Tue, 25 Feb 2025 22:22:58 -0500
+Subject: Revert "mm/page_alloc.c: don't show protection in zone's ->lowmem_reserve[] for empty zone"
+
+From: Gabriel Krisman Bertazi <krisman@suse.de>
+
+commit eae116d1f0449ade3269ca47a67432622f5c6438 upstream.
+
+Commit 96a5c186efff ("mm/page_alloc.c: don't show protection in zone's
+->lowmem_reserve[] for empty zone") removes the protection of lower zones
+from allocations targeting memory-less high zones. This had an unintended
+impact on the pattern of reclaims because it makes the high-zone-targeted
+allocation more likely to succeed in lower zones, which adds pressure to
+said zones. I.e, the following corresponding checks in
+zone_watermark_ok/zone_watermark_fast are less likely to trigger:
+
+ if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
+ return false;
+
+As a result, we are observing an increase in reclaim and kswapd scans, due
+to the increased pressure. This was initially observed as increased
+latency in filesystem operations when benchmarking with fio on a machine
+with some memory-less zones, but it has since been associated with
+increased contention in locks related to memory reclaim. By reverting
+this patch, the original performance was recovered on that machine.
+
+The original commit was introduced as a clarification of the
+/proc/zoneinfo output, so it doesn't seem there are usecases depending on
+it, making the revert a simple solution.
+
+For reference, I collected vmstat with and without this patch on a freshly
+booted system running intensive randread io from an nvme for 5 minutes. I
+got:
+
+rpm-6.12.0-slfo.1.2 -> pgscan_kswapd 5629543865
+Patched -> pgscan_kswapd 33580844
+
+33M scans is similar to what we had in kernels predating this patch.
+These numbers is fairly representative of the workload on this machine, as
+measured in several runs. So we are talking about a 2-order of magnitude
+increase.
+
+Link: https://lkml.kernel.org/r/20250226032258.234099-1-krisman@suse.de
+Fixes: 96a5c186efff ("mm/page_alloc.c: don't show protection in zone's ->lowmem_reserve[] for empty zone")
+Signed-off-by: Gabriel Krisman Bertazi <krisman@suse.de>
+Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Mel Gorman <mgorman@suse.de>
+Cc: Baoquan He <bhe@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_alloc.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5858,11 +5858,10 @@ static void setup_per_zone_lowmem_reserv
+
+ for (j = i + 1; j < MAX_NR_ZONES; j++) {
+ struct zone *upper_zone = &pgdat->node_zones[j];
+- bool empty = !zone_managed_pages(upper_zone);
+
+ managed_pages += zone_managed_pages(upper_zone);
+
+- if (clear || empty)
++ if (clear)
+ zone->lowmem_reserve[j] = 0;
+ else
+ zone->lowmem_reserve[j] = managed_pages / ratio;
--- /dev/null
+From 75f1f311d883dfaffb98be3c1da208d6ed5d4df9 Mon Sep 17 00:00:00 2001
+From: "Rob Herring (Arm)" <robh@kernel.org>
+Date: Wed, 26 Feb 2025 13:38:19 -0600
+Subject: Revert "of: reserved-memory: Fix using wrong number of cells to get property 'alignment'"
+
+From: Rob Herring (Arm) <robh@kernel.org>
+
+commit 75f1f311d883dfaffb98be3c1da208d6ed5d4df9 upstream.
+
+This reverts commit 267b21d0bef8e67dbe6c591c9991444e58237ec9.
+
+Turns out some DTs do depend on this behavior. Specifically, a
+downstream Pixel 6 DT. Revert the change at least until we can decide if
+the DT spec can be changed instead.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Rob Herring (Arm) <robh@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/of/of_reserved_mem.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/of/of_reserved_mem.c
++++ b/drivers/of/of_reserved_mem.c
+@@ -415,12 +415,12 @@ static int __init __reserved_mem_alloc_s
+
+ prop = of_get_flat_dt_prop(node, "alignment", &len);
+ if (prop) {
+- if (len != dt_root_size_cells * sizeof(__be32)) {
++ if (len != dt_root_addr_cells * sizeof(__be32)) {
+ pr_err("invalid alignment property in '%s' node.\n",
+ uname);
+ return -EINVAL;
+ }
+- align = dt_mem_next_cell(dt_root_size_cells, &prop);
++ align = dt_mem_next_cell(dt_root_addr_cells, &prop);
+ }
+
+ nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
--- /dev/null
+From 0a7565ee6ec31eb16c0476adbfc1af3f2271cb6b Mon Sep 17 00:00:00 2001
+From: John Hubbard <jhubbard@nvidia.com>
+Date: Thu, 13 Feb 2025 19:38:50 -0800
+Subject: Revert "selftests/mm: remove local __NR_* definitions"
+
+From: John Hubbard <jhubbard@nvidia.com>
+
+commit 0a7565ee6ec31eb16c0476adbfc1af3f2271cb6b upstream.
+
+This reverts commit a5c6bc590094a1a73cf6fa3f505e1945d2bf2461.
+
+The general approach described in commit e076eaca5906 ("selftests: break
+the dependency upon local header files") was taken one step too far here:
+it should not have been extended to include the syscall numbers. This is
+because doing so would require per-arch support in tools/include/uapi, and
+no such support exists.
+
+This revert fixes two separate reports of test failures, from Dave
+Hansen[1], and Li Wang[2]. An excerpt of Dave's report:
+
+Before this commit (a5c6bc590094a1a73cf6fa3f505e1945d2bf2461) things are
+fine. But after, I get:
+
+ running PKEY tests for unsupported CPU/OS
+
+An excerpt of Li's report:
+
+ I just found that mlock2_() return a wrong value in mlock2-test
+
+[1] https://lore.kernel.org/dc585017-6740-4cab-a536-b12b37a7582d@intel.com
+[2] https://lore.kernel.org/CAEemH2eW=UMu9+turT2jRie7+6ewUazXmA6kL+VBo3cGDGU6RA@mail.gmail.com
+
+Link: https://lkml.kernel.org/r/20250214033850.235171-1-jhubbard@nvidia.com
+Fixes: a5c6bc590094 ("selftests/mm: remove local __NR_* definitions")
+Signed-off-by: John Hubbard <jhubbard@nvidia.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Li Wang <liwang@redhat.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Jeff Xu <jeffxu@chromium.org>
+Cc: Andrei Vagin <avagin@google.com>
+Cc: Axel Rasmussen <axelrasmussen@google.com>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Kees Cook <kees@kernel.org>
+Cc: Kent Overstreet <kent.overstreet@linux.dev>
+Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
+Cc: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Rich Felker <dalias@libc.org>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/mm/hugepage-mremap.c | 2 +-
+ tools/testing/selftests/mm/ksm_functional_tests.c | 8 +++++++-
+ tools/testing/selftests/mm/memfd_secret.c | 14 +++++++++++++-
+ tools/testing/selftests/mm/mkdirty.c | 8 +++++++-
+ tools/testing/selftests/mm/mlock2.h | 1 -
+ tools/testing/selftests/mm/protection_keys.c | 2 +-
+ tools/testing/selftests/mm/uffd-common.c | 4 ++++
+ tools/testing/selftests/mm/uffd-stress.c | 15 ++++++++++++++-
+ tools/testing/selftests/mm/uffd-unit-tests.c | 14 +++++++++++++-
+ 9 files changed, 60 insertions(+), 8 deletions(-)
+
+--- a/tools/testing/selftests/mm/hugepage-mremap.c
++++ b/tools/testing/selftests/mm/hugepage-mremap.c
+@@ -15,7 +15,7 @@
+ #define _GNU_SOURCE
+ #include <stdlib.h>
+ #include <stdio.h>
+-#include <asm-generic/unistd.h>
++#include <unistd.h>
+ #include <sys/mman.h>
+ #include <errno.h>
+ #include <fcntl.h> /* Definition of O_* constants */
+--- a/tools/testing/selftests/mm/ksm_functional_tests.c
++++ b/tools/testing/selftests/mm/ksm_functional_tests.c
+@@ -11,7 +11,7 @@
+ #include <string.h>
+ #include <stdbool.h>
+ #include <stdint.h>
+-#include <asm-generic/unistd.h>
++#include <unistd.h>
+ #include <errno.h>
+ #include <fcntl.h>
+ #include <sys/mman.h>
+@@ -369,6 +369,7 @@ unmap:
+ munmap(map, size);
+ }
+
++#ifdef __NR_userfaultfd
+ static void test_unmerge_uffd_wp(void)
+ {
+ struct uffdio_writeprotect uffd_writeprotect;
+@@ -429,6 +430,7 @@ close_uffd:
+ unmap:
+ munmap(map, size);
+ }
++#endif
+
+ /* Verify that KSM can be enabled / queried with prctl. */
+ static void test_prctl(void)
+@@ -684,7 +686,9 @@ int main(int argc, char **argv)
+ exit(test_child_ksm());
+ }
+
++#ifdef __NR_userfaultfd
+ tests++;
++#endif
+
+ ksft_print_header();
+ ksft_set_plan(tests);
+@@ -696,7 +700,9 @@ int main(int argc, char **argv)
+ test_unmerge();
+ test_unmerge_zero_pages();
+ test_unmerge_discarded();
++#ifdef __NR_userfaultfd
+ test_unmerge_uffd_wp();
++#endif
+
+ test_prot_none();
+
+--- a/tools/testing/selftests/mm/memfd_secret.c
++++ b/tools/testing/selftests/mm/memfd_secret.c
+@@ -17,7 +17,7 @@
+
+ #include <stdlib.h>
+ #include <string.h>
+-#include <asm-generic/unistd.h>
++#include <unistd.h>
+ #include <errno.h>
+ #include <stdio.h>
+ #include <fcntl.h>
+@@ -28,6 +28,8 @@
+ #define pass(fmt, ...) ksft_test_result_pass(fmt, ##__VA_ARGS__)
+ #define skip(fmt, ...) ksft_test_result_skip(fmt, ##__VA_ARGS__)
+
++#ifdef __NR_memfd_secret
++
+ #define PATTERN 0x55
+
+ static const int prot = PROT_READ | PROT_WRITE;
+@@ -332,3 +334,13 @@ int main(int argc, char *argv[])
+
+ ksft_finished();
+ }
++
++#else /* __NR_memfd_secret */
++
++int main(int argc, char *argv[])
++{
++ printf("skip: skipping memfd_secret test (missing __NR_memfd_secret)\n");
++ return KSFT_SKIP;
++}
++
++#endif /* __NR_memfd_secret */
+--- a/tools/testing/selftests/mm/mkdirty.c
++++ b/tools/testing/selftests/mm/mkdirty.c
+@@ -9,7 +9,7 @@
+ */
+ #include <fcntl.h>
+ #include <signal.h>
+-#include <asm-generic/unistd.h>
++#include <unistd.h>
+ #include <string.h>
+ #include <errno.h>
+ #include <stdlib.h>
+@@ -265,6 +265,7 @@ munmap:
+ munmap(mmap_mem, mmap_size);
+ }
+
++#ifdef __NR_userfaultfd
+ static void test_uffdio_copy(void)
+ {
+ struct uffdio_register uffdio_register;
+@@ -321,6 +322,7 @@ munmap:
+ munmap(dst, pagesize);
+ free(src);
+ }
++#endif /* __NR_userfaultfd */
+
+ int main(void)
+ {
+@@ -333,7 +335,9 @@ int main(void)
+ thpsize / 1024);
+ tests += 3;
+ }
++#ifdef __NR_userfaultfd
+ tests += 1;
++#endif /* __NR_userfaultfd */
+
+ ksft_print_header();
+ ksft_set_plan(tests);
+@@ -363,7 +367,9 @@ int main(void)
+ if (thpsize)
+ test_pte_mapped_thp();
+ /* Placing a fresh page via userfaultfd may set the PTE dirty. */
++#ifdef __NR_userfaultfd
+ test_uffdio_copy();
++#endif /* __NR_userfaultfd */
+
+ err = ksft_get_fail_cnt();
+ if (err)
+--- a/tools/testing/selftests/mm/mlock2.h
++++ b/tools/testing/selftests/mm/mlock2.h
+@@ -3,7 +3,6 @@
+ #include <errno.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+-#include <asm-generic/unistd.h>
+
+ static int mlock2_(void *start, size_t len, int flags)
+ {
+--- a/tools/testing/selftests/mm/protection_keys.c
++++ b/tools/testing/selftests/mm/protection_keys.c
+@@ -42,7 +42,7 @@
+ #include <sys/wait.h>
+ #include <sys/stat.h>
+ #include <fcntl.h>
+-#include <asm-generic/unistd.h>
++#include <unistd.h>
+ #include <sys/ptrace.h>
+ #include <setjmp.h>
+
+--- a/tools/testing/selftests/mm/uffd-common.c
++++ b/tools/testing/selftests/mm/uffd-common.c
+@@ -673,7 +673,11 @@ int uffd_open_dev(unsigned int flags)
+
+ int uffd_open_sys(unsigned int flags)
+ {
++#ifdef __NR_userfaultfd
+ return syscall(__NR_userfaultfd, flags);
++#else
++ return -1;
++#endif
+ }
+
+ int uffd_open(unsigned int flags)
+--- a/tools/testing/selftests/mm/uffd-stress.c
++++ b/tools/testing/selftests/mm/uffd-stress.c
+@@ -33,10 +33,11 @@
+ * pthread_mutex_lock will also verify the atomicity of the memory
+ * transfer (UFFDIO_COPY).
+ */
+-#include <asm-generic/unistd.h>
++
+ #include "uffd-common.h"
+
+ uint64_t features;
++#ifdef __NR_userfaultfd
+
+ #define BOUNCE_RANDOM (1<<0)
+ #define BOUNCE_RACINGFAULTS (1<<1)
+@@ -471,3 +472,15 @@ int main(int argc, char **argv)
+ nr_pages, nr_pages_per_cpu);
+ return userfaultfd_stress();
+ }
++
++#else /* __NR_userfaultfd */
++
++#warning "missing __NR_userfaultfd definition"
++
++int main(void)
++{
++ printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n");
++ return KSFT_SKIP;
++}
++
++#endif /* __NR_userfaultfd */
+--- a/tools/testing/selftests/mm/uffd-unit-tests.c
++++ b/tools/testing/selftests/mm/uffd-unit-tests.c
+@@ -5,11 +5,12 @@
+ * Copyright (C) 2015-2023 Red Hat, Inc.
+ */
+
+-#include <asm-generic/unistd.h>
+ #include "uffd-common.h"
+
+ #include "../../../../mm/gup_test.h"
+
++#ifdef __NR_userfaultfd
++
+ /* The unit test doesn't need a large or random size, make it 32MB for now */
+ #define UFFD_TEST_MEM_SIZE (32UL << 20)
+
+@@ -1558,3 +1559,14 @@ int main(int argc, char *argv[])
+ return ksft_get_fail_cnt() ? KSFT_FAIL : KSFT_PASS;
+ }
+
++#else /* __NR_userfaultfd */
++
++#warning "missing __NR_userfaultfd definition"
++
++int main(void)
++{
++	printf("Skipping %s (missing __NR_userfaultfd)\n", __FILE__);
++ return KSFT_SKIP;
++}
++
++#endif /* __NR_userfaultfd */
smb311-failure-to-open-files-of-length-1040-when-mou.patch
x86-microcode-amd-add-some-forgotten-models-to-the-sha-check.patch
loongarch-use-asm_reachable.patch
+revert-of-reserved-memory-fix-using-wrong-number-of-cells-to-get-property-alignment.patch
+tracing-tprobe-events-fix-a-memory-leak-when-tprobe-with-retval.patch
+tracing-tprobe-events-reject-invalid-tracepoint-name.patch
+stmmac-loongson-pass-correct-arg-to-pci-function.patch
+loongarch-convert-unreachable-to-bug.patch
+loongarch-use-polling-play_dead-when-resuming-from-hibernation.patch
+loongarch-set-hugetlb-mmap-base-address-aligned-with-pmd-size.patch
+loongarch-set-max_pfn-with-the-pfn-of-the-last-page.patch
+loongarch-kvm-add-interrupt-checking-for-avec.patch
+loongarch-kvm-reload-guest-csr-registers-after-sleep.patch
+loongarch-kvm-fix-gpa-size-issue-about-vm.patch
+hid-appleir-fix-potential-null-dereference-at-raw-event-handle.patch
+hid-corsair-void-update-power-supply-values-with-a-unified-work-handler.patch
+ksmbd-fix-type-confusion-via-race-condition-when-using-ipc_msg_send_request.patch
+ksmbd-fix-out-of-bounds-in-parse_sec_desc.patch
+ksmbd-fix-use-after-free-in-smb2_lock.patch
+ksmbd-fix-bug-on-trap-in-smb2_lock.patch
+gpio-rcar-use-raw_spinlock-to-protect-register-access.patch
+gpio-aggregator-protect-driver-attr-handlers-against-module-unload.patch
+alsa-seq-avoid-module-auto-load-handling-at-event-delivery.patch
+alsa-hda-intel-add-dell-alc3271-to-power_save-denylist.patch
+alsa-hda-realtek-add-supported-mic-mute-led-for-lenovo-platform.patch
+alsa-hda-realtek-update-alc222-depop-optimize.patch
+btrfs-zoned-fix-extent-range-end-unlock-in-cow_file_range.patch
+btrfs-fix-a-leaked-chunk-map-issue-in-read_one_chunk.patch
+virt-sev-guest-allocate-request-data-dynamically.patch
+hwmon-peci-dimmtemp-do-not-provide-fake-thresholds-data.patch
+drm-amd-display-fix-null-check-for-pipe_ctx-plane_state-in-resource_build_scaling_params.patch
+drm-amdkfd-fix-null-pointer-dereference-in-kfd-queue.patch
+drm-amd-pm-always-allow-ih-interrupt-from-fw.patch
+drm-imagination-avoid-deadlock-on-fence-release.patch
+drm-imagination-hold-drm_gem_gpuva-lock-for-unmap.patch
+drm-imagination-only-init-job-done-fences-once.patch
+drm-radeon-fix-rs400_gpu_init-for-ati-mobility-radeon-xpress-200m.patch
+revert-mm-page_alloc.c-don-t-show-protection-in-zone-s-lowmem_reserve-for-empty-zone.patch
+revert-selftests-mm-remove-local-__nr_-definitions.patch
+platform-x86-thinkpad_acpi-add-battery-quirk-for-thinkpad-x131e.patch
+x86-boot-sanitize-boot-params-before-parsing-command-line.patch
+x86-cacheinfo-validate-cpuid-leaf-0x2-edx-output.patch
+x86-cpu-validate-cpuid-leaf-0x2-edx-output.patch
+x86-cpu-properly-parse-cpuid-leaf-0x2-tlb-descriptor-0x63.patch
+drm-xe-add-staging-tree-for-vm-binds.patch
+drm-xe-hmm-style-and-include-fixes.patch
+drm-xe-hmm-don-t-dereference-struct-page-pointers-without-notifier-lock.patch
+drm-xe-vm-fix-a-misplaced-endif.patch
+drm-xe-vm-validate-userptr-during-gpu-vma-prefetching.patch
+mptcp-fix-scheduling-while-atomic-in-mptcp_pm_nl_append_new_local_addr.patch
+drm-xe-fix-gt-for-each-engine-workarounds.patch
+drm-xe-fix-fault-mode-invalidation-with-unbind.patch
+drm-xe-userptr-properly-setup-pfn_flags_mask.patch
+drm-xe-userptr-unmap-userptrs-in-the-mmu-notifier.patch
--- /dev/null
+From 00371a3f48775967950c2fe3ec97b7c786ca956d Mon Sep 17 00:00:00 2001
+From: Philipp Stanner <phasta@kernel.org>
+Date: Wed, 26 Feb 2025 09:52:05 +0100
+Subject: stmmac: loongson: Pass correct arg to PCI function
+
+From: Philipp Stanner <phasta@kernel.org>
+
+commit 00371a3f48775967950c2fe3ec97b7c786ca956d upstream.
+
+pcim_iomap_regions() should receive the driver's name as its third
+parameter, not the PCI device's name.
+
+Define the driver name with a macro and use it at the appropriate
+places, including pcim_iomap_regions().
+
+Cc: stable@vger.kernel.org # v5.14+
+Fixes: 30bba69d7db4 ("stmmac: pci: Add dwmac support for Loongson")
+Signed-off-by: Philipp Stanner <phasta@kernel.org>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Yanteng Si <si.yanteng@linux.dev>
+Tested-by: Henry Chen <chenx97@aosc.io>
+Link: https://patch.msgid.link/20250226085208.97891-2-phasta@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+@@ -11,6 +11,8 @@
+ #include "dwmac_dma.h"
+ #include "dwmac1000.h"
+
++#define DRIVER_NAME "dwmac-loongson-pci"
++
+ /* Normal Loongson Tx Summary */
+ #define DMA_INTR_ENA_NIE_TX_LOONGSON 0x00040000
+ /* Normal Loongson Rx Summary */
+@@ -568,7 +570,7 @@ static int loongson_dwmac_probe(struct p
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+- ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
++ ret = pcim_iomap_regions(pdev, BIT(0), DRIVER_NAME);
+ if (ret)
+ goto err_disable_device;
+ break;
+@@ -687,7 +689,7 @@ static const struct pci_device_id loongs
+ MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
+
+ static struct pci_driver loongson_dwmac_driver = {
+- .name = "dwmac-loongson-pci",
++ .name = DRIVER_NAME,
+ .id_table = loongson_dwmac_id_table,
+ .probe = loongson_dwmac_probe,
+ .remove = loongson_dwmac_remove,
--- /dev/null
+From ac965d7d88fc36fb42e3d50225c0a44dd8326da4 Mon Sep 17 00:00:00 2001
+From: "Masami Hiramatsu (Google)" <mhiramat@kernel.org>
+Date: Wed, 26 Feb 2025 15:18:46 +0900
+Subject: tracing: tprobe-events: Fix a memory leak when tprobe with $retval
+
+From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+
+commit ac965d7d88fc36fb42e3d50225c0a44dd8326da4 upstream.
+
+Fix a memory leak when a tprobe is defined with $retval. This
+combination is not allowed, but the parse_symbol_and_return() does
+not free the *symbol which should not be used if it returns the error.
+Thus, it leaks the *symbol memory in that error path.
+
+Link: https://lore.kernel.org/all/174055072650.4079315.3063014346697447838.stgit@mhiramat.tok.corp.google.com/
+
+Fixes: ce51e6153f77 ("tracing: fprobe-event: Fix to check tracepoint event and return")
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_fprobe.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/trace/trace_fprobe.c
++++ b/kernel/trace/trace_fprobe.c
+@@ -1025,6 +1025,8 @@ static int parse_symbol_and_return(int a
+ if (is_tracepoint) {
+ trace_probe_log_set_index(i);
+ trace_probe_log_err(tmp - argv[i], RETVAL_ON_PROBE);
++ kfree(*symbol);
++ *symbol = NULL;
+ return -EINVAL;
+ }
+ *is_return = true;
--- /dev/null
+From d0453655b6ddc685a4837f3cc0776ae8eef62d01 Mon Sep 17 00:00:00 2001
+From: "Masami Hiramatsu (Google)" <mhiramat@kernel.org>
+Date: Wed, 26 Feb 2025 15:18:54 +0900
+Subject: tracing: tprobe-events: Reject invalid tracepoint name
+
+From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+
+commit d0453655b6ddc685a4837f3cc0776ae8eef62d01 upstream.
+
+Commit 57a7e6de9e30 ("tracing/fprobe: Support raw tracepoints on
+future loaded modules") allows user to set a tprobe on non-exist
+tracepoint but it does not check the tracepoint name is acceptable.
+So it leads tprobe has a wrong character for events (e.g. with
+subsystem prefix). In this case, the event is not shown in the
+events directory.
+
+Reject such invalid tracepoint name.
+
+The tracepoint name must consist of alphabet or digit or '_'.
+
+Link: https://lore.kernel.org/all/174055073461.4079315.15875502830565214255.stgit@mhiramat.tok.corp.google.com/
+
+Fixes: 57a7e6de9e30 ("tracing/fprobe: Support raw tracepoints on future loaded modules")
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_fprobe.c | 13 +++++++++++++
+ kernel/trace/trace_probe.h | 1 +
+ 2 files changed, 14 insertions(+)
+
+--- a/kernel/trace/trace_fprobe.c
++++ b/kernel/trace/trace_fprobe.c
+@@ -1018,6 +1018,19 @@ static int parse_symbol_and_return(int a
+ if (*is_return)
+ return 0;
+
++ if (is_tracepoint) {
++ tmp = *symbol;
++ while (*tmp && (isalnum(*tmp) || *tmp == '_'))
++ tmp++;
++ if (*tmp) {
++ /* find a wrong character. */
++ trace_probe_log_err(tmp - *symbol, BAD_TP_NAME);
++ kfree(*symbol);
++ *symbol = NULL;
++ return -EINVAL;
++ }
++ }
++
+ /* If there is $retval, this should be a return fprobe. */
+ for (i = 2; i < argc; i++) {
+ tmp = strstr(argv[i], "$retval");
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -481,6 +481,7 @@ extern int traceprobe_define_arg_fields(
+ C(NON_UNIQ_SYMBOL, "The symbol is not unique"), \
+ C(BAD_RETPROBE, "Retprobe address must be an function entry"), \
+ C(NO_TRACEPOINT, "Tracepoint is not found"), \
++ C(BAD_TP_NAME, "Invalid character in tracepoint name"),\
+ C(BAD_ADDR_SUFFIX, "Invalid probed address suffix"), \
+ C(NO_GROUP_NAME, "Group name is not specified"), \
+ C(GROUP_TOO_LONG, "Group name is too long"), \
--- /dev/null
+From ac7c06acaa3738b38e83815ac0f07140ad320f13 Mon Sep 17 00:00:00 2001
+From: Nikunj A Dadhania <nikunj@amd.com>
+Date: Thu, 6 Mar 2025 19:17:21 +1100
+Subject: virt: sev-guest: Allocate request data dynamically
+
+From: Nikunj A Dadhania <nikunj@amd.com>
+
+commit ac7c06acaa3738b38e83815ac0f07140ad320f13 upstream.
+
+Commit
+
+ ae596615d93d ("virt: sev-guest: Reduce the scope of SNP command mutex")
+
+narrowed the command mutex scope to snp_send_guest_request(). However,
+GET_REPORT, GET_DERIVED_KEY, and GET_EXT_REPORT share the req structure in
+snp_guest_dev. Without the mutex protection, concurrent requests can overwrite
+each other's data. Fix it by dynamically allocating the request structure.
+
+Fixes: ae596615d93d ("virt: sev-guest: Reduce the scope of SNP command mutex")
+Closes: https://github.com/AMDESE/AMDSEV/issues/265
+Reported-by: andreas.stuehrk@yaxi.tech
+Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
+Signed-off-by: Alexey Kardashevskiy <aik@amd.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20250307013700.437505-2-aik@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/virt/coco/sev-guest/sev-guest.c | 24 +++++++++++++++---------
+ 1 file changed, 15 insertions(+), 9 deletions(-)
+
+--- a/drivers/virt/coco/sev-guest/sev-guest.c
++++ b/drivers/virt/coco/sev-guest/sev-guest.c
+@@ -41,12 +41,6 @@ struct snp_guest_dev {
+ struct miscdevice misc;
+
+ struct snp_msg_desc *msg_desc;
+-
+- union {
+- struct snp_report_req report;
+- struct snp_derived_key_req derived_key;
+- struct snp_ext_report_req ext_report;
+- } req;
+ };
+
+ /*
+@@ -390,7 +384,7 @@ struct snp_req_resp {
+
+ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+ {
+- struct snp_report_req *report_req = &snp_dev->req.report;
++ struct snp_report_req *report_req __free(kfree) = NULL;
+ struct snp_msg_desc *mdesc = snp_dev->msg_desc;
+ struct snp_report_resp *report_resp;
+ struct snp_guest_req req = {};
+@@ -399,6 +393,10 @@ static int get_report(struct snp_guest_d
+ if (!arg->req_data || !arg->resp_data)
+ return -EINVAL;
+
++ report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT);
++ if (!report_req)
++ return -ENOMEM;
++
+ if (copy_from_user(report_req, (void __user *)arg->req_data, sizeof(*report_req)))
+ return -EFAULT;
+
+@@ -435,7 +433,7 @@ e_free:
+
+ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+ {
+- struct snp_derived_key_req *derived_key_req = &snp_dev->req.derived_key;
++ struct snp_derived_key_req *derived_key_req __free(kfree) = NULL;
+ struct snp_derived_key_resp derived_key_resp = {0};
+ struct snp_msg_desc *mdesc = snp_dev->msg_desc;
+ struct snp_guest_req req = {};
+@@ -455,6 +453,10 @@ static int get_derived_key(struct snp_gu
+ if (sizeof(buf) < resp_len)
+ return -ENOMEM;
+
++ derived_key_req = kzalloc(sizeof(*derived_key_req), GFP_KERNEL_ACCOUNT);
++ if (!derived_key_req)
++ return -ENOMEM;
++
+ if (copy_from_user(derived_key_req, (void __user *)arg->req_data,
+ sizeof(*derived_key_req)))
+ return -EFAULT;
+@@ -487,7 +489,7 @@ static int get_ext_report(struct snp_gue
+ struct snp_req_resp *io)
+
+ {
+- struct snp_ext_report_req *report_req = &snp_dev->req.ext_report;
++ struct snp_ext_report_req *report_req __free(kfree) = NULL;
+ struct snp_msg_desc *mdesc = snp_dev->msg_desc;
+ struct snp_report_resp *report_resp;
+ struct snp_guest_req req = {};
+@@ -497,6 +499,10 @@ static int get_ext_report(struct snp_gue
+ if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data))
+ return -EINVAL;
+
++ report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT);
++ if (!report_req)
++ return -ENOMEM;
++
+ if (copy_from_sockptr(report_req, io->req_data, sizeof(*report_req)))
+ return -EFAULT;
+
--- /dev/null
+From c00b413a96261faef4ce22329153c6abd4acef25 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Thu, 6 Mar 2025 16:59:16 +0100
+Subject: x86/boot: Sanitize boot params before parsing command line
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit c00b413a96261faef4ce22329153c6abd4acef25 upstream.
+
+The 5-level paging code parses the command line to look for the 'no5lvl'
+string, and does so very early, before sanitize_boot_params() has been
+called and has been given the opportunity to wipe bogus data from the
+fields in boot_params that are not covered by struct setup_header, and
+are therefore supposed to be initialized to zero by the bootloader.
+
+This triggers an early boot crash when using syslinux-efi to boot a
+recent kernel built with CONFIG_X86_5LEVEL=y and CONFIG_EFI_STUB=n, as
+the 0xff padding that now fills the unused PE/COFF header is copied into
+boot_params by the bootloader, and interpreted as the top half of the
+command line pointer.
+
+Fix this by sanitizing the boot_params before use. Note that there is no
+harm in calling this more than once; subsequent invocations are able to
+spot that the boot_params have already been cleaned up.
+
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: <stable@vger.kernel.org> # v6.1+
+Link: https://lore.kernel.org/r/20250306155915.342465-2-ardb+git@google.com
+Closes: https://lore.kernel.org/all/202503041549.35913.ulrich.gemkow@ikr.uni-stuttgart.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/boot/compressed/pgtable_64.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/boot/compressed/pgtable_64.c
++++ b/arch/x86/boot/compressed/pgtable_64.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include "misc.h"
+ #include <asm/bootparam.h>
++#include <asm/bootparam_utils.h>
+ #include <asm/e820/types.h>
+ #include <asm/processor.h>
+ #include "pgtable.h"
+@@ -107,6 +108,7 @@ asmlinkage void configure_5level_paging(
+ bool l5_required = false;
+
+ /* Initialize boot_params. Required for cmdline_find_option_bool(). */
++ sanitize_boot_params(bp);
+ boot_params_ptr = bp;
+
+ /*
--- /dev/null
+From 8177c6bedb7013cf736137da586cf783922309dd Mon Sep 17 00:00:00 2001
+From: "Ahmed S. Darwish" <darwi@linutronix.de>
+Date: Tue, 4 Mar 2025 09:51:12 +0100
+Subject: x86/cacheinfo: Validate CPUID leaf 0x2 EDX output
+
+From: Ahmed S. Darwish <darwi@linutronix.de>
+
+commit 8177c6bedb7013cf736137da586cf783922309dd upstream.
+
+CPUID leaf 0x2 emits one-byte descriptors in its four output registers
+EAX, EBX, ECX, and EDX. For these descriptors to be valid, the most
+significant bit (MSB) of each register must be clear.
+
+The historical Git commit:
+
+ 019361a20f016 ("- pre6: Intel: start to add Pentium IV specific stuff (128-byte cacheline etc)...")
+
+introduced leaf 0x2 output parsing. It only validated the MSBs of EAX,
+EBX, and ECX, but left EDX unchecked.
+
+Validate EDX's most-significant bit.
+
+Signed-off-by: Ahmed S. Darwish <darwi@linutronix.de>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: stable@vger.kernel.org
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Link: https://lore.kernel.org/r/20250304085152.51092-2-darwi@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/cacheinfo.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/cacheinfo.c
++++ b/arch/x86/kernel/cpu/cacheinfo.c
+@@ -808,7 +808,7 @@ void init_intel_cacheinfo(struct cpuinfo
+ cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
+
+ /* If bit 31 is set, this is an unknown format */
+- for (j = 0 ; j < 3 ; j++)
++ for (j = 0 ; j < 4 ; j++)
+ if (regs[j] & (1 << 31))
+ regs[j] = 0;
+
--- /dev/null
+From f6bdaab79ee4228a143ee1b4cb80416d6ffc0c63 Mon Sep 17 00:00:00 2001
+From: "Ahmed S. Darwish" <darwi@linutronix.de>
+Date: Tue, 4 Mar 2025 09:51:14 +0100
+Subject: x86/cpu: Properly parse CPUID leaf 0x2 TLB descriptor 0x63
+
+From: Ahmed S. Darwish <darwi@linutronix.de>
+
+commit f6bdaab79ee4228a143ee1b4cb80416d6ffc0c63 upstream.
+
+CPUID leaf 0x2's one-byte TLB descriptors report the number of entries
+for specific TLB types, among other properties.
+
+Typically, each emitted descriptor implies the same number of entries
+for its respective TLB type(s). An emitted 0x63 descriptor is an
+exception: it implies 4 data TLB entries for 1GB pages and 32 data TLB
+entries for 2MB or 4MB pages.
+
+For the TLB descriptors parsing code, the entry count for 1GB pages is
+encoded at the intel_tlb_table[] mapping, but the 2MB/4MB entry count is
+totally ignored.
+
+Update leaf 0x2's parsing logic to account for 32 data TLB entries
+for 2MB/4MB pages implied by the 0x63 descriptor.
+
+Fixes: e0ba94f14f74 ("x86/tlb_info: get last level TLB entry number of CPU")
+Signed-off-by: Ahmed S. Darwish <darwi@linutronix.de>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: stable@kernel.org
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Link: https://lore.kernel.org/r/20250304085152.51092-4-darwi@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/intel.c | 60 ++++++++++++++++++++++++++++----------------
+ 1 file changed, 39 insertions(+), 21 deletions(-)
+
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -640,26 +640,37 @@ static unsigned int intel_size_cache(str
+ }
+ #endif
+
+-#define TLB_INST_4K 0x01
+-#define TLB_INST_4M 0x02
+-#define TLB_INST_2M_4M 0x03
+-
+-#define TLB_INST_ALL 0x05
+-#define TLB_INST_1G 0x06
+-
+-#define TLB_DATA_4K 0x11
+-#define TLB_DATA_4M 0x12
+-#define TLB_DATA_2M_4M 0x13
+-#define TLB_DATA_4K_4M 0x14
+-
+-#define TLB_DATA_1G 0x16
+-
+-#define TLB_DATA0_4K 0x21
+-#define TLB_DATA0_4M 0x22
+-#define TLB_DATA0_2M_4M 0x23
+-
+-#define STLB_4K 0x41
+-#define STLB_4K_2M 0x42
++#define TLB_INST_4K 0x01
++#define TLB_INST_4M 0x02
++#define TLB_INST_2M_4M 0x03
++
++#define TLB_INST_ALL 0x05
++#define TLB_INST_1G 0x06
++
++#define TLB_DATA_4K 0x11
++#define TLB_DATA_4M 0x12
++#define TLB_DATA_2M_4M 0x13
++#define TLB_DATA_4K_4M 0x14
++
++#define TLB_DATA_1G 0x16
++#define TLB_DATA_1G_2M_4M 0x17
++
++#define TLB_DATA0_4K 0x21
++#define TLB_DATA0_4M 0x22
++#define TLB_DATA0_2M_4M 0x23
++
++#define STLB_4K 0x41
++#define STLB_4K_2M 0x42
++
++/*
++ * All of leaf 0x2's one-byte TLB descriptors implies the same number of
++ * entries for their respective TLB types. The 0x63 descriptor is an
++ * exception: it implies 4 dTLB entries for 1GB pages and 32 dTLB entries
++ * for 2MB or 4MB pages. Encode descriptor 0x63 dTLB entry count for
++ * 2MB/4MB pages here, as its count for dTLB 1GB pages is already at the
++ * intel_tlb_table[] mapping.
++ */
++#define TLB_0x63_2M_4M_ENTRIES 32
+
+ static const struct _tlb_table intel_tlb_table[] = {
+ { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
+@@ -681,7 +692,8 @@ static const struct _tlb_table intel_tlb
+ { 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" },
+ { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
+ { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" },
+- { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" },
++ { 0x63, TLB_DATA_1G_2M_4M, 4, " TLB_DATA 1 GByte pages, 4-way set associative"
++ " (plus 32 entries TLB_DATA 2 MByte or 4 MByte pages, not encoded here)" },
+ { 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" },
+ { 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
+ { 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" },
+@@ -781,6 +793,12 @@ static void intel_tlb_lookup(const unsig
+ if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
+ break;
++ case TLB_DATA_1G_2M_4M:
++ if (tlb_lld_2m[ENTRIES] < TLB_0x63_2M_4M_ENTRIES)
++ tlb_lld_2m[ENTRIES] = TLB_0x63_2M_4M_ENTRIES;
++ if (tlb_lld_4m[ENTRIES] < TLB_0x63_2M_4M_ENTRIES)
++ tlb_lld_4m[ENTRIES] = TLB_0x63_2M_4M_ENTRIES;
++ fallthrough;
+ case TLB_DATA_1G:
+ if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
--- /dev/null
+From 1881148215c67151b146450fb89ec22fd92337a7 Mon Sep 17 00:00:00 2001
+From: "Ahmed S. Darwish" <darwi@linutronix.de>
+Date: Tue, 4 Mar 2025 09:51:13 +0100
+Subject: x86/cpu: Validate CPUID leaf 0x2 EDX output
+
+From: Ahmed S. Darwish <darwi@linutronix.de>
+
+commit 1881148215c67151b146450fb89ec22fd92337a7 upstream.
+
+CPUID leaf 0x2 emits one-byte descriptors in its four output registers
+EAX, EBX, ECX, and EDX. For these descriptors to be valid, the most
+significant bit (MSB) of each register must be clear.
+
+Leaf 0x2 parsing at intel.c only validated the MSBs of EAX, EBX, and
+ECX, but left EDX unchecked.
+
+Validate EDX's most-significant bit as well.
+
+Fixes: e0ba94f14f74 ("x86/tlb_info: get last level TLB entry number of CPU")
+Signed-off-by: Ahmed S. Darwish <darwi@linutronix.de>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: stable@kernel.org
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Link: https://lore.kernel.org/r/20250304085152.51092-3-darwi@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/intel.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -804,7 +804,7 @@ static void intel_detect_tlb(struct cpui
+ cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
+
+ /* If bit 31 is set, this is an unknown format */
+- for (j = 0 ; j < 3 ; j++)
++ for (j = 0 ; j < 4 ; j++)
+ if (regs[j] & (1 << 31))
+ regs[j] = 0;
+