--- /dev/null
+From 3c0b6f924e1259ade38587ea719b693f6f6f2f3e Mon Sep 17 00:00:00 2001
+From: Mavroudis Chatzilazaridis <mavchatz@protonmail.com>
+Date: Sun, 28 Jul 2024 12:36:04 +0000
+Subject: ALSA: hda/realtek: Add quirk for Acer Aspire E5-574G
+
+From: Mavroudis Chatzilazaridis <mavchatz@protonmail.com>
+
+commit 3c0b6f924e1259ade38587ea719b693f6f6f2f3e upstream.
+
+ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST fixes combo jack detection and
+limits the internal microphone boost that causes clipping on this model.
+
+Signed-off-by: Mavroudis Chatzilazaridis <mavchatz@protonmail.com>
+Cc: <stable@vger.kernel.org>
+Link: https://patch.msgid.link/20240728123601.144017-1-mavchatz@protonmail.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9866,6 +9866,7 @@ static const struct snd_pci_quirk alc269
+ SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
+ SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF),
++ SND_PCI_QUIRK(0x1025, 0x100c, "Acer Aspire E5-574G", ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK),
+ SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
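
As background for the quirk entry added above: the driver matches the PCI
subsystem vendor/device ID (here 0x1025/0x100c) against a static table to
pick a fixup. Below is a minimal userspace sketch of that kind of SSID
lookup; the struct and function names are illustrative stand-ins, not the
actual ALSA implementation.

    /* Simplified model of how an SSID-keyed quirk table is matched. */
    #include <stdio.h>
    #include <stddef.h>

    struct quirk {
        unsigned short subvendor;   /* 0x1025 = Acer */
        unsigned short subdevice;   /* 0x100c = Aspire E5-574G */
        const char *name;
    };

    static const struct quirk quirks[] = {
        { 0x1025, 0x100c, "Acer Aspire E5-574G" },
        { 0x1025, 0x101c, "Acer Veriton N2510G" },
    };

    static const struct quirk *lookup(unsigned short sv, unsigned short sd)
    {
        for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
            if (quirks[i].subvendor == sv && quirks[i].subdevice == sd)
                return &quirks[i];
        return NULL;
    }

    int main(void)
    {
        const struct quirk *q = lookup(0x1025, 0x100c);

        printf("%s\n", q ? q->name : "no quirk, use defaults");
        return 0;
    }
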
--- /dev/null
+From 952b13c215234855d75ef4b5bb0138075e73677c Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Fri, 26 Jul 2024 16:34:54 +0200
+Subject: ALSA: seq: ump: Optimize conversions from SysEx to UMP
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 952b13c215234855d75ef4b5bb0138075e73677c upstream.
+
+The current conversion from the legacy SysEx event to UMP SysEx packet
+in the sequencer core has a couple of issues:
+
+* The first packet trims the SysEx start byte (0xf0), hence it
+ contains only 5 bytes instead of 6. This isn't wrong, per
+ specification, but it's strange not to fill 6 bytes.
+
+* When the SysEx end marker (0xf7) is placed at the first byte of the
+  next packet, it'll end up as an empty packet carrying just the END
+  status. It can instead be folded into the previous packet with the
+  END status.
+
+This patch tries to address those issues. The first packet may have 6
+bytes even with the SysEx start, and an empty packet with the SysEx
+end marker is omitted.
+
+Fixes: e9e02819a98a ("ALSA: seq: Automatic conversion of UMP events")
+Cc: <stable@vger.kernel.org>
+Link: https://patch.msgid.link/20240726143455.3254-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/core/seq/seq_ump_convert.c | 37 +++++++++++++++++++++++--------------
+ 1 file changed, 23 insertions(+), 14 deletions(-)
+
+--- a/sound/core/seq/seq_ump_convert.c
++++ b/sound/core/seq/seq_ump_convert.c
+@@ -1192,44 +1192,53 @@ static int cvt_sysex_to_ump(struct snd_s
+ {
+ struct snd_seq_ump_event ev_cvt;
+ unsigned char status;
+- u8 buf[6], *xbuf;
++ u8 buf[8], *xbuf;
+ int offset = 0;
+ int len, err;
++ bool finished = false;
+
+ if (!snd_seq_ev_is_variable(event))
+ return 0;
+
+ setup_ump_event(&ev_cvt, event);
+- for (;;) {
++ while (!finished) {
+ len = snd_seq_expand_var_event_at(event, sizeof(buf), buf, offset);
+ if (len <= 0)
+ break;
+- if (WARN_ON(len > 6))
++ if (WARN_ON(len > sizeof(buf)))
+ break;
+- offset += len;
++
+ xbuf = buf;
++ status = UMP_SYSEX_STATUS_CONTINUE;
++ /* truncate the sysex start-marker */
+ if (*xbuf == UMP_MIDI1_MSG_SYSEX_START) {
+ status = UMP_SYSEX_STATUS_START;
+- xbuf++;
+ len--;
+- if (len > 0 && xbuf[len - 1] == UMP_MIDI1_MSG_SYSEX_END) {
++ offset++;
++ xbuf++;
++ }
++
++ /* if the last of this packet or the 1st byte of the next packet
++ * is the end-marker, finish the transfer with this packet
++ */
++ if (len > 0 && len < 8 &&
++ xbuf[len - 1] == UMP_MIDI1_MSG_SYSEX_END) {
++ if (status == UMP_SYSEX_STATUS_START)
+ status = UMP_SYSEX_STATUS_SINGLE;
+- len--;
+- }
+- } else {
+- if (xbuf[len - 1] == UMP_MIDI1_MSG_SYSEX_END) {
++ else
+ status = UMP_SYSEX_STATUS_END;
+- len--;
+- } else {
+- status = UMP_SYSEX_STATUS_CONTINUE;
+- }
++ len--;
++ finished = true;
+ }
++
++ len = min(len, 6);
+ fill_sysex7_ump(dest_port, ev_cvt.ump, status, xbuf, len);
+ err = __snd_seq_deliver_single_event(dest, dest_port,
+ (struct snd_seq_event *)&ev_cvt,
+ atomic, hop);
+ if (err < 0)
+ return err;
++ offset += len;
+ }
+ return 0;
+ }
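
The hunk above implements the packetization rule from the commit message:
strip the 0xf0 start marker, emit at most 6 data bytes per UMP SysEx7
packet, and fold a trailing 0xf7 into the final packet instead of sending
an empty END packet. Here is a standalone sketch of that rule, with
made-up status names and a stub emit() standing in for fill_sysex7_ump().

    #include <stdio.h>

    enum { SX_SINGLE, SX_START, SX_CONTINUE, SX_END };

    static void emit(int status, const unsigned char *buf, int len)
    {
        (void)buf;
        printf("status=%d len=%d\n", status, len);
    }

    static void sysex_to_packets(const unsigned char *data, int size)
    {
        int begin = 0, end = size;

        if (size > 0 && data[0] == 0xf0)          /* strip start marker */
            begin = 1;
        if (end > begin && data[end - 1] == 0xf7) /* fold end marker */
            end--;

        for (int off = begin; off < end || off == begin; off += 6) {
            int len = end - off;
            int status;

            if (len > 6)
                len = 6;
            if (off == begin)
                status = (off + len >= end) ? SX_SINGLE : SX_START;
            else
                status = (off + len >= end) ? SX_END : SX_CONTINUE;
            emit(status, data + off, len);
            if (len == 0)
                break;
        }
    }

    int main(void)
    {
        /* 7 payload bytes: one START packet of 6, one END packet of 1 */
        const unsigned char msg[] = { 0xf0, 1, 2, 3, 4, 5, 6, 7, 0xf7 };

        sysex_to_packets(msg, sizeof(msg));
        return 0;
    }
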
--- /dev/null
+From b7b7e1ab7619deb3b299b5e5c619c3e6f183a12d Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Wed, 31 Jul 2024 16:19:41 +0200
+Subject: ALSA: usb-audio: Correct surround channels in UAC1 channel map
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit b7b7e1ab7619deb3b299b5e5c619c3e6f183a12d upstream.
+
+The USB-audio driver puts SNDRV_CHMAP_SL and _SR as the left and right
+surround channels for the UAC1 channel map, respectively. But they
+should have been SNDRV_CHMAP_RL and _RR; the current values *_SL and
+*_SR are rather "side" channels, not "surround". I guess I took those
+mistakenly when I read the spec mentioning "surround left".
+
+This patch corrects those entries to be the right channels.
+
+Suggested-by: Sylvain BERTRAND <sylvain.bertrand@legeek.net>
+Closes: https://lore.kernel.org/ZqIyJD8lhd8hFhlC@freedom
+Fixes: 04324ccc75f9 ("ALSA: usb-audio: add channel map support")
+Cc: <stable@vger.kernel.org>
+Link: https://patch.msgid.link/20240731142018.24750-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/usb/stream.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -244,8 +244,8 @@ static struct snd_pcm_chmap_elem *conver
+ SNDRV_CHMAP_FR, /* right front */
+ SNDRV_CHMAP_FC, /* center front */
+ SNDRV_CHMAP_LFE, /* LFE */
+- SNDRV_CHMAP_SL, /* left surround */
+- SNDRV_CHMAP_SR, /* right surround */
++ SNDRV_CHMAP_RL, /* left surround */
++ SNDRV_CHMAP_RR, /* right surround */
+ SNDRV_CHMAP_FLC, /* left of center */
+ SNDRV_CHMAP_FRC, /* right of center */
+ SNDRV_CHMAP_RC, /* surround */
--- /dev/null
+From df3d6a3e01fd82cb74b6bb309f7be71e728a3448 Mon Sep 17 00:00:00 2001
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Date: Thu, 25 Jul 2024 18:28:08 -0400
+Subject: Bluetooth: hci_event: Fix setting DISCOVERY_FINDING for passive scanning
+
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+
+commit df3d6a3e01fd82cb74b6bb309f7be71e728a3448 upstream.
+
+DISCOVERY_FINDING shall only be set for active scanning, as passive
+scanning is not meant to generate MGMT Device Found events. Those
+events cause the discovering state to go out of sync, since userspace
+would believe it is discovering when in fact it is just passively
+scanning.
+
+Cc: stable@vger.kernel.org
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=219088
+Fixes: 2e2515c1ba38 ("Bluetooth: hci_event: Set DISCOVERY_FINDING on SCAN_ENABLED")
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bluetooth/hci_core.c | 7 -------
+ net/bluetooth/hci_event.c | 5 +++--
+ 2 files changed, 3 insertions(+), 9 deletions(-)
+
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -120,13 +120,6 @@ void hci_discovery_set_state(struct hci_
+ case DISCOVERY_STARTING:
+ break;
+ case DISCOVERY_FINDING:
+- /* If discovery was not started then it was initiated by the
+- * MGMT interface so no MGMT event shall be generated either
+- */
+- if (old_state != DISCOVERY_STARTING) {
+- hdev->discovery.state = old_state;
+- return;
+- }
+ mgmt_discovering(hdev, 1);
+ break;
+ case DISCOVERY_RESOLVING:
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -1722,9 +1722,10 @@ static void le_set_scan_enable_complete(
+ switch (enable) {
+ case LE_SCAN_ENABLE:
+ hci_dev_set_flag(hdev, HCI_LE_SCAN);
+- if (hdev->le_scan_type == LE_SCAN_ACTIVE)
++ if (hdev->le_scan_type == LE_SCAN_ACTIVE) {
+ clear_pending_adv_report(hdev);
+- hci_discovery_set_state(hdev, DISCOVERY_FINDING);
++ hci_discovery_set_state(hdev, DISCOVERY_FINDING);
++ }
+ break;
+
+ case LE_SCAN_DISABLE:
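
The behavioral rule the fix enforces, reduced to a standalone sketch: the
discovery state machine only advances to FINDING when the enabled scan is
active; a passive scan leaves it untouched. Types and helpers below are
simplified stand-ins, not the kernel's hci structures.

    #include <stdio.h>

    enum scan_type  { SCAN_PASSIVE, SCAN_ACTIVE };
    enum disc_state { DISC_STOPPED, DISC_FINDING };

    struct dev {
        enum scan_type  le_scan_type;
        enum disc_state discovery;
    };

    static void le_scan_enabled(struct dev *d)
    {
        /* only an active scan means userspace asked to discover */
        if (d->le_scan_type == SCAN_ACTIVE)
            d->discovery = DISC_FINDING;
        /* passive scanning leaves the discovery state untouched, so
         * userspace is never told a discovery is running when the
         * controller is merely listening */
    }

    int main(void)
    {
        struct dev d = { SCAN_PASSIVE, DISC_STOPPED };

        le_scan_enabled(&d);
        printf("passive scan -> %s\n",
               d.discovery == DISC_FINDING ? "FINDING" : "STOPPED");
        return 0;
    }
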
--- /dev/null
+From d89c285d28491d8f10534c262ac9e6bdcbe1b4d2 Mon Sep 17 00:00:00 2001
+From: Naohiro Aota <naohiro.aota@wdc.com>
+Date: Thu, 11 Jul 2024 23:50:58 +0900
+Subject: btrfs: do not subtract delalloc from avail bytes
+
+From: Naohiro Aota <naohiro.aota@wdc.com>
+
+commit d89c285d28491d8f10534c262ac9e6bdcbe1b4d2 upstream.
+
+The block group's avail bytes printed when dumping a space info subtract
+the delalloc_bytes. However, as shown in btrfs_add_reserved_bytes() and
+btrfs_free_reserved_bytes(), it is added or subtracted along with
+"reserved" for the delalloc case, which means the "delalloc_bytes" is a
+part of the "reserved" bytes. So, excluding it to calculate the avail space
+counts delalloc_bytes twice, which can lead to an invalid result.
+
+Fixes: e50b122b832b ("btrfs: print available space for a block group when dumping a space info")
+CC: stable@vger.kernel.org # 6.6+
+Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
+Reviewed-by: Boris Burkov <boris@bur.io>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/space-info.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -573,8 +573,7 @@ again:
+
+ spin_lock(&cache->lock);
+ avail = cache->length - cache->used - cache->pinned -
+- cache->reserved - cache->delalloc_bytes -
+- cache->bytes_super - cache->zone_unusable;
++ cache->reserved - cache->bytes_super - cache->zone_unusable;
+ btrfs_info(fs_info,
+ "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s",
+ cache->start, cache->length, cache->used, cache->pinned,
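
A worked example of the double-count described in the commit message, with
made-up numbers: since delalloc_bytes is a subset of reserved, subtracting
both understates the available space.

    #include <stdio.h>

    int main(void)
    {
        unsigned long long length = 1024, used = 200, pinned = 100;
        unsigned long long reserved = 300;  /* already includes delalloc */
        unsigned long long delalloc = 120;  /* subset of reserved */
        unsigned long long super = 4, zone_unusable = 0;

        unsigned long long wrong = length - used - pinned - reserved -
                                   delalloc - super - zone_unusable;
        unsigned long long fixed = length - used - pinned - reserved -
                                   super - zone_unusable;

        printf("wrong avail = %llu (delalloc counted twice)\n", wrong);
        printf("fixed avail = %llu\n", fixed);
        return 0;
    }
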
--- /dev/null
+From 478574370bef7951fbd9ef5155537d6cbed49472 Mon Sep 17 00:00:00 2001
+From: Boris Burkov <boris@bur.io>
+Date: Mon, 22 Jul 2024 16:49:45 -0700
+Subject: btrfs: make cow_file_range_inline() honor locked_page on error
+
+From: Boris Burkov <boris@bur.io>
+
+commit 478574370bef7951fbd9ef5155537d6cbed49472 upstream.
+
+The btrfs buffered write path runs through __extent_writepage() which
+has some tricky return value handling for writepage_delalloc().
+Specifically, when that returns 1, we exit, but for other return values
+we continue and end up calling btrfs_folio_end_all_writers(). If the
+folio has been unlocked (note that we check the PageLocked bit at the
+start of __extent_writepage()), this results in an assert panic like
+this one from syzbot:
+
+ BTRFS: error (device loop0 state EAL) in free_log_tree:3267: errno=-5 IO failure
+ BTRFS warning (device loop0 state EAL): Skipping commit of aborted transaction.
+ BTRFS: error (device loop0 state EAL) in cleanup_transaction:2018: errno=-5 IO failure
+ assertion failed: folio_test_locked(folio), in fs/btrfs/subpage.c:871
+ ------------[ cut here ]------------
+ kernel BUG at fs/btrfs/subpage.c:871!
+ Oops: invalid opcode: 0000 [#1] PREEMPT SMP KASAN PTI
+ CPU: 1 PID: 5090 Comm: syz-executor225 Not tainted
+ 6.10.0-syzkaller-05505-gb1bc554e009e #0
+ Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS
+ Google 06/27/2024
+ RIP: 0010:btrfs_folio_end_all_writers+0x55b/0x610 fs/btrfs/subpage.c:871
+ Code: e9 d3 fb ff ff e8 25 22 c2 fd 48 c7 c7 c0 3c 0e 8c 48 c7 c6 80 3d
+ 0e 8c 48 c7 c2 60 3c 0e 8c b9 67 03 00 00 e8 66 47 ad 07 90 <0f> 0b e8
+ 6e 45 b0 07 4c 89 ff be 08 00 00 00 e8 21 12 25 fe 4c 89
+ RSP: 0018:ffffc900033d72e0 EFLAGS: 00010246
+ RAX: 0000000000000045 RBX: 00fff0000000402c RCX: 663b7a08c50a0a00
+ RDX: 0000000000000000 RSI: 0000000080000000 RDI: 0000000000000000
+ RBP: ffffc900033d73b0 R08: ffffffff8176b98c R09: 1ffff9200067adfc
+ R10: dffffc0000000000 R11: fffff5200067adfd R12: 0000000000000001
+ R13: dffffc0000000000 R14: 0000000000000000 R15: ffffea0001cbee80
+ FS: 0000000000000000(0000) GS:ffff8880b9500000(0000)
+ knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 00007f5f076012f8 CR3: 000000000e134000 CR4: 00000000003506f0
+ DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ Call Trace:
+ <TASK>
+ __extent_writepage fs/btrfs/extent_io.c:1597 [inline]
+ extent_write_cache_pages fs/btrfs/extent_io.c:2251 [inline]
+ btrfs_writepages+0x14d7/0x2760 fs/btrfs/extent_io.c:2373
+ do_writepages+0x359/0x870 mm/page-writeback.c:2656
+ filemap_fdatawrite_wbc+0x125/0x180 mm/filemap.c:397
+ __filemap_fdatawrite_range mm/filemap.c:430 [inline]
+ __filemap_fdatawrite mm/filemap.c:436 [inline]
+ filemap_flush+0xdf/0x130 mm/filemap.c:463
+ btrfs_release_file+0x117/0x130 fs/btrfs/file.c:1547
+ __fput+0x24a/0x8a0 fs/file_table.c:422
+ task_work_run+0x24f/0x310 kernel/task_work.c:222
+ exit_task_work include/linux/task_work.h:40 [inline]
+ do_exit+0xa2f/0x27f0 kernel/exit.c:877
+ do_group_exit+0x207/0x2c0 kernel/exit.c:1026
+ __do_sys_exit_group kernel/exit.c:1037 [inline]
+ __se_sys_exit_group kernel/exit.c:1035 [inline]
+ __x64_sys_exit_group+0x3f/0x40 kernel/exit.c:1035
+ x64_sys_call+0x2634/0x2640
+ arch/x86/include/generated/asm/syscalls_64.h:232
+ do_syscall_x64 arch/x86/entry/common.c:52 [inline]
+ do_syscall_64+0xf3/0x230 arch/x86/entry/common.c:83
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+ RIP: 0033:0x7f5f075b70c9
+ Code: Unable to access opcode bytes at
+ 0x7f5f075b709f.
+
+I was hitting the same issue by doing hundreds of accelerated runs of
+generic/475, which also hits IO errors by design.
+
+I instrumented that reproducer with bpftrace and found that the
+undesirable folio_unlock was coming from the following callstack:
+
+ folio_unlock+5
+ __process_pages_contig+475
+ cow_file_range_inline.constprop.0+230
+ cow_file_range+803
+ btrfs_run_delalloc_range+566
+ writepage_delalloc+332
+ __extent_writepage # inlined in my stacktrace, but I added it here
+ extent_write_cache_pages+622
+
+Looking at the bisected-to patch in the syzbot report, Josef realized
+that the logic of the cow_file_range_inline error path had subtly
+changed. In the past, on error, it jumped to out_unlock in
+cow_file_range(), which honors the locked_page, so when we ultimately
+call folio_end_all_writers(), the folio of interest is still locked.
+After the change, we always unlock, ignoring the locked_page, on both
+success and error. On the success path, this all results in returning
+1 to __extent_writepage(), which skips the folio_end_all_writers()
+call, which makes it OK to have unlocked the folio.
+
+Fix the bug by wiring the locked_page into cow_file_range_inline() and
+only setting locked_page to NULL on success.
+
+Reported-by: syzbot+a14d8ac9af3a2a4fd0c8@syzkaller.appspotmail.com
+Fixes: 0586d0a89e77 ("btrfs: move extent bit and page cleanup into cow_file_range_inline")
+CC: stable@vger.kernel.org # 6.10+
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Boris Burkov <boris@bur.io>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/inode.c | 16 ++++++++++------
+ 1 file changed, 10 insertions(+), 6 deletions(-)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -737,8 +737,9 @@ out:
+ return ret;
+ }
+
+-static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 offset,
+- u64 end,
++static noinline int cow_file_range_inline(struct btrfs_inode *inode,
++ struct page *locked_page,
++ u64 offset, u64 end,
+ size_t compressed_size,
+ int compress_type,
+ struct folio *compressed_folio,
+@@ -762,7 +763,10 @@ static noinline int cow_file_range_inlin
+ return ret;
+ }
+
+- extent_clear_unlock_delalloc(inode, offset, end, NULL, &cached,
++ if (ret == 0)
++ locked_page = NULL;
++
++ extent_clear_unlock_delalloc(inode, offset, end, locked_page, &cached,
+ clear_flags,
+ PAGE_UNLOCK | PAGE_START_WRITEBACK |
+ PAGE_END_WRITEBACK);
+@@ -1037,10 +1041,10 @@ again:
+ * extent for the subpage case.
+ */
+ if (total_in < actual_end)
+- ret = cow_file_range_inline(inode, start, end, 0,
++ ret = cow_file_range_inline(inode, NULL, start, end, 0,
+ BTRFS_COMPRESS_NONE, NULL, false);
+ else
+- ret = cow_file_range_inline(inode, start, end, total_compressed,
++ ret = cow_file_range_inline(inode, NULL, start, end, total_compressed,
+ compress_type, folios[0], false);
+ if (ret <= 0) {
+ if (ret < 0)
+@@ -1359,7 +1363,7 @@ static noinline int cow_file_range(struc
+
+ if (!no_inline) {
+ /* lets try to make an inline extent */
+- ret = cow_file_range_inline(inode, start, end, 0,
++ ret = cow_file_range_inline(inode, locked_page, start, end, 0,
+ BTRFS_COMPRESS_NONE, NULL, false);
+ if (ret <= 0) {
+ /*
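
The core idea of the fix, as a standalone sketch: on success the caller's
locked page may be unlocked along with the rest of the range, but on error
it must stay locked for __extent_writepage()'s end-of-writeback handling.
Names below are illustrative; unlock_range() stands in for
extent_clear_unlock_delalloc(), which skips locked_page when it is
non-NULL.

    #include <stdio.h>

    struct page;

    static void unlock_range(struct page *locked_page)
    {
        printf(locked_page ? "kept caller's page locked\n"
                           : "unlocked all pages\n");
    }

    static int range_inline(struct page *locked_page, int ret)
    {
        if (ret == 0)
            locked_page = NULL; /* success: safe to unlock everything */
        unlock_range(locked_page);
        return ret;
    }

    int main(void)
    {
        struct page *pg = (struct page *)0x1; /* dummy non-NULL token */

        range_inline(pg, 0);  /* success path: unlock all pages */
        range_inline(pg, -5); /* error path: honor locked_page */
        return 0;
    }
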
--- /dev/null
+From 8cd44dd1d17a23d5cc8c443c659ca57aa76e2fa5 Mon Sep 17 00:00:00 2001
+From: Naohiro Aota <naohiro.aota@wdc.com>
+Date: Wed, 15 Feb 2023 09:18:02 +0900
+Subject: btrfs: zoned: fix zone_unusable accounting on making block group read-write again
+
+From: Naohiro Aota <naohiro.aota@wdc.com>
+
+commit 8cd44dd1d17a23d5cc8c443c659ca57aa76e2fa5 upstream.
+
+When btrfs makes a block group read-only, it adds all free regions in the
+block group to space_info->bytes_readonly. That free space excludes
+reserved and pinned regions. OTOH, when btrfs makes the block group
+read-write again, it moves all the unused regions into the block group's
+zone_unusable. That unused region includes reserved and pinned regions.
+As a result, it counts too many zone_unusable bytes.
+
+Fortunately (or unfortunately), having an erroneous zone_unusable does
+not affect the calculation of space_info->bytes_readonly, because the
+free space calculation (num_bytes in btrfs_dec_block_group_ro) is based
+on the same erroneous zone_unusable, which reduces num_bytes just
+enough to cancel the error.
+
+This behavior can be easily discovered by adding a WARN_ON to check e.g.
+"bg->pinned > 0" in btrfs_dec_block_group_ro(), and running an fstests
+test case like btrfs/282.
+
+Fix it by properly considering pinned and reserved in
+btrfs_dec_block_group_ro(). Also, add a WARN_ON and introduce
+btrfs_space_info_update_bytes_zone_unusable() to catch a similar mistake.
+
+Fixes: 169e0da91a21 ("btrfs: zoned: track unusable bytes for zones")
+CC: stable@vger.kernel.org # 5.15+
+Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/block-group.c | 13 ++++++++-----
+ fs/btrfs/extent-tree.c | 3 ++-
+ fs/btrfs/free-space-cache.c | 4 +++-
+ fs/btrfs/space-info.c | 2 +-
+ fs/btrfs/space-info.h | 1 +
+ include/trace/events/btrfs.h | 8 ++++++++
+ 6 files changed, 23 insertions(+), 8 deletions(-)
+
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1216,8 +1216,8 @@ int btrfs_remove_block_group(struct btrf
+ block_group->space_info->total_bytes -= block_group->length;
+ block_group->space_info->bytes_readonly -=
+ (block_group->length - block_group->zone_unusable);
+- block_group->space_info->bytes_zone_unusable -=
+- block_group->zone_unusable;
++ btrfs_space_info_update_bytes_zone_unusable(fs_info, block_group->space_info,
++ -block_group->zone_unusable);
+ block_group->space_info->disk_total -= block_group->length * factor;
+
+ spin_unlock(&block_group->space_info->lock);
+@@ -1389,7 +1389,8 @@ static int inc_block_group_ro(struct btr
+ if (btrfs_is_zoned(cache->fs_info)) {
+ /* Migrate zone_unusable bytes to readonly */
+ sinfo->bytes_readonly += cache->zone_unusable;
+- sinfo->bytes_zone_unusable -= cache->zone_unusable;
++ btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
++ -cache->zone_unusable);
+ cache->zone_unusable = 0;
+ }
+ cache->ro++;
+@@ -3034,9 +3035,11 @@ void btrfs_dec_block_group_ro(struct btr
+ if (btrfs_is_zoned(cache->fs_info)) {
+ /* Migrate zone_unusable bytes back */
+ cache->zone_unusable =
+- (cache->alloc_offset - cache->used) +
++ (cache->alloc_offset - cache->used - cache->pinned -
++ cache->reserved) +
+ (cache->length - cache->zone_capacity);
+- sinfo->bytes_zone_unusable += cache->zone_unusable;
++ btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
++ cache->zone_unusable);
+ sinfo->bytes_readonly -= cache->zone_unusable;
+ }
+ num_bytes = cache->length - cache->reserved -
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -2806,7 +2806,8 @@ static int unpin_extent_range(struct btr
+ readonly = true;
+ } else if (btrfs_is_zoned(fs_info)) {
+ /* Need reset before reusing in a zoned block group */
+- space_info->bytes_zone_unusable += len;
++ btrfs_space_info_update_bytes_zone_unusable(fs_info, space_info,
++ len);
+ readonly = true;
+ }
+ spin_unlock(&cache->lock);
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -2723,8 +2723,10 @@ static int __btrfs_add_free_space_zoned(
+ * If the block group is read-only, we should account freed space into
+ * bytes_readonly.
+ */
+- if (!block_group->ro)
++ if (!block_group->ro) {
+ block_group->zone_unusable += to_unusable;
++ WARN_ON(block_group->zone_unusable > block_group->length);
++ }
+ spin_unlock(&ctl->tree_lock);
+ if (!used) {
+ spin_lock(&block_group->lock);
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -311,7 +311,7 @@ void btrfs_add_bg_to_space_info(struct b
+ found->bytes_used += block_group->used;
+ found->disk_used += block_group->used * factor;
+ found->bytes_readonly += block_group->bytes_super;
+- found->bytes_zone_unusable += block_group->zone_unusable;
++ btrfs_space_info_update_bytes_zone_unusable(info, found, block_group->zone_unusable);
+ if (block_group->length > 0)
+ found->full = 0;
+ btrfs_try_granting_tickets(info, found);
+--- a/fs/btrfs/space-info.h
++++ b/fs/btrfs/space-info.h
+@@ -207,6 +207,7 @@ btrfs_space_info_update_##name(struct bt
+
+ DECLARE_SPACE_INFO_UPDATE(bytes_may_use, "space_info");
+ DECLARE_SPACE_INFO_UPDATE(bytes_pinned, "pinned");
++DECLARE_SPACE_INFO_UPDATE(bytes_zone_unusable, "zone_unusable");
+
+ int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
+ void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
+--- a/include/trace/events/btrfs.h
++++ b/include/trace/events/btrfs.h
+@@ -2394,6 +2394,14 @@ DEFINE_EVENT(btrfs__space_info_update, u
+ TP_ARGS(fs_info, sinfo, old, diff)
+ );
+
++DEFINE_EVENT(btrfs__space_info_update, update_bytes_zone_unusable,
++
++ TP_PROTO(const struct btrfs_fs_info *fs_info,
++ const struct btrfs_space_info *sinfo, u64 old, s64 diff),
++
++ TP_ARGS(fs_info, sinfo, old, diff)
++);
++
+ DECLARE_EVENT_CLASS(btrfs_raid56_bio,
+
+ TP_PROTO(const struct btrfs_raid_bio *rbio,
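
A worked example of the corrected accounting in btrfs_dec_block_group_ro(),
with made-up numbers: pinned and reserved bytes are in use, not unusable,
so they must be excluded when migrating bytes back from readonly.

    #include <stdio.h>

    int main(void)
    {
        unsigned long long alloc_offset = 500, used = 300;
        unsigned long long pinned = 50, reserved = 30;
        unsigned long long length = 1024, zone_capacity = 1000;

        unsigned long long old_calc =
            (alloc_offset - used) + (length - zone_capacity);
        unsigned long long new_calc =
            (alloc_offset - used - pinned - reserved) +
            (length - zone_capacity);

        printf("old zone_unusable = %llu (pinned+reserved over-counted)\n",
               old_calc);
        printf("new zone_unusable = %llu\n", new_calc);
        return 0;
    }
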
--- /dev/null
+From 31634d7597d8c57894b6c98eeefc9e58cf842993 Mon Sep 17 00:00:00 2001
+From: Xiubo Li <xiubli@redhat.com>
+Date: Fri, 12 Jul 2024 12:40:19 +0800
+Subject: ceph: force sending a cap update msg back to MDS for revoke op
+
+From: Xiubo Li <xiubli@redhat.com>
+
+commit 31634d7597d8c57894b6c98eeefc9e58cf842993 upstream.
+
+If a client sends out a cap update dropping caps with the prior 'seq'
+just before an incoming cap revoke request, then the client may drop
+the revoke because it believes it's already released the requested
+capabilities.
+
+This causes the MDS to wait indefinitely for the client to respond
+to the revoke. It's therefore always a good idea to ack the cap
+revoke request with the bumped up 'seq'.
+
+Currently, if cap->issued equals newcaps, check_caps() will do
+nothing; we should force-flush the caps in that case.
+
+Cc: stable@vger.kernel.org
+Link: https://tracker.ceph.com/issues/61782
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Venky Shankar <vshankar@redhat.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ceph/caps.c | 35 ++++++++++++++++++++++++-----------
+ fs/ceph/super.h | 7 ++++---
+ 2 files changed, 28 insertions(+), 14 deletions(-)
+
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -2016,6 +2016,8 @@ bool __ceph_should_report_size(struct ce
+ * CHECK_CAPS_AUTHONLY - we should only check the auth cap
+ * CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
+ * further delay.
++ * CHECK_CAPS_FLUSH_FORCE - we should flush any caps immediately, without
++ * further delay.
+ */
+ void ceph_check_caps(struct ceph_inode_info *ci, int flags)
+ {
+@@ -2097,7 +2099,7 @@ retry:
+ }
+
+ doutc(cl, "%p %llx.%llx file_want %s used %s dirty %s "
+- "flushing %s issued %s revoking %s retain %s %s%s%s\n",
++ "flushing %s issued %s revoking %s retain %s %s%s%s%s\n",
+ inode, ceph_vinop(inode), ceph_cap_string(file_wanted),
+ ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
+ ceph_cap_string(ci->i_flushing_caps),
+@@ -2105,7 +2107,8 @@ retry:
+ ceph_cap_string(retain),
+ (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
+ (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "",
+- (flags & CHECK_CAPS_NOINVAL) ? " NOINVAL" : "");
++ (flags & CHECK_CAPS_NOINVAL) ? " NOINVAL" : "",
++ (flags & CHECK_CAPS_FLUSH_FORCE) ? " FLUSH_FORCE" : "");
+
+ /*
+ * If we no longer need to hold onto old our caps, and we may
+@@ -2180,6 +2183,11 @@ retry:
+ queue_writeback = true;
+ }
+
++ if (flags & CHECK_CAPS_FLUSH_FORCE) {
++ doutc(cl, "force to flush caps\n");
++ goto ack;
++ }
++
+ if (cap == ci->i_auth_cap &&
+ (cap->issued & CEPH_CAP_FILE_WR)) {
+ /* request larger max_size from MDS? */
+@@ -3504,6 +3512,8 @@ static void handle_cap_grant(struct inod
+ bool queue_invalidate = false;
+ bool deleted_inode = false;
+ bool fill_inline = false;
++ bool revoke_wait = false;
++ int flags = 0;
+
+ /*
+ * If there is at least one crypto block then we'll trust
+@@ -3699,16 +3709,18 @@ static void handle_cap_grant(struct inod
+ ceph_cap_string(cap->issued), ceph_cap_string(newcaps),
+ ceph_cap_string(revoking));
+ if (S_ISREG(inode->i_mode) &&
+- (revoking & used & CEPH_CAP_FILE_BUFFER))
++ (revoking & used & CEPH_CAP_FILE_BUFFER)) {
+ writeback = true; /* initiate writeback; will delay ack */
+- else if (queue_invalidate &&
++ revoke_wait = true;
++ } else if (queue_invalidate &&
+ revoking == CEPH_CAP_FILE_CACHE &&
+- (newcaps & CEPH_CAP_FILE_LAZYIO) == 0)
+- ; /* do nothing yet, invalidation will be queued */
+- else if (cap == ci->i_auth_cap)
++ (newcaps & CEPH_CAP_FILE_LAZYIO) == 0) {
++ revoke_wait = true; /* do nothing yet, invalidation will be queued */
++ } else if (cap == ci->i_auth_cap) {
+ check_caps = 1; /* check auth cap only */
+- else
++ } else {
+ check_caps = 2; /* check all caps */
++ }
+ /* If there is new caps, try to wake up the waiters */
+ if (~cap->issued & newcaps)
+ wake = true;
+@@ -3735,8 +3747,9 @@ static void handle_cap_grant(struct inod
+ BUG_ON(cap->issued & ~cap->implemented);
+
+ /* don't let check_caps skip sending a response to MDS for revoke msgs */
+- if (le32_to_cpu(grant->op) == CEPH_CAP_OP_REVOKE) {
++ if (!revoke_wait && le32_to_cpu(grant->op) == CEPH_CAP_OP_REVOKE) {
+ cap->mds_wanted = 0;
++ flags |= CHECK_CAPS_FLUSH_FORCE;
+ if (cap == ci->i_auth_cap)
+ check_caps = 1; /* check auth cap only */
+ else
+@@ -3792,9 +3805,9 @@ static void handle_cap_grant(struct inod
+
+ mutex_unlock(&session->s_mutex);
+ if (check_caps == 1)
+- ceph_check_caps(ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_NOINVAL);
++ ceph_check_caps(ci, flags | CHECK_CAPS_AUTHONLY | CHECK_CAPS_NOINVAL);
+ else if (check_caps == 2)
+- ceph_check_caps(ci, CHECK_CAPS_NOINVAL);
++ ceph_check_caps(ci, flags | CHECK_CAPS_NOINVAL);
+ }
+
+ /*
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -200,9 +200,10 @@ struct ceph_cap {
+ struct list_head caps_item;
+ };
+
+-#define CHECK_CAPS_AUTHONLY 1 /* only check auth cap */
+-#define CHECK_CAPS_FLUSH 2 /* flush any dirty caps */
+-#define CHECK_CAPS_NOINVAL 4 /* don't invalidate pagecache */
++#define CHECK_CAPS_AUTHONLY 1 /* only check auth cap */
++#define CHECK_CAPS_FLUSH 2 /* flush any dirty caps */
++#define CHECK_CAPS_NOINVAL 4 /* don't invalidate pagecache */
++#define CHECK_CAPS_FLUSH_FORCE 8 /* force flush any caps */
+
+ struct ceph_cap_flush {
+ u64 tid;
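
A reduced sketch of the new flag's mechanics: the CHECK_CAPS_* values are
bit flags, and FLUSH_FORCE short-circuits the "caps unchanged, do nothing"
path so a revoke is always acked. The decision helper below is a
simplified stand-in for check_caps(), modeling only the flag logic.

    #include <stdio.h>

    #define CHECK_CAPS_AUTHONLY    1
    #define CHECK_CAPS_FLUSH       2
    #define CHECK_CAPS_NOINVAL     4
    #define CHECK_CAPS_FLUSH_FORCE 8

    static int should_send_cap_msg(int issued, int newcaps, int flags)
    {
        if (flags & CHECK_CAPS_FLUSH_FORCE)
            return 1;             /* always ack, e.g. for a revoke */
        return issued != newcaps; /* otherwise only when caps change */
    }

    int main(void)
    {
        /* issued == newcaps would normally suppress the message ... */
        printf("%d\n", should_send_cap_msg(0x5, 0x5, CHECK_CAPS_NOINVAL));
        /* ... but the force flag makes the client respond anyway */
        printf("%d\n", should_send_cap_msg(0x5, 0x5,
                                           CHECK_CAPS_NOINVAL |
                                           CHECK_CAPS_FLUSH_FORCE));
        return 0;
    }
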
--- /dev/null
+From f3572db3c049b4d32bb5ba77ad5305616c44c7c1 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 24 Jul 2024 09:24:02 +0200
+Subject: drm/amdgpu: fix contiguous handling for IB parsing v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christian König <christian.koenig@amd.com>
+
+commit f3572db3c049b4d32bb5ba77ad5305616c44c7c1 upstream.
+
+Otherwise we won't get correct access to the IB.
+
+v2: keep setting AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS to avoid problems in
+ the VRAM backend.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3501
+Fixes: e362b7c8f8c7 ("drm/amdgpu: Modify the contiguous flags behaviour")
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Tested-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit fbfb5f0342253d92c4e446588c428a9d90c3f610)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1763,7 +1763,7 @@ int amdgpu_cs_find_mapping(struct amdgpu
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_bo_va_mapping *mapping;
+- int r;
++ int i, r;
+
+ addr /= AMDGPU_GPU_PAGE_SIZE;
+
+@@ -1778,13 +1778,13 @@ int amdgpu_cs_find_mapping(struct amdgpu
+ if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
+ return -EINVAL;
+
+- if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
+- (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+- amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
+- r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
+- if (r)
+- return r;
+- }
++ (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
++ amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
++ for (i = 0; i < (*bo)->placement.num_placement; i++)
++ (*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
++ r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
++ if (r)
++ return r;
+
+ return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
+ }
--- /dev/null
+From 0ce91928ec62d189b5c51816e325f02587b53118 Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <tzimmermann@suse.de>
+Date: Wed, 17 Jul 2024 16:24:16 +0200
+Subject: drm/ast: astdp: Wake up during connector status detection
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+commit 0ce91928ec62d189b5c51816e325f02587b53118 upstream.
+
+Power up the ASTDP connector for connection status detection if the
+connector is not active. Keep it powered if a display is attached.
+
+This fixes a bug where the connector does not come back after
+disconnecting the display. The encoder's atomic_disable turns off
+power on the physical connector. Further HPD reads will fail,
+thus preventing the driver from detecting re-connected displays.
+
+For connectors that are actively used, only test the HPD flag without
+touching power.
+
+Fixes: f81bb0ac7872 ("drm/ast: report connection status on Display Port.")
+Cc: Jocelyn Falempe <jfalempe@redhat.com>
+Cc: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v6.6+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Reviewed-by: Jocelyn Falempe <jfalempe@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240717143319.104012-2-tzimmermann@suse.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/ast/ast_dp.c | 7 +++++++
+ drivers/gpu/drm/ast/ast_drv.h | 1 +
+ drivers/gpu/drm/ast/ast_mode.c | 29 +++++++++++++++++++++++++++--
+ 3 files changed, 35 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/ast/ast_dp.c
++++ b/drivers/gpu/drm/ast/ast_dp.c
+@@ -158,7 +158,14 @@ void ast_dp_launch(struct drm_device *de
+ ASTDP_HOST_EDID_READ_DONE);
+ }
+
++bool ast_dp_power_is_on(struct ast_device *ast)
++{
++ u8 vgacre3;
++
++ vgacre3 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xe3);
+
++ return !(vgacre3 & AST_DP_PHY_SLEEP);
++}
+
+ void ast_dp_power_on_off(struct drm_device *dev, bool on)
+ {
+--- a/drivers/gpu/drm/ast/ast_drv.h
++++ b/drivers/gpu/drm/ast/ast_drv.h
+@@ -472,6 +472,7 @@ void ast_init_3rdtx(struct drm_device *d
+ bool ast_astdp_is_connected(struct ast_device *ast);
+ int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata);
+ void ast_dp_launch(struct drm_device *dev);
++bool ast_dp_power_is_on(struct ast_device *ast);
+ void ast_dp_power_on_off(struct drm_device *dev, bool no);
+ void ast_dp_set_on_off(struct drm_device *dev, bool no);
+ void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode);
+--- a/drivers/gpu/drm/ast/ast_mode.c
++++ b/drivers/gpu/drm/ast/ast_mode.c
+@@ -28,6 +28,7 @@
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
++#include <linux/delay.h>
+ #include <linux/export.h>
+ #include <linux/pci.h>
+
+@@ -1641,11 +1642,35 @@ static int ast_astdp_connector_helper_de
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+ {
++ struct drm_device *dev = connector->dev;
+ struct ast_device *ast = to_ast_device(connector->dev);
++ enum drm_connector_status status = connector_status_disconnected;
++ struct drm_connector_state *connector_state = connector->state;
++ bool is_active = false;
++
++ mutex_lock(&ast->modeset_lock);
++
++ if (connector_state && connector_state->crtc) {
++ struct drm_crtc_state *crtc_state = connector_state->crtc->state;
++
++ if (crtc_state && crtc_state->active)
++ is_active = true;
++ }
++
++ if (!is_active && !ast_dp_power_is_on(ast)) {
++ ast_dp_power_on_off(dev, true);
++ msleep(50);
++ }
+
+ if (ast_astdp_is_connected(ast))
+- return connector_status_connected;
+- return connector_status_disconnected;
++ status = connector_status_connected;
++
++ if (!is_active && status == connector_status_disconnected)
++ ast_dp_power_on_off(dev, false);
++
++ mutex_unlock(&ast->modeset_lock);
++
++ return status;
+ }
+
+ static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs = {
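
The detection flow added above, reduced to a standalone sketch: wake the
PHY only when the connector is idle, read HPD, and power back down only if
nothing was found, so an attached display keeps the connector powered.
Helper names are stand-ins for the example; read_hpd() pretends nothing is
attached.

    #include <stdio.h>
    #include <stdbool.h>

    static bool phy_on;

    static bool read_hpd(void)     { return false; }
    static void set_power(bool on) { phy_on = on; }

    static bool detect(bool connector_is_active)
    {
        bool connected;

        if (!connector_is_active && !phy_on)
            set_power(true);   /* wake the PHY so HPD reads work */

        connected = read_hpd();

        if (!connector_is_active && !connected)
            set_power(false);  /* nothing attached: power down again */

        return connected;
    }

    int main(void)
    {
        printf("connected=%d phy_on=%d\n", detect(false), phy_on);
        return 0;
    }
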
--- /dev/null
+From 12c35c5582acb0fd8f7713ffa75f450766022ff1 Mon Sep 17 00:00:00 2001
+From: Jammy Huang <jammy_huang@aspeedtech.com>
+Date: Thu, 18 Jul 2024 11:03:52 +0800
+Subject: drm/ast: Fix black screen after resume
+
+From: Jammy Huang <jammy_huang@aspeedtech.com>
+
+commit 12c35c5582acb0fd8f7713ffa75f450766022ff1 upstream.
+
+Suspend will disable the PCIe device. Thus, resume should do the full
+hw initialization again.
+Add some API calls to ast_drm_thaw() before ast_post_gpu() to fix the
+issue.
+
+v2:
+- fix function-call arguments
+
+Fixes: 5b71707dd13c ("drm/ast: Enable and unlock device access early during init")
+Reported-by: Cary Garrett <cogarre@gmail.com>
+Closes: https://lore.kernel.org/dri-devel/8ce1e1cc351153a890b65e62fed93b54ccd43f6a.camel@gmail.com/
+Cc: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: Jocelyn Falempe <jfalempe@redhat.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v6.6+
+Signed-off-by: Jammy Huang <jammy_huang@aspeedtech.com>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240718030352.654155-1-jammy_huang@aspeedtech.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/ast/ast_drv.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/gpu/drm/ast/ast_drv.c
++++ b/drivers/gpu/drm/ast/ast_drv.c
+@@ -391,6 +391,11 @@ static int ast_drm_freeze(struct drm_dev
+
+ static int ast_drm_thaw(struct drm_device *dev)
+ {
++ struct ast_device *ast = to_ast_device(dev);
++
++ ast_enable_vga(ast->ioregs);
++ ast_open_key(ast->ioregs);
++ ast_enable_mmio(dev->dev, ast->ioregs);
+ ast_post_gpu(dev);
+
+ return drm_mode_config_helper_resume(dev);
--- /dev/null
+From 5b511572660190db1dc8ba412efd0be0d3781ab6 Mon Sep 17 00:00:00 2001
+From: Nikita Zhandarovich <n.zhandarovich@fintech.ru>
+Date: Mon, 29 Jul 2024 10:40:35 -0700
+Subject: drm/i915: Fix possible int overflow in skl_ddi_calculate_wrpll()
+
+From: Nikita Zhandarovich <n.zhandarovich@fintech.ru>
+
+commit 5b511572660190db1dc8ba412efd0be0d3781ab6 upstream.
+
+On the off chance that the clock value ends up being too high (by
+means of skl_ddi_calculate_wrpll() having been called with a big
+enough value of crtc_state->port_clock * 1000), one possible
+consequence is that the result will not fit into a signed int.
+
+Fix this issue by moving conversion of clock parameter from kHz to Hz
+into the body of skl_ddi_calculate_wrpll(), as well as casting the
+same parameter to u64 type while calculating the value for AFE clock.
+This both mitigates the overflow problem and avoids possible erroneous
+integer promotion mishaps.
+
+Found by Linux Verification Center (linuxtesting.org) with static
+analysis tool SVACE.
+
+Fixes: 82d354370189 ("drm/i915/skl: Implementation of SKL DPLL programming")
+Cc: stable@vger.kernel.org
+Signed-off-by: Nikita Zhandarovich <n.zhandarovich@fintech.ru>
+Reviewed-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240729174035.25727-1-n.zhandarovich@fintech.ru
+(cherry picked from commit 833cf12846aa19adf9b76bc79c40747726f3c0c1)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
++++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+@@ -1658,7 +1658,7 @@ static void skl_wrpll_params_populate(st
+ }
+
+ static int
+-skl_ddi_calculate_wrpll(int clock /* in Hz */,
++skl_ddi_calculate_wrpll(int clock,
+ int ref_clock,
+ struct skl_wrpll_params *wrpll_params)
+ {
+@@ -1683,7 +1683,7 @@ skl_ddi_calculate_wrpll(int clock /* in
+ };
+ unsigned int dco, d, i;
+ unsigned int p0, p1, p2;
+- u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
++ u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */
+
+ for (d = 0; d < ARRAY_SIZE(dividers); d++) {
+ for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
+@@ -1808,7 +1808,7 @@ static int skl_ddi_hdmi_pll_dividers(str
+ struct skl_wrpll_params wrpll_params = {};
+ int ret;
+
+- ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
++ ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
+ i915->display.dpll.ref_clks.nssc, &wrpll_params);
+ if (ret)
+ return ret;
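
A worked example of the overflow class being fixed: with the clock kept in
32-bit arithmetic, multiplying an already-large Hz value by 5 wraps before
any widening, while casting to a 64-bit type first gives the intended AFE
clock. The wrap is demonstrated in unsigned arithmetic so the example
itself stays well defined; the kernel bug was the equivalent signed
overflow, and the port clock value is hypothetical.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int port_clock = 1000000;  /* hypothetical 1 GHz link, in kHz */

        /* old style: 32-bit arithmetic first, widened too late */
        uint32_t clock_hz = (uint32_t)port_clock * 1000u;
        uint64_t bad = clock_hz * 5u;            /* wraps modulo 2^32 */

        /* fixed style: widen to 64 bits before multiplying */
        uint64_t good = (uint64_t)port_clock * 1000 * 5;

        printf("bad  afe_clock = %llu\n", (unsigned long long)bad);
        printf("good afe_clock = %llu\n", (unsigned long long)good);
        return 0;
    }
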
--- /dev/null
+From 32df4abc44f24dbec239d43e2b26d5768c5d1a78 Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Date: Thu, 11 Jul 2024 14:53:32 +0100
+Subject: drm/v3d: Fix potential memory leak in the performance extension
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+commit 32df4abc44f24dbec239d43e2b26d5768c5d1a78 upstream.
+
+If fetching of userspace memory fails during the main loop, all drm sync
+objs looked up until that point will be leaked because of the missing
+drm_syncobj_put.
+
+Fix it by exporting and using a common cleanup helper.
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Fixes: bae7cb5d6800 ("drm/v3d: Create a CPU job extension for the reset performance query job")
+Cc: Maíra Canal <mcanal@igalia.com>
+Cc: Iago Toral Quiroga <itoral@igalia.com>
+Cc: stable@vger.kernel.org # v6.8+
+Signed-off-by: Maíra Canal <mcanal@igalia.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240711135340.84617-4-tursulin@igalia.com
+(cherry picked from commit 484de39fa5f5b7bd0c5f2e2c5265167250ef7501)
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/v3d/v3d_drv.h | 2 ++
+ drivers/gpu/drm/v3d/v3d_sched.c | 22 ++++++++++----
+ drivers/gpu/drm/v3d/v3d_submit.c | 52 ++++++++++++++++++++------------
+ 3 files changed, 50 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
+index c46eed35d26b..1d535abedc57 100644
+--- a/drivers/gpu/drm/v3d/v3d_drv.h
++++ b/drivers/gpu/drm/v3d/v3d_drv.h
+@@ -558,6 +558,8 @@ void v3d_mmu_remove_ptes(struct v3d_bo *bo);
+ /* v3d_sched.c */
+ void v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
+ unsigned int count);
++void v3d_performance_query_info_free(struct v3d_performance_query_info *query_info,
++ unsigned int count);
+ void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue);
+ int v3d_sched_init(struct v3d_dev *v3d);
+ void v3d_sched_fini(struct v3d_dev *v3d);
+diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
+index 3da4fa49552b..30d5366d6288 100644
+--- a/drivers/gpu/drm/v3d/v3d_sched.c
++++ b/drivers/gpu/drm/v3d/v3d_sched.c
+@@ -87,20 +87,30 @@ v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
+ }
+ }
+
++void
++v3d_performance_query_info_free(struct v3d_performance_query_info *query_info,
++ unsigned int count)
++{
++ if (query_info->queries) {
++ unsigned int i;
++
++ for (i = 0; i < count; i++)
++ drm_syncobj_put(query_info->queries[i].syncobj);
++
++ kvfree(query_info->queries);
++ }
++}
++
+ static void
+ v3d_cpu_job_free(struct drm_sched_job *sched_job)
+ {
+ struct v3d_cpu_job *job = to_cpu_job(sched_job);
+- struct v3d_performance_query_info *performance_query = &job->performance_query;
+
+ v3d_timestamp_query_info_free(&job->timestamp_query,
+ job->timestamp_query.count);
+
+- if (performance_query->queries) {
+- for (int i = 0; i < performance_query->count; i++)
+- drm_syncobj_put(performance_query->queries[i].syncobj);
+- kvfree(performance_query->queries);
+- }
++ v3d_performance_query_info_free(&job->performance_query,
++ job->performance_query.count);
+
+ v3d_job_cleanup(&job->base);
+ }
+diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
+index 121bf1314b80..50be4e8a7512 100644
+--- a/drivers/gpu/drm/v3d/v3d_submit.c
++++ b/drivers/gpu/drm/v3d/v3d_submit.c
+@@ -640,6 +640,8 @@ v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
+ u32 __user *syncs;
+ u64 __user *kperfmon_ids;
+ struct drm_v3d_reset_performance_query reset;
++ unsigned int i, j;
++ int err;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+@@ -668,39 +670,43 @@ v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
+ syncs = u64_to_user_ptr(reset.syncs);
+ kperfmon_ids = u64_to_user_ptr(reset.kperfmon_ids);
+
+- for (int i = 0; i < reset.count; i++) {
++ for (i = 0; i < reset.count; i++) {
+ u32 sync;
+ u64 ids;
+ u32 __user *ids_pointer;
+ u32 id;
+
+ if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+- kvfree(job->performance_query.queries);
+- return -EFAULT;
++ err = -EFAULT;
++ goto error;
+ }
+
+- job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+-
+ if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
+- kvfree(job->performance_query.queries);
+- return -EFAULT;
++ err = -EFAULT;
++ goto error;
+ }
+
+ ids_pointer = u64_to_user_ptr(ids);
+
+- for (int j = 0; j < reset.nperfmons; j++) {
++ for (j = 0; j < reset.nperfmons; j++) {
+ if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
+- kvfree(job->performance_query.queries);
+- return -EFAULT;
++ err = -EFAULT;
++ goto error;
+ }
+
+ job->performance_query.queries[i].kperfmon_ids[j] = id;
+ }
++
++ job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+ }
+ job->performance_query.count = reset.count;
+ job->performance_query.nperfmons = reset.nperfmons;
+
+ return 0;
++
++error:
++ v3d_performance_query_info_free(&job->performance_query, i);
++ return err;
+ }
+
+ static int
+@@ -711,6 +717,8 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
+ u32 __user *syncs;
+ u64 __user *kperfmon_ids;
+ struct drm_v3d_copy_performance_query copy;
++ unsigned int i, j;
++ int err;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+@@ -742,34 +750,34 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
+ syncs = u64_to_user_ptr(copy.syncs);
+ kperfmon_ids = u64_to_user_ptr(copy.kperfmon_ids);
+
+- for (int i = 0; i < copy.count; i++) {
++ for (i = 0; i < copy.count; i++) {
+ u32 sync;
+ u64 ids;
+ u32 __user *ids_pointer;
+ u32 id;
+
+ if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+- kvfree(job->performance_query.queries);
+- return -EFAULT;
++ err = -EFAULT;
++ goto error;
+ }
+
+- job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+-
+ if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
+- kvfree(job->performance_query.queries);
+- return -EFAULT;
++ err = -EFAULT;
++ goto error;
+ }
+
+ ids_pointer = u64_to_user_ptr(ids);
+
+- for (int j = 0; j < copy.nperfmons; j++) {
++ for (j = 0; j < copy.nperfmons; j++) {
+ if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
+- kvfree(job->performance_query.queries);
+- return -EFAULT;
++ err = -EFAULT;
++ goto error;
+ }
+
+ job->performance_query.queries[i].kperfmon_ids[j] = id;
+ }
++
++ job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+ }
+ job->performance_query.count = copy.count;
+ job->performance_query.nperfmons = copy.nperfmons;
+@@ -782,6 +790,10 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
+ job->copy.stride = copy.stride;
+
+ return 0;
++
++error:
++ v3d_performance_query_info_free(&job->performance_query, i);
++ return err;
+ }
+
+ /* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
+--
+2.46.0
+
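
The unwind pattern this fix introduces, as a standalone sketch: on a
mid-loop failure, release exactly the references taken so far (the loop
index i), then free the array; this is what the exported *_info_free()
helper enables. All names below are illustrative userspace stand-ins.

    #include <stdio.h>
    #include <stdlib.h>

    struct syncobj { int refs; };

    static void put(struct syncobj *s) { if (s) s->refs--; }

    static void query_info_free(struct syncobj **queries, unsigned int count)
    {
        if (queries) {
            for (unsigned int i = 0; i < count; i++)
                put(queries[i]);
            free(queries);
        }
    }

    static int get_params(struct syncobj **src, unsigned int n, int fail_at)
    {
        struct syncobj **queries = calloc(n, sizeof(*queries));
        unsigned int i;

        if (!queries)
            return -1;
        for (i = 0; i < n; i++) {
            if ((int)i == fail_at)
                goto error;     /* e.g. copy_from_user() failed */
            src[i]->refs++;     /* stand-in for drm_syncobj_find() */
            queries[i] = src[i];
        }
        return 0;
    error:
        query_info_free(queries, i);  /* put only the i refs taken so far */
        return -1;
    }

    int main(void)
    {
        struct syncobj a = {0}, b = {0}, c = {0};
        struct syncobj *src[] = { &a, &b, &c };

        get_params(src, 3, 2);
        printf("refs after failed setup: %d %d %d\n", a.refs, b.refs, c.refs);
        return 0;
    }
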
--- /dev/null
+From 0e50fcc20bd87584840266e8004f9064a8985b4f Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Date: Thu, 11 Jul 2024 14:53:31 +0100
+Subject: drm/v3d: Fix potential memory leak in the timestamp extension
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+commit 0e50fcc20bd87584840266e8004f9064a8985b4f upstream.
+
+If fetching of userspace memory fails during the main loop, all drm sync
+objs looked up until that point will be leaked because of the missing
+drm_syncobj_put.
+
+Fix it by exporting and using a common cleanup helper.
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Fixes: 9ba0ff3e083f ("drm/v3d: Create a CPU job extension for the timestamp query job")
+Cc: Maíra Canal <mcanal@igalia.com>
+Cc: Iago Toral Quiroga <itoral@igalia.com>
+Cc: stable@vger.kernel.org # v6.8+
+Reviewed-by: Maíra Canal <mcanal@igalia.com>
+Signed-off-by: Maíra Canal <mcanal@igalia.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240711135340.84617-3-tursulin@igalia.com
+(cherry picked from commit 753ce4fea62182c77e1691ab4f9022008f25b62e)
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/v3d/v3d_drv.h | 2 ++
+ drivers/gpu/drm/v3d/v3d_sched.c | 22 +++++++++++-----
+ drivers/gpu/drm/v3d/v3d_submit.c | 43 ++++++++++++++++++++++----------
+ 3 files changed, 48 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
+index a2c516fe6d79..c46eed35d26b 100644
+--- a/drivers/gpu/drm/v3d/v3d_drv.h
++++ b/drivers/gpu/drm/v3d/v3d_drv.h
+@@ -556,6 +556,8 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo);
+ void v3d_mmu_remove_ptes(struct v3d_bo *bo);
+
+ /* v3d_sched.c */
++void v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
++ unsigned int count);
+ void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue);
+ int v3d_sched_init(struct v3d_dev *v3d);
+ void v3d_sched_fini(struct v3d_dev *v3d);
+diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
+index 7cd8c335cd9b..3da4fa49552b 100644
+--- a/drivers/gpu/drm/v3d/v3d_sched.c
++++ b/drivers/gpu/drm/v3d/v3d_sched.c
+@@ -73,18 +73,28 @@ v3d_sched_job_free(struct drm_sched_job *sched_job)
+ v3d_job_cleanup(job);
+ }
+
++void
++v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
++ unsigned int count)
++{
++ if (query_info->queries) {
++ unsigned int i;
++
++ for (i = 0; i < count; i++)
++ drm_syncobj_put(query_info->queries[i].syncobj);
++
++ kvfree(query_info->queries);
++ }
++}
++
+ static void
+ v3d_cpu_job_free(struct drm_sched_job *sched_job)
+ {
+ struct v3d_cpu_job *job = to_cpu_job(sched_job);
+- struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
+ struct v3d_performance_query_info *performance_query = &job->performance_query;
+
+- if (timestamp_query->queries) {
+- for (int i = 0; i < timestamp_query->count; i++)
+- drm_syncobj_put(timestamp_query->queries[i].syncobj);
+- kvfree(timestamp_query->queries);
+- }
++ v3d_timestamp_query_info_free(&job->timestamp_query,
++ job->timestamp_query.count);
+
+ if (performance_query->queries) {
+ for (int i = 0; i < performance_query->count; i++)
+diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
+index 263fefc1d04f..121bf1314b80 100644
+--- a/drivers/gpu/drm/v3d/v3d_submit.c
++++ b/drivers/gpu/drm/v3d/v3d_submit.c
+@@ -452,6 +452,8 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
+ {
+ u32 __user *offsets, *syncs;
+ struct drm_v3d_timestamp_query timestamp;
++ unsigned int i;
++ int err;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+@@ -480,19 +482,19 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
+ offsets = u64_to_user_ptr(timestamp.offsets);
+ syncs = u64_to_user_ptr(timestamp.syncs);
+
+- for (int i = 0; i < timestamp.count; i++) {
++ for (i = 0; i < timestamp.count; i++) {
+ u32 offset, sync;
+
+ if (copy_from_user(&offset, offsets++, sizeof(offset))) {
+- kvfree(job->timestamp_query.queries);
+- return -EFAULT;
++ err = -EFAULT;
++ goto error;
+ }
+
+ job->timestamp_query.queries[i].offset = offset;
+
+ if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+- kvfree(job->timestamp_query.queries);
+- return -EFAULT;
++ err = -EFAULT;
++ goto error;
+ }
+
+ job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+@@ -500,6 +502,10 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
+ job->timestamp_query.count = timestamp.count;
+
+ return 0;
++
++error:
++ v3d_timestamp_query_info_free(&job->timestamp_query, i);
++ return err;
+ }
+
+ static int
+@@ -509,6 +515,8 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
+ {
+ u32 __user *syncs;
+ struct drm_v3d_reset_timestamp_query reset;
++ unsigned int i;
++ int err;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+@@ -533,14 +541,14 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
+
+ syncs = u64_to_user_ptr(reset.syncs);
+
+- for (int i = 0; i < reset.count; i++) {
++ for (i = 0; i < reset.count; i++) {
+ u32 sync;
+
+ job->timestamp_query.queries[i].offset = reset.offset + 8 * i;
+
+ if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+- kvfree(job->timestamp_query.queries);
+- return -EFAULT;
++ err = -EFAULT;
++ goto error;
+ }
+
+ job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+@@ -548,6 +556,10 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
+ job->timestamp_query.count = reset.count;
+
+ return 0;
++
++error:
++ v3d_timestamp_query_info_free(&job->timestamp_query, i);
++ return err;
+ }
+
+ /* Get data for the copy timestamp query results job submission. */
+@@ -558,7 +570,8 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
+ {
+ u32 __user *offsets, *syncs;
+ struct drm_v3d_copy_timestamp_query copy;
+- int i;
++ unsigned int i;
++ int err;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+@@ -591,15 +604,15 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
+ u32 offset, sync;
+
+ if (copy_from_user(&offset, offsets++, sizeof(offset))) {
+- kvfree(job->timestamp_query.queries);
+- return -EFAULT;
++ err = -EFAULT;
++ goto error;
+ }
+
+ job->timestamp_query.queries[i].offset = offset;
+
+ if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+- kvfree(job->timestamp_query.queries);
+- return -EFAULT;
++ err = -EFAULT;
++ goto error;
+ }
+
+ job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+@@ -613,6 +626,10 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
+ job->copy.stride = copy.stride;
+
+ return 0;
++
++error:
++ v3d_timestamp_query_info_free(&job->timestamp_query, i);
++ return err;
+ }
+
+ static int
+--
+2.46.0
+
--- /dev/null
+From 6ce9efd12ae81cf46bf44eb0348594558dfbb9d2 Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Date: Thu, 11 Jul 2024 14:53:30 +0100
+Subject: drm/v3d: Prevent out of bounds access in performance query extensions
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+commit 6ce9efd12ae81cf46bf44eb0348594558dfbb9d2 upstream.
+
+Check that the number of perfmons userspace is passing in the copy and
+reset extensions is not greater than the internal kernel storage into
+which the ids will be copied.
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Fixes: bae7cb5d6800 ("drm/v3d: Create a CPU job extension for the reset performance query job")
+Cc: Maíra Canal <mcanal@igalia.com>
+Cc: Iago Toral Quiroga <itoral@igalia.com>
+Cc: stable@vger.kernel.org # v6.8+
+Reviewed-by: Iago Toral Quiroga <itoral@igalia.com>
+Reviewed-by: Maíra Canal <mcanal@igalia.com>
+Signed-off-by: Maíra Canal <mcanal@igalia.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240711135340.84617-2-tursulin@igalia.com
+(cherry picked from commit f32b5128d2c440368b5bf3a7a356823e235caabb)
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/v3d/v3d_submit.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/gpu/drm/v3d/v3d_submit.c
++++ b/drivers/gpu/drm/v3d/v3d_submit.c
+@@ -637,6 +637,9 @@ v3d_get_cpu_reset_performance_params(str
+ if (copy_from_user(&reset, ext, sizeof(reset)))
+ return -EFAULT;
+
++ if (reset.nperfmons > V3D_MAX_PERFMONS)
++ return -EINVAL;
++
+ job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;
+
+ job->performance_query.queries = kvmalloc_array(reset.count,
+@@ -708,6 +711,9 @@ v3d_get_cpu_copy_performance_query_param
+ if (copy.pad)
+ return -EINVAL;
+
++ if (copy.nperfmons > V3D_MAX_PERFMONS)
++ return -EINVAL;
++
+ job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY;
+
+ job->performance_query.queries = kvmalloc_array(copy.count,
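
The bounds check being added, reduced to a standalone sketch: a
user-supplied element count must be validated against the fixed-size
kernel-side array before it is used as a copy bound, otherwise the ids
copy writes out of bounds. MAX_PERFMONS below is an illustrative constant
standing in for V3D_MAX_PERFMONS.

    #include <stdio.h>
    #include <string.h>

    #define MAX_PERFMONS 8

    struct query { unsigned int kperfmon_ids[MAX_PERFMONS]; };

    static int set_ids(struct query *q, const unsigned int *ids,
                       unsigned int nperfmons)
    {
        if (nperfmons > MAX_PERFMONS)
            return -1;          /* reject before writing anything */
        memcpy(q->kperfmon_ids, ids, nperfmons * sizeof(*ids));
        return 0;
    }

    int main(void)
    {
        struct query q;
        unsigned int ids[16] = { 0 };

        printf("%d\n", set_ids(&q, ids, 16)); /* -1: would overflow */
        printf("%d\n", set_ids(&q, ids, 4));  /*  0: within bounds */
        return 0;
    }
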
--- /dev/null
+From 4ecc24a84d7e0254efd150ec23e0b89638386516 Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Date: Thu, 11 Jul 2024 14:53:34 +0100
+Subject: drm/v3d: Validate passed in drm syncobj handles in the performance extension
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+commit 4ecc24a84d7e0254efd150ec23e0b89638386516 upstream.
+
+If userspace provides an unknown or invalid handle anywhere in the handle
+array the rest of the driver will not handle that well.
+
+Fix it by checking that the handle was looked up successfully, or
+otherwise failing the extension by jumping into the existing unwind.
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Fixes: bae7cb5d6800 ("drm/v3d: Create a CPU job extension for the reset performance query job")
+Cc: Maíra Canal <mcanal@igalia.com>
+Cc: Iago Toral Quiroga <itoral@igalia.com>
+Cc: stable@vger.kernel.org # v6.8+
+Reviewed-by: Maíra Canal <mcanal@igalia.com>
+Signed-off-by: Maíra Canal <mcanal@igalia.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240711135340.84617-6-tursulin@igalia.com
+(cherry picked from commit a546b7e4d73c23838d7e4d2c92882b3ca902d213)
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/v3d/v3d_submit.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
+index 9a3e32075ebe..4cdfabbf4964 100644
+--- a/drivers/gpu/drm/v3d/v3d_submit.c
++++ b/drivers/gpu/drm/v3d/v3d_submit.c
+@@ -710,6 +710,10 @@ v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
+ }
+
+ job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
++ if (!job->performance_query.queries[i].syncobj) {
++ err = -ENOENT;
++ goto error;
++ }
+ }
+ job->performance_query.count = reset.count;
+ job->performance_query.nperfmons = reset.nperfmons;
+@@ -790,6 +794,10 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
+ }
+
+ job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
++ if (!job->performance_query.queries[i].syncobj) {
++ err = -ENOENT;
++ goto error;
++ }
+ }
+ job->performance_query.count = copy.count;
+ job->performance_query.nperfmons = copy.nperfmons;
+--
+2.46.0
+
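drm_syncobj_find() returns the syncobj on success and NULL when the handle does not resolve, so the result has to be tested before moving on. A self-contained sketch of the check-then-unwind shape used in both hunks; lookup() and release_entries() are hypothetical stand-ins, not driver functions:

#include <errno.h>
#include <stddef.h>

struct query_slot { void *syncobj; };

/* Stand-ins for drm_syncobj_find() and the existing unwind helper. */
static void *lookup(unsigned int handle)
{
	return handle ? (void *)1 : NULL;
}

static void release_entries(struct query_slot *slots, int count)
{
	(void)slots; (void)count;	/* would drop the taken references */
}

static int resolve_handles(struct query_slot *slots,
			   const unsigned int *handles, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		slots[i].syncobj = lookup(handles[i]);
		if (!slots[i].syncobj) {
			/* Drop the i references taken so far, then fail. */
			release_entries(slots, i);
			return -ENOENT;
		}
	}
	return 0;
}
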
--- /dev/null
+From 023d22e8bb0cdd6900382ad1ed06df3b6c2ea791 Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Date: Thu, 11 Jul 2024 14:53:33 +0100
+Subject: drm/v3d: Validate passed in drm syncobj handles in the timestamp extension
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+commit 023d22e8bb0cdd6900382ad1ed06df3b6c2ea791 upstream.
+
+If userspace provides an unknown or invalid handle anywhere in the
+handle array, the rest of the driver will not handle that well.
+
+Fix it by checking that each handle was looked up successfully, or
+otherwise failing the extension by jumping into the existing unwind.
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Fixes: 9ba0ff3e083f ("drm/v3d: Create a CPU job extension for the timestamp query job")
+Cc: Maíra Canal <mcanal@igalia.com>
+Cc: Iago Toral Quiroga <itoral@igalia.com>
+Cc: stable@vger.kernel.org # v6.8+
+Reviewed-by: Maíra Canal <mcanal@igalia.com>
+Signed-off-by: Maíra Canal <mcanal@igalia.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240711135340.84617-5-tursulin@igalia.com
+(cherry picked from commit 8d1276d1b8f738c3afe1457d4dff5cc66fc848a3)
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/v3d/v3d_submit.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
+index 50be4e8a7512..9a3e32075ebe 100644
+--- a/drivers/gpu/drm/v3d/v3d_submit.c
++++ b/drivers/gpu/drm/v3d/v3d_submit.c
+@@ -498,6 +498,10 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
+ }
+
+ job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
++ if (!job->timestamp_query.queries[i].syncobj) {
++ err = -ENOENT;
++ goto error;
++ }
+ }
+ job->timestamp_query.count = timestamp.count;
+
+@@ -552,6 +556,10 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
+ }
+
+ job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
++ if (!job->timestamp_query.queries[i].syncobj) {
++ err = -ENOENT;
++ goto error;
++ }
+ }
+ job->timestamp_query.count = reset.count;
+
+@@ -616,6 +624,10 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
+ }
+
+ job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
++ if (!job->timestamp_query.queries[i].syncobj) {
++ err = -ENOENT;
++ goto error;
++ }
+ }
+ job->timestamp_query.count = copy.count;
+
+--
+2.46.0
+
--- /dev/null
+From 445d336cd15860f1efb441e6d694f829fbf679eb Mon Sep 17 00:00:00 2001
+From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Date: Sun, 14 Jul 2024 23:50:09 +0300
+Subject: drm/virtio: Fix type of dma-fence context variable
+
+From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+
+commit 445d336cd15860f1efb441e6d694f829fbf679eb upstream.
+
+The type of a DMA fence context is u64, but the fence-waiting code uses
+u32 for the context variable; fix it.
+
+Fixes: e4812ab8e6b1 ("drm/virtio: Refactor and optimize job submission code path")
+Cc: <stable@vger.kernel.org> # v6.4+
+Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Reviewed-by: Rob Clark <robdclark@gmail.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240714205009.3408298-1-dmitry.osipenko@collabora.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/virtio/virtgpu_submit.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/virtio/virtgpu_submit.c
++++ b/drivers/gpu/drm/virtio/virtgpu_submit.c
+@@ -48,7 +48,7 @@ struct virtio_gpu_submit {
+ static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit,
+ struct dma_fence *in_fence)
+ {
+- u32 context = submit->fence_ctx + submit->ring_idx;
++ u64 context = submit->fence_ctx + submit->ring_idx;
+
+ if (dma_fence_match_context(in_fence, context))
+ return 0;
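The bug class here is silent integer truncation: dma_fence contexts are allocated from a 64-bit counter and dma_fence_match_context() compares a u64, so storing the context in a u32 drops the high bits once contexts grow past 32 bits. A standalone demonstration of the mismatch, using a hypothetical context value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t fence_ctx = 0x100000000ULL; /* hypothetical 33-bit context */
	uint32_t context32 = fence_ctx;      /* what the buggy code stored */
	uint64_t context64 = fence_ctx;      /* what the fix stores */

	/* dma_fence_match_context() compares 64-bit values. */
	printf("u64 matches: %d\n", context64 == fence_ctx);           /* 1 */
	printf("u32 matches: %d\n", (uint64_t)context32 == fence_ctx); /* 0 */
	return 0;
}
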
--- /dev/null
+From e58337100721f3cc0c7424a18730e4f39844934f Mon Sep 17 00:00:00 2001
+From: Zack Rusin <zack.rusin@broadcom.com>
+Date: Mon, 22 Jul 2024 14:41:13 -0400
+Subject: drm/vmwgfx: Fix a deadlock in dma buf fence polling
+
+From: Zack Rusin <zack.rusin@broadcom.com>
+
+commit e58337100721f3cc0c7424a18730e4f39844934f upstream.
+
+Introduce a version of the fence ops that on release doesn't remove
+the fence from the pending list, and thus doesn't require a lock to
+fix poll->fence wait->fence unref deadlocks.
+
+vmwgfx overrides the wait callback to iterate over the list of all
+fences and update their status; to do that it holds a lock to prevent
+list modifications from other threads. The fence destroy callback
+both deletes the fence and removes it from the list of pending
+fences, for which it holds a lock.
+
+The dma-buf polling callback unrefs a fence after it has been signaled:
+the poll calls the wait, which signals the fences, which are then being
+destroyed. The destruction tries to acquire the lock on the pending
+fences list, which it can never get because it is held by the wait from
+which it was called.
+
+This is an old bug, but not a lot of userspace apps were using dma-buf
+polling interfaces. Fixing it in particular resolves KDE
+stalls/deadlocks.
+
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Fixes: 2298e804e96e ("drm/vmwgfx: rework to new fence interface, v2")
+Cc: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v6.2+
+Reviewed-by: Maaz Mombasawala <maaz.mombasawala@broadcom.com>
+Reviewed-by: Martin Krastev <martin.krastev@broadcom.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240722184313.181318-2-zack.rusin@broadcom.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 17 +++++++----------
+ 1 file changed, 7 insertions(+), 10 deletions(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+@@ -32,7 +32,6 @@
+ #define VMW_FENCE_WRAP (1 << 31)
+
+ struct vmw_fence_manager {
+- int num_fence_objects;
+ struct vmw_private *dev_priv;
+ spinlock_t lock;
+ struct list_head fence_list;
+@@ -124,13 +123,13 @@ static void vmw_fence_obj_destroy(struct
+ {
+ struct vmw_fence_obj *fence =
+ container_of(f, struct vmw_fence_obj, base);
+-
+ struct vmw_fence_manager *fman = fman_from_fence(fence);
+
+- spin_lock(&fman->lock);
+- list_del_init(&fence->head);
+- --fman->num_fence_objects;
+- spin_unlock(&fman->lock);
++ if (!list_empty(&fence->head)) {
++ spin_lock(&fman->lock);
++ list_del_init(&fence->head);
++ spin_unlock(&fman->lock);
++ }
+ fence->destroy(fence);
+ }
+
+@@ -257,7 +256,6 @@ static const struct dma_fence_ops vmw_fe
+ .release = vmw_fence_obj_destroy,
+ };
+
+-
+ /*
+ * Execute signal actions on fences recently signaled.
+ * This is done from a workqueue so we don't have to execute
+@@ -355,7 +353,6 @@ static int vmw_fence_obj_init(struct vmw
+ goto out_unlock;
+ }
+ list_add_tail(&fence->head, &fman->fence_list);
+- ++fman->num_fence_objects;
+
+ out_unlock:
+ spin_unlock(&fman->lock);
+@@ -403,7 +400,7 @@ static bool vmw_fence_goal_new_locked(st
+ u32 passed_seqno)
+ {
+ u32 goal_seqno;
+- struct vmw_fence_obj *fence;
++ struct vmw_fence_obj *fence, *next_fence;
+
+ if (likely(!fman->seqno_valid))
+ return false;
+@@ -413,7 +410,7 @@ static bool vmw_fence_goal_new_locked(st
+ return false;
+
+ fman->seqno_valid = false;
+- list_for_each_entry(fence, &fman->fence_list, head) {
++ list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
+ if (!list_empty(&fence->seq_passed_actions)) {
+ fman->seqno_valid = true;
+ vmw_fence_goal_write(fman->dev_priv,
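The cycle being broken is lock recursion on fman->lock: the custom wait holds the lock while signalling fences, signalling can drop the last reference, and the release callback then tried to take the same lock to unlink the fence. After the change, release only takes the lock when the fence is still linked, which is never the case when it is freed from the signalling path. A simplified userspace rendering of the fixed release path; the types and names are stand-ins, not the vmwgfx code:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct fence_manager {
	pthread_mutex_t lock;	/* stand-in for fman->lock */
};

struct fence {
	struct fence_manager *fman;
	bool linked;	/* stand-in for !list_empty(&fence->head) */
};

static void fence_release(struct fence *f)
{
	/*
	 * The signalling path unlinks the fence while already holding
	 * fman->lock, so by the time the final unref runs from there
	 * the fence is no longer linked and no lock is needed; that is
	 * what avoids the poll -> wait -> unref deadlock.
	 */
	if (f->linked) {
		pthread_mutex_lock(&f->fman->lock);
		f->linked = false;
		pthread_mutex_unlock(&f->fman->lock);
	}
	free(f);
}
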
--- /dev/null
+From d6667f0ddf46c671d379cd5fe66ce0a54d2a743a Mon Sep 17 00:00:00 2001
+From: Zack Rusin <zack.rusin@broadcom.com>
+Date: Mon, 22 Jul 2024 14:41:15 -0400
+Subject: drm/vmwgfx: Fix handling of dumb buffers
+
+From: Zack Rusin <zack.rusin@broadcom.com>
+
+commit d6667f0ddf46c671d379cd5fe66ce0a54d2a743a upstream.
+
+Dumb buffers can be used in kms but also through prime with gallium's
+resource_from_handle. In the second case the dumb buffers can be
+rendered by the GPU, whereas with the regular DRM kms interfaces they
+are mapped and written to by the CPU. Because the same buffer can
+be written to by both the GPU and the CPU, vmwgfx needs to use
+vmw_surface (an object which properly tracks the dirty state of the
+guest and gpu memory) instead of vmw_bo (which is just guest-side
+memory).
+
+Furthermore, the dumb buffer handles are expected to be gem objects by
+a lot of userspace.
+
+Make vmwgfx accept gem handles in prime and kms but internally switch
+to vmw_surfaces to properly track the dirty state of the objects between
+the GPU and CPU.
+
+This fixes new kwin and kde on wayland.
+
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Fixes: b32233acceff ("drm/vmwgfx: Fix prime import/export")
+Cc: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v6.9+
+Reviewed-by: Maaz Mombasawala <maaz.mombasawala@broadcom.com>
+Reviewed-by: Martin Krastev <martin.krastev@broadcom.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240722184313.181318-4-zack.rusin@broadcom.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/vmwgfx/vmw_surface_cache.h | 10
+ drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 127 ++++---
+ drivers/gpu/drm/vmwgfx/vmwgfx_bo.h | 15
+ drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 40 +-
+ drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 502 +++++++++++------------------
+ drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 17
+ drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 14
+ drivers/gpu/drm/vmwgfx/vmwgfx_prime.c | 32 +
+ drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 27 +
+ drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 33 +
+ drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 145 +++-----
+ drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 280 +++++++++++++++-
+ 12 files changed, 740 insertions(+), 502 deletions(-)
+
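At the center of the rework is vmw_user_object, a small wrapper that holds either a surface or a buffer and funnels the KMS and prime consumers through accessors instead of open-coded surf/bo pairs; a dumb buffer carries its dirty-tracking surface on the buffer itself. A reduced sketch of that shape, with the struct bodies abridged to the fields the sketch needs and the accessor names simplified:

#include <stdbool.h>
#include <stddef.h>

struct vmw_surface;	/* opaque here */

struct vmw_bo {
	bool is_dumb;
	struct vmw_surface *dumb_surface;	/* set for dumb buffers */
};

struct vmw_user_object {
	struct vmw_surface *surface;
	struct vmw_bo *buffer;
};

/* Mirrors vmw_user_object_is_null() from the diff below. */
static bool uo_is_null(const struct vmw_user_object *uo)
{
	return !uo->buffer && !uo->surface;
}

/* Mirrors vmw_user_object_surface() from the diff below. */
static struct vmw_surface *uo_surface(struct vmw_user_object *uo)
{
	/* A dumb buffer's dirty-tracking surface lives on the buffer. */
	if (uo->buffer)
		return uo->buffer->dumb_surface;
	return uo->surface;
}
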
+--- a/drivers/gpu/drm/vmwgfx/vmw_surface_cache.h
++++ b/drivers/gpu/drm/vmwgfx/vmw_surface_cache.h
+@@ -1,6 +1,8 @@
++/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+ /**********************************************************
+- * Copyright 2021 VMware, Inc.
+- * SPDX-License-Identifier: GPL-2.0 OR MIT
++ *
++ * Copyright (c) 2021-2024 Broadcom. All Rights Reserved. The term
++ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+@@ -31,6 +33,10 @@
+
+ #include <drm/vmwgfx_drm.h>
+
++#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) ((svga3d_flags) >> 32)
++#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
++ ((svga3d_flags) & ((uint64_t)U32_MAX))
++
+ static inline u32 clamped_umul32(u32 a, u32 b)
+ {
+ uint64_t tmp = (uint64_t) a*b;
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+@@ -1,8 +1,8 @@
+ // SPDX-License-Identifier: GPL-2.0 OR MIT
+ /**************************************************************************
+ *
+- * Copyright © 2011-2023 VMware, Inc., Palo Alto, CA., USA
+- * All Rights Reserved.
++ * Copyright (c) 2011-2024 Broadcom. All Rights Reserved. The term
++ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+@@ -28,15 +28,39 @@
+
+ #include "vmwgfx_bo.h"
+ #include "vmwgfx_drv.h"
+-
++#include "vmwgfx_resource_priv.h"
+
+ #include <drm/ttm/ttm_placement.h>
+
+ static void vmw_bo_release(struct vmw_bo *vbo)
+ {
++ struct vmw_resource *res;
++
+ WARN_ON(vbo->tbo.base.funcs &&
+ kref_read(&vbo->tbo.base.refcount) != 0);
+ vmw_bo_unmap(vbo);
++
++ xa_destroy(&vbo->detached_resources);
++ WARN_ON(vbo->is_dumb && !vbo->dumb_surface);
++ if (vbo->is_dumb && vbo->dumb_surface) {
++ res = &vbo->dumb_surface->res;
++ WARN_ON(vbo != res->guest_memory_bo);
++ WARN_ON(!res->guest_memory_bo);
++ if (res->guest_memory_bo) {
++ /* Reserve and switch the backing mob. */
++ mutex_lock(&res->dev_priv->cmdbuf_mutex);
++ (void)vmw_resource_reserve(res, false, true);
++ vmw_resource_mob_detach(res);
++ if (res->coherent)
++ vmw_bo_dirty_release(res->guest_memory_bo);
++ res->guest_memory_bo = NULL;
++ res->guest_memory_offset = 0;
++ vmw_resource_unreserve(res, false, false, false, NULL,
++ 0);
++ mutex_unlock(&res->dev_priv->cmdbuf_mutex);
++ }
++ vmw_surface_unreference(&vbo->dumb_surface);
++ }
+ drm_gem_object_release(&vbo->tbo.base);
+ }
+
+@@ -326,6 +350,11 @@ void vmw_bo_pin_reserved(struct vmw_bo *
+ */
+ void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
+ {
++ return vmw_bo_map_and_cache_size(vbo, vbo->tbo.base.size);
++}
++
++void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size)
++{
+ struct ttm_buffer_object *bo = &vbo->tbo;
+ bool not_used;
+ void *virtual;
+@@ -335,9 +364,10 @@ void *vmw_bo_map_and_cache(struct vmw_bo
+ if (virtual)
+ return virtual;
+
+- ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map);
++ ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vbo->map);
+ if (ret)
+- DRM_ERROR("Buffer object map failed: %d.\n", ret);
++ DRM_ERROR("Buffer object map failed: %d (size: bo = %zu, map = %zu).\n",
++ ret, bo->base.size, size);
+
+	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
+ }
+@@ -390,6 +420,7 @@ static int vmw_bo_init(struct vmw_privat
+ BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
+ vmw_bo->tbo.priority = 3;
+ vmw_bo->res_tree = RB_ROOT;
++ xa_init(&vmw_bo->detached_resources);
+
+ params->size = ALIGN(params->size, PAGE_SIZE);
+ drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);
+@@ -654,52 +685,6 @@ void vmw_bo_fence_single(struct ttm_buff
+ dma_fence_put(&fence->base);
+ }
+
+-
+-/**
+- * vmw_dumb_create - Create a dumb kms buffer
+- *
+- * @file_priv: Pointer to a struct drm_file identifying the caller.
+- * @dev: Pointer to the drm device.
+- * @args: Pointer to a struct drm_mode_create_dumb structure
+- * Return: Zero on success, negative error code on failure.
+- *
+- * This is a driver callback for the core drm create_dumb functionality.
+- * Note that this is very similar to the vmw_bo_alloc ioctl, except
+- * that the arguments have a different format.
+- */
+-int vmw_dumb_create(struct drm_file *file_priv,
+- struct drm_device *dev,
+- struct drm_mode_create_dumb *args)
+-{
+- struct vmw_private *dev_priv = vmw_priv(dev);
+- struct vmw_bo *vbo;
+- int cpp = DIV_ROUND_UP(args->bpp, 8);
+- int ret;
+-
+- switch (cpp) {
+- case 1: /* DRM_FORMAT_C8 */
+- case 2: /* DRM_FORMAT_RGB565 */
+- case 4: /* DRM_FORMAT_XRGB8888 */
+- break;
+- default:
+- /*
+- * Dumb buffers don't allow anything else.
+- * This is tested via IGT's dumb_buffers
+- */
+- return -EINVAL;
+- }
+-
+- args->pitch = args->width * cpp;
+- args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
+-
+- ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
+- args->size, &args->handle,
+- &vbo);
+- /* drop reference from allocate - handle holds it now */
+- drm_gem_object_put(&vbo->tbo.base);
+- return ret;
+-}
+-
+ /**
+ * vmw_bo_swap_notify - swapout notify callback.
+ *
+@@ -853,3 +838,43 @@ void vmw_bo_placement_set_default_accele
+
+ vmw_bo_placement_set(bo, domain, domain);
+ }
++
++void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
++{
++ xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL);
++}
++
++void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
++{
++ xa_erase(&vbo->detached_resources, (unsigned long)res);
++}
++
++struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo)
++{
++ unsigned long index;
++ struct vmw_resource *res = NULL;
++ struct vmw_surface *surf = NULL;
++ struct rb_node *rb_itr = vbo->res_tree.rb_node;
++
++ if (vbo->is_dumb && vbo->dumb_surface) {
++ res = &vbo->dumb_surface->res;
++ goto out;
++ }
++
++ xa_for_each(&vbo->detached_resources, index, res) {
++ if (res->func->res_type == vmw_res_surface)
++ goto out;
++ }
++
++ for (rb_itr = rb_first(&vbo->res_tree); rb_itr;
++ rb_itr = rb_next(rb_itr)) {
++ res = rb_entry(rb_itr, struct vmw_resource, mob_node);
++ if (res->func->res_type == vmw_res_surface)
++ goto out;
++ }
++
++out:
++ if (res)
++ surf = vmw_res_to_srf(res);
++ return surf;
++}
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+@@ -1,7 +1,8 @@
+ /* SPDX-License-Identifier: GPL-2.0 OR MIT */
+ /**************************************************************************
+ *
+- * Copyright 2023 VMware, Inc., Palo Alto, CA., USA
++ * Copyright (c) 2023-2024 Broadcom. All Rights Reserved. The term
++ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+@@ -35,11 +36,13 @@
+
+ #include <linux/rbtree_types.h>
+ #include <linux/types.h>
++#include <linux/xarray.h>
+
+ struct vmw_bo_dirty;
+ struct vmw_fence_obj;
+ struct vmw_private;
+ struct vmw_resource;
++struct vmw_surface;
+
+ enum vmw_bo_domain {
+ VMW_BO_DOMAIN_SYS = BIT(0),
+@@ -85,11 +88,15 @@ struct vmw_bo {
+
+ struct rb_root res_tree;
+ u32 res_prios[TTM_MAX_BO_PRIORITY];
++ struct xarray detached_resources;
+
+ atomic_t cpu_writers;
+ /* Not ref-counted. Protected by binding_mutex */
+ struct vmw_resource *dx_query_ctx;
+ struct vmw_bo_dirty *dirty;
++
++ bool is_dumb;
++ struct vmw_surface *dumb_surface;
+ };
+
+ void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain);
+@@ -124,15 +131,21 @@ void vmw_bo_fence_single(struct ttm_buff
+ struct vmw_fence_obj *fence);
+
+ void *vmw_bo_map_and_cache(struct vmw_bo *vbo);
++void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size);
+ void vmw_bo_unmap(struct vmw_bo *vbo);
+
+ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_resource *mem);
+ void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
+
++void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
++void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
++struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo);
++
+ int vmw_user_bo_lookup(struct drm_file *filp,
+ u32 handle,
+ struct vmw_bo **out);
++
+ /**
+ * vmw_bo_adjust_prio - Adjust the buffer object eviction priority
+ * according to attached resources
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -1,7 +1,8 @@
+ /* SPDX-License-Identifier: GPL-2.0 OR MIT */
+ /**************************************************************************
+ *
+- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
++ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
++ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+@@ -763,6 +764,26 @@ extern int vmw_gmr_bind(struct vmw_priva
+ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
+
+ /**
++ * User handles
++ */
++struct vmw_user_object {
++ struct vmw_surface *surface;
++ struct vmw_bo *buffer;
++};
++
++int vmw_user_object_lookup(struct vmw_private *dev_priv, struct drm_file *filp,
++ u32 handle, struct vmw_user_object *uo);
++struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo);
++void vmw_user_object_unref(struct vmw_user_object *uo);
++bool vmw_user_object_is_null(struct vmw_user_object *uo);
++struct vmw_surface *vmw_user_object_surface(struct vmw_user_object *uo);
++struct vmw_bo *vmw_user_object_buffer(struct vmw_user_object *uo);
++void *vmw_user_object_map(struct vmw_user_object *uo);
++void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size);
++void vmw_user_object_unmap(struct vmw_user_object *uo);
++bool vmw_user_object_is_mapped(struct vmw_user_object *uo);
++
++/**
+ * Resource utilities - vmwgfx_resource.c
+ */
+ struct vmw_user_resource_conv;
+@@ -776,11 +797,6 @@ extern int vmw_resource_validate(struct
+ extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
+ bool no_backup);
+ extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
+-extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+- struct drm_file *filp,
+- uint32_t handle,
+- struct vmw_surface **out_surf,
+- struct vmw_bo **out_buf);
+ extern int vmw_user_resource_lookup_handle(
+ struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+@@ -1057,9 +1073,6 @@ int vmw_kms_suspend(struct drm_device *d
+ int vmw_kms_resume(struct drm_device *dev);
+ void vmw_kms_lost_device(struct drm_device *dev);
+
+-int vmw_dumb_create(struct drm_file *file_priv,
+- struct drm_device *dev,
+- struct drm_mode_create_dumb *args);
+ extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
+ extern void vmw_resource_unpin(struct vmw_resource *res);
+ extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
+@@ -1176,6 +1189,15 @@ extern int vmw_gb_surface_reference_ext_
+ int vmw_gb_surface_define(struct vmw_private *dev_priv,
+ const struct vmw_surface_metadata *req,
+ struct vmw_surface **srf_out);
++struct vmw_surface *vmw_lookup_surface_for_buffer(struct vmw_private *vmw,
++ struct vmw_bo *bo,
++ u32 handle);
++u32 vmw_lookup_surface_handle_for_buffer(struct vmw_private *vmw,
++ struct vmw_bo *bo,
++ u32 handle);
++int vmw_dumb_create(struct drm_file *file_priv,
++ struct drm_device *dev,
++ struct drm_mode_create_dumb *args);
+
+ /*
+ * Shader management - vmwgfx_shader.c
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -1,7 +1,8 @@
+ // SPDX-License-Identifier: GPL-2.0 OR MIT
+ /**************************************************************************
+ *
+- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
++ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
++ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+@@ -193,13 +194,16 @@ static u32 vmw_du_cursor_mob_size(u32 w,
+ */
+ static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
+ {
+- if (vps->surf) {
+- if (vps->surf_mapped)
+- return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
+- return vps->surf->snooper.image;
+- } else if (vps->bo)
+- return vmw_bo_map_and_cache(vps->bo);
+- return NULL;
++ struct vmw_surface *surf;
++
++ if (vmw_user_object_is_null(&vps->uo))
++ return NULL;
++
++ surf = vmw_user_object_surface(&vps->uo);
++ if (surf && !vmw_user_object_is_mapped(&vps->uo))
++ return surf->snooper.image;
++
++ return vmw_user_object_map(&vps->uo);
+ }
+
+ static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
+@@ -536,22 +540,16 @@ void vmw_du_primary_plane_destroy(struct
+ * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
+ *
+ * @vps: plane state associated with the display surface
+- * @unreference: true if we also want to unreference the display.
+ */
+-void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
+- bool unreference)
++void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps)
+ {
+- if (vps->surf) {
++ struct vmw_surface *surf = vmw_user_object_surface(&vps->uo);
++
++ if (surf) {
+ if (vps->pinned) {
+- vmw_resource_unpin(&vps->surf->res);
++ vmw_resource_unpin(&surf->res);
+ vps->pinned--;
+ }
+-
+- if (unreference) {
+- if (vps->pinned)
+- DRM_ERROR("Surface still pinned\n");
+- vmw_surface_unreference(&vps->surf);
+- }
+ }
+ }
+
+@@ -572,7 +570,7 @@ vmw_du_plane_cleanup_fb(struct drm_plane
+ {
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
+
+- vmw_du_plane_unpin_surf(vps, false);
++ vmw_du_plane_unpin_surf(vps);
+ }
+
+
+@@ -661,25 +659,14 @@ vmw_du_cursor_plane_cleanup_fb(struct dr
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
+
+- if (vps->surf_mapped) {
+- vmw_bo_unmap(vps->surf->res.guest_memory_bo);
+- vps->surf_mapped = false;
+- }
++ if (!vmw_user_object_is_null(&vps->uo))
++ vmw_user_object_unmap(&vps->uo);
+
+ vmw_du_cursor_plane_unmap_cm(vps);
+ vmw_du_put_cursor_mob(vcp, vps);
+
+- vmw_du_plane_unpin_surf(vps, false);
+-
+- if (vps->surf) {
+- vmw_surface_unreference(&vps->surf);
+- vps->surf = NULL;
+- }
+-
+- if (vps->bo) {
+- vmw_bo_unreference(&vps->bo);
+- vps->bo = NULL;
+- }
++ vmw_du_plane_unpin_surf(vps);
++ vmw_user_object_unref(&vps->uo);
+ }
+
+
+@@ -698,64 +685,48 @@ vmw_du_cursor_plane_prepare_fb(struct dr
+ struct drm_framebuffer *fb = new_state->fb;
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
++ struct vmw_bo *bo = NULL;
+ int ret = 0;
+
+- if (vps->surf) {
+- if (vps->surf_mapped) {
+- vmw_bo_unmap(vps->surf->res.guest_memory_bo);
+- vps->surf_mapped = false;
+- }
+- vmw_surface_unreference(&vps->surf);
+- vps->surf = NULL;
+- }
+-
+- if (vps->bo) {
+- vmw_bo_unreference(&vps->bo);
+- vps->bo = NULL;
++ if (!vmw_user_object_is_null(&vps->uo)) {
++ vmw_user_object_unmap(&vps->uo);
++ vmw_user_object_unref(&vps->uo);
+ }
+
+ if (fb) {
+ if (vmw_framebuffer_to_vfb(fb)->bo) {
+- vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
+- vmw_bo_reference(vps->bo);
++ vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
++ vps->uo.surface = NULL;
+ } else {
+- vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
+- vmw_surface_reference(vps->surf);
++ memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
+ }
++ vmw_user_object_ref(&vps->uo);
+ }
+
+- if (!vps->surf && vps->bo) {
+- const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
++ bo = vmw_user_object_buffer(&vps->uo);
++ if (bo) {
++ struct ttm_operation_ctx ctx = {false, false};
+
+- /*
+- * Not using vmw_bo_map_and_cache() helper here as we need to
+- * reserve the ttm_buffer_object first which
+- * vmw_bo_map_and_cache() omits.
+- */
+- ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
+-
+- if (unlikely(ret != 0))
++ ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
++ if (ret != 0)
+ return -ENOMEM;
+
+- ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);
+-
+- ttm_bo_unreserve(&vps->bo->tbo);
+-
+- if (unlikely(ret != 0))
++ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
++ if (ret != 0)
+ return -ENOMEM;
+- } else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
+
+- WARN_ON(vps->surf->snooper.image);
+- ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
+- NULL);
+- if (unlikely(ret != 0))
+- return -ENOMEM;
+- vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
+- ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
+- vps->surf_mapped = true;
++ vmw_bo_pin_reserved(bo, true);
++ if (vmw_framebuffer_to_vfb(fb)->bo) {
++ const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
++
++ (void)vmw_bo_map_and_cache_size(bo, size);
++ } else {
++ vmw_bo_map_and_cache(bo);
++ }
++ ttm_bo_unreserve(&bo->tbo);
+ }
+
+- if (vps->surf || vps->bo) {
++ if (!vmw_user_object_is_null(&vps->uo)) {
+ vmw_du_get_cursor_mob(vcp, vps);
+ vmw_du_cursor_plane_map_cm(vps);
+ }
+@@ -777,14 +748,17 @@ vmw_du_cursor_plane_atomic_update(struct
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
++ struct vmw_bo *old_bo = NULL;
++ struct vmw_bo *new_bo = NULL;
+ s32 hotspot_x, hotspot_y;
++ int ret;
+
+ hotspot_x = du->hotspot_x + new_state->hotspot_x;
+ hotspot_y = du->hotspot_y + new_state->hotspot_y;
+
+- du->cursor_surface = vps->surf;
++ du->cursor_surface = vmw_user_object_surface(&vps->uo);
+
+- if (!vps->surf && !vps->bo) {
++ if (vmw_user_object_is_null(&vps->uo)) {
+ vmw_cursor_update_position(dev_priv, false, 0, 0);
+ return;
+ }
+@@ -792,10 +766,26 @@ vmw_du_cursor_plane_atomic_update(struct
+ vps->cursor.hotspot_x = hotspot_x;
+ vps->cursor.hotspot_y = hotspot_y;
+
+- if (vps->surf) {
++ if (du->cursor_surface)
+ du->cursor_age = du->cursor_surface->snooper.age;
++
++ if (!vmw_user_object_is_null(&old_vps->uo)) {
++ old_bo = vmw_user_object_buffer(&old_vps->uo);
++ ret = ttm_bo_reserve(&old_bo->tbo, false, false, NULL);
++ if (ret != 0)
++ return;
+ }
+
++ if (!vmw_user_object_is_null(&vps->uo)) {
++ new_bo = vmw_user_object_buffer(&vps->uo);
++ if (old_bo != new_bo) {
++ ret = ttm_bo_reserve(&new_bo->tbo, false, false, NULL);
++ if (ret != 0)
++ return;
++ } else {
++ new_bo = NULL;
++ }
++ }
+ if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
+ /*
+ * If it hasn't changed, avoid making the device do extra
+@@ -813,6 +803,11 @@ vmw_du_cursor_plane_atomic_update(struct
+ hotspot_x, hotspot_y);
+ }
+
++ if (old_bo)
++ ttm_bo_unreserve(&old_bo->tbo);
++ if (new_bo)
++ ttm_bo_unreserve(&new_bo->tbo);
++
+ du->cursor_x = new_state->crtc_x + du->set_gui_x;
+ du->cursor_y = new_state->crtc_y + du->set_gui_y;
+
+@@ -913,7 +908,7 @@ int vmw_du_cursor_plane_atomic_check(str
+ }
+
+ if (!vmw_framebuffer_to_vfb(fb)->bo) {
+- surface = vmw_framebuffer_to_vfbs(fb)->surface;
++ surface = vmw_user_object_surface(&vmw_framebuffer_to_vfbs(fb)->uo);
+
+ WARN_ON(!surface);
+
+@@ -1074,12 +1069,7 @@ vmw_du_plane_duplicate_state(struct drm_
+ memset(&vps->cursor, 0, sizeof(vps->cursor));
+
+ /* Each ref counted resource needs to be acquired again */
+- if (vps->surf)
+- (void) vmw_surface_reference(vps->surf);
+-
+- if (vps->bo)
+- (void) vmw_bo_reference(vps->bo);
+-
++ vmw_user_object_ref(&vps->uo);
+ state = &vps->base;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, state);
+@@ -1128,11 +1118,7 @@ vmw_du_plane_destroy_state(struct drm_pl
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
+
+ /* Should have been freed by cleanup_fb */
+- if (vps->surf)
+- vmw_surface_unreference(&vps->surf);
+-
+- if (vps->bo)
+- vmw_bo_unreference(&vps->bo);
++ vmw_user_object_unref(&vps->uo);
+
+ drm_atomic_helper_plane_destroy_state(plane, state);
+ }
+@@ -1227,7 +1213,7 @@ static void vmw_framebuffer_surface_dest
+ vmw_framebuffer_to_vfbs(framebuffer);
+
+ drm_framebuffer_cleanup(framebuffer);
+- vmw_surface_unreference(&vfbs->surface);
++ vmw_user_object_unref(&vfbs->uo);
+
+ kfree(vfbs);
+ }
+@@ -1272,29 +1258,41 @@ int vmw_kms_readback(struct vmw_private
+ return -ENOSYS;
+ }
+
++static int vmw_framebuffer_surface_create_handle(struct drm_framebuffer *fb,
++ struct drm_file *file_priv,
++ unsigned int *handle)
++{
++ struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb);
++ struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
++
++ return drm_gem_handle_create(file_priv, &bo->tbo.base, handle);
++}
+
+ static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
++ .create_handle = vmw_framebuffer_surface_create_handle,
+ .destroy = vmw_framebuffer_surface_destroy,
+ .dirty = drm_atomic_helper_dirtyfb,
+ };
+
+ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
+- struct vmw_surface *surface,
++ struct vmw_user_object *uo,
+ struct vmw_framebuffer **out,
+ const struct drm_mode_fb_cmd2
+- *mode_cmd,
+- bool is_bo_proxy)
++ *mode_cmd)
+
+ {
+ struct drm_device *dev = &dev_priv->drm;
+ struct vmw_framebuffer_surface *vfbs;
+ enum SVGA3dSurfaceFormat format;
++ struct vmw_surface *surface;
+ int ret;
+
+ /* 3D is only supported on HWv8 and newer hosts */
+ if (dev_priv->active_display_unit == vmw_du_legacy)
+ return -ENOSYS;
+
++ surface = vmw_user_object_surface(uo);
++
+ /*
+ * Sanity checks.
+ */
+@@ -1357,8 +1355,8 @@ static int vmw_kms_new_framebuffer_surfa
+ }
+
+ drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
+- vfbs->surface = vmw_surface_reference(surface);
+- vfbs->is_bo_proxy = is_bo_proxy;
++ memcpy(&vfbs->uo, uo, sizeof(vfbs->uo));
++ vmw_user_object_ref(&vfbs->uo);
+
+ *out = &vfbs->base;
+
+@@ -1370,7 +1368,7 @@ static int vmw_kms_new_framebuffer_surfa
+ return 0;
+
+ out_err2:
+- vmw_surface_unreference(&surface);
++ vmw_user_object_unref(&vfbs->uo);
+ kfree(vfbs);
+ out_err1:
+ return ret;
+@@ -1386,7 +1384,6 @@ static int vmw_framebuffer_bo_create_han
+ {
+ struct vmw_framebuffer_bo *vfbd =
+ vmw_framebuffer_to_vfbd(fb);
+-
+ return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
+ }
+
+@@ -1407,86 +1404,6 @@ static const struct drm_framebuffer_func
+ .dirty = drm_atomic_helper_dirtyfb,
+ };
+
+-/**
+- * vmw_create_bo_proxy - create a proxy surface for the buffer object
+- *
+- * @dev: DRM device
+- * @mode_cmd: parameters for the new surface
+- * @bo_mob: MOB backing the buffer object
+- * @srf_out: newly created surface
+- *
+- * When the content FB is a buffer object, we create a surface as a proxy to the
+- * same buffer. This way we can do a surface copy rather than a surface DMA.
+- * This is a more efficient approach
+- *
+- * RETURNS:
+- * 0 on success, error code otherwise
+- */
+-static int vmw_create_bo_proxy(struct drm_device *dev,
+- const struct drm_mode_fb_cmd2 *mode_cmd,
+- struct vmw_bo *bo_mob,
+- struct vmw_surface **srf_out)
+-{
+- struct vmw_surface_metadata metadata = {0};
+- uint32_t format;
+- struct vmw_resource *res;
+- unsigned int bytes_pp;
+- int ret;
+-
+- switch (mode_cmd->pixel_format) {
+- case DRM_FORMAT_ARGB8888:
+- case DRM_FORMAT_XRGB8888:
+- format = SVGA3D_X8R8G8B8;
+- bytes_pp = 4;
+- break;
+-
+- case DRM_FORMAT_RGB565:
+- case DRM_FORMAT_XRGB1555:
+- format = SVGA3D_R5G6B5;
+- bytes_pp = 2;
+- break;
+-
+- case 8:
+- format = SVGA3D_P8;
+- bytes_pp = 1;
+- break;
+-
+- default:
+- DRM_ERROR("Invalid framebuffer format %p4cc\n",
+- &mode_cmd->pixel_format);
+- return -EINVAL;
+- }
+-
+- metadata.format = format;
+- metadata.mip_levels[0] = 1;
+- metadata.num_sizes = 1;
+- metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
+- metadata.base_size.height = mode_cmd->height;
+- metadata.base_size.depth = 1;
+- metadata.scanout = true;
+-
+- ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
+- if (ret) {
+- DRM_ERROR("Failed to allocate proxy content buffer\n");
+- return ret;
+- }
+-
+- res = &(*srf_out)->res;
+-
+- /* Reserve and switch the backing mob. */
+- mutex_lock(&res->dev_priv->cmdbuf_mutex);
+- (void) vmw_resource_reserve(res, false, true);
+- vmw_user_bo_unref(&res->guest_memory_bo);
+- res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
+- res->guest_memory_offset = 0;
+- vmw_resource_unreserve(res, false, false, false, NULL, 0);
+- mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+-
+- return 0;
+-}
+-
+-
+-
+ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
+ struct vmw_bo *bo,
+ struct vmw_framebuffer **out,
+@@ -1565,55 +1482,24 @@ vmw_kms_srf_ok(struct vmw_private *dev_p
+ * vmw_kms_new_framebuffer - Create a new framebuffer.
+ *
+ * @dev_priv: Pointer to device private struct.
+- * @bo: Pointer to buffer object to wrap the kms framebuffer around.
+- * Either @bo or @surface must be NULL.
+- * @surface: Pointer to a surface to wrap the kms framebuffer around.
+- * Either @bo or @surface must be NULL.
+- * @only_2d: No presents will occur to this buffer object based framebuffer.
+- * This helps the code to do some important optimizations.
++ * @uo: Pointer to user object to wrap the kms framebuffer around.
++ * Either the buffer or surface inside the user object must be NULL.
+ * @mode_cmd: Frame-buffer metadata.
+ */
+ struct vmw_framebuffer *
+ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
+- struct vmw_bo *bo,
+- struct vmw_surface *surface,
+- bool only_2d,
++ struct vmw_user_object *uo,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+ {
+ struct vmw_framebuffer *vfb = NULL;
+- bool is_bo_proxy = false;
+ int ret;
+
+- /*
+- * We cannot use the SurfaceDMA command in an non-accelerated VM,
+- * therefore, wrap the buffer object in a surface so we can use the
+- * SurfaceCopy command.
+- */
+- if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
+- bo && only_2d &&
+- mode_cmd->width > 64 && /* Don't create a proxy for cursor */
+- dev_priv->active_display_unit == vmw_du_screen_target) {
+- ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
+- bo, &surface);
+- if (ret)
+- return ERR_PTR(ret);
+-
+- is_bo_proxy = true;
+- }
+-
+ /* Create the new framebuffer depending one what we have */
+- if (surface) {
+- ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
+- mode_cmd,
+- is_bo_proxy);
+- /*
+- * vmw_create_bo_proxy() adds a reference that is no longer
+- * needed
+- */
+- if (is_bo_proxy)
+- vmw_surface_unreference(&surface);
+- } else if (bo) {
+- ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
++ if (vmw_user_object_surface(uo)) {
++ ret = vmw_kms_new_framebuffer_surface(dev_priv, uo, &vfb,
++ mode_cmd);
++ } else if (uo->buffer) {
++ ret = vmw_kms_new_framebuffer_bo(dev_priv, uo->buffer, &vfb,
+ mode_cmd);
+ } else {
+ BUG();
+@@ -1635,14 +1521,12 @@ static struct drm_framebuffer *vmw_kms_f
+ {
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_framebuffer *vfb = NULL;
+- struct vmw_surface *surface = NULL;
+- struct vmw_bo *bo = NULL;
++ struct vmw_user_object uo = {0};
+ int ret;
+
+ /* returns either a bo or surface */
+- ret = vmw_user_lookup_handle(dev_priv, file_priv,
+- mode_cmd->handles[0],
+- &surface, &bo);
++ ret = vmw_user_object_lookup(dev_priv, file_priv, mode_cmd->handles[0],
++ &uo);
+ if (ret) {
+ DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
+ mode_cmd->handles[0], mode_cmd->handles[0]);
+@@ -1650,7 +1534,7 @@ static struct drm_framebuffer *vmw_kms_f
+ }
+
+
+- if (!bo &&
++ if (vmw_user_object_surface(&uo) &&
+ !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
+ DRM_ERROR("Surface size cannot exceed %dx%d\n",
+ dev_priv->texture_max_width,
+@@ -1659,20 +1543,15 @@ static struct drm_framebuffer *vmw_kms_f
+ }
+
+
+- vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
+- !(dev_priv->capabilities & SVGA_CAP_3D),
+- mode_cmd);
++ vfb = vmw_kms_new_framebuffer(dev_priv, &uo, mode_cmd);
+ if (IS_ERR(vfb)) {
+ ret = PTR_ERR(vfb);
+ goto err_out;
+ }
+
+ err_out:
+- /* vmw_user_lookup_handle takes one ref so does new_fb */
+- if (bo)
+- vmw_user_bo_unref(&bo);
+- if (surface)
+- vmw_surface_unreference(&surface);
++ /* vmw_user_object_lookup takes one ref so does new_fb */
++ vmw_user_object_unref(&uo);
+
+ if (ret) {
+ DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
+@@ -2585,72 +2464,6 @@ void vmw_kms_helper_validation_finish(st
+ }
+
+ /**
+- * vmw_kms_update_proxy - Helper function to update a proxy surface from
+- * its backing MOB.
+- *
+- * @res: Pointer to the surface resource
+- * @clips: Clip rects in framebuffer (surface) space.
+- * @num_clips: Number of clips in @clips.
+- * @increment: Integer with which to increment the clip counter when looping.
+- * Used to skip a predetermined number of clip rects.
+- *
+- * This function makes sure the proxy surface is updated from its backing MOB
+- * using the region given by @clips. The surface resource @res and its backing
+- * MOB needs to be reserved and validated on call.
+- */
+-int vmw_kms_update_proxy(struct vmw_resource *res,
+- const struct drm_clip_rect *clips,
+- unsigned num_clips,
+- int increment)
+-{
+- struct vmw_private *dev_priv = res->dev_priv;
+- struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
+- struct {
+- SVGA3dCmdHeader header;
+- SVGA3dCmdUpdateGBImage body;
+- } *cmd;
+- SVGA3dBox *box;
+- size_t copy_size = 0;
+- int i;
+-
+- if (!clips)
+- return 0;
+-
+- cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
+- if (!cmd)
+- return -ENOMEM;
+-
+- for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
+- box = &cmd->body.box;
+-
+- cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+- cmd->header.size = sizeof(cmd->body);
+- cmd->body.image.sid = res->id;
+- cmd->body.image.face = 0;
+- cmd->body.image.mipmap = 0;
+-
+- if (clips->x1 > size->width || clips->x2 > size->width ||
+- clips->y1 > size->height || clips->y2 > size->height) {
+- DRM_ERROR("Invalid clips outsize of framebuffer.\n");
+- return -EINVAL;
+- }
+-
+- box->x = clips->x1;
+- box->y = clips->y1;
+- box->z = 0;
+- box->w = clips->x2 - clips->x1;
+- box->h = clips->y2 - clips->y1;
+- box->d = 1;
+-
+- copy_size += sizeof(*cmd);
+- }
+-
+- vmw_cmd_commit(dev_priv, copy_size);
+-
+- return 0;
+-}
+-
+-/**
+ * vmw_kms_create_implicit_placement_property - Set up the implicit placement
+ * property.
+ *
+@@ -2784,8 +2597,9 @@ int vmw_du_helper_plane_update(struct vm
+ } else {
+ struct vmw_framebuffer_surface *vfbs =
+ container_of(update->vfb, typeof(*vfbs), base);
++ struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);
+
+- ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
++ ret = vmw_validation_add_resource(&val_ctx, &surf->res,
+ 0, VMW_RES_DIRTY_NONE, NULL,
+ NULL);
+ }
+@@ -2941,3 +2755,93 @@ int vmw_connector_get_modes(struct drm_c
+
+ return num_modes;
+ }
++
++struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo)
++{
++ if (uo->buffer)
++ vmw_user_bo_ref(uo->buffer);
++ else if (uo->surface)
++ vmw_surface_reference(uo->surface);
++ return uo;
++}
++
++void vmw_user_object_unref(struct vmw_user_object *uo)
++{
++ if (uo->buffer)
++ vmw_user_bo_unref(&uo->buffer);
++ else if (uo->surface)
++ vmw_surface_unreference(&uo->surface);
++}
++
++struct vmw_bo *
++vmw_user_object_buffer(struct vmw_user_object *uo)
++{
++ if (uo->buffer)
++ return uo->buffer;
++ else if (uo->surface)
++ return uo->surface->res.guest_memory_bo;
++ return NULL;
++}
++
++struct vmw_surface *
++vmw_user_object_surface(struct vmw_user_object *uo)
++{
++ if (uo->buffer)
++ return uo->buffer->dumb_surface;
++ return uo->surface;
++}
++
++void *vmw_user_object_map(struct vmw_user_object *uo)
++{
++ struct vmw_bo *bo = vmw_user_object_buffer(uo);
++
++ WARN_ON(!bo);
++ return vmw_bo_map_and_cache(bo);
++}
++
++void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size)
++{
++ struct vmw_bo *bo = vmw_user_object_buffer(uo);
++
++ WARN_ON(!bo);
++ return vmw_bo_map_and_cache_size(bo, size);
++}
++
++void vmw_user_object_unmap(struct vmw_user_object *uo)
++{
++ struct vmw_bo *bo = vmw_user_object_buffer(uo);
++ int ret;
++
++ WARN_ON(!bo);
++
++	/* Fence the mob creation so we are guaranteed to have the mob */
++ ret = ttm_bo_reserve(&bo->tbo, false, false, NULL);
++ if (ret != 0)
++ return;
++
++ vmw_bo_unmap(bo);
++ vmw_bo_pin_reserved(bo, false);
++
++ ttm_bo_unreserve(&bo->tbo);
++}
++
++bool vmw_user_object_is_mapped(struct vmw_user_object *uo)
++{
++ struct vmw_bo *bo;
++
++ if (!uo || vmw_user_object_is_null(uo))
++ return false;
++
++ bo = vmw_user_object_buffer(uo);
++
++ if (WARN_ON(!bo))
++ return false;
++
++ WARN_ON(bo->map.bo && !bo->map.virtual);
++ return bo->map.virtual;
++}
++
++bool vmw_user_object_is_null(struct vmw_user_object *uo)
++{
++ return !uo->buffer && !uo->surface;
++}
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+@@ -1,7 +1,8 @@
+ /* SPDX-License-Identifier: GPL-2.0 OR MIT */
+ /**************************************************************************
+ *
+- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
++ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
++ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+@@ -221,11 +222,9 @@ struct vmw_framebuffer {
+
+ struct vmw_framebuffer_surface {
+ struct vmw_framebuffer base;
+- struct vmw_surface *surface;
+- bool is_bo_proxy; /* true if this is proxy surface for DMA buf */
++ struct vmw_user_object uo;
+ };
+
+-
+ struct vmw_framebuffer_bo {
+ struct vmw_framebuffer base;
+ struct vmw_bo *buffer;
+@@ -277,8 +276,7 @@ struct vmw_cursor_plane_state {
+ */
+ struct vmw_plane_state {
+ struct drm_plane_state base;
+- struct vmw_surface *surf;
+- struct vmw_bo *bo;
++ struct vmw_user_object uo;
+
+ int content_fb_type;
+ unsigned long bo_size;
+@@ -457,9 +455,7 @@ int vmw_kms_readback(struct vmw_private
+ uint32_t num_clips);
+ struct vmw_framebuffer *
+ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
+- struct vmw_bo *bo,
+- struct vmw_surface *surface,
+- bool only_2d,
++ struct vmw_user_object *uo,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
+ void vmw_guess_mode_timing(struct drm_display_mode *mode);
+ void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv);
+@@ -486,8 +482,7 @@ void vmw_du_plane_reset(struct drm_plane
+ struct drm_plane_state *vmw_du_plane_duplicate_state(struct drm_plane *plane);
+ void vmw_du_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+-void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
+- bool unreference);
++void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps);
+
+ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+@@ -1,7 +1,8 @@
+ // SPDX-License-Identifier: GPL-2.0 OR MIT
+ /**************************************************************************
+ *
+- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
++ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
++ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+@@ -147,8 +148,9 @@ static int vmw_ldu_fb_pin(struct vmw_fra
+ struct vmw_bo *buf;
+ int ret;
+
+- buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+- vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo;
++ buf = vfb->bo ?
++ vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
++ vmw_user_object_buffer(&vmw_framebuffer_to_vfbs(&vfb->base)->uo);
+
+ if (!buf)
+ return 0;
+@@ -169,8 +171,10 @@ static int vmw_ldu_fb_unpin(struct vmw_f
+ struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+ struct vmw_bo *buf;
+
+- buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+- vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo;
++ buf = vfb->bo ?
++ vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
++ vmw_user_object_buffer(&vmw_framebuffer_to_vfbs(&vfb->base)->uo);
++
+
+ if (WARN_ON(!buf))
+ return 0;
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+@@ -1,7 +1,8 @@
+ // SPDX-License-Identifier: GPL-2.0 OR MIT
+ /**************************************************************************
+ *
+- * Copyright 2013 VMware, Inc., Palo Alto, CA., USA
++ * Copyright (c) 2013-2024 Broadcom. All Rights Reserved. The term
++ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+@@ -31,6 +32,7 @@
+ */
+
+ #include "vmwgfx_drv.h"
++#include "vmwgfx_bo.h"
+ #include "ttm_object.h"
+ #include <linux/dma-buf.h>
+
+@@ -88,13 +90,35 @@ int vmw_prime_handle_to_fd(struct drm_de
+ uint32_t handle, uint32_t flags,
+ int *prime_fd)
+ {
++ struct vmw_private *vmw = vmw_priv(dev);
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
++ struct vmw_bo *vbo;
+ int ret;
++ int surf_handle;
+
+- if (handle > VMWGFX_NUM_MOB)
++ if (handle > VMWGFX_NUM_MOB) {
+ ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+- else
+- ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
++ } else {
++ ret = vmw_user_bo_lookup(file_priv, handle, &vbo);
++ if (ret)
++ return ret;
++ if (vbo && vbo->is_dumb) {
++ ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle,
++ flags, prime_fd);
++ } else {
++ surf_handle = vmw_lookup_surface_handle_for_buffer(vmw,
++ vbo,
++ handle);
++ if (surf_handle > 0)
++ ret = ttm_prime_handle_to_fd(tfile, surf_handle,
++ flags, prime_fd);
++ else
++ ret = drm_gem_prime_handle_to_fd(dev, file_priv,
++ handle, flags,
++ prime_fd);
++ }
++ vmw_user_bo_unref(&vbo);
++ }
+
+ return ret;
+ }
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+@@ -1,7 +1,8 @@
+ // SPDX-License-Identifier: GPL-2.0 OR MIT
+ /**************************************************************************
+ *
+- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
++ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
++ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+@@ -58,6 +59,7 @@ void vmw_resource_mob_attach(struct vmw_
+
+ rb_link_node(&res->mob_node, parent, new);
+ rb_insert_color(&res->mob_node, &gbo->res_tree);
++ vmw_bo_del_detached_resource(gbo, res);
+
+ vmw_bo_prio_add(gbo, res->used_prio);
+ }
+@@ -287,28 +289,35 @@ out_bad_resource:
+ *
+ * The pointer this pointed at by out_surf and out_buf needs to be null.
+ */
+-int vmw_user_lookup_handle(struct vmw_private *dev_priv,
++int vmw_user_object_lookup(struct vmw_private *dev_priv,
+ struct drm_file *filp,
+- uint32_t handle,
+- struct vmw_surface **out_surf,
+- struct vmw_bo **out_buf)
++ u32 handle,
++ struct vmw_user_object *uo)
+ {
+ struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
+ struct vmw_resource *res;
+ int ret;
+
+- BUG_ON(*out_surf || *out_buf);
++ WARN_ON(uo->surface || uo->buffer);
+
+ ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
+ user_surface_converter,
+ &res);
+ if (!ret) {
+- *out_surf = vmw_res_to_srf(res);
++ uo->surface = vmw_res_to_srf(res);
+ return 0;
+ }
+
+- *out_surf = NULL;
+- ret = vmw_user_bo_lookup(filp, handle, out_buf);
++ uo->surface = NULL;
++ ret = vmw_user_bo_lookup(filp, handle, &uo->buffer);
++ if (!ret && !uo->buffer->is_dumb) {
++ uo->surface = vmw_lookup_surface_for_buffer(dev_priv,
++ uo->buffer,
++ handle);
++ if (uo->surface)
++ vmw_user_bo_unref(&uo->buffer);
++ }
++
+ return ret;
+ }
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+@@ -1,7 +1,8 @@
+ // SPDX-License-Identifier: GPL-2.0 OR MIT
+ /**************************************************************************
+ *
+- * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA
++ * Copyright (c) 2011-2024 Broadcom. All Rights Reserved. The term
++ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+@@ -240,7 +241,7 @@ static void vmw_sou_crtc_mode_set_nofb(s
+ struct vmw_connector_state *vmw_conn_state;
+ int x, y;
+
+- sou->buffer = vps->bo;
++ sou->buffer = vmw_user_object_buffer(&vps->uo);
+
+ conn_state = sou->base.connector.state;
+ vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
+@@ -376,10 +377,11 @@ vmw_sou_primary_plane_cleanup_fb(struct
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
+ struct drm_crtc *crtc = plane->state->crtc ?
+ plane->state->crtc : old_state->crtc;
++ struct vmw_bo *bo = vmw_user_object_buffer(&vps->uo);
+
+- if (vps->bo)
+- vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false);
+- vmw_bo_unreference(&vps->bo);
++ if (bo)
++ vmw_bo_unpin(vmw_priv(crtc->dev), bo, false);
++ vmw_user_object_unref(&vps->uo);
+ vps->bo_size = 0;
+
+ vmw_du_plane_cleanup_fb(plane, old_state);
+@@ -411,9 +413,10 @@ vmw_sou_primary_plane_prepare_fb(struct
+ .bo_type = ttm_bo_type_device,
+ .pin = true
+ };
++ struct vmw_bo *bo = NULL;
+
+ if (!new_fb) {
+- vmw_bo_unreference(&vps->bo);
++ vmw_user_object_unref(&vps->uo);
+ vps->bo_size = 0;
+
+ return 0;
+@@ -422,17 +425,17 @@ vmw_sou_primary_plane_prepare_fb(struct
+ bo_params.size = new_state->crtc_w * new_state->crtc_h * 4;
+ dev_priv = vmw_priv(crtc->dev);
+
+- if (vps->bo) {
++ bo = vmw_user_object_buffer(&vps->uo);
++ if (bo) {
+ if (vps->bo_size == bo_params.size) {
+ /*
+ * Note that this might temporarily up the pin-count
+ * to 2, until cleanup_fb() is called.
+ */
+- return vmw_bo_pin_in_vram(dev_priv, vps->bo,
+- true);
++ return vmw_bo_pin_in_vram(dev_priv, bo, true);
+ }
+
+- vmw_bo_unreference(&vps->bo);
++ vmw_user_object_unref(&vps->uo);
+ vps->bo_size = 0;
+ }
+
+@@ -442,7 +445,7 @@ vmw_sou_primary_plane_prepare_fb(struct
+ * resume the overlays, this is preferred to failing to alloc.
+ */
+ vmw_overlay_pause_all(dev_priv);
+- ret = vmw_bo_create(dev_priv, &bo_params, &vps->bo);
++ ret = vmw_gem_object_create(dev_priv, &bo_params, &vps->uo.buffer);
+ vmw_overlay_resume_all(dev_priv);
+ if (ret)
+ return ret;
+@@ -453,7 +456,7 @@ vmw_sou_primary_plane_prepare_fb(struct
+ * TTM already thinks the buffer is pinned, but make sure the
+ * pin_count is upped.
+ */
+- return vmw_bo_pin_in_vram(dev_priv, vps->bo, true);
++ return vmw_bo_pin_in_vram(dev_priv, vps->uo.buffer, true);
+ }
+
+ static uint32_t vmw_sou_bo_fifo_size(struct vmw_du_update_plane *update,
+@@ -580,6 +583,7 @@ static uint32_t vmw_sou_surface_pre_clip
+ {
+ struct vmw_kms_sou_dirty_cmd *blit = cmd;
+ struct vmw_framebuffer_surface *vfbs;
++ struct vmw_surface *surf = NULL;
+
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+@@ -587,7 +591,8 @@ static uint32_t vmw_sou_surface_pre_clip
+ blit->header.size = sizeof(blit->body) + sizeof(SVGASignedRect) *
+ num_hits;
+
+- blit->body.srcImage.sid = vfbs->surface->res.id;
++ surf = vmw_user_object_surface(&vfbs->uo);
++ blit->body.srcImage.sid = surf->res.id;
+ blit->body.destScreenId = update->du->unit;
+
+ /* Update the source and destination bounding box later in post_clip */
+@@ -1104,7 +1109,7 @@ int vmw_kms_sou_do_surface_dirty(struct
+ int ret;
+
+ if (!srf)
+- srf = &vfbs->surface->res;
++ srf = &vmw_user_object_surface(&vfbs->uo)->res;
+
+ ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE,
+ NULL, NULL);
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+@@ -1,7 +1,8 @@
+ // SPDX-License-Identifier: GPL-2.0 OR MIT
+ /******************************************************************************
+ *
+- * COPYRIGHT (C) 2014-2023 VMware, Inc., Palo Alto, CA., USA
++ * Copyright (c) 2014-2024 Broadcom. All Rights Reserved. The term
++ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+@@ -29,6 +30,7 @@
+ #include "vmwgfx_kms.h"
+ #include "vmwgfx_vkms.h"
+ #include "vmw_surface_cache.h"
++#include <linux/fsnotify.h>
+
+ #include <drm/drm_atomic.h>
+ #include <drm/drm_atomic_helper.h>
+@@ -735,7 +737,7 @@ int vmw_kms_stdu_surface_dirty(struct vm
+ int ret;
+
+ if (!srf)
+- srf = &vfbs->surface->res;
++ srf = &vmw_user_object_surface(&vfbs->uo)->res;
+
+ ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE,
+ NULL, NULL);
+@@ -746,12 +748,6 @@ int vmw_kms_stdu_surface_dirty(struct vm
+ if (ret)
+ goto out_unref;
+
+- if (vfbs->is_bo_proxy) {
+- ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
+- if (ret)
+- goto out_finish;
+- }
+-
+ sdirty.base.fifo_commit = vmw_kms_stdu_surface_fifo_commit;
+ sdirty.base.clip = vmw_kms_stdu_surface_clip;
+ sdirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_surface_copy) +
+@@ -765,7 +761,7 @@ int vmw_kms_stdu_surface_dirty(struct vm
+ ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
+ dest_x, dest_y, num_clips, inc,
+ &sdirty.base);
+-out_finish:
++
+ vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+ NULL);
+
+@@ -945,9 +941,8 @@ vmw_stdu_primary_plane_cleanup_fb(struct
+ {
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
+
+- if (vps->surf)
++ if (vmw_user_object_surface(&vps->uo))
+ WARN_ON(!vps->pinned);
+-
+ vmw_du_plane_cleanup_fb(plane, old_state);
+
+ vps->content_fb_type = SAME_AS_DISPLAY;
+@@ -955,7 +950,6 @@ vmw_stdu_primary_plane_cleanup_fb(struct
+ }
+
+
+-
+ /**
+ * vmw_stdu_primary_plane_prepare_fb - Readies the display surface
+ *
+@@ -979,13 +973,15 @@ vmw_stdu_primary_plane_prepare_fb(struct
+ enum stdu_content_type new_content_type;
+ struct vmw_framebuffer_surface *new_vfbs;
+ uint32_t hdisplay = new_state->crtc_w, vdisplay = new_state->crtc_h;
++ struct drm_plane_state *old_state = plane->state;
++ struct drm_rect rect;
+ int ret;
+
+ /* No FB to prepare */
+ if (!new_fb) {
+- if (vps->surf) {
++ if (vmw_user_object_surface(&vps->uo)) {
+ WARN_ON(vps->pinned != 0);
+- vmw_surface_unreference(&vps->surf);
++ vmw_user_object_unref(&vps->uo);
+ }
+
+ return 0;
+@@ -995,8 +991,8 @@ vmw_stdu_primary_plane_prepare_fb(struct
+ new_vfbs = (vfb->bo) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
+
+ if (new_vfbs &&
+- new_vfbs->surface->metadata.base_size.width == hdisplay &&
+- new_vfbs->surface->metadata.base_size.height == vdisplay)
++ vmw_user_object_surface(&new_vfbs->uo)->metadata.base_size.width == hdisplay &&
++ vmw_user_object_surface(&new_vfbs->uo)->metadata.base_size.height == vdisplay)
+ new_content_type = SAME_AS_DISPLAY;
+ else if (vfb->bo)
+ new_content_type = SEPARATE_BO;
+@@ -1034,29 +1030,29 @@ vmw_stdu_primary_plane_prepare_fb(struct
+ metadata.num_sizes = 1;
+ metadata.scanout = true;
+ } else {
+- metadata = new_vfbs->surface->metadata;
++ metadata = vmw_user_object_surface(&new_vfbs->uo)->metadata;
+ }
+
+ metadata.base_size.width = hdisplay;
+ metadata.base_size.height = vdisplay;
+ metadata.base_size.depth = 1;
+
+- if (vps->surf) {
++ if (vmw_user_object_surface(&vps->uo)) {
+ struct drm_vmw_size cur_base_size =
+- vps->surf->metadata.base_size;
++ vmw_user_object_surface(&vps->uo)->metadata.base_size;
+
+ if (cur_base_size.width != metadata.base_size.width ||
+ cur_base_size.height != metadata.base_size.height ||
+- vps->surf->metadata.format != metadata.format) {
++ vmw_user_object_surface(&vps->uo)->metadata.format != metadata.format) {
+ WARN_ON(vps->pinned != 0);
+- vmw_surface_unreference(&vps->surf);
++ vmw_user_object_unref(&vps->uo);
+ }
+
+ }
+
+- if (!vps->surf) {
++ if (!vmw_user_object_surface(&vps->uo)) {
+ ret = vmw_gb_surface_define(dev_priv, &metadata,
+- &vps->surf);
++ &vps->uo.surface);
+ if (ret != 0) {
+ DRM_ERROR("Couldn't allocate STDU surface.\n");
+ return ret;
+@@ -1069,18 +1065,19 @@ vmw_stdu_primary_plane_prepare_fb(struct
+ * The only time we add a reference in prepare_fb is if the
+ * state object doesn't have a reference to begin with
+ */
+- if (vps->surf) {
++ if (vmw_user_object_surface(&vps->uo)) {
+ WARN_ON(vps->pinned != 0);
+- vmw_surface_unreference(&vps->surf);
++ vmw_user_object_unref(&vps->uo);
+ }
+
+- vps->surf = vmw_surface_reference(new_vfbs->surface);
++ memcpy(&vps->uo, &new_vfbs->uo, sizeof(vps->uo));
++ vmw_user_object_ref(&vps->uo);
+ }
+
+- if (vps->surf) {
++ if (vmw_user_object_surface(&vps->uo)) {
+
+ /* Pin new surface before flipping */
+- ret = vmw_resource_pin(&vps->surf->res, false);
++ ret = vmw_resource_pin(&vmw_user_object_surface(&vps->uo)->res, false);
+ if (ret)
+ goto out_srf_unref;
+
+@@ -1090,6 +1087,34 @@ vmw_stdu_primary_plane_prepare_fb(struct
+ vps->content_fb_type = new_content_type;
+
+ /*
++ * The drm fb code will do blit's via the vmap interface, which doesn't
++ * trigger vmw_bo page dirty tracking due to being kernel side (and thus
++ * doesn't require mmap'ing) so we have to update the surface's dirty
++ * regions by hand but we want to be careful to not overwrite the
++ * resource if it has been written to by the gpu (res_dirty).
++ */
++ if (vps->uo.buffer && vps->uo.buffer->is_dumb) {
++ struct vmw_surface *surf = vmw_user_object_surface(&vps->uo);
++ struct vmw_resource *res = &surf->res;
++
++ if (!res->res_dirty && drm_atomic_helper_damage_merged(old_state,
++ new_state,
++ &rect)) {
++ /*
++ * At some point it might be useful to actually translate
++ * (rect.x1, rect.y1) => start, and (rect.x2, rect.y2) => end,
++ * but currently the fb code will just report the entire fb
++ * dirty so in practice it doesn't matter.
++ */
++ pgoff_t start = res->guest_memory_offset >> PAGE_SHIFT;
++ pgoff_t end = __KERNEL_DIV_ROUND_UP(res->guest_memory_offset +
++ res->guest_memory_size,
++ PAGE_SIZE);
++ vmw_resource_dirty_update(res, start, end);
++ }
++ }
++
++ /*
+ * This should only happen if the buffer object is too large to create a
+ * proxy surface for.
+ */
+@@ -1099,7 +1124,7 @@ vmw_stdu_primary_plane_prepare_fb(struct
+ return 0;
+
+ out_srf_unref:
+- vmw_surface_unreference(&vps->surf);
++ vmw_user_object_unref(&vps->uo);
+ return ret;
+ }
+
+@@ -1241,14 +1266,8 @@ static uint32_t
+ vmw_stdu_surface_fifo_size_same_display(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+ {
+- struct vmw_framebuffer_surface *vfbs;
+ uint32_t size = 0;
+
+- vfbs = container_of(update->vfb, typeof(*vfbs), base);
+-
+- if (vfbs->is_bo_proxy)
+- size += sizeof(struct vmw_stdu_update_gb_image) * num_hits;
+-
+ size += sizeof(struct vmw_stdu_update);
+
+ return size;
+@@ -1257,14 +1276,8 @@ vmw_stdu_surface_fifo_size_same_display(
+ static uint32_t vmw_stdu_surface_fifo_size(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+ {
+- struct vmw_framebuffer_surface *vfbs;
+ uint32_t size = 0;
+
+- vfbs = container_of(update->vfb, typeof(*vfbs), base);
+-
+- if (vfbs->is_bo_proxy)
+- size += sizeof(struct vmw_stdu_update_gb_image) * num_hits;
+-
+ size += sizeof(struct vmw_stdu_surface_copy) + sizeof(SVGA3dCopyBox) *
+ num_hits + sizeof(struct vmw_stdu_update);
+
+@@ -1272,47 +1285,6 @@ static uint32_t vmw_stdu_surface_fifo_si
+ }
+
+ static uint32_t
+-vmw_stdu_surface_update_proxy(struct vmw_du_update_plane *update, void *cmd)
+-{
+- struct vmw_framebuffer_surface *vfbs;
+- struct drm_plane_state *state = update->plane->state;
+- struct drm_plane_state *old_state = update->old_state;
+- struct vmw_stdu_update_gb_image *cmd_update = cmd;
+- struct drm_atomic_helper_damage_iter iter;
+- struct drm_rect clip;
+- uint32_t copy_size = 0;
+-
+- vfbs = container_of(update->vfb, typeof(*vfbs), base);
+-
+- /*
+- * proxy surface is special where a buffer object type fb is wrapped
+- * in a surface and need an update gb image command to sync with device.
+- */
+- drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+- drm_atomic_for_each_plane_damage(&iter, &clip) {
+- SVGA3dBox *box = &cmd_update->body.box;
+-
+- cmd_update->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+- cmd_update->header.size = sizeof(cmd_update->body);
+- cmd_update->body.image.sid = vfbs->surface->res.id;
+- cmd_update->body.image.face = 0;
+- cmd_update->body.image.mipmap = 0;
+-
+- box->x = clip.x1;
+- box->y = clip.y1;
+- box->z = 0;
+- box->w = drm_rect_width(&clip);
+- box->h = drm_rect_height(&clip);
+- box->d = 1;
+-
+- copy_size += sizeof(*cmd_update);
+- cmd_update++;
+- }
+-
+- return copy_size;
+-}
+-
+-static uint32_t
+ vmw_stdu_surface_populate_copy(struct vmw_du_update_plane *update, void *cmd,
+ uint32_t num_hits)
+ {
+@@ -1326,7 +1298,7 @@ vmw_stdu_surface_populate_copy(struct vm
+ cmd_copy->header.id = SVGA_3D_CMD_SURFACE_COPY;
+ cmd_copy->header.size = sizeof(cmd_copy->body) + sizeof(SVGA3dCopyBox) *
+ num_hits;
+- cmd_copy->body.src.sid = vfbs->surface->res.id;
++ cmd_copy->body.src.sid = vmw_user_object_surface(&vfbs->uo)->res.id;
+ cmd_copy->body.dest.sid = stdu->display_srf->res.id;
+
+ return sizeof(*cmd_copy);
+@@ -1397,10 +1369,7 @@ static int vmw_stdu_plane_update_surface
+ srf_update.mutex = &dev_priv->cmdbuf_mutex;
+ srf_update.intr = true;
+
+- if (vfbs->is_bo_proxy)
+- srf_update.post_prepare = vmw_stdu_surface_update_proxy;
+-
+- if (vfbs->surface->res.id != stdu->display_srf->res.id) {
++ if (vmw_user_object_surface(&vfbs->uo)->res.id != stdu->display_srf->res.id) {
+ srf_update.calc_fifo_size = vmw_stdu_surface_fifo_size;
+ srf_update.pre_clip = vmw_stdu_surface_populate_copy;
+ srf_update.clip = vmw_stdu_surface_populate_clip;
+@@ -1444,7 +1413,7 @@ vmw_stdu_primary_plane_atomic_update(str
+ stdu = vmw_crtc_to_stdu(crtc);
+ dev_priv = vmw_priv(crtc->dev);
+
+- stdu->display_srf = vps->surf;
++ stdu->display_srf = vmw_user_object_surface(&vps->uo);
+ stdu->content_fb_type = vps->content_fb_type;
+ stdu->cpp = vps->cpp;
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -1,7 +1,8 @@
+ // SPDX-License-Identifier: GPL-2.0 OR MIT
+ /**************************************************************************
+ *
+- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
++ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
++ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+@@ -36,9 +37,6 @@
+ #include <drm/ttm/ttm_placement.h>
+
+ #define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
+-#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
+-#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
+- (svga3d_flags & ((uint64_t)U32_MAX))
+
+ /**
+ * struct vmw_user_surface - User-space visible surface resource
+@@ -686,6 +684,14 @@ static void vmw_user_surface_base_releas
+ struct vmw_resource *res = &user_srf->srf.res;
+
+ *p_base = NULL;
++
++ /*
++ * Dumb buffers own the resource and they'll unref the
++ * resource themselves
++ */
++ if (res && res->guest_memory_bo && res->guest_memory_bo->is_dumb)
++ return;
++
+ vmw_resource_unreference(&res);
+ }
+
+@@ -812,7 +818,8 @@ int vmw_surface_define_ioctl(struct drm_
+ }
+ }
+ res->guest_memory_size = cur_bo_offset;
+- if (metadata->scanout &&
++ if (!file_priv->atomic &&
++ metadata->scanout &&
+ metadata->num_sizes == 1 &&
+ metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
+ metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
+@@ -864,6 +871,7 @@ int vmw_surface_define_ioctl(struct drm_
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
++ vmw_bo_add_detached_resource(res->guest_memory_bo, res);
+ }
+
+ tmp = vmw_resource_reference(&srf->res);
+@@ -892,6 +900,113 @@ out_unlock:
+ return ret;
+ }
+
++static struct vmw_user_surface *
++vmw_lookup_user_surface_for_buffer(struct vmw_private *vmw, struct vmw_bo *bo,
++ u32 handle)
++{
++ struct vmw_user_surface *user_srf = NULL;
++ struct vmw_surface *surf;
++ struct ttm_base_object *base;
++
++ surf = vmw_bo_surface(bo);
++ if (surf) {
++ rcu_read_lock();
++ user_srf = container_of(surf, struct vmw_user_surface, srf);
++ base = &user_srf->prime.base;
++ if (base && !kref_get_unless_zero(&base->refcount)) {
++ drm_dbg_driver(&vmw->drm,
++ "%s: referencing a stale surface handle %d\n",
++ __func__, handle);
++ base = NULL;
++ user_srf = NULL;
++ }
++ rcu_read_unlock();
++ }
++
++ return user_srf;
++}
++
++struct vmw_surface *vmw_lookup_surface_for_buffer(struct vmw_private *vmw,
++ struct vmw_bo *bo,
++ u32 handle)
++{
++ struct vmw_user_surface *user_srf =
++ vmw_lookup_user_surface_for_buffer(vmw, bo, handle);
++ struct vmw_surface *surf = NULL;
++ struct ttm_base_object *base;
++
++ if (user_srf) {
++ surf = vmw_surface_reference(&user_srf->srf);
++ base = &user_srf->prime.base;
++ ttm_base_object_unref(&base);
++ }
++ return surf;
++}
++
++u32 vmw_lookup_surface_handle_for_buffer(struct vmw_private *vmw,
++ struct vmw_bo *bo,
++ u32 handle)
++{
++ struct vmw_user_surface *user_srf =
++ vmw_lookup_user_surface_for_buffer(vmw, bo, handle);
++ int surf_handle = 0;
++ struct ttm_base_object *base;
++
++ if (user_srf) {
++ base = &user_srf->prime.base;
++ surf_handle = (u32)base->handle;
++ ttm_base_object_unref(&base);
++ }
++ return surf_handle;
++}
++
++static int vmw_buffer_prime_to_surface_base(struct vmw_private *dev_priv,
++ struct drm_file *file_priv,
++ u32 fd, u32 *handle,
++ struct ttm_base_object **base_p)
++{
++ struct ttm_base_object *base;
++ struct vmw_bo *bo;
++ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
++ struct vmw_user_surface *user_srf;
++ int ret;
++
++ ret = drm_gem_prime_fd_to_handle(&dev_priv->drm, file_priv, fd, handle);
++ if (ret) {
++ drm_warn(&dev_priv->drm,
++ "Wasn't able to find user buffer for fd = %u.\n", fd);
++ return ret;
++ }
++
++ ret = vmw_user_bo_lookup(file_priv, *handle, &bo);
++ if (ret) {
++ drm_warn(&dev_priv->drm,
++ "Wasn't able to lookup user buffer for handle = %u.\n", *handle);
++ return ret;
++ }
++
++ user_srf = vmw_lookup_user_surface_for_buffer(dev_priv, bo, *handle);
++ if (WARN_ON(!user_srf)) {
++ drm_warn(&dev_priv->drm,
++ "User surface fd %d (handle %d) is null.\n", fd, *handle);
++ ret = -EINVAL;
++ goto out;
++ }
++
++ base = &user_srf->prime.base;
++ ret = ttm_ref_object_add(tfile, base, NULL, false);
++ if (ret) {
++ drm_warn(&dev_priv->drm,
++ "Couldn't add an object ref for the buffer (%d).\n", *handle);
++ goto out;
++ }
++
++ *base_p = base;
++out:
++ vmw_user_bo_unref(&bo);
++
++ return ret;
++}
+
+ static int
+ vmw_surface_handle_reference(struct vmw_private *dev_priv,
+@@ -901,15 +1016,19 @@ vmw_surface_handle_reference(struct vmw_
+ struct ttm_base_object **base_p)
+ {
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+- struct vmw_user_surface *user_srf;
++ struct vmw_user_surface *user_srf = NULL;
+ uint32_t handle;
+ struct ttm_base_object *base;
+ int ret;
+
+ if (handle_type == DRM_VMW_HANDLE_PRIME) {
+ ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
+- if (unlikely(ret != 0))
+- return ret;
++ if (ret)
++ return vmw_buffer_prime_to_surface_base(dev_priv,
++ file_priv,
++ u_handle,
++ &handle,
++ base_p);
+ } else {
+ handle = u_handle;
+ }
+@@ -1503,7 +1622,12 @@ vmw_gb_surface_define_internal(struct dr
+ ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle,
+ &res->guest_memory_bo);
+ if (ret == 0) {
+- if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
++ if (res->guest_memory_bo->is_dumb) {
++ VMW_DEBUG_USER("Can't backup surface with a dumb buffer.\n");
++ vmw_user_bo_unref(&res->guest_memory_bo);
++ ret = -EINVAL;
++ goto out_unlock;
++ } else if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
+ VMW_DEBUG_USER("Surface backup buffer too small.\n");
+ vmw_user_bo_unref(&res->guest_memory_bo);
+ ret = -EINVAL;
+@@ -1560,6 +1684,7 @@ vmw_gb_surface_define_internal(struct dr
+ rep->handle = user_srf->prime.base.handle;
+ rep->backup_size = res->guest_memory_size;
+ if (res->guest_memory_bo) {
++ vmw_bo_add_detached_resource(res->guest_memory_bo, res);
+ rep->buffer_map_handle =
+ drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node);
+ rep->buffer_size = res->guest_memory_bo->tbo.base.size;
+@@ -2100,3 +2225,140 @@ int vmw_gb_surface_define(struct vmw_pri
+ out_unlock:
+ return ret;
+ }
++
++static SVGA3dSurfaceFormat vmw_format_bpp_to_svga(struct vmw_private *vmw,
++ int bpp)
++{
++ switch (bpp) {
++ case 8: /* DRM_FORMAT_C8 */
++ return SVGA3D_P8;
++ case 16: /* DRM_FORMAT_RGB565 */
++ return SVGA3D_R5G6B5;
++ case 32: /* DRM_FORMAT_XRGB8888 */
++ if (has_sm4_context(vmw))
++ return SVGA3D_B8G8R8X8_UNORM;
++ return SVGA3D_X8R8G8B8;
++ default:
++ drm_warn(&vmw->drm, "Unsupported format bpp: %d\n", bpp);
++ return SVGA3D_X8R8G8B8;
++ }
++}
++
++/**
++ * vmw_dumb_create - Create a dumb kms buffer
++ *
++ * @file_priv: Pointer to a struct drm_file identifying the caller.
++ * @dev: Pointer to the drm device.
++ * @args: Pointer to a struct drm_mode_create_dumb structure
++ * Return: Zero on success, negative error code on failure.
++ *
++ * This is a driver callback for the core drm create_dumb functionality.
++ * Note that this is very similar to the vmw_bo_alloc ioctl, except
++ * that the arguments have a different format.
++ */
++int vmw_dumb_create(struct drm_file *file_priv,
++ struct drm_device *dev,
++ struct drm_mode_create_dumb *args)
++{
++ struct vmw_private *dev_priv = vmw_priv(dev);
++ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
++ struct vmw_bo *vbo = NULL;
++ struct vmw_resource *res = NULL;
++ union drm_vmw_gb_surface_create_ext_arg arg = { 0 };
++ struct drm_vmw_gb_surface_create_ext_req *req = &arg.req;
++ int ret;
++ struct drm_vmw_size drm_size = {
++ .width = args->width,
++ .height = args->height,
++ .depth = 1,
++ };
++ SVGA3dSurfaceFormat format = vmw_format_bpp_to_svga(dev_priv, args->bpp);
++ const struct SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format);
++ SVGA3dSurfaceAllFlags flags = SVGA3D_SURFACE_HINT_TEXTURE |
++ SVGA3D_SURFACE_HINT_RENDERTARGET |
++ SVGA3D_SURFACE_SCREENTARGET |
++ SVGA3D_SURFACE_BIND_SHADER_RESOURCE |
++ SVGA3D_SURFACE_BIND_RENDER_TARGET;
++
++ /*
++ * Without mob support we're just going to use raw memory buffer
++ * because we wouldn't be able to support full surface coherency
++ * without mobs
++ */
++ if (!dev_priv->has_mob) {
++ int cpp = DIV_ROUND_UP(args->bpp, 8);
++
++ switch (cpp) {
++ case 1: /* DRM_FORMAT_C8 */
++ case 2: /* DRM_FORMAT_RGB565 */
++ case 4: /* DRM_FORMAT_XRGB8888 */
++ break;
++ default:
++ /*
++ * Dumb buffers don't allow anything else.
++ * This is tested via IGT's dumb_buffers
++ */
++ return -EINVAL;
++ }
++
++ args->pitch = args->width * cpp;
++ args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
++
++ ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
++ args->size, &args->handle,
++ &vbo);
++ /* drop reference from allocate - handle holds it now */
++ drm_gem_object_put(&vbo->tbo.base);
++ return ret;
++ }
++
++ req->version = drm_vmw_gb_surface_v1;
++ req->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
++ req->quality_level = SVGA3D_MS_QUALITY_NONE;
++ req->buffer_byte_stride = 0;
++ req->must_be_zero = 0;
++ req->base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(flags);
++ req->svga3d_flags_upper_32_bits = SVGA3D_FLAGS_UPPER_32(flags);
++ req->base.format = (uint32_t)format;
++ req->base.drm_surface_flags = drm_vmw_surface_flag_scanout;
++ req->base.drm_surface_flags |= drm_vmw_surface_flag_shareable;
++ req->base.drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
++ req->base.drm_surface_flags |= drm_vmw_surface_flag_coherent;
++ req->base.base_size.width = args->width;
++ req->base.base_size.height = args->height;
++ req->base.base_size.depth = 1;
++ req->base.array_size = 0;
++ req->base.mip_levels = 1;
++ req->base.multisample_count = 0;
++ req->base.buffer_handle = SVGA3D_INVALID_ID;
++ req->base.autogen_filter = SVGA3D_TEX_FILTER_NONE;
++ ret = vmw_gb_surface_define_ext_ioctl(dev, &arg, file_priv);
++ if (ret) {
++ drm_warn(dev, "Unable to create a dumb buffer\n");
++ return ret;
++ }
++
++ args->handle = arg.rep.buffer_handle;
++ args->size = arg.rep.buffer_size;
++ args->pitch = vmw_surface_calculate_pitch(desc, &drm_size);
++
++ ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg.rep.handle,
++ user_surface_converter,
++ &res);
++ if (ret) {
++ drm_err(dev, "Created resource handle doesn't exist!\n");
++ goto err;
++ }
++
++ vbo = res->guest_memory_bo;
++ vbo->is_dumb = true;
++ vbo->dumb_surface = vmw_res_to_srf(res);
++
++err:
++ if (res)
++ vmw_resource_unreference(&res);
++ if (ret)
++ ttm_ref_object_base_unref(tfile, arg.rep.handle);
++
++ return ret;
++}
--- /dev/null
+From f0d17d696dfce77c9abc830e4ac2d677890a2dad Mon Sep 17 00:00:00 2001
+From: Tatsunosuke Tobita <tatsunosuke.tobita@wacom.com>
+Date: Tue, 9 Jul 2024 14:57:28 +0900
+Subject: HID: wacom: Modify pen IDs
+
+From: Tatsunosuke Tobita <tatsunosuke.tobita@wacom.com>
+
+commit f0d17d696dfce77c9abc830e4ac2d677890a2dad upstream.
+
+The pen ID 0x80842 was not the correct ID for the wacom driver to
+handle; it has been corrected to 0x8842.
+Also, 0x4200 is not an ID used on any Wacom device, so it has been
+removed.
+
+Signed-off-by: Tatsunosuke Tobita <tatsunosuke.tobita@wacom.com>
+Signed-off-by: Tatsunosuke Tobita <tatsunosuke.wacom@gmail.com>
+Fixes: bfdc750c4cb2 ("HID: wacom: add three styli to wacom_intuos_get_tool_type")
+Cc: stable@kernel.org #6.2
+Reviewed-by: Ping Cheng <ping.cheng@wacom.com>
+Link: https://patch.msgid.link/20240709055729.17158-1-tatsunosuke.wacom@gmail.com
+Signed-off-by: Benjamin Tissoires <bentiss@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hid/wacom_wac.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -714,13 +714,12 @@ static int wacom_intuos_get_tool_type(in
+ case 0x8e2: /* IntuosHT2 pen */
+ case 0x022:
+ case 0x200: /* Pro Pen 3 */
+- case 0x04200: /* Pro Pen 3 */
+ case 0x10842: /* MobileStudio Pro Pro Pen slim */
+ case 0x14802: /* Intuos4/5 13HD/24HD Classic Pen */
+ case 0x16802: /* Cintiq 13HD Pro Pen */
+ case 0x18802: /* DTH2242 Pen */
+ case 0x10802: /* Intuos4/5 13HD/24HD General Pen */
+- case 0x80842: /* Intuos Pro and Cintiq Pro 3D Pen */
++ case 0x8842: /* Intuos Pro and Cintiq Pro 3D Pen */
+ tool_type = BTN_TOOL_PEN;
+ break;
+
--- /dev/null
+From 2c762be5b798c443612c1bb9b011de4fdaebd1c5 Mon Sep 17 00:00:00 2001
+From: Olivier Langlois <olivier@trillion01.com>
+Date: Mon, 29 Jul 2024 19:03:33 -0400
+Subject: io_uring: keep multishot request NAPI timeout current
+
+From: Olivier Langlois <olivier@trillion01.com>
+
+commit 2c762be5b798c443612c1bb9b011de4fdaebd1c5 upstream.
+
+This refresh statement was present in the original patch:
+https://lore.kernel.org/netdev/20221121191437.996297-2-shr@devkernel.io/
+
+It has been removed with no explanation in v6:
+https://lore.kernel.org/netdev/20230201222254.744422-2-shr@devkernel.io/
+
+It is important to refresh the entry for multishot requests, because if
+no new requests using the same NAPI device are added to the ring, the
+entry becomes stale and is removed silently. The unsuspecting user will
+not know that their ring had busy polling for only 60 seconds before
+being pruned.
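+
+As a rough standalone illustration of the stale-entry problem (the names
+below are hypothetical, not the io_uring internals):
+
+    #define NAPI_TIMEOUT 60 /* seconds, the busy-poll entry timeout */
+
+    struct napi_entry { long last_seen; };
+
+    /* called on every multishot completion; without this refresh the
+     * entry ages out even though the request is still active */
+    static void napi_refresh(struct napi_entry *e, long now)
+    {
+            e->last_seen = now;
+    }
+
+    static int napi_is_stale(const struct napi_entry *e, long now)
+    {
+            return now - e->last_seen > NAPI_TIMEOUT;
+    }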
+
+Signed-off-by: Olivier Langlois <olivier@trillion01.com>
+Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
+Fixes: 8d0c12a80cdeb ("io-uring: add napi busy poll support")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/0fe61a019ec61e5708cd117cb42ed0dab95e1617.1722294646.git.olivier@trillion01.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/poll.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/io_uring/poll.c b/io_uring/poll.c
+index 0a8e02944689..1f63b60e85e7 100644
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -347,6 +347,7 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
+ v &= IO_POLL_REF_MASK;
+ } while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK);
+
++ io_napi_add(req);
+ return IOU_POLL_NO_ACTION;
+ }
+
+--
+2.46.0
+
--- /dev/null
+From efd340bf3d7779a3a8ec954d8ec0fb8a10f24982 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Sat, 27 Jul 2024 12:01:24 +0200
+Subject: mptcp: distinguish rcv vs sent backup flag in requests
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit efd340bf3d7779a3a8ec954d8ec0fb8a10f24982 upstream.
+
+When sending an MP_JOIN + SYN + ACK, it is possible to mark the subflow
+as 'backup' by setting the flag with the same name. Before this patch,
+the backup was set if the other peer set it in its MP_JOIN + SYN
+request.
+
+This is not correct: the backup flag should be set in the MPJ+SYN+ACK
+only if the host asks for it, not by mirroring what was done by the
+other peer. A dedicated bit is therefore required for each direction,
+similar to what is done in the subflow context.
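+
+A minimal sketch of the resulting layout (illustrative only):
+
+    /* one bit per direction, as in the subflow context */
+    struct join_req_flags {
+            unsigned int backup : 1;       /* peer flagged us as backup */
+            unsigned int request_bkup : 1; /* we ask the peer for backup */
+    };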
+
+Fixes: f296234c98a8 ("mptcp: Add handling of incoming MP_JOIN requests")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/options.c | 2 +-
+ net/mptcp/protocol.h | 1 +
+ net/mptcp/subflow.c | 1 +
+ 3 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -909,7 +909,7 @@ bool mptcp_synack_options(const struct r
+ return true;
+ } else if (subflow_req->mp_join) {
+ opts->suboptions = OPTION_MPTCP_MPJ_SYNACK;
+- opts->backup = subflow_req->backup;
++ opts->backup = subflow_req->request_bkup;
+ opts->join_id = subflow_req->local_id;
+ opts->thmac = subflow_req->thmac;
+ opts->nonce = subflow_req->local_nonce;
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -443,6 +443,7 @@ struct mptcp_subflow_request_sock {
+ u16 mp_capable : 1,
+ mp_join : 1,
+ backup : 1,
++ request_bkup : 1,
+ csum_reqd : 1,
+ allow_join_id0 : 1;
+ u8 local_id;
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -2005,6 +2005,7 @@ static void subflow_ulp_clone(const stru
+ new_ctx->fully_established = 1;
+ new_ctx->remote_key_valid = 1;
+ new_ctx->backup = subflow_req->backup;
++ new_ctx->request_bkup = subflow_req->request_bkup;
+ WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id);
+ new_ctx->token = subflow_req->token;
+ new_ctx->thmac = subflow_req->thmac;
--- /dev/null
+From 0a567c2a10033bf04ed618368d179bce6977984b Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Wed, 31 Jul 2024 12:10:14 +0200
+Subject: mptcp: fix bad RCVPRUNED mib accounting
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit 0a567c2a10033bf04ed618368d179bce6977984b upstream.
+
+Since its introduction, the mentioned MIB accounted for the wrong
+event: a wake-up being skipped as not needed on some edge condition,
+instead of an incoming skb being dropped after landing in the (subflow)
+receive queue.
+
+Move the increment in the correct location.
+
+Fixes: ce599c516386 ("mptcp: properly account bulk freed memory")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -350,8 +350,10 @@ static bool __mptcp_move_skb(struct mptc
+ skb_orphan(skb);
+
+ /* try to fetch required memory from subflow */
+- if (!mptcp_rmem_schedule(sk, ssk, skb->truesize))
++ if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) {
++ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
+ goto drop;
++ }
+
+ has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
+
+@@ -844,10 +846,8 @@ void mptcp_data_ready(struct sock *sk, s
+ sk_rbuf = ssk_rbuf;
+
+ /* over limit? can't append more skbs to msk, Also, no need to wake-up*/
+- if (__mptcp_rmem(sk) > sk_rbuf) {
+- MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
++ if (__mptcp_rmem(sk) > sk_rbuf)
+ return;
+- }
+
+ /* Wake-up the reader only for in-sequence data */
+ mptcp_data_lock(sk);
--- /dev/null
+From 68cc924729ffcfe90d0383177192030a9aeb2ee4 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Wed, 31 Jul 2024 12:10:15 +0200
+Subject: mptcp: fix duplicate data handling
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit 68cc924729ffcfe90d0383177192030a9aeb2ee4 upstream.
+
+When a subflow receives and discards duplicate data, the mptcp
+stack assumes that the consumed offset inside the current skb is
+zero.
+
+With multiple subflows receiving data simultaneously, such an assumption
+does not hold true. As a result, the subflow-level copied_seq will be
+incorrectly increased, and later on the same subflow will observe a bad
+mapping, leading to a subflow reset.
+
+Address the issue by taking into account the skb consumed offset in
+mptcp_subflow_discard_data().
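+
+The corrected arithmetic, as a standalone sketch:
+
+    #include <stdio.h>
+    #include <stdint.h>
+
+    int main(void)
+    {
+            uint32_t copied_seq = 1500, skb_seq = 1000, skb_len = 1000;
+            uint32_t limit = 4000, fin = 0;
+            uint32_t offset = copied_seq - skb_seq;  /* 500 bytes consumed */
+            uint32_t avail = skb_len - offset;       /* 500 left to drop   */
+            uint32_t incr = limit >= avail ? avail + fin : limit;
+
+            /* the old code used skb_len here, advancing copied_seq by
+             * 1000 and corrupting the next mapping */
+            printf("incr=%u\n", incr);
+            return 0;
+    }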
+
+Fixes: 04e4cd4f7ca4 ("mptcp: cleanup mptcp_subflow_discard_data()")
+Cc: stable@vger.kernel.org
+Link: https://github.com/multipath-tcp/mptcp_net-next/issues/501
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/subflow.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1230,14 +1230,22 @@ static void mptcp_subflow_discard_data(s
+ {
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
+- u32 incr;
++ struct tcp_sock *tp = tcp_sk(ssk);
++ u32 offset, incr, avail_len;
+
+- incr = limit >= skb->len ? skb->len + fin : limit;
++ offset = tp->copied_seq - TCP_SKB_CB(skb)->seq;
++ if (WARN_ON_ONCE(offset > skb->len))
++ goto out;
+
+- pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
+- subflow->map_subflow_seq);
++ avail_len = skb->len - offset;
++ incr = limit >= avail_len ? avail_len + fin : limit;
++
++ pr_debug("discarding=%d len=%d offset=%d seq=%d", incr, skb->len,
++ offset, subflow->map_subflow_seq);
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
+ tcp_sk(ssk)->copied_seq += incr;
++
++out:
+ if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
+ sk_eat_skb(ssk, skb);
+ if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
--- /dev/null
+From 4b317e0eb287bd30a1b329513531157c25e8b692 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Sat, 27 Jul 2024 11:04:00 +0200
+Subject: mptcp: fix NL PM announced address accounting
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit 4b317e0eb287bd30a1b329513531157c25e8b692 upstream.
+
+Currently the per-connection announced address counter is never
+decreased. As a consequence, after connection establishment, if
+the NL PM deletes an endpoint and adds a new/different one, no
+additional subflow is created for the new endpoint even if the
+current limits allow that.
+
+Address the issue by properly updating the signaled address counter
+every time the NL PM removes such addresses.
+
+Fixes: 01cacb00b35c ("mptcp: add netlink-based PM")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -1401,6 +1401,7 @@ static bool mptcp_pm_remove_anno_addr(st
+ ret = remove_anno_list_by_saddr(msk, addr);
+ if (ret || force) {
+ spin_lock_bh(&msk->pm.lock);
++ msk->pm.add_addr_signaled -= ret;
+ mptcp_pm_remove_addr(msk, &list);
+ spin_unlock_bh(&msk->pm.lock);
+ }
+@@ -1565,17 +1566,18 @@ static void mptcp_pm_remove_addrs_and_su
+ struct mptcp_pm_addr_entry *entry;
+
+ list_for_each_entry(entry, rm_list, list) {
+- if (lookup_subflow_by_saddr(&msk->conn_list, &entry->addr) &&
+- slist.nr < MPTCP_RM_IDS_MAX)
++ if (slist.nr < MPTCP_RM_IDS_MAX &&
++ lookup_subflow_by_saddr(&msk->conn_list, &entry->addr))
+ slist.ids[slist.nr++] = entry->addr.id;
+
+- if (remove_anno_list_by_saddr(msk, &entry->addr) &&
+- alist.nr < MPTCP_RM_IDS_MAX)
++ if (alist.nr < MPTCP_RM_IDS_MAX &&
++ remove_anno_list_by_saddr(msk, &entry->addr))
+ alist.ids[alist.nr++] = entry->addr.id;
+ }
+
+ if (alist.nr) {
+ spin_lock_bh(&msk->pm.lock);
++ msk->pm.add_addr_signaled -= alist.nr;
+ mptcp_pm_remove_addr(msk, &alist);
+ spin_unlock_bh(&msk->pm.lock);
+ }
--- /dev/null
+From 167b93258d1e2230ee3e8a97669b4db4cc9e90aa Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Sat, 27 Jul 2024 11:03:59 +0200
+Subject: mptcp: fix user-space PM announced address accounting
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit 167b93258d1e2230ee3e8a97669b4db4cc9e90aa upstream.
+
+Currently the per-connection announced address counter is never
+decreased. When the user-space PM is in use, this just affects
+the information exposed via diag/sockopt, but it could still fool
+the PM into wrong decisions.
+
+Add the missing accounting for the user-space PM's sake.
+
+Fixes: 8b1c94da1e48 ("mptcp: only send RM_ADDR in nl_cmd_remove")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -1534,16 +1534,25 @@ void mptcp_pm_remove_addrs(struct mptcp_
+ {
+ struct mptcp_rm_list alist = { .nr = 0 };
+ struct mptcp_pm_addr_entry *entry;
++ int anno_nr = 0;
+
+ list_for_each_entry(entry, rm_list, list) {
+- if ((remove_anno_list_by_saddr(msk, &entry->addr) ||
+- lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) &&
+- alist.nr < MPTCP_RM_IDS_MAX)
+- alist.ids[alist.nr++] = entry->addr.id;
++ if (alist.nr >= MPTCP_RM_IDS_MAX)
++ break;
++
++ /* only delete if either announced or matching a subflow */
++ if (remove_anno_list_by_saddr(msk, &entry->addr))
++ anno_nr++;
++ else if (!lookup_subflow_by_saddr(&msk->conn_list,
++ &entry->addr))
++ continue;
++
++ alist.ids[alist.nr++] = entry->addr.id;
+ }
+
+ if (alist.nr) {
+ spin_lock_bh(&msk->pm.lock);
++ msk->pm.add_addr_signaled -= anno_nr;
+ mptcp_pm_remove_addr(msk, &alist);
+ spin_unlock_bh(&msk->pm.lock);
+ }
--- /dev/null
+From 4dde0d72ccec500c60c798e036b852e013d6e124 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Sat, 27 Jul 2024 12:01:26 +0200
+Subject: mptcp: mib: count MPJ with backup flag
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 4dde0d72ccec500c60c798e036b852e013d6e124 upstream.
+
+Without such counters, it is difficult to debug issues where MP_JOINs
+are missing the backup flag on production servers.
+
+This is not strictly a fix, but it makes it easier to validate the
+following patches without taking packet traces, querying ongoing
+connections over Netlink with admin permissions, or guessing from the
+behaviour of the packet scheduler. Also, the modification is
+self-contained, isolated, and well controlled, and the increments are
+done just after others that have been there from the beginning. It
+looks safe, then, and helpful to backport.
+
+Fixes: 4596a2c1b7f5 ("mptcp: allow creating non-backup subflows")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/mib.c | 2 ++
+ net/mptcp/mib.h | 2 ++
+ net/mptcp/subflow.c | 6 ++++++
+ 3 files changed, 10 insertions(+)
+
+--- a/net/mptcp/mib.c
++++ b/net/mptcp/mib.c
+@@ -19,7 +19,9 @@ static const struct snmp_mib mptcp_snmp_
+ SNMP_MIB_ITEM("MPTCPRetrans", MPTCP_MIB_RETRANSSEGS),
+ SNMP_MIB_ITEM("MPJoinNoTokenFound", MPTCP_MIB_JOINNOTOKEN),
+ SNMP_MIB_ITEM("MPJoinSynRx", MPTCP_MIB_JOINSYNRX),
++ SNMP_MIB_ITEM("MPJoinSynBackupRx", MPTCP_MIB_JOINSYNBACKUPRX),
+ SNMP_MIB_ITEM("MPJoinSynAckRx", MPTCP_MIB_JOINSYNACKRX),
++ SNMP_MIB_ITEM("MPJoinSynAckBackupRx", MPTCP_MIB_JOINSYNACKBACKUPRX),
+ SNMP_MIB_ITEM("MPJoinSynAckHMacFailure", MPTCP_MIB_JOINSYNACKMAC),
+ SNMP_MIB_ITEM("MPJoinAckRx", MPTCP_MIB_JOINACKRX),
+ SNMP_MIB_ITEM("MPJoinAckHMacFailure", MPTCP_MIB_JOINACKMAC),
+--- a/net/mptcp/mib.h
++++ b/net/mptcp/mib.h
+@@ -14,7 +14,9 @@ enum linux_mptcp_mib_field {
+ MPTCP_MIB_RETRANSSEGS, /* Segments retransmitted at the MPTCP-level */
+ MPTCP_MIB_JOINNOTOKEN, /* Received MP_JOIN but the token was not found */
+ MPTCP_MIB_JOINSYNRX, /* Received a SYN + MP_JOIN */
++ MPTCP_MIB_JOINSYNBACKUPRX, /* Received a SYN + MP_JOIN + backup flag */
+ MPTCP_MIB_JOINSYNACKRX, /* Received a SYN/ACK + MP_JOIN */
++ MPTCP_MIB_JOINSYNACKBACKUPRX, /* Received a SYN/ACK + MP_JOIN + backup flag */
+ MPTCP_MIB_JOINSYNACKMAC, /* HMAC was wrong on SYN/ACK + MP_JOIN */
+ MPTCP_MIB_JOINACKRX, /* Received an ACK + MP_JOIN */
+ MPTCP_MIB_JOINACKMAC, /* HMAC was wrong on ACK + MP_JOIN */
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -168,6 +168,9 @@ static int subflow_check_req(struct requ
+ return 0;
+ } else if (opt_mp_join) {
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
++
++ if (mp_opt.backup)
++ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNBACKUPRX);
+ }
+
+ if (opt_mp_capable && listener->request_mptcp) {
+@@ -577,6 +580,9 @@ static void subflow_finish_connect(struc
+ subflow->mp_join = 1;
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
+
++ if (subflow->backup)
++ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX);
++
+ if (subflow_use_different_dport(msk, sk)) {
+ pr_debug("synack inet_dport=%d %d",
+ ntohs(inet_sk(sk)->inet_dport),
--- /dev/null
+From 6834097fc38c5416701c793da94558cea49c0a1f Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Sat, 27 Jul 2024 12:01:28 +0200
+Subject: mptcp: pm: fix backup support in signal endpoints
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 6834097fc38c5416701c793da94558cea49c0a1f upstream.
+
+There was support for signal endpoints, but only when the endpoint's
+flag was changed during a connection. If an endpoint with both the
+signal and backup flags was already present, the MP_JOIN reply did not
+contain the backup flag as expected.
+
+It is confusing to have this inconsistent behaviour. On the other hand,
+the infrastructure to set the backup flag in the SYN + ACK + MP_JOIN was
+already there; it was just never set before. Now when requesting the
+local ID from the path-manager, the backup status is also requested.
+
+Note that when the userspace PM is used, the backup flag can be set if
+the local address was already used before with a backup flag, e.g. if
+the address was announced with the 'backup' flag, or a subflow was
+created with the 'backup' flag.
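+
+A simplified sketch of the lookup performed when the local ID is
+requested (illustrative; FLAG_BACKUP stands in for
+MPTCP_PM_ADDR_FLAG_BACKUP):
+
+    #define FLAG_BACKUP 0x4
+
+    struct endpoint { int addr; unsigned int flags; };
+
+    static int endpoint_is_backup(const struct endpoint *tbl, int n, int addr)
+    {
+            for (int i = 0; i < n; i++)
+                    if (tbl[i].addr == addr)
+                            return !!(tbl[i].flags & FLAG_BACKUP);
+            return 0;
+    }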
+
+Fixes: 4596a2c1b7f5 ("mptcp: allow creating non-backup subflows")
+Cc: stable@vger.kernel.org
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/507
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm.c | 12 ++++++++++++
+ net/mptcp/pm_netlink.c | 18 ++++++++++++++++++
+ net/mptcp/pm_userspace.c | 18 ++++++++++++++++++
+ net/mptcp/protocol.h | 3 +++
+ net/mptcp/subflow.c | 3 +++
+ 5 files changed, 54 insertions(+)
+
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -426,6 +426,18 @@ int mptcp_pm_get_local_id(struct mptcp_s
+ return mptcp_pm_nl_get_local_id(msk, &skc_local);
+ }
+
++bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc)
++{
++ struct mptcp_addr_info skc_local;
++
++ mptcp_local_address((struct sock_common *)skc, &skc_local);
++
++ if (mptcp_pm_is_userspace(msk))
++ return mptcp_userspace_pm_is_backup(msk, &skc_local);
++
++ return mptcp_pm_nl_is_backup(msk, &skc_local);
++}
++
+ int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
+ u8 *flags, int *ifindex)
+ {
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -1102,6 +1102,24 @@ int mptcp_pm_nl_get_local_id(struct mptc
+ return ret;
+ }
+
++bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc)
++{
++ struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
++ struct mptcp_pm_addr_entry *entry;
++ bool backup = false;
++
++ rcu_read_lock();
++ list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
++ if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) {
++ backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
++ break;
++ }
++ }
++ rcu_read_unlock();
++
++ return backup;
++}
++
+ #define MPTCP_PM_CMD_GRP_OFFSET 0
+ #define MPTCP_PM_EV_GRP_OFFSET 1
+
+--- a/net/mptcp/pm_userspace.c
++++ b/net/mptcp/pm_userspace.c
+@@ -165,6 +165,24 @@ int mptcp_userspace_pm_get_local_id(stru
+ return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry, true);
+ }
+
++bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk,
++ struct mptcp_addr_info *skc)
++{
++ struct mptcp_pm_addr_entry *entry;
++ bool backup = false;
++
++ spin_lock_bh(&msk->pm.lock);
++ list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
++ if (mptcp_addresses_equal(&entry->addr, skc, false)) {
++ backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
++ break;
++ }
++ }
++ spin_unlock_bh(&msk->pm.lock);
++
++ return backup;
++}
++
+ int mptcp_pm_nl_announce_doit(struct sk_buff *skb, struct genl_info *info)
+ {
+ struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -1104,6 +1104,9 @@ bool mptcp_pm_rm_addr_signal(struct mptc
+ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
+ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
++bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc);
++bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
++bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+ int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb);
+ int mptcp_pm_nl_dump_addr(struct sk_buff *msg,
+ struct netlink_callback *cb);
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -100,6 +100,7 @@ static struct mptcp_sock *subflow_token_
+ return NULL;
+ }
+ subflow_req->local_id = local_id;
++ subflow_req->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)req);
+
+ return msk;
+ }
+@@ -620,6 +621,8 @@ static int subflow_chk_local_id(struct s
+ return err;
+
+ subflow_set_local_id(subflow, err);
++ subflow->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)sk);
++
+ return 0;
+ }
+
--- /dev/null
+From 4258b94831bb7ff28ab80e3c8d94db37db930728 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Sat, 27 Jul 2024 12:01:25 +0200
+Subject: mptcp: pm: only set request_bkup flag when sending MP_PRIO
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 4258b94831bb7ff28ab80e3c8d94db37db930728 upstream.
+
+The 'backup' flag from mptcp_subflow_context structure is supposed to be
+set only when the other peer flagged a subflow as backup, not the
+opposite.
+
+Fixes: 067065422fcd ("mptcp: add the outgoing MP_PRIO support")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -471,7 +471,6 @@ static void __mptcp_pm_send_ack(struct m
+ slow = lock_sock_fast(ssk);
+ if (prio) {
+ subflow->send_mp_prio = 1;
+- subflow->backup = backup;
+ subflow->request_bkup = backup;
+ }
+
--- /dev/null
+From b6a66e521a2032f7fcba2af5a9bcbaeaa19b7ca3 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Sat, 27 Jul 2024 12:01:23 +0200
+Subject: mptcp: sched: check both directions for backup
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit b6a66e521a2032f7fcba2af5a9bcbaeaa19b7ca3 upstream.
+
+The 'mptcp_subflow_context' structure has two items related to the
+backup flags:
+
+ - 'backup': the subflow has been marked as backup by the other peer
+
+ - 'request_bkup': the backup flag has been set by the host
+
+Before this patch, the scheduler was only looking at the 'backup' flag.
+That can make sense in some cases, but it looks like that's not what we
+wanted for general use, because either the path-manager was setting
+both of them when sending an MP_PRIO, or the receiver was duplicating
+the 'backup' flag in the subflow request.
+
+Note that the use of these two flags in the path-manager is going to be
+fixed in the next commits, but this change here is needed to avoid
+modifying the behaviour.
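+
+The scheduler-side predicate, sketched:
+
+    /* a subflow is low priority if flagged as backup in either direction */
+    static inline bool subflow_is_backup(const struct mptcp_subflow_context *sf)
+    {
+            return sf->backup || sf->request_bkup;
+    }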
+
+Fixes: f296234c98a8 ("mptcp: Add handling of incoming MP_JOIN requests")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/trace/events/mptcp.h | 2 +-
+ net/mptcp/protocol.c | 10 ++++++----
+ 2 files changed, 7 insertions(+), 5 deletions(-)
+
+--- a/include/trace/events/mptcp.h
++++ b/include/trace/events/mptcp.h
+@@ -34,7 +34,7 @@ TRACE_EVENT(mptcp_subflow_get_send,
+ struct sock *ssk;
+
+ __entry->active = mptcp_subflow_active(subflow);
+- __entry->backup = subflow->backup;
++ __entry->backup = subflow->backup || subflow->request_bkup;
+
+ if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock))
+ __entry->free = sk_stream_memory_free(subflow->tcp_sock);
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1422,13 +1422,15 @@ struct sock *mptcp_subflow_get_send(stru
+ }
+
+ mptcp_for_each_subflow(msk, subflow) {
++ bool backup = subflow->backup || subflow->request_bkup;
++
+ trace_mptcp_subflow_get_send(subflow);
+ ssk = mptcp_subflow_tcp_sock(subflow);
+ if (!mptcp_subflow_active(subflow))
+ continue;
+
+ tout = max(tout, mptcp_timeout_from_subflow(subflow));
+- nr_active += !subflow->backup;
++ nr_active += !backup;
+ pace = subflow->avg_pacing_rate;
+ if (unlikely(!pace)) {
+ /* init pacing rate from socket */
+@@ -1439,9 +1441,9 @@ struct sock *mptcp_subflow_get_send(stru
+ }
+
+ linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
+- if (linger_time < send_info[subflow->backup].linger_time) {
+- send_info[subflow->backup].ssk = ssk;
+- send_info[subflow->backup].linger_time = linger_time;
++ if (linger_time < send_info[backup].linger_time) {
++ send_info[backup].ssk = ssk;
++ send_info[backup].linger_time = linger_time;
+ }
+ }
+ __mptcp_set_timeout(sk, tout);
--- /dev/null
+From 08f3a5c38087d1569e982a121aad1e6acbf145ce Mon Sep 17 00:00:00 2001
+From: Ma Ke <make24@iscas.ac.cn>
+Date: Thu, 25 Jul 2024 10:29:42 +0800
+Subject: net: usb: sr9700: fix uninitialized variable use in sr_mdio_read
+
+From: Ma Ke <make24@iscas.ac.cn>
+
+commit 08f3a5c38087d1569e982a121aad1e6acbf145ce upstream.
+
+An error could occur because the variable res is not updated if the
+call to sr_share_read_word returns an error: in that case an error code
+was returned while res stayed uninitialized. The same issue also
+applies to sr_read_reg.
+
+This can be avoided by checking the return value of sr_share_read_word
+and sr_read_reg, and propagating the error if the read operation failed.
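+
+The general pattern, as a standalone sketch (read_word is a hypothetical
+stand-in for the register accessors):
+
+    /* returns <0 on failure; fills *res only on success */
+    static int read_word(int loc, int *res)
+    {
+            if (loc < 0)
+                    return -1;
+            *res = 0x42;
+            return 0;
+    }
+
+    static int mdio_read(int loc)
+    {
+            int res;
+            int err = read_word(loc, &res);
+
+            if (err < 0)
+                    return err;  /* never consume the uninitialized 'res' */
+            return res;
+    }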
+
+Found by code review.
+
+Cc: stable@vger.kernel.org
+Fixes: c9b37458e956 ("USB2NET : SR9700 : One chip USB 1.1 USB2NET SR9700Device Driver Support")
+Signed-off-by: Ma Ke <make24@iscas.ac.cn>
+Reviewed-by: Shigeru Yoshida <syoshida@redhat.com>
+Reviewed-by: Hariprasad Kelam <hkelam@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/sr9700.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/usb/sr9700.c
++++ b/drivers/net/usb/sr9700.c
+@@ -179,6 +179,7 @@ static int sr_mdio_read(struct net_devic
+ struct usbnet *dev = netdev_priv(netdev);
+ __le16 res;
+ int rc = 0;
++ int err;
+
+ if (phy_id) {
+ netdev_dbg(netdev, "Only internal phy supported\n");
+@@ -189,11 +190,17 @@ static int sr_mdio_read(struct net_devic
+ if (loc == MII_BMSR) {
+ u8 value;
+
+- sr_read_reg(dev, SR_NSR, &value);
++ err = sr_read_reg(dev, SR_NSR, &value);
++ if (err < 0)
++ return err;
++
+ if (value & NSR_LINKST)
+ rc = 1;
+ }
+- sr_share_read_word(dev, 1, loc, &res);
++ err = sr_share_read_word(dev, 1, loc, &res);
++ if (err < 0)
++ return err;
++
+ if (rc == 1)
+ res = le16_to_cpu(res) | BMSR_LSTATUS;
+ else
--- /dev/null
+From c4d6a347ba7babdf9d90a0eb24048c266cae0532 Mon Sep 17 00:00:00 2001
+From: Herve Codina <herve.codina@bootlin.com>
+Date: Tue, 30 Jul 2024 08:31:04 +0200
+Subject: net: wan: fsl_qmc_hdlc: Convert carrier_lock spinlock to a mutex
+
+From: Herve Codina <herve.codina@bootlin.com>
+
+commit c4d6a347ba7babdf9d90a0eb24048c266cae0532 upstream.
+
+The carrier_lock spinlock protects the carrier detection. While it is
+held, framer_get_status() is called, which in turn takes a mutex.
+This is not correct and can lead to a deadlock.
+
+A run with PROVE_LOCKING enabled detected the issue:
+ [ BUG: Invalid wait context ]
+ ...
+ c204ddbc (&framer->mutex){+.+.}-{3:3}, at: framer_get_status+0x40/0x78
+ other info that might help us debug this:
+ context-{4:4}
+ 2 locks held by ifconfig/146:
+ #0: c0926a38 (rtnl_mutex){+.+.}-{3:3}, at: devinet_ioctl+0x12c/0x664
+ #1: c2006a40 (&qmc_hdlc->carrier_lock){....}-{2:2}, at: qmc_hdlc_framer_set_carrier+0x30/0x98
+
+Avoid the spinlock usage and convert carrier_lock to a mutex.
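+
+The invalid nesting, sketched as kernel-style pseudocode (not a runnable
+program):
+
+    spin_lock_irqsave(&qmc_hdlc->carrier_lock, flags);  /* atomic context */
+    framer_get_status(qmc_hdlc->framer, &status);       /* may sleep: it  */
+                                                        /* takes a mutex  */
+    spin_unlock_irqrestore(&qmc_hdlc->carrier_lock, flags);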
+
+Fixes: 54762918ca85 ("net: wan: fsl_qmc_hdlc: Add framer support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Herve Codina <herve.codina@bootlin.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20240730063104.179553-1-herve.codina@bootlin.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wan/fsl_qmc_hdlc.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/wan/fsl_qmc_hdlc.c b/drivers/net/wan/fsl_qmc_hdlc.c
+index c5e7ca793c43..64b4bfa6fea7 100644
+--- a/drivers/net/wan/fsl_qmc_hdlc.c
++++ b/drivers/net/wan/fsl_qmc_hdlc.c
+@@ -18,6 +18,7 @@
+ #include <linux/hdlc.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
+@@ -37,7 +38,7 @@ struct qmc_hdlc {
+ struct qmc_chan *qmc_chan;
+ struct net_device *netdev;
+ struct framer *framer;
+- spinlock_t carrier_lock; /* Protect carrier detection */
++ struct mutex carrier_lock; /* Protect carrier detection */
+ struct notifier_block nb;
+ bool is_crc32;
+ spinlock_t tx_lock; /* Protect tx descriptors */
+@@ -60,7 +61,7 @@ static int qmc_hdlc_framer_set_carrier(struct qmc_hdlc *qmc_hdlc)
+ if (!qmc_hdlc->framer)
+ return 0;
+
+- guard(spinlock_irqsave)(&qmc_hdlc->carrier_lock);
++ guard(mutex)(&qmc_hdlc->carrier_lock);
+
+ ret = framer_get_status(qmc_hdlc->framer, &framer_status);
+ if (ret) {
+@@ -706,7 +707,7 @@ static int qmc_hdlc_probe(struct platform_device *pdev)
+
+ qmc_hdlc->dev = dev;
+ spin_lock_init(&qmc_hdlc->tx_lock);
+- spin_lock_init(&qmc_hdlc->carrier_lock);
++ mutex_init(&qmc_hdlc->carrier_lock);
+
+ qmc_hdlc->qmc_chan = devm_qmc_chan_get_bychild(dev, dev->of_node);
+ if (IS_ERR(qmc_hdlc->qmc_chan))
+--
+2.46.0
+
--- /dev/null
+From e549360069b4a57e111b8222fc072f3c7c1688ab Mon Sep 17 00:00:00 2001
+From: Herve Codina <herve.codina@bootlin.com>
+Date: Tue, 30 Jul 2024 08:31:33 +0200
+Subject: net: wan: fsl_qmc_hdlc: Discard received CRC
+
+From: Herve Codina <herve.codina@bootlin.com>
+
+commit e549360069b4a57e111b8222fc072f3c7c1688ab upstream.
+
+Frames received from the QMC contain the CRC.
+Upper layers don't need this CRC, and tcpdump reported trailing junk
+data due to its presence.
+
+As other HDLC drivers do, simply discard this CRC.
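+
+The trim, as a standalone sketch:
+
+    #include <stddef.h>
+
+    /* returns the adjusted payload length, or 0 for a runt frame */
+    static size_t strip_hdlc_crc(size_t length, int is_crc32)
+    {
+            size_t crc_size = is_crc32 ? 4 : 2;
+
+            if (length < crc_size)
+                    return 0;  /* counted as an rx_length_error */
+            return length - crc_size;
+    }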
+
+Fixes: d0f2258e79fd ("net: wan: Add support for QMC HDLC")
+Cc: stable@vger.kernel.org
+Signed-off-by: Herve Codina <herve.codina@bootlin.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20240730063133.179598-1-herve.codina@bootlin.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wan/fsl_qmc_hdlc.c | 26 +++++++++++++++++++-------
+ 1 file changed, 19 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/wan/fsl_qmc_hdlc.c b/drivers/net/wan/fsl_qmc_hdlc.c
+index 64b4bfa6fea7..8fcfbde31a1c 100644
+--- a/drivers/net/wan/fsl_qmc_hdlc.c
++++ b/drivers/net/wan/fsl_qmc_hdlc.c
+@@ -250,6 +250,7 @@ static void qmc_hcld_recv_complete(void *context, size_t length, unsigned int fl
+ struct qmc_hdlc_desc *desc = context;
+ struct net_device *netdev;
+ struct qmc_hdlc *qmc_hdlc;
++ size_t crc_size;
+ int ret;
+
+ netdev = desc->netdev;
+@@ -268,15 +269,26 @@ static void qmc_hcld_recv_complete(void *context, size_t length, unsigned int fl
+ if (flags & QMC_RX_FLAG_HDLC_CRC) /* CRC error */
+ netdev->stats.rx_crc_errors++;
+ kfree_skb(desc->skb);
+- } else {
+- netdev->stats.rx_packets++;
+- netdev->stats.rx_bytes += length;
+-
+- skb_put(desc->skb, length);
+- desc->skb->protocol = hdlc_type_trans(desc->skb, netdev);
+- netif_rx(desc->skb);
++ goto re_queue;
+ }
+
++ /* Discard the CRC */
++ crc_size = qmc_hdlc->is_crc32 ? 4 : 2;
++ if (length < crc_size) {
++ netdev->stats.rx_length_errors++;
++ kfree_skb(desc->skb);
++ goto re_queue;
++ }
++ length -= crc_size;
++
++ netdev->stats.rx_packets++;
++ netdev->stats.rx_bytes += length;
++
++ skb_put(desc->skb, length);
++ desc->skb->protocol = hdlc_type_trans(desc->skb, netdev);
++ netif_rx(desc->skb);
++
++re_queue:
+ /* Re-queue a transfer using the same descriptor */
+ ret = qmc_hdlc_recv_queue(qmc_hdlc, desc, desc->dma_size);
+ if (ret) {
+--
+2.46.0
+
--- /dev/null
+From 9c685f61722d30a22d55bb8a48f7a48bb2e19bcc Mon Sep 17 00:00:00 2001
+From: Dave Airlie <airlied@redhat.com>
+Date: Wed, 15 May 2024 12:55:41 +1000
+Subject: nouveau: set placement to original placement on uvmm validate.
+
+From: Dave Airlie <airlied@redhat.com>
+
+commit 9c685f61722d30a22d55bb8a48f7a48bb2e19bcc upstream.
+
+When a buffer is evicted due to memory pressure or a TTM evict-all,
+its placement is set to the eviction domain, which means the buffer
+never gets revalidated to the correct domain on the next exec.
+
+I think it should be fine to use the initial domain from the object
+creation; at least with VM_BIND this won't change after init, so this
+should be the correct answer.
+
+Fixes: b88baab82871 ("drm/nouveau: implement new VM_BIND uAPI")
+Cc: Danilo Krummrich <dakr@redhat.com>
+Cc: <stable@vger.kernel.org> # v6.6
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240515025542.2156774-1-airlied@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_uvmm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+@@ -1803,6 +1803,7 @@ nouveau_uvmm_bo_validate(struct drm_gpuv
+ {
+ struct nouveau_bo *nvbo = nouveau_gem_object(vm_bo->obj);
+
++ nouveau_bo_placement_set(nvbo, nvbo->valid_domains, 0);
+ return nouveau_bo_validate(nvbo, true, false);
+ }
+
--- /dev/null
+From 5560a612c20d3daacbf5da7913deefa5c31742f4 Mon Sep 17 00:00:00 2001
+From: Blazej Kucman <blazej.kucman@intel.com>
+Date: Mon, 22 Jul 2024 16:14:40 +0200
+Subject: PCI: pciehp: Retain Power Indicator bits for userspace indicators
+
+From: Blazej Kucman <blazej.kucman@intel.com>
+
+commit 5560a612c20d3daacbf5da7913deefa5c31742f4 upstream.
+
+The sysfs "attention" file normally controls the Slot Control Attention
+Indicator with 0 (off), 1 (on), 2 (blink) settings.
+
+576243b3f9ea ("PCI: pciehp: Allow exclusive userspace control of
+indicators") added pciehp_set_raw_indicator_status() to allow userspace to
+directly control all four bits in both the Attention Indicator and the
+Power Indicator fields via the "attention" file.
+
+This is used on Intel VMD bridges so utilities like "ledmon" can use sysfs
+"attention" to control up to 16 indicators for NVMe device RAID status.
+
+abaaac4845a0 ("PCI: hotplug: Use FIELD_GET/PREP()") broke this by masking
+the sysfs data with PCI_EXP_SLTCTL_AIC, which discards the upper two bits
+intended for the Power Indicator Control field (PCI_EXP_SLTCTL_PIC).
+
+For NVMe devices behind an Intel VMD, ledmon settings that use the
+PCI_EXP_SLTCTL_PIC bits, i.e., ATTENTION_REBUILD (0x5), ATTENTION_LOCATE
+(0x7), ATTENTION_FAILURE (0xD), ATTENTION_OFF (0xF), no longer worked
+correctly.
+
+Mask with PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC to retain both the
+Attention Indicator and the Power Indicator bits.
+
+Fixes: abaaac4845a0 ("PCI: hotplug: Use FIELD_GET/PREP()")
+Link: https://lore.kernel.org/r/20240722141440.7210-1-blazej.kucman@intel.com
+Signed-off-by: Blazej Kucman <blazej.kucman@intel.com>
+[bhelgaas: commit log]
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Cc: stable@vger.kernel.org # v6.7+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/hotplug/pciehp_hpc.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index 061f01f60db4..736ad8baa2a5 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -485,7 +485,9 @@ int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+
+ pci_config_pm_runtime_get(pdev);
+- pcie_write_cmd_nowait(ctrl, FIELD_PREP(PCI_EXP_SLTCTL_AIC, status),
++
++ /* Attention and Power Indicator Control bits are supported */
++ pcie_write_cmd_nowait(ctrl, FIELD_PREP(PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC, status),
+ PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
+ pci_config_pm_runtime_put(pdev);
+ return 0;
+--
+2.46.0
+
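+The breakage is pure bit arithmetic: FIELD_PREP(mask, val) shifts val
+up to the mask's lowest set bit and then ANDs with the mask, so a mask
+covering only the two AIC bits truncates a 4-bit ledmon value. A
+userspace re-implementation of that shift-and-mask behaviour, for
+illustration only (field_prep() below is a stand-in, not the kernel
+macro):
+
+  #include <stdio.h>
+
+  #define SLTCTL_AIC 0x00c0u  /* Attention Indicator Control, bits 7:6 */
+  #define SLTCTL_PIC 0x0300u  /* Power Indicator Control, bits 9:8 */
+
+  /* Same shift-and-mask behaviour as the kernel's FIELD_PREP(). */
+  static unsigned int field_prep(unsigned int mask, unsigned int val)
+  {
+          return (val << __builtin_ctz(mask)) & mask;
+  }
+
+  int main(void)
+  {
+          unsigned int status = 0x5;  /* e.g. ledmon ATTENTION_REBUILD */
+
+          /* Broken: the two upper bits are masked away (prints 0x0040). */
+          printf("AIC only:  0x%04x\n", field_prep(SLTCTL_AIC, status));
+          /* Fixed: all four indicator bits survive (prints 0x0140). */
+          printf("AIC | PIC: 0x%04x\n",
+                 field_prep(SLTCTL_AIC | SLTCTL_PIC, status));
+          return 0;
+  }
+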
--- /dev/null
+From df615907f1bf907260af01ccb904d0e9304b5278 Mon Sep 17 00:00:00 2001
+From: Patryk Duda <patrykd@google.com>
+Date: Tue, 30 Jul 2024 10:44:25 +0000
+Subject: platform/chrome: cros_ec_proto: Lock device when updating MKBP version
+
+From: Patryk Duda <patrykd@google.com>
+
+commit df615907f1bf907260af01ccb904d0e9304b5278 upstream.
+
+The cros_ec_get_host_command_version_mask() function requires that
+the caller hold the ec_dev->lock mutex. This requirement was not
+met, and as a result it was possible for two commands to be sent to
+the device at the same time.
+
+The problem was observed while using the UART backend, which doesn't
+take any additional locks, unlike the SPI backend, which locks the
+controller until the response is received.
+
+Fixes: f74c7557ed0d ("platform/chrome: cros_ec_proto: Update version on GET_NEXT_EVENT failure")
+Cc: stable@vger.kernel.org
+Signed-off-by: Patryk Duda <patrykd@google.com>
+Link: https://lore.kernel.org/r/20240730104425.607083-1-patrykd@google.com
+Signed-off-by: Tzung-Bi Shih <tzungbi@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/platform/chrome/cros_ec_proto.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/platform/chrome/cros_ec_proto.c
++++ b/drivers/platform/chrome/cros_ec_proto.c
+@@ -805,9 +805,11 @@ int cros_ec_get_next_event(struct cros_e
+ if (ret == -ENOPROTOOPT) {
+ dev_dbg(ec_dev->dev,
+ "GET_NEXT_EVENT returned invalid version error.\n");
++ mutex_lock(&ec_dev->lock);
+ ret = cros_ec_get_host_command_version_mask(ec_dev,
+ EC_CMD_GET_NEXT_EVENT,
+ &ver_mask);
++ mutex_unlock(&ec_dev->lock);
+ if (ret < 0 || ver_mask == 0)
+ /*
+ * Do not change the MKBP supported version if we can't
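+
+A common way to make such "caller must hold the lock" contracts
+self-enforcing in kernel code is a lockdep assertion inside the helper,
+so an unlocked caller trips a warning under testing instead of racing
+in the field. A hedged sketch of that idiom (not part of this patch;
+only the ec_dev->lock name comes from the driver):
+
+  /* Illustrative kernel-style helper enforcing its locking contract. */
+  static int query_version_mask_locked(struct cros_ec_device *ec_dev,
+                                       u16 cmd, u32 *mask)
+  {
+          lockdep_assert_held(&ec_dev->lock); /* caller holds ec_dev->lock */
+          /* ... send the command and fill *mask ... */
+          return 0;
+  }
+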
--- /dev/null
+From 8aa37bde1a7b645816cda8b80df4753ecf172bf1 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Thu, 1 Aug 2024 15:22:22 -0400
+Subject: protect the fetch of ->fd[fd] in do_dup2() from mispredictions
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 8aa37bde1a7b645816cda8b80df4753ecf172bf1 upstream.
+
+Both callers have verified that fd is not greater than ->max_fds;
+however, misprediction might end up with
+	tofree = fdt->fd[fd];
+being speculatively executed. That's wrong for the same reasons
+it's wrong in close_fd()/file_close_fd_locked(); the same solution
+applies: array_index_nospec(fd, fdt->max_fds) can differ from fd
+only in the case of speculative execution on a mispredicted path.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/file.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -1248,6 +1248,7 @@ __releases(&files->file_lock)
+ * tables and this condition does not arise without those.
+ */
+ fdt = files_fdtable(files);
++ fd = array_index_nospec(fd, fdt->max_fds);
+ tofree = fdt->fd[fd];
+ if (!tofree && fd_is_open(fd, fdt))
+ goto Ebusy;
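+
+array_index_nospec(index, size) clamps the index to zero on the
+out-of-range path that is only reachable speculatively, without a
+branch the CPU could mispredict. A userspace rendering of the kernel's
+generic fallback mask, for illustration (it assumes index and size are
+below LONG_MAX, and arithmetic right shift of negative values, as the
+kernel does):
+
+  #include <stdio.h>
+
+  /* All-ones when index < size, zero otherwise, branchlessly. */
+  static unsigned long index_mask_nospec(unsigned long index,
+                                         unsigned long size)
+  {
+          return ~(long)(index | (size - 1UL - index)) >>
+                 (sizeof(long) * 8 - 1);
+  }
+
+  static unsigned long index_nospec(unsigned long index, unsigned long size)
+  {
+          return index & index_mask_nospec(index, size);
+  }
+
+  int main(void)
+  {
+          printf("%lu\n", index_nospec(2, 4)); /* in range: stays 2 */
+          printf("%lu\n", index_nospec(5, 4)); /* out of range: forced to 0 */
+          return 0;
+  }
+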
--- /dev/null
+From d516b187a9cc2e842030dd005be2735db3e8f395 Mon Sep 17 00:00:00 2001
+From: Heiner Kallweit <hkallweit1@gmail.com>
+Date: Tue, 30 Jul 2024 21:51:52 +0200
+Subject: r8169: don't increment tx_dropped in case of NETDEV_TX_BUSY
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+commit d516b187a9cc2e842030dd005be2735db3e8f395 upstream.
+
+The skb isn't consumed in the NETDEV_TX_BUSY case, so don't
+increment the tx_dropped counter.
+
+Fixes: 188f4af04618 ("r8169: use NETDEV_TX_{BUSY/OK}")
+Cc: stable@vger.kernel.org
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
+Link: https://patch.msgid.link/bbba9c48-8bac-4932-9aa1-d2ed63bc9433@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/realtek/r8169_main.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -4347,7 +4347,8 @@ static netdev_tx_t rtl8169_start_xmit(st
+ if (unlikely(!rtl_tx_slots_avail(tp))) {
+ if (net_ratelimit())
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+- goto err_stop_0;
++ netif_stop_queue(dev);
++ return NETDEV_TX_BUSY;
+ }
+
+ opts[1] = rtl8169_tx_vlan_tag(skb);
+@@ -4403,11 +4404,6 @@ err_dma_0:
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+-
+-err_stop_0:
+- netif_stop_queue(dev);
+- dev->stats.tx_dropped++;
+- return NETDEV_TX_BUSY;
+ }
+
+ static unsigned int rtl_last_frag_len(struct sk_buff *skb)
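+
+The rule the fix restores: when .ndo_start_xmit returns NETDEV_TX_BUSY,
+the stack still owns the skb and will retry it, so no counter may
+change; only paths that free the skb and return NETDEV_TX_OK should
+count a drop. A condensed, illustrative skeleton of that contract
+(ring_has_room() and map_and_queue() are hypothetical helpers, not
+r8169 code):
+
+  static netdev_tx_t example_start_xmit(struct sk_buff *skb,
+                                        struct net_device *dev)
+  {
+          if (unlikely(!ring_has_room(dev))) {
+                  netif_stop_queue(dev);
+                  return NETDEV_TX_BUSY; /* skb not consumed: no tx_dropped */
+          }
+
+          if (map_and_queue(dev, skb) < 0) {
+                  dev_kfree_skb_any(skb);  /* skb consumed here ...      */
+                  dev->stats.tx_dropped++; /* ... so the drop is counted */
+          }
+          return NETDEV_TX_OK;
+  }
+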
--- /dev/null
+From 6ccf9984d6be3c2f804087b736db05c2ec42664b Mon Sep 17 00:00:00 2001
+From: Edmund Raile <edmund.raile@protonmail.com>
+Date: Tue, 30 Jul 2024 19:53:26 +0000
+Subject: Revert "ALSA: firewire-lib: obsolete workqueue for period update"
+
+From: Edmund Raile <edmund.raile@protonmail.com>
+
+commit 6ccf9984d6be3c2f804087b736db05c2ec42664b upstream.
+
+Prepare the resolution of the AB/BA deadlock competition for the
+substream lock by restoring the workqueue previously used for
+process context:
+
+revert commit b5b519965c4c ("ALSA: firewire-lib: obsolete workqueue
+for period update")
+
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/kwryofzdmjvzkuw6j3clftsxmoolynljztxqwg76hzeo4simnl@jn3eo7pe642q/
+Signed-off-by: Edmund Raile <edmund.raile@protonmail.com>
+Reviewed-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20240730195318.869840-2-edmund.raile@protonmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/firewire/amdtp-stream.c | 15 +++++++++++++++
+ sound/firewire/amdtp-stream.h | 1 +
+ 2 files changed, 16 insertions(+)
+
+--- a/sound/firewire/amdtp-stream.c
++++ b/sound/firewire/amdtp-stream.c
+@@ -77,6 +77,8 @@
+ // overrun. Actual device can skip more, then this module stops the packet streaming.
+ #define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES 5
+
++static void pcm_period_work(struct work_struct *work);
++
+ /**
+ * amdtp_stream_init - initialize an AMDTP stream structure
+ * @s: the AMDTP stream to initialize
+@@ -105,6 +107,7 @@ int amdtp_stream_init(struct amdtp_strea
+ s->flags = flags;
+ s->context = ERR_PTR(-1);
+ mutex_init(&s->mutex);
++ INIT_WORK(&s->period_work, pcm_period_work);
+ s->packet_index = 0;
+
+ init_waitqueue_head(&s->ready_wait);
+@@ -347,6 +350,7 @@ EXPORT_SYMBOL(amdtp_stream_get_max_paylo
+ */
+ void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
+ {
++ cancel_work_sync(&s->period_work);
+ s->pcm_buffer_pointer = 0;
+ s->pcm_period_pointer = 0;
+ }
+@@ -624,6 +628,16 @@ static void update_pcm_pointers(struct a
+ }
+ }
+
++static void pcm_period_work(struct work_struct *work)
++{
++ struct amdtp_stream *s = container_of(work, struct amdtp_stream,
++ period_work);
++ struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
++
++ if (pcm)
++ snd_pcm_period_elapsed(pcm);
++}
++
+ static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
+ bool sched_irq)
+ {
+@@ -1909,6 +1923,7 @@ static void amdtp_stream_stop(struct amd
+ return;
+ }
+
++ cancel_work_sync(&s->period_work);
+ fw_iso_context_stop(s->context);
+ fw_iso_context_destroy(s->context);
+ s->context = ERR_PTR(-1);
+--- a/sound/firewire/amdtp-stream.h
++++ b/sound/firewire/amdtp-stream.h
+@@ -191,6 +191,7 @@ struct amdtp_stream {
+
+ /* For a PCM substream processing. */
+ struct snd_pcm_substream *pcm;
++ struct work_struct period_work;
+ snd_pcm_uframes_t pcm_buffer_pointer;
+ unsigned int pcm_period_pointer;
+ unsigned int pcm_frame_multiplier;
--- /dev/null
+From 3dab73ab925a51ab05543b491bf17463a48ca323 Mon Sep 17 00:00:00 2001
+From: Edmund Raile <edmund.raile@protonmail.com>
+Date: Tue, 30 Jul 2024 19:53:29 +0000
+Subject: Revert "ALSA: firewire-lib: operate for period elapse event in process context"
+
+From: Edmund Raile <edmund.raile@protonmail.com>
+
+commit 3dab73ab925a51ab05543b491bf17463a48ca323 upstream.
+
+Commit 7ba5ca32fe6e ("ALSA: firewire-lib: operate for period elapse
+event in process context") dropped the process context workqueue
+from amdtp_domain_stream_pcm_pointer() and update_pcm_pointers() to
+avoid its overhead.
+
+With the RME Fireface 800, this led to a regression since kernel
+5.14.0, causing an AB/BA deadlock competition for the substream
+lock with an eventual system freeze under ALSA operation:
+
+thread 0:
+ * (lock A) acquire substream lock by
+ snd_pcm_stream_lock_irq() in
+ snd_pcm_status64()
+ * (lock B) wait for tasklet to finish by calling
+ tasklet_unlock_spin_wait() in
+ tasklet_disable_in_atomic() in
+ ohci_flush_iso_completions() of ohci.c
+
+thread 1:
+ * (lock B) enter tasklet
+ * (lock A) attempt to acquire substream lock,
+ waiting for it to be released:
+ snd_pcm_stream_lock_irqsave() in
+ snd_pcm_period_elapsed() in
+ update_pcm_pointers() in
+ process_ctx_payloads() in
+ process_rx_packets() of amdtp-stream.c
+
+? tasklet_unlock_spin_wait
+ </NMI>
+ <TASK>
+ohci_flush_iso_completions firewire_ohci
+amdtp_domain_stream_pcm_pointer snd_firewire_lib
+snd_pcm_update_hw_ptr0 snd_pcm
+snd_pcm_status64 snd_pcm
+
+? native_queued_spin_lock_slowpath
+ </NMI>
+ <IRQ>
+_raw_spin_lock_irqsave
+snd_pcm_period_elapsed snd_pcm
+process_rx_packets snd_firewire_lib
+irq_target_callback snd_firewire_lib
+handle_it_packet firewire_ohci
+context_tasklet firewire_ohci
+
+Restore the process context workqueue to prevent the AB/BA deadlock
+competition for the ALSA substream lock taken by
+snd_pcm_stream_lock_irq() in snd_pcm_status64() and by
+snd_pcm_stream_lock_irqsave() in snd_pcm_period_elapsed().
+
+revert commit 7ba5ca32fe6e ("ALSA: firewire-lib: operate for period
+elapse event in process context")
+
+Replace the inline description to prevent future deadlocks.
+
+Cc: stable@vger.kernel.org
+Fixes: 7ba5ca32fe6e ("ALSA: firewire-lib: operate for period elapse event in process context")
+Reported-by: edmund.raile <edmund.raile@proton.me>
+Closes: https://lore.kernel.org/r/kwryofzdmjvzkuw6j3clftsxmoolynljztxqwg76hzeo4simnl@jn3eo7pe642q/
+Signed-off-by: Edmund Raile <edmund.raile@protonmail.com>
+Reviewed-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://patch.msgid.link/20240730195318.869840-3-edmund.raile@protonmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/firewire/amdtp-stream.c | 23 +++++++++--------------
+ 1 file changed, 9 insertions(+), 14 deletions(-)
+
+--- a/sound/firewire/amdtp-stream.c
++++ b/sound/firewire/amdtp-stream.c
+@@ -615,16 +615,8 @@ static void update_pcm_pointers(struct a
+ // The program in user process should periodically check the status of intermediate
+ // buffer associated to PCM substream to process PCM frames in the buffer, instead
+ // of receiving notification of period elapsed by poll wait.
+- if (!pcm->runtime->no_period_wakeup) {
+- if (in_softirq()) {
+- // In software IRQ context for 1394 OHCI.
+- snd_pcm_period_elapsed(pcm);
+- } else {
+- // In process context of ALSA PCM application under acquired lock of
+- // PCM substream.
+- snd_pcm_period_elapsed_under_stream_lock(pcm);
+- }
+- }
++ if (!pcm->runtime->no_period_wakeup)
++ queue_work(system_highpri_wq, &s->period_work);
+ }
+ }
+
+@@ -1863,11 +1855,14 @@ unsigned long amdtp_domain_stream_pcm_po
+ {
+ struct amdtp_stream *irq_target = d->irq_target;
+
+- // Process isochronous packets queued till recent isochronous cycle to handle PCM frames.
+ if (irq_target && amdtp_stream_running(irq_target)) {
+- // In software IRQ context, the call causes dead-lock to disable the tasklet
+- // synchronously.
+- if (!in_softirq())
++ // use wq to prevent AB/BA deadlock competition for
++ // substream lock:
++ // fw_iso_context_flush_completions() acquires
++ // lock by ohci_flush_iso_completions(),
++ // amdtp-stream process_rx_packets() attempts to
++ // acquire same lock by snd_pcm_elapsed()
++ if (current_work() != &s->period_work)
+ fw_iso_context_flush_completions(irq_target->context);
+ }
+
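+Both reverts reinstate the classic bottom-half-to-process-context
+handoff: the softirq path only queues work, and snd_pcm_period_elapsed()
+then runs from a worker, where taking the substream lock cannot
+deadlock against a holder spin-waiting on the tasklet. A minimal sketch
+of the pattern with illustrative names (INIT_WORK() at setup and
+cancel_work_sync() at teardown are implied, as in the first revert;
+notify_period_elapsed() is hypothetical):
+
+  struct stream {
+          struct work_struct period_work;
+          /* ... */
+  };
+
+  static void period_work_fn(struct work_struct *work)
+  {
+          struct stream *s = container_of(work, struct stream, period_work);
+
+          /* Process context: safe to take the substream lock here. */
+          notify_period_elapsed(s);
+  }
+
+  /* Called from the softirq/tasklet path: defer instead of notifying. */
+  static void on_packets_processed(struct stream *s)
+  {
+          queue_work(system_highpri_wq, &s->period_work);
+  }
+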
--- /dev/null
+From f126745da81783fb1d082e67bf14c6795e489a88 Mon Sep 17 00:00:00 2001
+From: Alice Ryhl <aliceryhl@google.com>
+Date: Mon, 29 Jul 2024 14:22:49 +0000
+Subject: rust: SHADOW_CALL_STACK is incompatible with Rust
+
+From: Alice Ryhl <aliceryhl@google.com>
+
+commit f126745da81783fb1d082e67bf14c6795e489a88 upstream.
+
+When using the shadow call stack sanitizer, all code must be compiled
+with the -ffixed-x18 flag, but this flag is not currently being passed
+to Rust. This results in crashes that are extremely difficult to debug.
+
+To ensure that nobody else has to go through the same debugging session
+that I had to, prevent configurations that enable both SHADOW_CALL_STACK
+and RUST.
+
+It is rather common for people to backport 724a75ac9542 ("arm64: rust:
+Enable Rust support for AArch64"), so I recommend applying this fix all
+the way back to 6.1.
+
+Cc: stable@vger.kernel.org # 6.1 and later
+Fixes: 724a75ac9542 ("arm64: rust: Enable Rust support for AArch64")
+Signed-off-by: Alice Ryhl <aliceryhl@google.com>
+Acked-by: Miguel Ojeda <ojeda@kernel.org>
+Link: https://lore.kernel.org/r/20240729-shadow-call-stack-v4-1-2a664b082ea4@google.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ init/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1906,6 +1906,7 @@ config RUST
+ depends on !MODVERSIONS
+ depends on !GCC_PLUGINS
+ depends on !RANDSTRUCT
++ depends on !SHADOW_CALL_STACK
+ depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE
+ help
+ Enables Rust support in the kernel.
--- /dev/null
+From 4734406c39238cbeafe66f0060084caa3247ff53 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <hca@linux.ibm.com>
+Date: Thu, 25 Jul 2024 11:31:52 +0200
+Subject: s390/fpu: Re-add exception handling in load_fpu_state()
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+commit 4734406c39238cbeafe66f0060084caa3247ff53 upstream.
+
+With the recent rewrite of the fpu code, exception handling for the
+lfpc instruction within load_fpu_state() was erroneously removed.
+
+Add it again to prevent loading invalid floating point register
+values from causing an unhandled specification exception.
+
+Fixes: 8c09871a950a ("s390/fpu: limit save and restore to used registers")
+Cc: stable@vger.kernel.org
+Reported-by: Aristeu Rozanski <aris@redhat.com>
+Tested-by: Aristeu Rozanski <aris@redhat.com>
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/kernel/fpu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/s390/kernel/fpu.c b/arch/s390/kernel/fpu.c
+index fa90bbdc5ef9..6f2e87920288 100644
+--- a/arch/s390/kernel/fpu.c
++++ b/arch/s390/kernel/fpu.c
+@@ -113,7 +113,7 @@ void load_fpu_state(struct fpu *state, int flags)
+ int mask;
+
+ if (flags & KERNEL_FPC)
+- fpu_lfpc(&state->fpc);
++ fpu_lfpc_safe(&state->fpc);
+ if (!cpu_has_vx()) {
+ if (flags & KERNEL_VXR_V0V7)
+ load_fp_regs_vx(state->vxrs);
+--
+2.46.0
+
--- /dev/null
+From 7c70bcc2a84cf925f655ea1ac4b8088062b144a3 Mon Sep 17 00:00:00 2001
+From: Liu Jing <liujing@cmss.chinamobile.com>
+Date: Sat, 27 Jul 2024 11:04:03 +0200
+Subject: selftests: mptcp: always close input's FD if opened
+
+From: Liu Jing <liujing@cmss.chinamobile.com>
+
+commit 7c70bcc2a84cf925f655ea1ac4b8088062b144a3 upstream.
+
+In the main_loop_s() function, when open(cfg_input, O_RDONLY) is
+run, the last fd is not closed if the "--cfg_repeat > 0" branch is
+not taken.
+
+Fixes: 05be5e273c84 ("selftests: mptcp: add disconnect tests")
+Cc: stable@vger.kernel.org
+Signed-off-by: Liu Jing <liujing@cmss.chinamobile.com>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_connect.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+@@ -1115,11 +1115,11 @@ again:
+ return 1;
+ }
+
+- if (--cfg_repeat > 0) {
+- if (cfg_input)
+- close(fd);
++ if (cfg_input)
++ close(fd);
++
++ if (--cfg_repeat > 0)
+ goto again;
+- }
+
+ return 0;
+ }
--- /dev/null
+From 4a2f48992ddf4b8c2fba846c6754089edae6db5a Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Sat, 27 Jul 2024 11:04:02 +0200
+Subject: selftests: mptcp: fix error path
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit 4a2f48992ddf4b8c2fba846c6754089edae6db5a upstream.
+
+pm_nl_check_endpoint() currently calls a non-existent helper
+to mark the test as failed. Fix the wrong call.
+
+Fixes: 03668c65d153 ("selftests: mptcp: join: rework detailed report")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -661,7 +661,7 @@ pm_nl_check_endpoint()
+ done
+
+ if [ -z "${id}" ]; then
+- test_fail "bad test - missing endpoint id"
++ fail_test "bad test - missing endpoint id"
+ return
+ fi
+
--- /dev/null
+From f833470c27832136d4416d8fc55d658082af0989 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Sat, 27 Jul 2024 12:01:29 +0200
+Subject: selftests: mptcp: join: check backup support in signal endp
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit f833470c27832136d4416d8fc55d658082af0989 upstream.
+
+Before the previous commit, 'signal' endpoints with the 'backup' flag
+were ignored when sending the MP_JOIN.
+
+The MPTCP Join selftest has thus been modified to validate this case:
+the "single address, backup" test now validates the MP_JOIN with a
+backup flag, as that is what we expect it to do with such a name. The
+previous version has been kept, but renamed to "single address, switch
+to backup" to avoid confusion.
+
+The "single address with port, backup" test is also now validating the
+MPJ with a backup flag, which makes more sense than checking the switch
+to backup with an MP_PRIO.
+
+The "mpc backup both sides" test is now validating that the backup flag
+is also set in MP_JOIN from and to the addresses used in the initial
+subflow, using the special ID 0.
+
+The 'Fixes' tag below is the same as the one from the previous
+commit: this patch is not fixing anything wrong in the selftests,
+but it validates the previous fix for an issue introduced by that
+commit ID.
+
+Fixes: 4596a2c1b7f5 ("mptcp: allow creating non-backup subflows")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh | 34 +++++++++++++++++++-----
+ 1 file changed, 28 insertions(+), 6 deletions(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -2641,6 +2641,19 @@ backup_tests()
+ if reset "single address, backup" &&
+ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ pm_nl_set_limits $ns1 0 1
++ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup
++ pm_nl_set_limits $ns2 1 1
++ sflags=nobackup speed=slow \
++ run_tests $ns1 $ns2 10.0.1.1
++ chk_join_nr 1 1 1
++ chk_add_nr 1 1
++ chk_prio_nr 1 0 0 1
++ fi
++
++ # single address, switch to backup
++ if reset "single address, switch to backup" &&
++ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
++ pm_nl_set_limits $ns1 0 1
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ pm_nl_set_limits $ns2 1 1
+ sflags=backup speed=slow \
+@@ -2654,13 +2667,13 @@ backup_tests()
+ if reset "single address with port, backup" &&
+ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ pm_nl_set_limits $ns1 0 1
+- pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
++ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup port 10100
+ pm_nl_set_limits $ns2 1 1
+- sflags=backup speed=slow \
++ sflags=nobackup speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 1 1 1
+ chk_add_nr 1 1
+- chk_prio_nr 1 1 0 0
++ chk_prio_nr 1 0 0 1
+ fi
+
+ if reset "mpc backup" &&
+@@ -2674,12 +2687,21 @@ backup_tests()
+
+ if reset "mpc backup both sides" &&
+ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+- pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow,backup
++ pm_nl_set_limits $ns1 0 2
++ pm_nl_set_limits $ns2 1 2
++ pm_nl_add_endpoint $ns1 10.0.1.1 flags signal,backup
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
++
++ # 10.0.2.2 (non-backup) -> 10.0.1.1 (backup)
++ pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
++ # 10.0.1.2 (backup) -> 10.0.2.1 (non-backup)
++ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
++ ip -net "$ns2" route add 10.0.2.1 via 10.0.1.1 dev ns2eth1 # force this path
++
+ speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+- chk_join_nr 0 0 0
+- chk_prio_nr 1 1 0 0
++ chk_join_nr 2 2 2
++ chk_prio_nr 1 1 1 1
+ fi
+
+ if reset "mpc switch to backup" &&
--- /dev/null
+From 935ff5bb8a1cfcdf8e60c8f5c794d0bbbc234437 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Sat, 27 Jul 2024 12:01:27 +0200
+Subject: selftests: mptcp: join: validate backup in MPJ
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 935ff5bb8a1cfcdf8e60c8f5c794d0bbbc234437 upstream.
+
+A peer can notify the other one that a subflow has to be treated as
+"backup" in two different ways: either by sending a dedicated MP_PRIO
+notification, or by setting the backup flag in the MP_JOIN handshake.
+
+The selftests were previously monitoring the former, but not the latter.
+This is what is now done here by looking at these new MIB counters when
+validating the 'backup' cases:
+
+ MPTcpExtMPJoinSynBackupRx
+ MPTcpExtMPJoinSynAckBackupRx
+
+The 'Fixes' tag below is the same as the one from the previous
+commit: this patch is not fixing anything wrong in the selftests,
+but it will help validate a new fix for an issue introduced by that
+commit ID.
+
+Fixes: 4596a2c1b7f5 ("mptcp: allow creating non-backup subflows")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh | 42 ++++++++++++++++++------
+ 1 file changed, 32 insertions(+), 10 deletions(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -1634,6 +1634,8 @@ chk_prio_nr()
+ {
+ local mp_prio_nr_tx=$1
+ local mp_prio_nr_rx=$2
++ local mpj_syn=$3
++ local mpj_syn_ack=$4
+ local count
+
+ print_check "ptx"
+@@ -1655,6 +1657,26 @@ chk_prio_nr()
+ else
+ print_ok
+ fi
++
++ print_check "syn backup"
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinSynBackupRx")
++ if [ -z "$count" ]; then
++ print_skip
++ elif [ "$count" != "$mpj_syn" ]; then
++ fail_test "got $count JOIN[s] syn with Backup expected $mpj_syn"
++ else
++ print_ok
++ fi
++
++ print_check "synack backup"
++ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtMPJoinSynAckBackupRx")
++ if [ -z "$count" ]; then
++ print_skip
++ elif [ "$count" != "$mpj_syn_ack" ]; then
++ fail_test "got $count JOIN[s] synack with Backup expected $mpj_syn_ack"
++ else
++ print_ok
++ fi
+ }
+
+ chk_subflow_nr()
+@@ -2612,7 +2634,7 @@ backup_tests()
+ sflags=nobackup speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 1 1 1
+- chk_prio_nr 0 1
++ chk_prio_nr 0 1 1 0
+ fi
+
+ # single address, backup
+@@ -2625,7 +2647,7 @@ backup_tests()
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 1 1 1
+ chk_add_nr 1 1
+- chk_prio_nr 1 1
++ chk_prio_nr 1 1 0 0
+ fi
+
+ # single address with port, backup
+@@ -2638,7 +2660,7 @@ backup_tests()
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 1 1 1
+ chk_add_nr 1 1
+- chk_prio_nr 1 1
++ chk_prio_nr 1 1 0 0
+ fi
+
+ if reset "mpc backup" &&
+@@ -2647,7 +2669,7 @@ backup_tests()
+ speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 0 0 0
+- chk_prio_nr 0 1
++ chk_prio_nr 0 1 0 0
+ fi
+
+ if reset "mpc backup both sides" &&
+@@ -2657,7 +2679,7 @@ backup_tests()
+ speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 0 0 0
+- chk_prio_nr 1 1
++ chk_prio_nr 1 1 0 0
+ fi
+
+ if reset "mpc switch to backup" &&
+@@ -2666,7 +2688,7 @@ backup_tests()
+ sflags=backup speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 0 0 0
+- chk_prio_nr 0 1
++ chk_prio_nr 0 1 0 0
+ fi
+
+ if reset "mpc switch to backup both sides" &&
+@@ -2676,7 +2698,7 @@ backup_tests()
+ sflags=backup speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 0 0 0
+- chk_prio_nr 1 1
++ chk_prio_nr 1 1 0 0
+ fi
+ }
+
+@@ -3053,7 +3075,7 @@ fullmesh_tests()
+ addr_nr_ns2=1 sflags=backup,fullmesh speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 2 2 2
+- chk_prio_nr 0 1
++ chk_prio_nr 0 1 1 0
+ chk_rm_nr 0 1
+ fi
+
+@@ -3066,7 +3088,7 @@ fullmesh_tests()
+ sflags=nobackup,nofullmesh speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 2 2 2
+- chk_prio_nr 0 1
++ chk_prio_nr 0 1 1 0
+ chk_rm_nr 0 1
+ fi
+ }
+@@ -3318,7 +3340,7 @@ userspace_tests()
+ sflags=backup speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 1 1 0
+- chk_prio_nr 0 0
++ chk_prio_nr 0 0 0 0
+ fi
+
+ # userspace pm type prevents rm_addr
risc-v-enable-the-ipi-before-workqueue_online_cpu.patch
riscv-fix-linear-mapping-checks-for-non-contiguous-m.patch
arm64-jump_label-ensure-patched-jump_labels-are-visi.patch
+rust-shadow_call_stack-is-incompatible-with-rust.patch
+ceph-force-sending-a-cap-update-msg-back-to-mds-for-revoke-op.patch
+s390-fpu-re-add-exception-handling-in-load_fpu_state.patch
+platform-chrome-cros_ec_proto-lock-device-when-updating-mkbp-version.patch
+hid-wacom-modify-pen-ids.patch
+btrfs-zoned-fix-zone_unusable-accounting-on-making-block-group-read-write-again.patch
+btrfs-do-not-subtract-delalloc-from-avail-bytes.patch
+btrfs-make-cow_file_range_inline-honor-locked_page-on-error.patch
+protect-the-fetch-of-fd-in-do_dup2-from-mispredictions.patch
+mptcp-sched-check-both-directions-for-backup.patch
+alsa-usb-audio-correct-surround-channels-in-uac1-channel-map.patch
+alsa-hda-realtek-add-quirk-for-acer-aspire-e5-574g.patch
+alsa-seq-ump-optimize-conversions-from-sysex-to-ump.patch
+revert-alsa-firewire-lib-obsolete-workqueue-for-period-update.patch
+revert-alsa-firewire-lib-operate-for-period-elapse-event-in-process-context.patch
+pci-pciehp-retain-power-indicator-bits-for-userspace-indicators.patch
+drm-vmwgfx-fix-a-deadlock-in-dma-buf-fence-polling.patch
+drm-vmwgfx-fix-handling-of-dumb-buffers.patch
+drm-ast-astdp-wake-up-during-connector-status-detection.patch
+drm-ast-fix-black-screen-after-resume.patch
+drm-amdgpu-fix-contiguous-handling-for-ib-parsing-v2.patch
+drm-virtio-fix-type-of-dma-fence-context-variable.patch
+drm-i915-fix-possible-int-overflow-in-skl_ddi_calculate_wrpll.patch
+drm-v3d-prevent-out-of-bounds-access-in-performance-query-extensions.patch
+drm-v3d-fix-potential-memory-leak-in-the-timestamp-extension.patch
+drm-v3d-fix-potential-memory-leak-in-the-performance-extension.patch
+drm-v3d-validate-passed-in-drm-syncobj-handles-in-the-timestamp-extension.patch
+drm-v3d-validate-passed-in-drm-syncobj-handles-in-the-performance-extension.patch
+bluetooth-hci_event-fix-setting-discovery_finding-for-passive-scanning.patch
+nouveau-set-placement-to-original-placement-on-uvmm-validate.patch
+wifi-ath12k-fix-soft-lockup-on-suspend.patch
+wifi-mac80211-use-monitor-sdata-with-driver-only-if-desired.patch
+io_uring-keep-multishot-request-napi-timeout-current.patch
+net-usb-sr9700-fix-uninitialized-variable-use-in-sr_mdio_read.patch
+net-wan-fsl_qmc_hdlc-convert-carrier_lock-spinlock-to-a-mutex.patch
+net-wan-fsl_qmc_hdlc-discard-received-crc.patch
+r8169-don-t-increment-tx_dropped-in-case-of-netdev_tx_busy.patch
+mptcp-fix-user-space-pm-announced-address-accounting.patch
+mptcp-distinguish-rcv-vs-sent-backup-flag-in-requests.patch
+mptcp-fix-nl-pm-announced-address-accounting.patch
+mptcp-mib-count-mpj-with-backup-flag.patch
+mptcp-fix-bad-rcvpruned-mib-accounting.patch
+mptcp-pm-fix-backup-support-in-signal-endpoints.patch
+mptcp-pm-only-set-request_bkup-flag-when-sending-mp_prio.patch
+mptcp-fix-duplicate-data-handling.patch
+selftests-mptcp-fix-error-path.patch
+selftests-mptcp-always-close-input-s-fd-if-opened.patch
+selftests-mptcp-join-validate-backup-in-mpj.patch
+selftests-mptcp-join-check-backup-support-in-signal-endp.patch
--- /dev/null
+From a47f3320bb4ba6714abe8dddb36399367b491358 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan+linaro@kernel.org>
+Date: Tue, 9 Jul 2024 09:31:32 +0200
+Subject: wifi: ath12k: fix soft lockup on suspend
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+commit a47f3320bb4ba6714abe8dddb36399367b491358 upstream.
+
+The ext interrupts are enabled when the firmware has been started, but
+this may never happen, for example, if the board configuration file is
+missing.
+
+When the system is later suspended, the driver unconditionally tries to
+disable interrupts, which results in an irq disable imbalance and causes
+the driver to spin indefinitely in napi_synchronize().
+
+Make sure that the interrupts have been enabled before attempting to
+disable them.
+
+Fixes: d889913205cf ("wifi: ath12k: driver for Qualcomm Wi-Fi 7 devices")
+Cc: stable@vger.kernel.org # 6.3
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Acked-by: Jeff Johnson <quic_jjohnson@quicinc.com>
+Link: https://patch.msgid.link/20240709073132.9168-1-johan+linaro@kernel.org
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/ath/ath12k/pci.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/ath/ath12k/pci.c
++++ b/drivers/net/wireless/ath/ath12k/pci.c
+@@ -472,7 +472,8 @@ static void __ath12k_pci_ext_irq_disable
+ {
+ int i;
+
+- clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
++ if (!test_and_clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
++ return;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
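+
+The one-line change is the standard idiom for keeping enable/disable
+paths balanced: record the enabled state in an atomic flag and make
+disable a no-op unless it observes and clears that flag in one step. A
+hedged sketch of the idiom (the helpers and flag word are illustrative;
+only the role of ATH12K_FLAG_EXT_IRQ_ENABLED mirrors the driver):
+
+  static void example_ext_irq_enable(unsigned long *flags)
+  {
+          if (test_and_set_bit(0, flags))
+                  return; /* already enabled */
+          /* ... enable_irq()/napi_enable() for each group ... */
+  }
+
+  static void example_ext_irq_disable(unsigned long *flags)
+  {
+          /* No-op unless we really enabled: avoids the disable_irq()/
+           * napi_synchronize() imbalance seen on suspend. */
+          if (!test_and_clear_bit(0, flags))
+                  return;
+          /* ... disable_irq()/napi_synchronize() for each group ... */
+  }
+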
--- /dev/null
+From 8f4fa0876231c426f880a2bff25ac49fac67d805 Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Thu, 25 Jul 2024 18:48:36 +0200
+Subject: wifi: mac80211: use monitor sdata with driver only if desired
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit 8f4fa0876231c426f880a2bff25ac49fac67d805 upstream.
+
+In commit 0d9c2beed116 ("wifi: mac80211: fix monitor channel
+with chanctx emulation") I changed mac80211 to always have an
+internal monitor_sdata, so there is something to bind the
+chanctx to.
+
+However, if the driver didn't also have the WANT_MONITOR flag,
+this would cause mac80211 to allocate it without telling the
+driver (which was intentional), but also to use it in later API
+calls to the driver that never knew about it, which was _not_
+intentional.
+
+Check through the code and only use the monitor_sdata in the
+relevant places (TX, MU-MIMO follow settings, TX power, and
+interface iteration) when the WANT_MONITOR flag is set.
+
+Cc: stable@vger.kernel.org
+Fixes: 0d9c2beed116 ("wifi: mac80211: fix monitor channel with chanctx emulation")
+Reported-by: ZeroBeat <ZeroBeat@gmx.de>
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=219086
+Tested-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20240725184836.25d334157a8e.I02574086da2c5cf0e18264ce5807db6f14ffd9c0@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mac80211/cfg.c | 7 +++++--
+ net/mac80211/tx.c | 5 +++--
+ net/mac80211/util.c | 2 +-
+ 3 files changed, 9 insertions(+), 5 deletions(-)
+
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -114,7 +114,7 @@ static int ieee80211_set_mon_options(str
+
+ /* apply all changes now - no failures allowed */
+
+- if (monitor_sdata)
++ if (monitor_sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ ieee80211_set_mu_mimo_follow(monitor_sdata, params);
+
+ if (params->flags) {
+@@ -3038,6 +3038,9 @@ static int ieee80211_set_tx_power(struct
+ sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
++ if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
++ return -EOPNOTSUPP;
++
+ sdata = wiphy_dereference(local->hw.wiphy,
+ local->monitor_sdata);
+ if (!sdata)
+@@ -3100,7 +3103,7 @@ static int ieee80211_set_tx_power(struct
+ if (has_monitor) {
+ sdata = wiphy_dereference(local->hw.wiphy,
+ local->monitor_sdata);
+- if (sdata) {
++ if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
+ sdata->deflink.user_power_level = local->user_power_level;
+ if (txp_type != sdata->vif.bss_conf.txpower_type)
+ update_txp_type = true;
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -1768,7 +1768,7 @@ static bool __ieee80211_tx(struct ieee80
+ break;
+ }
+ sdata = rcu_dereference(local->monitor_sdata);
+- if (sdata) {
++ if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
+ vif = &sdata->vif;
+ info->hw_queue =
+ vif->hw_queue[skb_get_queue_mapping(skb)];
+@@ -3957,7 +3957,8 @@ begin:
+ break;
+ }
+ tx.sdata = rcu_dereference(local->monitor_sdata);
+- if (tx.sdata) {
++ if (tx.sdata &&
++ ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
+ vif = &tx.sdata->vif;
+ info->hw_queue =
+ vif->hw_queue[skb_get_queue_mapping(skb)];
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -776,7 +776,7 @@ static void __iterate_interfaces(struct
+ sdata = rcu_dereference_check(local->monitor_sdata,
+ lockdep_is_held(&local->iflist_mtx) ||
+ lockdep_is_held(&local->hw.wiphy->mtx));
+- if (sdata &&
++ if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) &&
+ (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL || !active_only ||
+ sdata->flags & IEEE80211_SDATA_IN_DRIVER))
+ iterator(data, sdata->vif.addr, &sdata->vif);