--- /dev/null
+From d85fc52cbb9a719c8335d93a28d6a79d7acd419f Mon Sep 17 00:00:00 2001
+From: Lianqin Hu <hulianqin@vivo.com>
+Date: Sun, 26 Jan 2025 03:51:11 +0000
+Subject: ALSA: usb-audio: Add delay quirk for iBasso DC07 Pro
+
+From: Lianqin Hu <hulianqin@vivo.com>
+
+commit d85fc52cbb9a719c8335d93a28d6a79d7acd419f upstream.
+
+Audio control requests that set the sampling frequency sometimes fail on
+this card. Adding a delay between control messages eliminates that problem.
+
+usb 1-1: New USB device found, idVendor=2fc6, idProduct=f0b7
+usb 1-1: New USB device strings: Mfr=1, Product=2, SerialNumber=3
+usb 1-1: Product: iBasso DC07 Pro
+usb 1-1: Manufacturer: iBasso
+usb 1-1: SerialNumber: CTUA171130B
+
+Signed-off-by: Lianqin Hu <hulianqin@vivo.com>
+Cc: <stable@vger.kernel.org>
+Link: https://patch.msgid.link/TYUPR06MB62174A48D04E09A37996DF84D2ED2@TYUPR06MB6217.apcprd06.prod.outlook.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/usb/quirks.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2343,6 +2343,8 @@ static const struct usb_audio_quirk_flag
+ QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ DEVICE_FLG(0x2d95, 0x8021, /* VIVO USB-C-XE710 HEADSET */
+ QUIRK_FLAG_CTL_MSG_DELAY_1M),
++ DEVICE_FLG(0x2fc6, 0xf0b7, /* iBasso DC07 Pro */
++ QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ DEVICE_FLG(0x30be, 0x0101, /* Schiit Hel */
+ QUIRK_FLAG_IGNORE_CTL_ERROR),
+ DEVICE_FLG(0x413c, 0xa506, /* Dell AE515 sound bar */
--- /dev/null
+From d26c4ad3fa53e76a602a9974ade171c8399f2a29 Mon Sep 17 00:00:00 2001
+From: Abel Vesa <abel.vesa@linaro.org>
+Date: Tue, 7 Jan 2025 17:55:23 +0200
+Subject: clk: qcom: gcc-x1e80100: Do not turn off usb_2 controller GDSC
+
+From: Abel Vesa <abel.vesa@linaro.org>
+
+commit d26c4ad3fa53e76a602a9974ade171c8399f2a29 upstream.
+
+Allowing the usb_2 controller GDSC to be turned off during system suspend
+renders the controller unable to resume.
+
+So use PWRSTS_RET_ON instead in order to make sure the GDSC doesn't
+go down.
+
+Fixes: 161b7c401f4b ("clk: qcom: Add Global Clock controller (GCC) driver for X1E80100")
+Cc: stable@vger.kernel.org # 6.8
+Signed-off-by: Abel Vesa <abel.vesa@linaro.org>
+Reviewed-by: Johan Hovold <johan+linaro@kernel.org>
+Tested-by: Johan Hovold <johan+linaro@kernel.org>
+Link: https://lore.kernel.org/r/20250107-x1e80100-clk-gcc-fix-usb2-gdsc-pwrsts-v1-1-e15d1a5e7d80@linaro.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/clk/qcom/gcc-x1e80100.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/clk/qcom/gcc-x1e80100.c
++++ b/drivers/clk/qcom/gcc-x1e80100.c
+@@ -6083,7 +6083,7 @@ static struct gdsc gcc_usb20_prim_gdsc =
+ .pd = {
+ .name = "gcc_usb20_prim_gdsc",
+ },
+- .pwrsts = PWRSTS_OFF_ON,
++ .pwrsts = PWRSTS_RET_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+
--- /dev/null
+From 235b630eda072d7e7b102ab346d6b8a2c028a772 Mon Sep 17 00:00:00 2001
+From: Sean Rhodes <sean@starlabs.systems>
+Date: Tue, 19 Nov 2024 08:58:15 +0000
+Subject: drivers/card_reader/rtsx_usb: Restore interrupt based detection
+
+From: Sean Rhodes <sean@starlabs.systems>
+
+commit 235b630eda072d7e7b102ab346d6b8a2c028a772 upstream.
+
+This commit reintroduces interrupt-based card detection previously
+used in the rts5139 driver. This functionality was removed in commit
+00d8521dcd23 ("staging: remove rts5139 driver code").
+
+Reintroducing this mechanism fixes presence detection for certain card
+readers, which, with the current driver, will take approximately 20
+seconds to enter S3 as `mmc_rescan` has to be frozen.
+
+Fixes: 00d8521dcd23 ("staging: remove rts5139 driver code")
+Cc: stable@vger.kernel.org
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sean Rhodes <sean@starlabs.systems>
+Link: https://lore.kernel.org/r/20241119085815.11769-1-sean@starlabs.systems
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/cardreader/rtsx_usb.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/drivers/misc/cardreader/rtsx_usb.c
++++ b/drivers/misc/cardreader/rtsx_usb.c
+@@ -286,6 +286,7 @@ static int rtsx_usb_get_status_with_bulk
+ int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
+ {
+ int ret;
++ u8 interrupt_val = 0;
+ u16 *buf;
+
+ if (!status)
+@@ -308,6 +309,20 @@ int rtsx_usb_get_card_status(struct rtsx
+ ret = rtsx_usb_get_status_with_bulk(ucr, status);
+ }
+
++ rtsx_usb_read_register(ucr, CARD_INT_PEND, &interrupt_val);
++ /* Cross check presence with interrupts */
++ if (*status & XD_CD)
++ if (!(interrupt_val & XD_INT))
++ *status &= ~XD_CD;
++
++ if (*status & SD_CD)
++ if (!(interrupt_val & SD_INT))
++ *status &= ~SD_CD;
++
++ if (*status & MS_CD)
++ if (!(interrupt_val & MS_INT))
++ *status &= ~MS_CD;
++
+ /* usb_control_msg may return positive when success */
+ if (ret < 0)
+ return ret;
--- /dev/null
+From a4e17a8f239a545c463f8ec27db4ed6e74b31841 Mon Sep 17 00:00:00 2001
+From: "Ricardo B. Marliere" <rbm@suse.com>
+Date: Thu, 5 Dec 2024 17:50:35 -0300
+Subject: ktest.pl: Check kernelrelease return in get_version
+
+From: Ricardo B. Marliere <rbm@suse.com>
+
+commit a4e17a8f239a545c463f8ec27db4ed6e74b31841 upstream.
+
+In the case of a test that uses the special option ${KERNEL_VERSION} in one
+of its settings but has no configuration available in ${OUTPUT_DIR}, for
+example if it's a new empty directory, then the `make kernelrelease` call
+will fail and the subroutine will chomp an empty string, silently. Fix that
+by adding an empty configuration and retrying.
+
+Cc: stable@vger.kernel.org
+Cc: John Hawley <warthog9@eaglescrag.net>
+Fixes: 5f9b6ced04a4e ("ktest: Bisecting, install modules, add logging")
+Link: https://lore.kernel.org/20241205-ktest_kver_fallback-v2-1-869dae4c7777@suse.com
+Signed-off-by: Ricardo B. Marliere <rbm@suse.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/ktest/ktest.pl | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/tools/testing/ktest/ktest.pl
++++ b/tools/testing/ktest/ktest.pl
+@@ -2419,6 +2419,11 @@ sub get_version {
+ return if ($have_version);
+ doprint "$make kernelrelease ... ";
+ $version = `$make -s kernelrelease | tail -1`;
++ if (!length($version)) {
++ run_command "$make allnoconfig" or return 0;
++ doprint "$make kernelrelease ... ";
++ $version = `$make -s kernelrelease | tail -1`;
++ }
+ chomp($version);
+ doprint "$version\n";
+ $have_version = 1;
--- /dev/null
+From 0c984a283a3ea3f10bebecd6c57c1d41b2e4f518 Mon Sep 17 00:00:00 2001
+From: Yu Kuai <yukuai3@huawei.com>
+Date: Thu, 9 Jan 2025 09:51:43 +0800
+Subject: md: add a new callback pers->bitmap_sector()
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+commit 0c984a283a3ea3f10bebecd6c57c1d41b2e4f518 upstream.
+
+This callback will be used in raid5 to convert io ranges from array to
+bitmap.
+
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Reviewed-by: Xiao Ni <xni@redhat.com>
+Link: https://lore.kernel.org/r/20250109015145.158868-4-yukuai1@huaweicloud.com
+Signed-off-by: Song Liu <song@kernel.org>
+Signed-off-by: Yu Kuai <yukuai1@huaweicloud.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/md.h | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -746,6 +746,9 @@ struct md_personality
+ void *(*takeover) (struct mddev *mddev);
+ /* Changes the consistency policy of an active array. */
+ int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
++ /* convert io ranges from array to bitmap */
++ void (*bitmap_sector)(struct mddev *mddev, sector_t *offset,
++ unsigned long *sectors);
+ };
+
+ struct md_sysfs_entry {
--- /dev/null
+From cd5fc653381811f1e0ba65f5d169918cab61476f Mon Sep 17 00:00:00 2001
+From: Yu Kuai <yukuai3@huawei.com>
+Date: Thu, 9 Jan 2025 09:51:45 +0800
+Subject: md/md-bitmap: move bitmap_{start, end}write to md upper layer
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+commit cd5fc653381811f1e0ba65f5d169918cab61476f upstream.
+
+There are two BUG reports that raid5 will hang at
+bitmap_startwrite([1],[2]), root cause is that bitmap start write and end
+write are unbalanced, it's not quite clear where, and while reviewing raid5
+code, it's found that bitmap operations can be optimized. For example,
+for a 4-disk raid5, with chunksize=8k, if the user issues an IO (0 + 48k) to
+the array:
+
+┌────────────────────────────────────────────────────────────┐
+│chunk 0 │
+│ ┌────────────┬─────────────┬─────────────┬────────────┼
+│ sh0 │A0: 0 + 4k │A1: 8k + 4k │A2: 16k + 4k │A3: P │
+│ ┼────────────┼─────────────┼─────────────┼────────────┼
+│ sh1 │B0: 4k + 4k │B1: 12k + 4k │B2: 20k + 4k │B3: P │
+┼──────┴────────────┴─────────────┴─────────────┴────────────┼
+│chunk 1 │
+│ ┌────────────┬─────────────┬─────────────┬────────────┤
+│ sh2 │C0: 24k + 4k│C1: 32k + 4k │C2: P │C3: 40k + 4k│
+│ ┼────────────┼─────────────┼─────────────┼────────────┼
+│ sh3 │D0: 28k + 4k│D1: 36k + 4k │D2: P │D3: 44k + 4k│
+└──────┴────────────┴─────────────┴─────────────┴────────────┘
+
+Before this patch, 4 stripe head will be used, and each sh will attach
+bio for 3 disks, and each attached bio will trigger
+bitmap_startwrite() once, which means total 12 times.
+ - 3 times (0 + 4k), for (A0, A1 and A2)
+ - 3 times (4 + 4k), for (B0, B1 and B2)
+ - 3 times (8 + 4k), for (C0, C1 and C3)
+ - 3 times (12 + 4k), for (D0, D1 and D3)
+
+After this patch, md upper layer will calculate that IO range (0 + 48k)
+is corresponding to the bitmap (0 + 16k), and call bitmap_startwrite()
+just once.
+
+Noted that this patch will align bitmap ranges to the chunks, for example,
+if the user issues an IO (0 + 4k) to the array:
+
+- Before this patch, 1 time (0 + 4k), for A0;
+- After this patch, 1 time (0 + 8k) for chunk 0;
+
+Usually, one bitmap bit will represent more than one disk chunk, and this
+doesn't make any difference. And even if the user really created an array
+where one chunk contains multiple bits, the overhead is that more data
+will be recovered after power failure.
+
+Also remove STRIPE_BITMAP_PENDING since it's not used anymore.
+
+[1] https://lore.kernel.org/all/CAJpMwyjmHQLvm6zg1cmQErttNNQPDAAXPKM3xgTjMhbfts986Q@mail.gmail.com/
+[2] https://lore.kernel.org/all/ADF7D720-5764-4AF3-B68E-1845988737AA@flyingcircus.io/
+
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Link: https://lore.kernel.org/r/20250109015145.158868-6-yukuai1@huaweicloud.com
+Signed-off-by: Song Liu <song@kernel.org>
+Signed-off-by: Yu Kuai <yukuai1@huaweicloud.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/md.c | 29 +++++++++++++++++++++++++++
+ drivers/md/md.h | 2 +
+ drivers/md/raid1.c | 4 ---
+ drivers/md/raid10.c | 3 --
+ drivers/md/raid5-cache.c | 2 -
+ drivers/md/raid5.c | 50 +++++------------------------------------------
+ drivers/md/raid5.h | 3 --
+ 7 files changed, 37 insertions(+), 56 deletions(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8745,12 +8745,32 @@ void md_submit_discard_bio(struct mddev
+ }
+ EXPORT_SYMBOL_GPL(md_submit_discard_bio);
+
++static void md_bitmap_start(struct mddev *mddev,
++ struct md_io_clone *md_io_clone)
++{
++ if (mddev->pers->bitmap_sector)
++ mddev->pers->bitmap_sector(mddev, &md_io_clone->offset,
++ &md_io_clone->sectors);
++
++ mddev->bitmap_ops->startwrite(mddev, md_io_clone->offset,
++ md_io_clone->sectors);
++}
++
++static void md_bitmap_end(struct mddev *mddev, struct md_io_clone *md_io_clone)
++{
++ mddev->bitmap_ops->endwrite(mddev, md_io_clone->offset,
++ md_io_clone->sectors);
++}
++
+ static void md_end_clone_io(struct bio *bio)
+ {
+ struct md_io_clone *md_io_clone = bio->bi_private;
+ struct bio *orig_bio = md_io_clone->orig_bio;
+ struct mddev *mddev = md_io_clone->mddev;
+
++ if (bio_data_dir(orig_bio) == WRITE && mddev->bitmap)
++ md_bitmap_end(mddev, md_io_clone);
++
+ if (bio->bi_status && !orig_bio->bi_status)
+ orig_bio->bi_status = bio->bi_status;
+
+@@ -8775,6 +8795,12 @@ static void md_clone_bio(struct mddev *m
+ if (blk_queue_io_stat(bdev->bd_disk->queue))
+ md_io_clone->start_time = bio_start_io_acct(*bio);
+
++ if (bio_data_dir(*bio) == WRITE && mddev->bitmap) {
++ md_io_clone->offset = (*bio)->bi_iter.bi_sector;
++ md_io_clone->sectors = bio_sectors(*bio);
++ md_bitmap_start(mddev, md_io_clone);
++ }
++
+ clone->bi_end_io = md_end_clone_io;
+ clone->bi_private = md_io_clone;
+ *bio = clone;
+@@ -8793,6 +8819,9 @@ void md_free_cloned_bio(struct bio *bio)
+ struct bio *orig_bio = md_io_clone->orig_bio;
+ struct mddev *mddev = md_io_clone->mddev;
+
++ if (bio_data_dir(orig_bio) == WRITE && mddev->bitmap)
++ md_bitmap_end(mddev, md_io_clone);
++
+ if (bio->bi_status && !orig_bio->bi_status)
+ orig_bio->bi_status = bio->bi_status;
+
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -831,6 +831,8 @@ struct md_io_clone {
+ struct mddev *mddev;
+ struct bio *orig_bio;
+ unsigned long start_time;
++ sector_t offset;
++ unsigned long sectors;
+ struct bio bio_clone;
+ };
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -422,8 +422,6 @@ static void close_write(struct r1bio *r1
+
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state))
+ mddev->bitmap_ops->end_behind_write(mddev);
+- /* clear the bitmap if all writes complete successfully */
+- mddev->bitmap_ops->endwrite(mddev, r1_bio->sector, r1_bio->sectors);
+ md_write_end(mddev);
+ }
+
+@@ -1632,8 +1630,6 @@ static void raid1_write_request(struct m
+
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state))
+ mddev->bitmap_ops->start_behind_write(mddev);
+- mddev->bitmap_ops->startwrite(mddev, r1_bio->sector,
+- r1_bio->sectors);
+ first_clone = 0;
+ }
+
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -428,8 +428,6 @@ static void close_write(struct r10bio *r
+ {
+ struct mddev *mddev = r10_bio->mddev;
+
+- /* clear the bitmap if all writes complete successfully */
+- mddev->bitmap_ops->endwrite(mddev, r10_bio->sector, r10_bio->sectors);
+ md_write_end(mddev);
+ }
+
+@@ -1506,7 +1504,6 @@ static void raid10_write_request(struct
+ md_account_bio(mddev, &bio);
+ r10_bio->master_bio = bio;
+ atomic_set(&r10_bio->remaining, 1);
+- mddev->bitmap_ops->startwrite(mddev, r10_bio->sector, r10_bio->sectors);
+
+ for (i = 0; i < conf->copies; i++) {
+ if (r10_bio->devs[i].bio)
+--- a/drivers/md/raid5-cache.c
++++ b/drivers/md/raid5-cache.c
+@@ -313,8 +313,6 @@ void r5c_handle_cached_data_endio(struct
+ if (sh->dev[i].written) {
+ set_bit(R5_UPTODATE, &sh->dev[i].flags);
+ r5c_return_dev_pending_writes(conf, &sh->dev[i]);
+- conf->mddev->bitmap_ops->endwrite(conf->mddev,
+- sh->sector, RAID5_STRIPE_SECTORS(conf));
+ }
+ }
+ }
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -906,8 +906,7 @@ static bool stripe_can_batch(struct stri
+ if (raid5_has_log(conf) || raid5_has_ppl(conf))
+ return false;
+ return test_bit(STRIPE_BATCH_READY, &sh->state) &&
+- !test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
+- is_full_stripe_write(sh);
++ is_full_stripe_write(sh);
+ }
+
+ /* we only do back search */
+@@ -3545,29 +3544,9 @@ static void __add_stripe_bio(struct stri
+ (*bip)->bi_iter.bi_sector, sh->sector, dd_idx,
+ sh->dev[dd_idx].sector);
+
+- if (conf->mddev->bitmap && firstwrite) {
+- /* Cannot hold spinlock over bitmap_startwrite,
+- * but must ensure this isn't added to a batch until
+- * we have added to the bitmap and set bm_seq.
+- * So set STRIPE_BITMAP_PENDING to prevent
+- * batching.
+- * If multiple __add_stripe_bio() calls race here they
+- * much all set STRIPE_BITMAP_PENDING. So only the first one
+- * to complete "bitmap_startwrite" gets to set
+- * STRIPE_BIT_DELAY. This is important as once a stripe
+- * is added to a batch, STRIPE_BIT_DELAY cannot be changed
+- * any more.
+- */
+- set_bit(STRIPE_BITMAP_PENDING, &sh->state);
+- spin_unlock_irq(&sh->stripe_lock);
+- conf->mddev->bitmap_ops->startwrite(conf->mddev, sh->sector,
+- RAID5_STRIPE_SECTORS(conf));
+- spin_lock_irq(&sh->stripe_lock);
+- clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
+- if (!sh->batch_head) {
+- sh->bm_seq = conf->seq_flush+1;
+- set_bit(STRIPE_BIT_DELAY, &sh->state);
+- }
++ if (conf->mddev->bitmap && firstwrite && !sh->batch_head) {
++ sh->bm_seq = conf->seq_flush+1;
++ set_bit(STRIPE_BIT_DELAY, &sh->state);
+ }
+ }
+
+@@ -3618,7 +3597,6 @@ handle_failed_stripe(struct r5conf *conf
+ BUG_ON(sh->batch_head);
+ for (i = disks; i--; ) {
+ struct bio *bi;
+- int bitmap_end = 0;
+
+ if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
+ struct md_rdev *rdev = conf->disks[i].rdev;
+@@ -3643,8 +3621,6 @@ handle_failed_stripe(struct r5conf *conf
+ sh->dev[i].towrite = NULL;
+ sh->overwrite_disks = 0;
+ spin_unlock_irq(&sh->stripe_lock);
+- if (bi)
+- bitmap_end = 1;
+
+ log_stripe_write_finished(sh);
+
+@@ -3659,10 +3635,6 @@ handle_failed_stripe(struct r5conf *conf
+ bio_io_error(bi);
+ bi = nextbi;
+ }
+- if (bitmap_end)
+- conf->mddev->bitmap_ops->endwrite(conf->mddev,
+- sh->sector, RAID5_STRIPE_SECTORS(conf));
+- bitmap_end = 0;
+ /* and fail all 'written' */
+ bi = sh->dev[i].written;
+ sh->dev[i].written = NULL;
+@@ -3671,7 +3643,6 @@ handle_failed_stripe(struct r5conf *conf
+ sh->dev[i].page = sh->dev[i].orig_page;
+ }
+
+- if (bi) bitmap_end = 1;
+ while (bi && bi->bi_iter.bi_sector <
+ sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
+ struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector);
+@@ -3705,9 +3676,6 @@ handle_failed_stripe(struct r5conf *conf
+ bi = nextbi;
+ }
+ }
+- if (bitmap_end)
+- conf->mddev->bitmap_ops->endwrite(conf->mddev,
+- sh->sector, RAID5_STRIPE_SECTORS(conf));
+ /* If we were in the middle of a write the parity block might
+ * still be locked - so just clear all R5_LOCKED flags
+ */
+@@ -4056,8 +4024,7 @@ returnbi:
+ bio_endio(wbi);
+ wbi = wbi2;
+ }
+- conf->mddev->bitmap_ops->endwrite(conf->mddev,
+- sh->sector, RAID5_STRIPE_SECTORS(conf));
++
+ if (head_sh->batch_head) {
+ sh = list_first_entry(&sh->batch_list,
+ struct stripe_head,
+@@ -4882,8 +4849,7 @@ static void break_stripe_batch_list(stru
+ (1 << STRIPE_COMPUTE_RUN) |
+ (1 << STRIPE_DISCARD) |
+ (1 << STRIPE_BATCH_READY) |
+- (1 << STRIPE_BATCH_ERR) |
+- (1 << STRIPE_BITMAP_PENDING)),
++ (1 << STRIPE_BATCH_ERR)),
+ "stripe state: %lx\n", sh->state);
+ WARN_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
+ (1 << STRIPE_REPLACED)),
+@@ -5774,10 +5740,6 @@ static void make_discard_request(struct
+ }
+ spin_unlock_irq(&sh->stripe_lock);
+ if (conf->mddev->bitmap) {
+- for (d = 0; d < conf->raid_disks - conf->max_degraded;
+- d++)
+- mddev->bitmap_ops->startwrite(mddev, sh->sector,
+- RAID5_STRIPE_SECTORS(conf));
+ sh->bm_seq = conf->seq_flush + 1;
+ set_bit(STRIPE_BIT_DELAY, &sh->state);
+ }
+--- a/drivers/md/raid5.h
++++ b/drivers/md/raid5.h
+@@ -371,9 +371,6 @@ enum {
+ STRIPE_ON_RELEASE_LIST,
+ STRIPE_BATCH_READY,
+ STRIPE_BATCH_ERR,
+- STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add
+- * to batch yet.
+- */
+ STRIPE_LOG_TRAPPED, /* trapped into log (see raid5-cache.c)
+ * this bit is used in two scenarios:
+ *
--- /dev/null
+From 4f0e7d0e03b7b80af84759a9e7cfb0f81ac4adae Mon Sep 17 00:00:00 2001
+From: Yu Kuai <yukuai3@huawei.com>
+Date: Thu, 9 Jan 2025 09:51:42 +0800
+Subject: md/md-bitmap: remove the last parameter for bimtap_ops->endwrite()
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+commit 4f0e7d0e03b7b80af84759a9e7cfb0f81ac4adae upstream.
+
+For the case that IO failed for one rdev, the bit will be marked as NEEDED
+in following cases:
+
+1) If badblocks is set and rdev is not faulty;
+2) If rdev is faulty;
+
+Case 1) is useless because synchronizing data to badblocks makes no sense.
+Case 2) can be replaced with mddev->degraded.
+
+Also remove R1BIO_Degraded, R10BIO_Degraded and STRIPE_DEGRADED since
+case 2) no longer use them.
+
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Link: https://lore.kernel.org/r/20250109015145.158868-3-yukuai1@huaweicloud.com
+Signed-off-by: Song Liu <song@kernel.org>
+Signed-off-by: Yu Kuai <yukuai1@huaweicloud.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/md-bitmap.c | 19 ++++++++++---------
+ drivers/md/md-bitmap.h | 2 +-
+ drivers/md/raid1.c | 26 +++-----------------------
+ drivers/md/raid1.h | 1 -
+ drivers/md/raid10.c | 23 +++--------------------
+ drivers/md/raid10.h | 1 -
+ drivers/md/raid5-cache.c | 3 +--
+ drivers/md/raid5.c | 15 +++------------
+ drivers/md/raid5.h | 1 -
+ 9 files changed, 21 insertions(+), 70 deletions(-)
+
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -1726,7 +1726,7 @@ static int bitmap_startwrite(struct mdde
+ }
+
+ static void bitmap_endwrite(struct mddev *mddev, sector_t offset,
+- unsigned long sectors, bool success)
++ unsigned long sectors)
+ {
+ struct bitmap *bitmap = mddev->bitmap;
+
+@@ -1745,15 +1745,16 @@ static void bitmap_endwrite(struct mddev
+ return;
+ }
+
+- if (success && !bitmap->mddev->degraded &&
+- bitmap->events_cleared < bitmap->mddev->events) {
+- bitmap->events_cleared = bitmap->mddev->events;
+- bitmap->need_sync = 1;
+- sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
+- }
+-
+- if (!success && !NEEDED(*bmc))
++ if (!bitmap->mddev->degraded) {
++ if (bitmap->events_cleared < bitmap->mddev->events) {
++ bitmap->events_cleared = bitmap->mddev->events;
++ bitmap->need_sync = 1;
++ sysfs_notify_dirent_safe(
++ bitmap->sysfs_can_clear);
++ }
++ } else if (!NEEDED(*bmc)) {
+ *bmc |= NEEDED_MASK;
++ }
+
+ if (COUNTER(*bmc) == COUNTER_MAX)
+ wake_up(&bitmap->overflow_wait);
+--- a/drivers/md/md-bitmap.h
++++ b/drivers/md/md-bitmap.h
+@@ -92,7 +92,7 @@ struct bitmap_operations {
+ int (*startwrite)(struct mddev *mddev, sector_t offset,
+ unsigned long sectors);
+ void (*endwrite)(struct mddev *mddev, sector_t offset,
+- unsigned long sectors, bool success);
++ unsigned long sectors);
+ bool (*start_sync)(struct mddev *mddev, sector_t offset,
+ sector_t *blocks, bool degraded);
+ void (*end_sync)(struct mddev *mddev, sector_t offset, sector_t *blocks);
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -423,8 +423,7 @@ static void close_write(struct r1bio *r1
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state))
+ mddev->bitmap_ops->end_behind_write(mddev);
+ /* clear the bitmap if all writes complete successfully */
+- mddev->bitmap_ops->endwrite(mddev, r1_bio->sector, r1_bio->sectors,
+- !test_bit(R1BIO_Degraded, &r1_bio->state));
++ mddev->bitmap_ops->endwrite(mddev, r1_bio->sector, r1_bio->sectors);
+ md_write_end(mddev);
+ }
+
+@@ -481,8 +480,6 @@ static void raid1_end_write_request(stru
+ if (!test_bit(Faulty, &rdev->flags))
+ set_bit(R1BIO_WriteError, &r1_bio->state);
+ else {
+- /* Fail the request */
+- set_bit(R1BIO_Degraded, &r1_bio->state);
+ /* Finished with this branch */
+ r1_bio->bios[mirror] = NULL;
+ to_put = bio;
+@@ -1536,11 +1533,8 @@ static void raid1_write_request(struct m
+ write_behind = true;
+
+ r1_bio->bios[i] = NULL;
+- if (!rdev || test_bit(Faulty, &rdev->flags)) {
+- if (i < conf->raid_disks)
+- set_bit(R1BIO_Degraded, &r1_bio->state);
++ if (!rdev || test_bit(Faulty, &rdev->flags))
+ continue;
+- }
+
+ atomic_inc(&rdev->nr_pending);
+ if (test_bit(WriteErrorSeen, &rdev->flags)) {
+@@ -1559,16 +1553,6 @@ static void raid1_write_request(struct m
+ */
+ max_sectors = bad_sectors;
+ rdev_dec_pending(rdev, mddev);
+- /* We don't set R1BIO_Degraded as that
+- * only applies if the disk is
+- * missing, so it might be re-added,
+- * and we want to know to recover this
+- * chunk.
+- * In this case the device is here,
+- * and the fact that this chunk is not
+- * in-sync is recorded in the bad
+- * block log
+- */
+ continue;
+ }
+ if (is_bad) {
+@@ -2616,12 +2600,10 @@ static void handle_write_finished(struct
+ * errors.
+ */
+ fail = true;
+- if (!narrow_write_error(r1_bio, m)) {
++ if (!narrow_write_error(r1_bio, m))
+ md_error(conf->mddev,
+ conf->mirrors[m].rdev);
+ /* an I/O failed, we can't clear the bitmap */
+- set_bit(R1BIO_Degraded, &r1_bio->state);
+- }
+ rdev_dec_pending(conf->mirrors[m].rdev,
+ conf->mddev);
+ }
+@@ -2712,8 +2694,6 @@ static void raid1d(struct md_thread *thr
+ list_del(&r1_bio->retry_list);
+ idx = sector_to_idx(r1_bio->sector);
+ atomic_dec(&conf->nr_queued[idx]);
+- if (mddev->degraded)
+- set_bit(R1BIO_Degraded, &r1_bio->state);
+ if (test_bit(R1BIO_WriteError, &r1_bio->state))
+ close_write(r1_bio);
+ raid_end_bio_io(r1_bio);
+--- a/drivers/md/raid1.h
++++ b/drivers/md/raid1.h
+@@ -188,7 +188,6 @@ struct r1bio {
+ enum r1bio_state {
+ R1BIO_Uptodate,
+ R1BIO_IsSync,
+- R1BIO_Degraded,
+ R1BIO_BehindIO,
+ /* Set ReadError on bios that experience a readerror so that
+ * raid1d knows what to do with them.
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -429,8 +429,7 @@ static void close_write(struct r10bio *r
+ struct mddev *mddev = r10_bio->mddev;
+
+ /* clear the bitmap if all writes complete successfully */
+- mddev->bitmap_ops->endwrite(mddev, r10_bio->sector, r10_bio->sectors,
+- !test_bit(R10BIO_Degraded, &r10_bio->state));
++ mddev->bitmap_ops->endwrite(mddev, r10_bio->sector, r10_bio->sectors);
+ md_write_end(mddev);
+ }
+
+@@ -500,7 +499,6 @@ static void raid10_end_write_request(str
+ set_bit(R10BIO_WriteError, &r10_bio->state);
+ else {
+ /* Fail the request */
+- set_bit(R10BIO_Degraded, &r10_bio->state);
+ r10_bio->devs[slot].bio = NULL;
+ to_put = bio;
+ dec_rdev = 1;
+@@ -1437,10 +1435,8 @@ static void raid10_write_request(struct
+ r10_bio->devs[i].bio = NULL;
+ r10_bio->devs[i].repl_bio = NULL;
+
+- if (!rdev && !rrdev) {
+- set_bit(R10BIO_Degraded, &r10_bio->state);
++ if (!rdev && !rrdev)
+ continue;
+- }
+ if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
+ sector_t first_bad;
+ sector_t dev_sector = r10_bio->devs[i].addr;
+@@ -1457,14 +1453,6 @@ static void raid10_write_request(struct
+ * to other devices yet
+ */
+ max_sectors = bad_sectors;
+- /* We don't set R10BIO_Degraded as that
+- * only applies if the disk is missing,
+- * so it might be re-added, and we want to
+- * know to recover this chunk.
+- * In this case the device is here, and the
+- * fact that this chunk is not in-sync is
+- * recorded in the bad block log.
+- */
+ continue;
+ }
+ if (is_bad) {
+@@ -2964,11 +2952,8 @@ static void handle_write_completed(struc
+ rdev_dec_pending(rdev, conf->mddev);
+ } else if (bio != NULL && bio->bi_status) {
+ fail = true;
+- if (!narrow_write_error(r10_bio, m)) {
++ if (!narrow_write_error(r10_bio, m))
+ md_error(conf->mddev, rdev);
+- set_bit(R10BIO_Degraded,
+- &r10_bio->state);
+- }
+ rdev_dec_pending(rdev, conf->mddev);
+ }
+ bio = r10_bio->devs[m].repl_bio;
+@@ -3027,8 +3012,6 @@ static void raid10d(struct md_thread *th
+ r10_bio = list_first_entry(&tmp, struct r10bio,
+ retry_list);
+ list_del(&r10_bio->retry_list);
+- if (mddev->degraded)
+- set_bit(R10BIO_Degraded, &r10_bio->state);
+
+ if (test_bit(R10BIO_WriteError,
+ &r10_bio->state))
+--- a/drivers/md/raid10.h
++++ b/drivers/md/raid10.h
+@@ -161,7 +161,6 @@ enum r10bio_state {
+ R10BIO_IsSync,
+ R10BIO_IsRecover,
+ R10BIO_IsReshape,
+- R10BIO_Degraded,
+ /* Set ReadError on bios that experience a read error
+ * so that raid10d knows what to do with them.
+ */
+--- a/drivers/md/raid5-cache.c
++++ b/drivers/md/raid5-cache.c
+@@ -314,8 +314,7 @@ void r5c_handle_cached_data_endio(struct
+ set_bit(R5_UPTODATE, &sh->dev[i].flags);
+ r5c_return_dev_pending_writes(conf, &sh->dev[i]);
+ conf->mddev->bitmap_ops->endwrite(conf->mddev,
+- sh->sector, RAID5_STRIPE_SECTORS(conf),
+- !test_bit(STRIPE_DEGRADED, &sh->state));
++ sh->sector, RAID5_STRIPE_SECTORS(conf));
+ }
+ }
+ }
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -1345,8 +1345,6 @@ again:
+ submit_bio_noacct(rbi);
+ }
+ if (!rdev && !rrdev) {
+- if (op_is_write(op))
+- set_bit(STRIPE_DEGRADED, &sh->state);
+ pr_debug("skip op %d on disc %d for sector %llu\n",
+ bi->bi_opf, i, (unsigned long long)sh->sector);
+ clear_bit(R5_LOCKED, &sh->dev[i].flags);
+@@ -2884,7 +2882,6 @@ static void raid5_end_write_request(stru
+ set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
+ } else {
+ if (bi->bi_status) {
+- set_bit(STRIPE_DEGRADED, &sh->state);
+ set_bit(WriteErrorSeen, &rdev->flags);
+ set_bit(R5_WriteError, &sh->dev[i].flags);
+ if (!test_and_set_bit(WantReplacement, &rdev->flags))
+@@ -3664,8 +3661,7 @@ handle_failed_stripe(struct r5conf *conf
+ }
+ if (bitmap_end)
+ conf->mddev->bitmap_ops->endwrite(conf->mddev,
+- sh->sector, RAID5_STRIPE_SECTORS(conf),
+- false);
++ sh->sector, RAID5_STRIPE_SECTORS(conf));
+ bitmap_end = 0;
+ /* and fail all 'written' */
+ bi = sh->dev[i].written;
+@@ -3711,8 +3707,7 @@ handle_failed_stripe(struct r5conf *conf
+ }
+ if (bitmap_end)
+ conf->mddev->bitmap_ops->endwrite(conf->mddev,
+- sh->sector, RAID5_STRIPE_SECTORS(conf),
+- false);
++ sh->sector, RAID5_STRIPE_SECTORS(conf));
+ /* If we were in the middle of a write the parity block might
+ * still be locked - so just clear all R5_LOCKED flags
+ */
+@@ -4062,8 +4057,7 @@ returnbi:
+ wbi = wbi2;
+ }
+ conf->mddev->bitmap_ops->endwrite(conf->mddev,
+- sh->sector, RAID5_STRIPE_SECTORS(conf),
+- !test_bit(STRIPE_DEGRADED, &sh->state));
++ sh->sector, RAID5_STRIPE_SECTORS(conf));
+ if (head_sh->batch_head) {
+ sh = list_first_entry(&sh->batch_list,
+ struct stripe_head,
+@@ -4340,7 +4334,6 @@ static void handle_parity_checks5(struct
+ s->locked++;
+ set_bit(R5_Wantwrite, &dev->flags);
+
+- clear_bit(STRIPE_DEGRADED, &sh->state);
+ set_bit(STRIPE_INSYNC, &sh->state);
+ break;
+ case check_state_run:
+@@ -4497,7 +4490,6 @@ static void handle_parity_checks6(struct
+ clear_bit(R5_Wantwrite, &dev->flags);
+ s->locked--;
+ }
+- clear_bit(STRIPE_DEGRADED, &sh->state);
+
+ set_bit(STRIPE_INSYNC, &sh->state);
+ break;
+@@ -4899,7 +4891,6 @@ static void break_stripe_batch_list(stru
+
+ set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+ (1 << STRIPE_PREREAD_ACTIVE) |
+- (1 << STRIPE_DEGRADED) |
+ (1 << STRIPE_ON_UNPLUG_LIST)),
+ head_sh->state & (1 << STRIPE_INSYNC));
+
+--- a/drivers/md/raid5.h
++++ b/drivers/md/raid5.h
+@@ -358,7 +358,6 @@ enum {
+ STRIPE_REPLACED,
+ STRIPE_PREREAD_ACTIVE,
+ STRIPE_DELAYED,
+- STRIPE_DEGRADED,
+ STRIPE_BIT_DELAY,
+ STRIPE_EXPANDING,
+ STRIPE_EXPAND_SOURCE,
--- /dev/null
+From 9c89f604476cf15c31fbbdb043cff7fbf1dbe0cb Mon Sep 17 00:00:00 2001
+From: Yu Kuai <yukuai3@huawei.com>
+Date: Thu, 9 Jan 2025 09:51:44 +0800
+Subject: md/raid5: implement pers->bitmap_sector()
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+commit 9c89f604476cf15c31fbbdb043cff7fbf1dbe0cb upstream.
+
+Bitmap is used for the whole array for raid1/raid10, hence IO for the
+array can be used directly for bitmap. However, bitmap is used for
+underlying disks for raid5, hence IO for the array can't be used
+directly for bitmap.
+
+Implement pers->bitmap_sector() for raid5 to convert IO ranges from the
+array to the underlying disks.
+
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Link: https://lore.kernel.org/r/20250109015145.158868-5-yukuai1@huaweicloud.com
+Signed-off-by: Song Liu <song@kernel.org>
+Signed-off-by: Yu Kuai <yukuai1@huaweicloud.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/raid5.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 51 insertions(+)
+
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -5918,6 +5918,54 @@ static enum reshape_loc get_reshape_loc(
+ return LOC_BEHIND_RESHAPE;
+ }
+
++static void raid5_bitmap_sector(struct mddev *mddev, sector_t *offset,
++ unsigned long *sectors)
++{
++ struct r5conf *conf = mddev->private;
++ sector_t start = *offset;
++ sector_t end = start + *sectors;
++ sector_t prev_start = start;
++ sector_t prev_end = end;
++ int sectors_per_chunk;
++ enum reshape_loc loc;
++ int dd_idx;
++
++ sectors_per_chunk = conf->chunk_sectors *
++ (conf->raid_disks - conf->max_degraded);
++ start = round_down(start, sectors_per_chunk);
++ end = round_up(end, sectors_per_chunk);
++
++ start = raid5_compute_sector(conf, start, 0, &dd_idx, NULL);
++ end = raid5_compute_sector(conf, end, 0, &dd_idx, NULL);
++
++ /*
++ * For LOC_INSIDE_RESHAPE, this IO will wait for reshape to make
++ * progress, hence it's the same as LOC_BEHIND_RESHAPE.
++ */
++ loc = get_reshape_loc(mddev, conf, prev_start);
++ if (likely(loc != LOC_AHEAD_OF_RESHAPE)) {
++ *offset = start;
++ *sectors = end - start;
++ return;
++ }
++
++ sectors_per_chunk = conf->prev_chunk_sectors *
++ (conf->previous_raid_disks - conf->max_degraded);
++ prev_start = round_down(prev_start, sectors_per_chunk);
++ prev_end = round_down(prev_end, sectors_per_chunk);
++
++ prev_start = raid5_compute_sector(conf, prev_start, 1, &dd_idx, NULL);
++ prev_end = raid5_compute_sector(conf, prev_end, 1, &dd_idx, NULL);
++
++ /*
++ * for LOC_AHEAD_OF_RESHAPE, reshape can make progress before this IO
++ * is handled in make_stripe_request(), we can't know this here hence
++ * we set bits for both.
++ */
++ *offset = min(start, prev_start);
++ *sectors = max(end, prev_end) - *offset;
++}
++
+ static enum stripe_result make_stripe_request(struct mddev *mddev,
+ struct r5conf *conf, struct stripe_request_ctx *ctx,
+ sector_t logical_sector, struct bio *bi)
+@@ -8966,6 +9014,7 @@ static struct md_personality raid6_perso
+ .takeover = raid6_takeover,
+ .change_consistency_policy = raid5_change_consistency_policy,
+ .prepare_suspend = raid5_prepare_suspend,
++ .bitmap_sector = raid5_bitmap_sector,
+ };
+ static struct md_personality raid5_personality =
+ {
+@@ -8991,6 +9040,7 @@ static struct md_personality raid5_perso
+ .takeover = raid5_takeover,
+ .change_consistency_policy = raid5_change_consistency_policy,
+ .prepare_suspend = raid5_prepare_suspend,
++ .bitmap_sector = raid5_bitmap_sector,
+ };
+
+ static struct md_personality raid4_personality =
+@@ -9017,6 +9067,7 @@ static struct md_personality raid4_perso
+ .takeover = raid4_takeover,
+ .change_consistency_policy = raid5_change_consistency_policy,
+ .prepare_suspend = raid5_prepare_suspend,
++ .bitmap_sector = raid5_bitmap_sector,
+ };
+
+ static int __init raid5_init(void)
--- /dev/null
+From 90b7f2961798793275b4844348619b622f983907 Mon Sep 17 00:00:00 2001
+From: Nikita Zhandarovich <n.zhandarovich@fintech.ru>
+Date: Fri, 24 Jan 2025 01:30:20 -0800
+Subject: net: usb: rtl8150: enable basic endpoint checking
+
+From: Nikita Zhandarovich <n.zhandarovich@fintech.ru>
+
+commit 90b7f2961798793275b4844348619b622f983907 upstream.
+
+Syzkaller reports [1] encountering a common issue of utilizing a wrong
+usb endpoint type during URB submitting stage. This, in turn, triggers
+a warning shown below.
+
+For now, enable simple endpoint checking (specifically, bulk and
+interrupt eps, testing control one is not essential) to mitigate
+the issue with a view to do other related cosmetic changes later,
+if they are necessary.
+
+[1] Syzkaller report:
+usb 1-1: BOGUS urb xfer, pipe 3 != type 1
+WARNING: CPU: 1 PID: 2586 at drivers/usb/core/urb.c:503 usb_submit_urb+0xe4b/0x1730 driv>
+Modules linked in:
+CPU: 1 UID: 0 PID: 2586 Comm: dhcpcd Not tainted 6.11.0-rc4-syzkaller-00069-gfc88bb11617>
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 08/06/2024
+RIP: 0010:usb_submit_urb+0xe4b/0x1730 drivers/usb/core/urb.c:503
+Code: 84 3c 02 00 00 e8 05 e4 fc fc 4c 89 ef e8 fd 25 d7 fe 45 89 e0 89 e9 4c 89 f2 48 8>
+RSP: 0018:ffffc9000441f740 EFLAGS: 00010282
+RAX: 0000000000000000 RBX: ffff888112487a00 RCX: ffffffff811a99a9
+RDX: ffff88810df6ba80 RSI: ffffffff811a99b6 RDI: 0000000000000001
+RBP: 0000000000000003 R08: 0000000000000001 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000001 R12: 0000000000000001
+R13: ffff8881023bf0a8 R14: ffff888112452a20 R15: ffff888112487a7c
+FS: 00007fc04eea5740(0000) GS:ffff8881f6300000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f0a1de9f870 CR3: 000000010dbd0000 CR4: 00000000003506f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ <TASK>
+ rtl8150_open+0x300/0xe30 drivers/net/usb/rtl8150.c:733
+ __dev_open+0x2d4/0x4e0 net/core/dev.c:1474
+ __dev_change_flags+0x561/0x720 net/core/dev.c:8838
+ dev_change_flags+0x8f/0x160 net/core/dev.c:8910
+ devinet_ioctl+0x127a/0x1f10 net/ipv4/devinet.c:1177
+ inet_ioctl+0x3aa/0x3f0 net/ipv4/af_inet.c:1003
+ sock_do_ioctl+0x116/0x280 net/socket.c:1222
+ sock_ioctl+0x22e/0x6c0 net/socket.c:1341
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:907 [inline]
+ __se_sys_ioctl fs/ioctl.c:893 [inline]
+ __x64_sys_ioctl+0x193/0x220 fs/ioctl.c:893
+ do_syscall_x64 arch/x86/entry/common.c:52 [inline]
+ do_syscall_64+0xcd/0x250 arch/x86/entry/common.c:83
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+RIP: 0033:0x7fc04ef73d49
+...
+
+This change has not been tested on real hardware.
+
+Reported-and-tested-by: syzbot+d7e968426f644b567e31@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=d7e968426f644b567e31
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Cc: stable@vger.kernel.org
+Signed-off-by: Nikita Zhandarovich <n.zhandarovich@fintech.ru>
+Link: https://patch.msgid.link/20250124093020.234642-1-n.zhandarovich@fintech.ru
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/rtl8150.c | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+--- a/drivers/net/usb/rtl8150.c
++++ b/drivers/net/usb/rtl8150.c
+@@ -71,6 +71,14 @@
+ #define MSR_SPEED (1<<3)
+ #define MSR_LINK (1<<2)
+
++/* USB endpoints */
++enum rtl8150_usb_ep {
++ RTL8150_USB_EP_CONTROL = 0,
++ RTL8150_USB_EP_BULK_IN = 1,
++ RTL8150_USB_EP_BULK_OUT = 2,
++ RTL8150_USB_EP_INT_IN = 3,
++};
++
+ /* Interrupt pipe data */
+ #define INT_TSR 0x00
+ #define INT_RSR 0x01
+@@ -867,6 +875,13 @@ static int rtl8150_probe(struct usb_inte
+ struct usb_device *udev = interface_to_usbdev(intf);
+ rtl8150_t *dev;
+ struct net_device *netdev;
++ static const u8 bulk_ep_addr[] = {
++ RTL8150_USB_EP_BULK_IN | USB_DIR_IN,
++ RTL8150_USB_EP_BULK_OUT | USB_DIR_OUT,
++ 0};
++ static const u8 int_ep_addr[] = {
++ RTL8150_USB_EP_INT_IN | USB_DIR_IN,
++ 0};
+
+ netdev = alloc_etherdev(sizeof(rtl8150_t));
+ if (!netdev)
+@@ -880,6 +895,13 @@ static int rtl8150_probe(struct usb_inte
+ return -ENOMEM;
+ }
+
++ /* Verify that all required endpoints are present */
++ if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) ||
++ !usb_check_int_endpoints(intf, int_ep_addr)) {
++ dev_err(&intf->dev, "couldn't find required endpoints\n");
++ goto out;
++ }
++
+ tasklet_setup(&dev->tl, rx_fixup);
+ spin_lock_init(&dev->rx_pool_lock);
+
--- /dev/null
+From 1b9335a8000fb70742f7db10af314104b6ace220 Mon Sep 17 00:00:00 2001
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Tue, 28 Jan 2025 12:26:33 +0100
+Subject: netfilter: nf_tables: reject mismatching sum of field_len with set key length
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+commit 1b9335a8000fb70742f7db10af314104b6ace220 upstream.
+
+The field length description provides the length of each separated key
+field in the concatenation, each field gets rounded up to 32-bits to
+calculate the pipapo rule width from pipapo_init(). The set key length
+provides the total size of the key aligned to 32-bits.
+
+Register-based arithmetics still allows for combining mismatching set
+key length and field length description, eg. set key length 10 and field
+description [ 5, 4 ] leading to pipapo width of 12.
+
+Cc: stable@vger.kernel.org
+Fixes: 3ce67e3793f4 ("netfilter: nf_tables: do not allow mismatch field size and set key length")
+Reported-by: Noam Rathaus <noamr@ssd-disclosure.com>
+Reviewed-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netfilter/nf_tables_api.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -5074,7 +5074,7 @@ static int nft_set_desc_concat_parse(con
+ static int nft_set_desc_concat(struct nft_set_desc *desc,
+ const struct nlattr *nla)
+ {
+- u32 num_regs = 0, key_num_regs = 0;
++ u32 len = 0, num_regs;
+ struct nlattr *attr;
+ int rem, err, i;
+
+@@ -5088,12 +5088,12 @@ static int nft_set_desc_concat(struct nf
+ }
+
+ for (i = 0; i < desc->field_count; i++)
+- num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32));
++ len += round_up(desc->field_len[i], sizeof(u32));
+
+- key_num_regs = DIV_ROUND_UP(desc->klen, sizeof(u32));
+- if (key_num_regs != num_regs)
++ if (len != desc->klen)
+ return -EINVAL;
+
++ num_regs = DIV_ROUND_UP(desc->klen, sizeof(u32));
+ if (num_regs > NFT_REG32_COUNT)
+ return -E2BIG;
+
--- /dev/null
+From 966a675da844f1a764bb44557c21561cc3d09840 Mon Sep 17 00:00:00 2001
+From: Chuck Lever <chuck.lever@oracle.com>
+Date: Thu, 2 Jan 2025 20:00:01 -0500
+Subject: Revert "SUNRPC: Reduce thread wake-up rate when receiving large RPC messages"
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+commit 966a675da844f1a764bb44557c21561cc3d09840 upstream.
+
+I noticed that a handful of NFSv3 fstests were taking an
+unexpectedly long time to run. Troubleshooting showed that the
+server's TCP window closed and never re-opened, which caused the
+client to trigger an RPC retransmit timeout after 180 seconds.
+
+The client's recovery action was to establish a fresh connection
+and retransmit the timed-out requests. This worked, but it adds a
+long delay.
+
+I tracked the problem to the commit that attempted to reduce the
+rate at which the network layer delivers TCP socket data_ready
+callbacks. Under most circumstances this change worked as expected,
+but for NFSv3, which has no session or other type of throttling, it
+can overwhelm the receiver on occasion.
+
+I'm sure I could tweak the lowat settings, but the small benefit
+doesn't seem worth the bother. Just revert it.
+
+Fixes: 2b877fc53e97 ("SUNRPC: Reduce thread wake-up rate when receiving large RPC messages")
+Cc: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sunrpc/svcsock.c | 12 +-----------
+ 1 file changed, 1 insertion(+), 11 deletions(-)
+
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -1083,9 +1083,6 @@ static void svc_tcp_fragment_received(st
+ /* If we have more data, signal svc_xprt_enqueue() to try again */
+ svsk->sk_tcplen = 0;
+ svsk->sk_marker = xdr_zero;
+-
+- smp_wmb();
+- tcp_set_rcvlowat(svsk->sk_sk, 1);
+ }
+
+ /**
+@@ -1175,17 +1172,10 @@ err_incomplete:
+ goto err_delete;
+ if (len == want)
+ svc_tcp_fragment_received(svsk);
+- else {
+- /* Avoid more ->sk_data_ready() calls until the rest
+- * of the message has arrived. This reduces service
+- * thread wake-ups on large incoming messages. */
+- tcp_set_rcvlowat(svsk->sk_sk,
+- svc_sock_reclen(svsk) - svsk->sk_tcplen);
+-
++ else
+ trace_svcsock_tcp_recv_short(&svsk->sk_xprt,
+ svc_sock_reclen(svsk),
+ svsk->sk_tcplen - sizeof(rpc_fraghdr));
+- }
+ goto err_noclose;
+ error:
+ if (len != -EAGAIN)
--- /dev/null
+From 159ca65c42d90d5ab98fc31b708b12c0be2c26e0 Mon Sep 17 00:00:00 2001
+From: "Masami Hiramatsu (Google)" <mhiramat@kernel.org>
+Date: Tue, 7 Jan 2025 21:11:07 +0900
+Subject: selftests/ftrace: Fix to use remount when testing mount GID option
+
+From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+
+commit 159ca65c42d90d5ab98fc31b708b12c0be2c26e0 upstream.
+
+Fix mount_options.tc to use remount option to mount the tracefs.
+Since the current implementation does not umount the tracefs,
+this test always fails because of -EBUSY error.
+Using remount option will allow us to change the mount option.
+
+Link: https://lore.kernel.org/r/173625186741.1383744.16707876180798573039.stgit@devnote2
+Fixes: 8b55572e5180 ("tracing/selftests: Add tracefs mount options test")
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/ftrace/test.d/00basic/mount_options.tc | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/tools/testing/selftests/ftrace/test.d/00basic/mount_options.tc
++++ b/tools/testing/selftests/ftrace/test.d/00basic/mount_options.tc
+@@ -15,11 +15,11 @@ find_alternate_gid() {
+ tac /etc/group | grep -v ":$original_gid:" | head -1 | cut -d: -f3
+ }
+
+-mount_tracefs_with_options() {
++remount_tracefs_with_options() {
+ local mount_point="$1"
+ local options="$2"
+
+- mount -t tracefs -o "$options" nodev "$mount_point"
++ mount -t tracefs -o "remount,$options" nodev "$mount_point"
+
+ setup
+ }
+@@ -81,7 +81,7 @@ test_gid_mount_option() {
+
+ # Unmount existing tracefs instance and mount with new GID
+ unmount_tracefs "$mount_point"
+- mount_tracefs_with_options "$mount_point" "$new_options"
++ remount_tracefs_with_options "$mount_point" "$new_options"
+
+ check_gid "$mount_point" "$other_group"
+
+@@ -92,7 +92,7 @@ test_gid_mount_option() {
+
+ # Unmount and remount with the original GID
+ unmount_tracefs "$mount_point"
+- mount_tracefs_with_options "$mount_point" "$mount_options"
++ remount_tracefs_with_options "$mount_point" "$mount_options"
+ check_gid "$mount_point" "$original_group"
+ }
+
--- /dev/null
+From 336d02bc4c6bec5c3d933e5d470a94970f830957 Mon Sep 17 00:00:00 2001
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Date: Tue, 14 Jan 2025 09:51:32 -0500
+Subject: selftests/rseq: Fix handling of glibc without rseq support
+
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+
+commit 336d02bc4c6bec5c3d933e5d470a94970f830957 upstream.
+
+When porting librseq commit:
+
+commit c7b45750fa85 ("Adapt to glibc __rseq_size feature detection")
+
+from librseq to the kernel selftests, the following line was missed
+at the end of rseq_init():
+
+ rseq_size = get_rseq_kernel_feature_size();
+
+which effectively leaves rseq_size initialized to -1U when glibc does not
+have rseq support. glibc supports rseq from version 2.35 onwards.
+
+In a following librseq commit
+
+commit c67d198627c2 ("Only set 'rseq_size' on first thread registration")
+
+to mimic the libc behavior, a new approach is taken: don't set the
+feature size in 'rseq_size' until at least one thread has successfully
+registered. This allows using 'rseq_size' in fast-paths to test for both
+registration status and available features. The caveat is that on libc
+either all threads are registered or none are, while with bare librseq
+it is the responsibility of the user to register all threads using rseq.
+
+This combines the changes from the following librseq git commits:
+
+commit c7b45750fa85 ("Adapt to glibc __rseq_size feature detection")
+commit c67d198627c2 ("Only set 'rseq_size' on first thread registration")
+
+Fixes: a0cc649353bb ("selftests/rseq: Fix mm_cid test failure")
+Reported-by: Raghavendra Rao Ananta <rananta@google.com>
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Raghavendra Rao Ananta <rananta@google.com>
+Cc: Shuah Khan <skhan@linuxfoundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Boqun Feng <boqun.feng@gmail.com>
+Cc: "Paul E. McKenney" <paulmck@kernel.org>
+Cc: Carlos O'Donell <carlos@redhat.com>
+Cc: Florian Weimer <fweimer@redhat.com>
+Cc: Michael Jeanson <mjeanson@efficios.com>
+Cc: linux-kselftest@vger.kernel.org
+Cc: stable@vger.kernel.org
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/rseq/rseq.c | 32 +++++++++++++++++++++++++-------
+ tools/testing/selftests/rseq/rseq.h | 9 ++++++++-
+ 2 files changed, 33 insertions(+), 8 deletions(-)
+
+--- a/tools/testing/selftests/rseq/rseq.c
++++ b/tools/testing/selftests/rseq/rseq.c
+@@ -61,7 +61,6 @@ unsigned int rseq_size = -1U;
+ unsigned int rseq_flags;
+
+ static int rseq_ownership;
+-static int rseq_reg_success; /* At least one rseq registration has succeded. */
+
+ /* Allocate a large area for the TLS. */
+ #define RSEQ_THREAD_AREA_ALLOC_SIZE 1024
+@@ -152,14 +151,27 @@ int rseq_register_current_thread(void)
+ }
+ rc = sys_rseq(&__rseq_abi, get_rseq_min_alloc_size(), 0, RSEQ_SIG);
+ if (rc) {
+- if (RSEQ_READ_ONCE(rseq_reg_success)) {
++ /*
++ * After at least one thread has registered successfully
++ * (rseq_size > 0), the registration of other threads should
++ * never fail.
++ */
++ if (RSEQ_READ_ONCE(rseq_size) > 0) {
+ /* Incoherent success/failure within process. */
+ abort();
+ }
+ return -1;
+ }
+ assert(rseq_current_cpu_raw() >= 0);
+- RSEQ_WRITE_ONCE(rseq_reg_success, 1);
++
++ /*
++ * The first thread to register sets the rseq_size to mimic the libc
++ * behavior.
++ */
++ if (RSEQ_READ_ONCE(rseq_size) == 0) {
++ RSEQ_WRITE_ONCE(rseq_size, get_rseq_kernel_feature_size());
++ }
++
+ return 0;
+ }
+
+@@ -235,12 +247,18 @@ void rseq_init(void)
+ return;
+ }
+ rseq_ownership = 1;
+- if (!rseq_available()) {
+- rseq_size = 0;
+- return;
+- }
++
++ /* Calculate the offset of the rseq area from the thread pointer. */
+ rseq_offset = (void *)&__rseq_abi - rseq_thread_pointer();
++
++ /* rseq flags are deprecated, always set to 0. */
+ rseq_flags = 0;
++
++ /*
++ * Set the size to 0 until at least one thread registers to mimic the
++ * libc behavior.
++ */
++ rseq_size = 0;
+ }
+
+ static __attribute__((destructor))
+--- a/tools/testing/selftests/rseq/rseq.h
++++ b/tools/testing/selftests/rseq/rseq.h
+@@ -60,7 +60,14 @@
+ extern ptrdiff_t rseq_offset;
+
+ /*
+- * Size of the registered rseq area. 0 if the registration was
++ * The rseq ABI is composed of extensible feature fields. The extensions
++ * are done by appending additional fields at the end of the structure.
++ * The rseq_size defines the size of the active feature set which can be
++ * used by the application for the current rseq registration. Features
++ * starting at offset >= rseq_size are inactive and should not be used.
++ *
++ * The rseq_size is the intersection between the available allocation
++ * size for the rseq area and the feature size supported by the kernel.
+ * unsuccessful.
+ */
+ extern unsigned int rseq_size;
kernel-be-more-careful-about-dup_mmap-failures-and-u.patch
f2fs-introduce-linear-search-for-dentries.patch
md-md-bitmap-factor-behind-write-counters-out-from-bitmap_-start-end-write.patch
+md-md-bitmap-remove-the-last-parameter-for-bimtap_ops-endwrite.patch
+md-add-a-new-callback-pers-bitmap_sector.patch
+md-raid5-implement-pers-bitmap_sector.patch
+md-md-bitmap-move-bitmap_-start-end-write-to-md-upper-layer.patch
+revert-sunrpc-reduce-thread-wake-up-rate-when-receiving-large-rpc-messages.patch
+netfilter-nf_tables-reject-mismatching-sum-of-field_len-with-set-key-length.patch
+selftests-rseq-fix-handling-of-glibc-without-rseq-support.patch
+selftests-ftrace-fix-to-use-remount-when-testing-mount-gid-option.patch
+ktest.pl-check-kernelrelease-return-in-get_version.patch
+xfs-check-for-dead-buffers-in-xfs_buf_find_insert.patch
+xfs-fix-mount-hang-during-primary-superblock-recovery-failure.patch
+xfs-don-t-over-report-free-space-or-inodes-in-statvfs.patch
+xfs-release-the-dquot-buf-outside-of-qli_lock.patch
+xfs-don-t-shut-down-the-filesystem-for-media-failures-beyond-end-of-log.patch
+alsa-usb-audio-add-delay-quirk-for-ibasso-dc07-pro.patch
+net-usb-rtl8150-enable-basic-endpoint-checking.patch
+usb-xhci-fix-null-pointer-dereference-on-certain-command-aborts.patch
+drivers-card_reader-rtsx_usb-restore-interrupt-based-detection.patch
+usb-gadget-f_tcm-fix-get-setinterface-return-value.patch
+usb-dwc3-am62-fix-an-of-node-leak-in-phy_syscon_pll_refclk.patch
+usb-dwc3-core-defer-the-probe-until-usb-power-supply-ready.patch
+usb-dwc3-skip-resume-if-pm_runtime_set_active-fails.patch
+usb-typec-tcpm-set-src_send_capabilities-timeout-to-pd_t_sender_response.patch
+usb-typec-tcpci-prevent-sink-disconnection-before-vppsshutdown-in-spr-pps.patch
+clk-qcom-gcc-x1e80100-do-not-turn-off-usb_2-controller-gdsc.patch
--- /dev/null
+From a266462b937beba065e934a563efe13dd246a164 Mon Sep 17 00:00:00 2001
+From: Joe Hattori <joe@pf.is.s.u-tokyo.ac.jp>
+Date: Thu, 9 Jan 2025 09:16:38 +0900
+Subject: usb: dwc3-am62: Fix an OF node leak in phy_syscon_pll_refclk()
+
+From: Joe Hattori <joe@pf.is.s.u-tokyo.ac.jp>
+
+commit a266462b937beba065e934a563efe13dd246a164 upstream.
+
+phy_syscon_pll_refclk() leaks an OF node obtained by
+of_parse_phandle_with_fixed_args(), thus add an of_node_put() call.
+
+Cc: stable <stable@kernel.org>
+Fixes: e8784c0aec03 ("drivers: usb: dwc3: Add AM62 USB wrapper driver")
+Signed-off-by: Joe Hattori <joe@pf.is.s.u-tokyo.ac.jp>
+Acked-by: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Link: https://lore.kernel.org/r/20250109001638.70033-1-joe@pf.is.s.u-tokyo.ac.jp
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/dwc3/dwc3-am62.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/usb/dwc3/dwc3-am62.c
++++ b/drivers/usb/dwc3/dwc3-am62.c
+@@ -166,6 +166,7 @@ static int phy_syscon_pll_refclk(struct
+ if (ret)
+ return ret;
+
++ of_node_put(args.np);
+ am62->offset = args.args[0];
+
+ /* Core voltage. PHY_CORE_VOLTAGE bit Recommended to be 0 always */
--- /dev/null
+From 66e0ea341a2a78d14336117f19763bd9be26d45d Mon Sep 17 00:00:00 2001
+From: Kyle Tso <kyletso@google.com>
+Date: Wed, 15 Jan 2025 12:45:48 +0800
+Subject: usb: dwc3: core: Defer the probe until USB power supply ready
+
+From: Kyle Tso <kyletso@google.com>
+
+commit 66e0ea341a2a78d14336117f19763bd9be26d45d upstream.
+
+Currently, DWC3 driver attempts to acquire the USB power supply only
+once during the probe. If the USB power supply is not ready at that
+time, the driver simply ignores the failure and continues the probe,
+leading to permanent non-functioning of the gadget vbus_draw callback.
+
+Address this problem by delaying the dwc3 driver initialization until
+the USB power supply is registered.
+
+Fixes: 6f0764b5adea ("usb: dwc3: add a power supply for current control")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Kyle Tso <kyletso@google.com>
+Acked-by: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Link: https://lore.kernel.org/r/20250115044548.2701138-1-kyletso@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/dwc3/core.c | 30 +++++++++++++++++++++---------
+ 1 file changed, 21 insertions(+), 9 deletions(-)
+
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1664,8 +1664,6 @@ static void dwc3_get_properties(struct d
+ u8 tx_thr_num_pkt_prd = 0;
+ u8 tx_max_burst_prd = 0;
+ u8 tx_fifo_resize_max_num;
+- const char *usb_psy_name;
+- int ret;
+
+ /* default to highest possible threshold */
+ lpm_nyet_threshold = 0xf;
+@@ -1700,13 +1698,6 @@ static void dwc3_get_properties(struct d
+
+ dwc->sys_wakeup = device_may_wakeup(dwc->sysdev);
+
+- ret = device_property_read_string(dev, "usb-psy-name", &usb_psy_name);
+- if (ret >= 0) {
+- dwc->usb_psy = power_supply_get_by_name(usb_psy_name);
+- if (!dwc->usb_psy)
+- dev_err(dev, "couldn't get usb power supply\n");
+- }
+-
+ dwc->has_lpm_erratum = device_property_read_bool(dev,
+ "snps,has-lpm-erratum");
+ device_property_read_u8(dev, "snps,lpm-nyet-threshold",
+@@ -2109,6 +2100,23 @@ static int dwc3_get_num_ports(struct dwc
+ return 0;
+ }
+
++static struct power_supply *dwc3_get_usb_power_supply(struct dwc3 *dwc)
++{
++ struct power_supply *usb_psy;
++ const char *usb_psy_name;
++ int ret;
++
++ ret = device_property_read_string(dwc->dev, "usb-psy-name", &usb_psy_name);
++ if (ret < 0)
++ return NULL;
++
++ usb_psy = power_supply_get_by_name(usb_psy_name);
++ if (!usb_psy)
++ return ERR_PTR(-EPROBE_DEFER);
++
++ return usb_psy;
++}
++
+ static int dwc3_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+@@ -2165,6 +2173,10 @@ static int dwc3_probe(struct platform_de
+
+ dwc3_get_software_properties(dwc);
+
++ dwc->usb_psy = dwc3_get_usb_power_supply(dwc);
++ if (IS_ERR(dwc->usb_psy))
++ return dev_err_probe(dev, PTR_ERR(dwc->usb_psy), "couldn't get usb power supply\n");
++
+ dwc->reset = devm_reset_control_array_get_optional_shared(dev);
+ if (IS_ERR(dwc->reset)) {
+ ret = PTR_ERR(dwc->reset);
--- /dev/null
+From e3a9bd247cddfb6fa0c29c2361f70b76c359eaa0 Mon Sep 17 00:00:00 2001
+From: Ray Chi <raychi@google.com>
+Date: Mon, 6 Jan 2025 16:22:37 +0800
+Subject: usb: dwc3: Skip resume if pm_runtime_set_active() fails
+
+From: Ray Chi <raychi@google.com>
+
+commit e3a9bd247cddfb6fa0c29c2361f70b76c359eaa0 upstream.
+
+When the system begins to enter suspend mode, dwc3_suspend() is called
+by PM suspend. There is a problem that if someone interrupt the system
+suspend process between dwc3_suspend() and pm_suspend() of its parent
+device, PM suspend will be canceled and attempt to resume suspended
+devices so that dwc3_resume() will be called. However, dwc3 and its
+parent device (like the power domain or glue driver) may already be
+suspended by runtime PM in fact. If this situation happened, the
+pm_runtime_set_active() in dwc3_resume() will return an error since
+parent device was suspended. This can lead to unexpected behavior if
+DWC3 proceeds to execute dwc3_resume_common().
+
+EX.
+RPM suspend: ... -> dwc3_runtime_suspend()
+ -> rpm_suspend() of parent device
+...
+PM suspend: ... -> dwc3_suspend() -> pm_suspend of parent device
+ ^ interrupt, so resume suspended device
+ ... <- dwc3_resume() <-/
+ ^ pm_runtime_set_active() returns error
+
+To prevent the problem, this commit will skip dwc3_resume_common() and
+return the error if pm_runtime_set_active() fails.
+
+Fixes: 68c26fe58182 ("usb: dwc3: set pm runtime active before resume common")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Ray Chi <raychi@google.com>
+Acked-by: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Link: https://lore.kernel.org/r/20250106082240.3822059-1-raychi@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/dwc3/core.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -2601,12 +2601,15 @@ static int dwc3_resume(struct device *de
+ pinctrl_pm_select_default_state(dev);
+
+ pm_runtime_disable(dev);
+- pm_runtime_set_active(dev);
++ ret = pm_runtime_set_active(dev);
++ if (ret)
++ goto out;
+
+ ret = dwc3_resume_common(dwc, PMSG_RESUME);
+ if (ret)
+ pm_runtime_set_suspended(dev);
+
++out:
+ pm_runtime_enable(dev);
+
+ return ret;
--- /dev/null
+From 3b997089903b909684114aca6f79d683e5c64a0e Mon Sep 17 00:00:00 2001
+From: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Date: Wed, 11 Dec 2024 00:31:55 +0000
+Subject: usb: gadget: f_tcm: Fix Get/SetInterface return value
+
+From: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+
+commit 3b997089903b909684114aca6f79d683e5c64a0e upstream.
+
+Check to make sure that the GetInterface and SetInterface are for valid
+interface. Return proper alternate setting number on GetInterface.
+
+Fixes: 0b8b1a1fede0 ("usb: gadget: f_tcm: Provide support to get alternate setting in tcm function")
+Cc: stable@vger.kernel.org
+Signed-off-by: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Link: https://lore.kernel.org/r/ffd91b4640945ea4d3b4f4091cf1abbdbd9cf4fc.1733876548.git.Thinh.Nguyen@synopsys.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/gadget/function/f_tcm.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/gadget/function/f_tcm.c
++++ b/drivers/usb/gadget/function/f_tcm.c
+@@ -2051,9 +2051,14 @@ static void tcm_delayed_set_alt(struct w
+
+ static int tcm_get_alt(struct usb_function *f, unsigned intf)
+ {
+- if (intf == bot_intf_desc.bInterfaceNumber)
++ struct f_uas *fu = to_f_uas(f);
++
++ if (fu->iface != intf)
++ return -EOPNOTSUPP;
++
++ if (fu->flags & USBG_IS_BOT)
+ return USB_G_ALT_INT_BBB;
+- if (intf == uasp_intf_desc.bInterfaceNumber)
++ else if (fu->flags & USBG_IS_UAS)
+ return USB_G_ALT_INT_UAS;
+
+ return -EOPNOTSUPP;
+@@ -2063,6 +2068,9 @@ static int tcm_set_alt(struct usb_functi
+ {
+ struct f_uas *fu = to_f_uas(f);
+
++ if (fu->iface != intf)
++ return -EOPNOTSUPP;
++
+ if ((alt == USB_G_ALT_INT_BBB) || (alt == USB_G_ALT_INT_UAS)) {
+ struct guas_setup_wq *work;
+
--- /dev/null
+From 4d27afbf256028a1f54363367f30efc8854433c3 Mon Sep 17 00:00:00 2001
+From: Kyle Tso <kyletso@google.com>
+Date: Tue, 14 Jan 2025 22:24:35 +0800
+Subject: usb: typec: tcpci: Prevent Sink disconnection before vPpsShutdown in SPR PPS
+
+From: Kyle Tso <kyletso@google.com>
+
+commit 4d27afbf256028a1f54363367f30efc8854433c3 upstream.
+
+The Source can drop its output voltage to the minimum of the requested
+PPS APDO voltage range when it is in Current Limit Mode. If this voltage
+falls within the range of vPpsShutdown, the Source initiates a Hard
+Reset and discharges Vbus. However, currently the Sink may disconnect
+before the voltage reaches vPpsShutdown, leading to unexpected behavior.
+
+Prevent premature disconnection by setting the Sink's disconnect
+threshold to the minimum vPpsShutdown value. Additionally, consider the
+voltage drop due to IR drop when calculating the appropriate threshold.
+This ensures a robust and reliable interaction between the Source and
+Sink during SPR PPS Current Limit Mode operation.
+
+Fixes: 4288debeaa4e ("usb: typec: tcpci: Fix up sink disconnect thresholds for PD")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Kyle Tso <kyletso@google.com>
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Reviewed-by: Badhri Jagan Sridharan <badhri@google.com>
+Link: https://lore.kernel.org/r/20250114142435.2093857-1-kyletso@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/typec/tcpm/tcpci.c | 13 +++++++++----
+ drivers/usb/typec/tcpm/tcpm.c | 8 +++++---
+ include/linux/usb/tcpm.h | 3 ++-
+ 3 files changed, 16 insertions(+), 8 deletions(-)
+
+--- a/drivers/usb/typec/tcpm/tcpci.c
++++ b/drivers/usb/typec/tcpm/tcpci.c
+@@ -27,6 +27,7 @@
+ #define VPPS_NEW_MIN_PERCENT 95
+ #define VPPS_VALID_MIN_MV 100
+ #define VSINKDISCONNECT_PD_MIN_PERCENT 90
++#define VPPS_SHUTDOWN_MIN_PERCENT 85
+
+ struct tcpci {
+ struct device *dev;
+@@ -366,7 +367,8 @@ static int tcpci_enable_auto_vbus_discha
+ }
+
+ static int tcpci_set_auto_vbus_discharge_threshold(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
+- bool pps_active, u32 requested_vbus_voltage_mv)
++ bool pps_active, u32 requested_vbus_voltage_mv,
++ u32 apdo_min_voltage_mv)
+ {
+ struct tcpci *tcpci = tcpc_to_tcpci(dev);
+ unsigned int pwr_ctrl, threshold = 0;
+@@ -388,9 +390,12 @@ static int tcpci_set_auto_vbus_discharge
+ threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
+ } else if (mode == TYPEC_PWR_MODE_PD) {
+ if (pps_active)
+- threshold = ((VPPS_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
+- VSINKPD_MIN_IR_DROP_MV - VPPS_VALID_MIN_MV) *
+- VSINKDISCONNECT_PD_MIN_PERCENT / 100;
++ /*
++ * To prevent disconnect when the source is in Current Limit Mode.
++ * Set the threshold to the lowest possible voltage vPpsShutdown (min)
++ */
++ threshold = VPPS_SHUTDOWN_MIN_PERCENT * apdo_min_voltage_mv / 100 -
++ VSINKPD_MIN_IR_DROP_MV;
+ else
+ threshold = ((VSRC_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
+ VSINKPD_MIN_IR_DROP_MV - VSRC_VALID_MIN_MV) *
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -2943,10 +2943,12 @@ static int tcpm_set_auto_vbus_discharge_
+ return 0;
+
+ ret = port->tcpc->set_auto_vbus_discharge_threshold(port->tcpc, mode, pps_active,
+- requested_vbus_voltage);
++ requested_vbus_voltage,
++ port->pps_data.min_volt);
+ tcpm_log_force(port,
+- "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u ret:%d",
+- mode, pps_active ? 'y' : 'n', requested_vbus_voltage, ret);
++ "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u pps_apdo_min_volt:%u ret:%d",
++ mode, pps_active ? 'y' : 'n', requested_vbus_voltage,
++ port->pps_data.min_volt, ret);
+
+ return ret;
+ }
+--- a/include/linux/usb/tcpm.h
++++ b/include/linux/usb/tcpm.h
+@@ -163,7 +163,8 @@ struct tcpc_dev {
+ void (*frs_sourcing_vbus)(struct tcpc_dev *dev);
+ int (*enable_auto_vbus_discharge)(struct tcpc_dev *dev, bool enable);
+ int (*set_auto_vbus_discharge_threshold)(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
+- bool pps_active, u32 requested_vbus_voltage);
++ bool pps_active, u32 requested_vbus_voltage,
++ u32 pps_apdo_min_voltage);
+ bool (*is_vbus_vsafe0v)(struct tcpc_dev *dev);
+ void (*set_partner_usb_comm_capable)(struct tcpc_dev *dev, bool enable);
+ void (*check_contaminant)(struct tcpc_dev *dev);
--- /dev/null
+From 2eb3da037c2c20fa30bc502bc092479b2a1aaae2 Mon Sep 17 00:00:00 2001
+From: Jos Wang <joswang@lenovo.com>
+Date: Sun, 5 Jan 2025 21:52:45 +0800
+Subject: usb: typec: tcpm: set SRC_SEND_CAPABILITIES timeout to PD_T_SENDER_RESPONSE
+
+From: Jos Wang <joswang@lenovo.com>
+
+commit 2eb3da037c2c20fa30bc502bc092479b2a1aaae2 upstream.
+
+As per the PD2.0 spec ("8.3.3.2.3 PE_SRC_Send_Capabilities state"), after the
+Source receives the GoodCRC Message from the Sink in response to the
+Source_Capabilities message, it should start the SenderResponseTimer,
+after the timer times out, the state machine transitions to the
+HARD_RESET state.
+
+Fixes: f0690a25a140 ("staging: typec: USB Type-C Port Manager (tcpm)")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jos Wang <joswang@lenovo.com>
+Reviewed-by: Badhri Jagan Sridharan <badhri@google.com>
+Link: https://lore.kernel.org/r/20250105135245.7493-1-joswang1221@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/typec/tcpm/tcpm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -4772,7 +4772,7 @@ static void run_state_machine(struct tcp
+ port->caps_count = 0;
+ port->pd_capable = true;
+ tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
+- PD_T_SEND_SOURCE_CAP);
++ PD_T_SENDER_RESPONSE);
+ }
+ break;
+ case SRC_SEND_CAPABILITIES_TIMEOUT:
--- /dev/null
+From 1e0a19912adb68a4b2b74fd77001c96cd83eb073 Mon Sep 17 00:00:00 2001
+From: Michal Pecio <michal.pecio@gmail.com>
+Date: Fri, 27 Dec 2024 14:01:40 +0200
+Subject: usb: xhci: Fix NULL pointer dereference on certain command aborts
+
+From: Michal Pecio <michal.pecio@gmail.com>
+
+commit 1e0a19912adb68a4b2b74fd77001c96cd83eb073 upstream.
+
+If a command is queued to the final usable TRB of a ring segment, the
+enqueue pointer is advanced to the subsequent link TRB and no further.
+If the command is later aborted, when the abort completion is handled
+the dequeue pointer is advanced to the first TRB of the next segment.
+
+If no further commands are queued, xhci_handle_stopped_cmd_ring() sees
+the ring pointers unequal and assumes that there is a pending command,
+so it calls xhci_mod_cmd_timer() which crashes if cur_cmd was NULL.
+
+Don't attempt timer setup if cur_cmd is NULL. The subsequent doorbell
+ring likely is unnecessary too, but it's harmless. Leave it alone.
+
+This is probably Bug 219532, but no confirmation has been received.
+
+The issue has been independently reproduced and confirmed fixed using
+a USB MCU programmed to NAK the Status stage of SET_ADDRESS forever.
+Everything continued working normally after several prevented crashes.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=219532
+Fixes: c311e391a7ef ("xhci: rework command timeout and cancellation,")
+CC: stable@vger.kernel.org
+Signed-off-by: Michal Pecio <michal.pecio@gmail.com>
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Link: https://lore.kernel.org/r/20241227120142.1035206-4-mathias.nyman@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/host/xhci-ring.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -422,7 +422,8 @@ static void xhci_handle_stopped_cmd_ring
+ if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
+ !(xhci->xhc_state & XHCI_STATE_DYING)) {
+ xhci->current_cmd = cur_cmd;
+- xhci_mod_cmd_timer(xhci);
++ if (cur_cmd)
++ xhci_mod_cmd_timer(xhci);
+ xhci_ring_cmd_db(xhci);
+ }
+ }
--- /dev/null
+From 07eae0fa67ca4bbb199ad85645e0f9dfaef931cd Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Thu, 16 Jan 2025 07:01:41 +0100
+Subject: xfs: check for dead buffers in xfs_buf_find_insert
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit 07eae0fa67ca4bbb199ad85645e0f9dfaef931cd upstream.
+
+Commit 32dd4f9c506b ("xfs: remove a superflous hash lookup when inserting
+new buffers") converted xfs_buf_find_insert to use
+rhashtable_lookup_get_insert_fast and thus an operation that returns the
+existing buffer when an insert would duplicate the hash key. But this
+code path misses the check for a buffer with a reference count of zero,
+which could lead to reusing an about to be freed buffer. Fix this by
+using the same atomic_inc_not_zero pattern as xfs_buf_insert.
+
+Fixes: 32dd4f9c506b ("xfs: remove a superflous hash lookup when inserting new buffers")
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Dave Chinner <dchinner@redhat.com>
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Cc: stable@vger.kernel.org # v6.0
+Signed-off-by: Carlos Maiolino <cem@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_buf.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -663,9 +663,8 @@ xfs_buf_find_insert(
+ spin_unlock(&bch->bc_lock);
+ goto out_free_buf;
+ }
+- if (bp) {
++ if (bp && atomic_inc_not_zero(&bp->b_hold)) {
+ /* found an existing buffer */
+- atomic_inc(&bp->b_hold);
+ spin_unlock(&bch->bc_lock);
+ error = xfs_buf_find_lock(bp, flags);
+ if (error)
--- /dev/null
+From 4b8d867ca6e2fc6d152f629fdaf027053b81765a Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <djwong@kernel.org>
+Date: Thu, 12 Dec 2024 14:37:56 -0800
+Subject: xfs: don't over-report free space or inodes in statvfs
+
+From: Darrick J. Wong <djwong@kernel.org>
+
+commit 4b8d867ca6e2fc6d152f629fdaf027053b81765a upstream.
+
+Emmanuel Florac reports a strange occurrence when project quota limits
+are enabled, free space is lower than the remaining quota, and someone
+runs statvfs:
+
+ # mkfs.xfs -f /dev/sda
+ # mount /dev/sda /mnt -o prjquota
+ # xfs_quota -x -c 'limit -p bhard=2G 55' /mnt
+ # mkdir /mnt/dir
+ # xfs_io -c 'chproj 55' -c 'chattr +P' -c 'stat -vvvv' /mnt/dir
+ # fallocate -l 19g /mnt/a
+ # df /mnt /mnt/dir
+ Filesystem Size Used Avail Use% Mounted on
+ /dev/sda 20G 20G 345M 99% /mnt
+ /dev/sda 2.0G 0 2.0G 0% /mnt
+
+I think the bug here is that xfs_fill_statvfs_from_dquot unconditionally
+assigns to f_bfree without checking that the filesystem has enough free
+space to fill the remaining project quota. However, this is a
+longstanding behavior of xfs so it's unclear what to do here.
+
+Cc: <stable@vger.kernel.org> # v2.6.18
+Fixes: 932f2c323196c2 ("[XFS] statvfs component of directory/project quota support, code originally by Glen.")
+Reported-by: Emmanuel Florac <eflorac@intellique.com>
+Signed-off-by: "Darrick J. Wong" <djwong@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_qm_bhv.c | 27 +++++++++++++++++----------
+ 1 file changed, 17 insertions(+), 10 deletions(-)
+
+--- a/fs/xfs/xfs_qm_bhv.c
++++ b/fs/xfs/xfs_qm_bhv.c
+@@ -32,21 +32,28 @@ xfs_fill_statvfs_from_dquot(
+ limit = blkres->softlimit ?
+ blkres->softlimit :
+ blkres->hardlimit;
+- if (limit && statp->f_blocks > limit) {
+- statp->f_blocks = limit;
+- statp->f_bfree = statp->f_bavail =
+- (statp->f_blocks > blkres->reserved) ?
+- (statp->f_blocks - blkres->reserved) : 0;
++ if (limit) {
++ uint64_t remaining = 0;
++
++ if (limit > blkres->reserved)
++ remaining = limit - blkres->reserved;
++
++ statp->f_blocks = min(statp->f_blocks, limit);
++ statp->f_bfree = min(statp->f_bfree, remaining);
++ statp->f_bavail = min(statp->f_bavail, remaining);
+ }
+
+ limit = dqp->q_ino.softlimit ?
+ dqp->q_ino.softlimit :
+ dqp->q_ino.hardlimit;
+- if (limit && statp->f_files > limit) {
+- statp->f_files = limit;
+- statp->f_ffree =
+- (statp->f_files > dqp->q_ino.reserved) ?
+- (statp->f_files - dqp->q_ino.reserved) : 0;
++ if (limit) {
++ uint64_t remaining = 0;
++
++ if (limit > dqp->q_ino.reserved)
++ remaining = limit - dqp->q_ino.reserved;
++
++ statp->f_files = min(statp->f_files, limit);
++ statp->f_ffree = min(statp->f_ffree, remaining);
+ }
+ }
+
--- /dev/null
+From f4ed93037966aea07ae6b10ab208976783d24e2e Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <djwong@kernel.org>
+Date: Tue, 17 Dec 2024 13:43:06 -0800
+Subject: xfs: don't shut down the filesystem for media failures beyond end of log
+
+From: Darrick J. Wong <djwong@kernel.org>
+
+commit f4ed93037966aea07ae6b10ab208976783d24e2e upstream.
+
+If the filesystem has an external log device on pmem and the pmem
+reports a media error beyond the end of the log area, don't shut down
+the filesystem because we don't use that space.
+
+Cc: <stable@vger.kernel.org> # v6.0
+Fixes: 6f643c57d57c56 ("xfs: implement ->notify_failure() for XFS")
+Signed-off-by: "Darrick J. Wong" <djwong@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_notify_failure.c | 121 +++++++++++++++++++++++++++++---------------
+ 1 file changed, 82 insertions(+), 39 deletions(-)
+
+--- a/fs/xfs/xfs_notify_failure.c
++++ b/fs/xfs/xfs_notify_failure.c
+@@ -154,6 +154,79 @@ xfs_dax_notify_failure_thaw(
+ }
+
+ static int
++xfs_dax_translate_range(
++ struct xfs_buftarg *btp,
++ u64 offset,
++ u64 len,
++ xfs_daddr_t *daddr,
++ uint64_t *bblen)
++{
++ u64 dev_start = btp->bt_dax_part_off;
++ u64 dev_len = bdev_nr_bytes(btp->bt_bdev);
++ u64 dev_end = dev_start + dev_len - 1;
++
++ /* Notify failure on the whole device. */
++ if (offset == 0 && len == U64_MAX) {
++ offset = dev_start;
++ len = dev_len;
++ }
++
++ /* Ignore the range out of filesystem area */
++ if (offset + len - 1 < dev_start)
++ return -ENXIO;
++ if (offset > dev_end)
++ return -ENXIO;
++
++ /* Calculate the real range when it touches the boundary */
++ if (offset > dev_start)
++ offset -= dev_start;
++ else {
++ len -= dev_start - offset;
++ offset = 0;
++ }
++ if (offset + len - 1 > dev_end)
++ len = dev_end - offset + 1;
++
++ *daddr = BTOBB(offset);
++ *bblen = BTOBB(len);
++ return 0;
++}
++
++static int
++xfs_dax_notify_logdev_failure(
++ struct xfs_mount *mp,
++ u64 offset,
++ u64 len,
++ int mf_flags)
++{
++ xfs_daddr_t daddr;
++ uint64_t bblen;
++ int error;
++
++ /*
++ * Return ENXIO instead of shutting down the filesystem if the failed
++ * region is beyond the end of the log.
++ */
++ error = xfs_dax_translate_range(mp->m_logdev_targp,
++ offset, len, &daddr, &bblen);
++ if (error)
++ return error;
++
++ /*
++ * In the pre-remove case the failure notification is attempting to
++ * trigger a force unmount. The expectation is that the device is
++ * still present, but its removal is in progress and can not be
++ * cancelled, proceed with accessing the log device.
++ */
++ if (mf_flags & MF_MEM_PRE_REMOVE)
++ return 0;
++
++ xfs_err(mp, "ondisk log corrupt, shutting down fs!");
++ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_ONDISK);
++ return -EFSCORRUPTED;
++}
++
++static int
+ xfs_dax_notify_ddev_failure(
+ struct xfs_mount *mp,
+ xfs_daddr_t daddr,
+@@ -263,8 +336,9 @@ xfs_dax_notify_failure(
+ int mf_flags)
+ {
+ struct xfs_mount *mp = dax_holder(dax_dev);
+- u64 ddev_start;
+- u64 ddev_end;
++ xfs_daddr_t daddr;
++ uint64_t bblen;
++ int error;
+
+ if (!(mp->m_super->s_flags & SB_BORN)) {
+ xfs_warn(mp, "filesystem is not ready for notify_failure()!");
+@@ -279,17 +353,7 @@ xfs_dax_notify_failure(
+
+ if (mp->m_logdev_targp && mp->m_logdev_targp->bt_daxdev == dax_dev &&
+ mp->m_logdev_targp != mp->m_ddev_targp) {
+- /*
+- * In the pre-remove case the failure notification is attempting
+- * to trigger a force unmount. The expectation is that the
+- * device is still present, but its removal is in progress and
+- * can not be cancelled, proceed with accessing the log device.
+- */
+- if (mf_flags & MF_MEM_PRE_REMOVE)
+- return 0;
+- xfs_err(mp, "ondisk log corrupt, shutting down fs!");
+- xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_ONDISK);
+- return -EFSCORRUPTED;
++ return xfs_dax_notify_logdev_failure(mp, offset, len, mf_flags);
+ }
+
+ if (!xfs_has_rmapbt(mp)) {
+@@ -297,33 +361,12 @@ xfs_dax_notify_failure(
+ return -EOPNOTSUPP;
+ }
+
+- ddev_start = mp->m_ddev_targp->bt_dax_part_off;
+- ddev_end = ddev_start + bdev_nr_bytes(mp->m_ddev_targp->bt_bdev) - 1;
+-
+- /* Notify failure on the whole device. */
+- if (offset == 0 && len == U64_MAX) {
+- offset = ddev_start;
+- len = bdev_nr_bytes(mp->m_ddev_targp->bt_bdev);
+- }
+-
+- /* Ignore the range out of filesystem area */
+- if (offset + len - 1 < ddev_start)
+- return -ENXIO;
+- if (offset > ddev_end)
+- return -ENXIO;
+-
+- /* Calculate the real range when it touches the boundary */
+- if (offset > ddev_start)
+- offset -= ddev_start;
+- else {
+- len -= ddev_start - offset;
+- offset = 0;
+- }
+- if (offset + len - 1 > ddev_end)
+- len = ddev_end - offset + 1;
++ error = xfs_dax_translate_range(mp->m_ddev_targp, offset, len, &daddr,
++ &bblen);
++ if (error)
++ return error;
+
+- return xfs_dax_notify_ddev_failure(mp, BTOBB(offset), BTOBB(len),
+- mf_flags);
++ return xfs_dax_notify_ddev_failure(mp, daddr, bblen, mf_flags);
+ }
+
+ const struct dax_holder_operations xfs_dax_holder_operations = {
--- /dev/null
+From efebe42d95fbba91dca6e3e32cb9e0612eb56de5 Mon Sep 17 00:00:00 2001
+From: Long Li <leo.lilong@huawei.com>
+Date: Sat, 11 Jan 2025 15:05:44 +0800
+Subject: xfs: fix mount hang during primary superblock recovery failure
+
+From: Long Li <leo.lilong@huawei.com>
+
+commit efebe42d95fbba91dca6e3e32cb9e0612eb56de5 upstream.
+
+When mounting an image containing a log with sb modifications that require
+log replay, the mount process hangs indefinitely with the following stack:
+
+ [root@localhost ~]# cat /proc/557/stack
+ [<0>] xfs_buftarg_wait+0x31/0x70
+ [<0>] xfs_buftarg_drain+0x54/0x350
+ [<0>] xfs_mountfs+0x66e/0xe80
+ [<0>] xfs_fs_fill_super+0x7f1/0xec0
+ [<0>] get_tree_bdev_flags+0x186/0x280
+ [<0>] get_tree_bdev+0x18/0x30
+ [<0>] xfs_fs_get_tree+0x1d/0x30
+ [<0>] vfs_get_tree+0x2d/0x110
+ [<0>] path_mount+0xb59/0xfc0
+ [<0>] do_mount+0x92/0xc0
+ [<0>] __x64_sys_mount+0xc2/0x160
+ [<0>] x64_sys_call+0x2de4/0x45c0
+ [<0>] do_syscall_64+0xa7/0x240
+ [<0>] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+During log recovery, while updating the in-memory superblock from the
+primary SB buffer, if an error is encountered, such as superblock
+corruption occurs or some other reasons, we will proceed to out_release
+and release the xfs_buf. However, this is insufficient because the
+xfs_buf's log item has already been initialized and the xfs_buf is held
+by the buffer log item as follows, the xfs_buf will not be released,
+causing the mount thread to hang.
+
+ xlog_recover_do_primary_sb_buffer
+ xlog_recover_do_reg_buffer
+ xlog_recover_validate_buf_type
+ xfs_buf_item_init(bp, mp)
+
+The solution is straightforward: we simply need to allow it to be
+handled by the normal buffer write process. The filesystem will be
+shutdown before the submission of buffer_list in xlog_do_recovery_pass(),
+ensuring the correct release of the xfs_buf as follows:
+
+ xlog_do_recovery_pass
+ error = xlog_recover_process
+ xlog_recover_process_data
+ xlog_recover_process_ophdr
+ xlog_recovery_process_trans
+ ...
+ xlog_recover_buf_commit_pass2
+ error = xlog_recover_do_primary_sb_buffer
+ //Encounter error and return
+ if (error)
+ goto out_writebuf
+ ...
+ out_writebuf:
+ xfs_buf_delwri_queue(bp, buffer_list) //add bp to list
+ return error
+ ...
+ if (!list_empty(&buffer_list))
+ if (error)
+ xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); //shutdown first
+ xfs_buf_delwri_submit(&buffer_list); //submit buffers in list
+ __xfs_buf_submit
+ if (bp->b_mount->m_log && xlog_is_shutdown(bp->b_mount->m_log))
+ xfs_buf_ioend_fail(bp) //release bp correctly
+
+Fixes: 6a18765b54e2 ("xfs: update the file system geometry after recoverying superblock buffers")
+Cc: stable@vger.kernel.org # v6.12
+Signed-off-by: Long Li <leo.lilong@huawei.com>
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Carlos Maiolino <cem@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_buf_item_recover.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/fs/xfs/xfs_buf_item_recover.c
++++ b/fs/xfs/xfs_buf_item_recover.c
+@@ -1079,7 +1079,7 @@ xlog_recover_buf_commit_pass2(
+ error = xlog_recover_do_primary_sb_buffer(mp, item, bp, buf_f,
+ current_lsn);
+ if (error)
+- goto out_release;
++ goto out_writebuf;
+
+ /* Update the rt superblock if we have one. */
+ if (xfs_has_rtsb(mp) && mp->m_rtsb_bp) {
+@@ -1097,6 +1097,15 @@ xlog_recover_buf_commit_pass2(
+ }
+
+ /*
++ * Buffer held by buf log item during 'normal' buffer recovery must
++ * be committed through buffer I/O submission path to ensure proper
++ * release. When error occurs during sb buffer recovery, log shutdown
++ * will be done before submitting buffer list so that buffers can be
++ * released correctly through ioend failure path.
++ */
++out_writebuf:
++
++ /*
+ * Perform delayed write on the buffer. Asynchronous writes will be
+ * slower when taking into account all the buffers to be flushed.
+ *
--- /dev/null
+From 1aacd3fac248902ea1f7607f2d12b93929a4833b Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <djwong@kernel.org>
+Date: Tue, 17 Dec 2024 15:00:49 -0800
+Subject: xfs: release the dquot buf outside of qli_lock
+
+From: Darrick J. Wong <djwong@kernel.org>
+
+commit 1aacd3fac248902ea1f7607f2d12b93929a4833b upstream.
+
+Lai Yi reported a lockdep complaint about circular locking:
+
+ Chain exists of:
+ &lp->qli_lock --> &bch->bc_lock --> &l->lock
+
+ Possible unsafe locking scenario:
+
+ CPU0 CPU1
+ ---- ----
+ lock(&l->lock);
+ lock(&bch->bc_lock);
+ lock(&l->lock);
+ lock(&lp->qli_lock);
+
+I /think/ the problem here is that xfs_dquot_attach_buf during
+quotacheck will release the buffer while it's holding the qli_lock.
+Because this is a cached buffer, xfs_buf_rele_cached takes b_lock before
+decrementing b_hold. Other threads have taught lockdep that a locking
+dependency chain is bp->b_lock -> bch->bc_lock -> l(ru)->lock; and that
+another chain is l(ru)->lock -> lp->qli_lock. Hence we do not want to
+take b_lock while holding qli_lock.
+
+Reported-by: syzbot+3126ab3db03db42e7a31@syzkaller.appspotmail.com
+Cc: <stable@vger.kernel.org> # v6.13-rc3
+Fixes: ca378189fdfa89 ("xfs: convert quotacheck to attach dquot buffers")
+Tested-by: syzbot+3126ab3db03db42e7a31@syzkaller.appspotmail.com
+Signed-off-by: "Darrick J. Wong" <djwong@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_dquot.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/fs/xfs/xfs_dquot.c
++++ b/fs/xfs/xfs_dquot.c
+@@ -1316,7 +1316,8 @@ out_abort:
+
+ /*
+ * Attach a dquot buffer to this dquot to avoid allocating a buffer during a
+- * dqflush, since dqflush can be called from reclaim context.
++ * dqflush, since dqflush can be called from reclaim context. Caller must hold
++ * the dqlock.
+ */
+ int
+ xfs_dquot_attach_buf(
+@@ -1337,13 +1338,16 @@ xfs_dquot_attach_buf(
+ return error;
+
+ /*
+- * Attach the dquot to the buffer so that the AIL does not have
+- * to read the dquot buffer to push this item.
++ * Hold the dquot buffer so that we retain our ref to it after
++ * detaching it from the transaction, then give that ref to the
++ * dquot log item so that the AIL does not have to read the
++ * dquot buffer to push this item.
+ */
+ xfs_buf_hold(bp);
++ xfs_trans_brelse(tp, bp);
++
+ spin_lock(&qlip->qli_lock);
+ lip->li_buf = bp;
+- xfs_trans_brelse(tp, bp);
+ }
+ qlip->qli_dirty = true;
+ spin_unlock(&qlip->qli_lock);