--- /dev/null
+From 50151b7f1c79a09117837eb95b76c2de76841dab Mon Sep 17 00:00:00 2001
+From: Bob Zhou <bob.zhou@amd.com>
+Date: Fri, 31 May 2024 15:01:22 +0800
+Subject: drm/amd/pm: Fix the null pointer dereference for vega10_hwmgr
+
+From: Bob Zhou <bob.zhou@amd.com>
+
+commit 50151b7f1c79a09117837eb95b76c2de76841dab upstream.
+
+Check the return value and handle null pointers to avoid a null pointer dereference.
+
+Signed-off-by: Bob Zhou <bob.zhou@amd.com>
+Reviewed-by: Tim Huang <Tim.Huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Mukul Sikka <mukul.sikka@broadcom.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c | 30 +++++++++++++++---
+ 1 file changed, 26 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+@@ -3415,13 +3415,17 @@ static int vega10_find_dpm_states_clocks
+ const struct vega10_power_state *vega10_ps =
+ cast_const_phw_vega10_power_state(states->pnew_state);
+ struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
+- uint32_t sclk = vega10_ps->performance_levels
+- [vega10_ps->performance_level_count - 1].gfx_clock;
+ struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
+- uint32_t mclk = vega10_ps->performance_levels
+- [vega10_ps->performance_level_count - 1].mem_clock;
++ uint32_t sclk, mclk;
+ uint32_t i;
+
++ if (vega10_ps == NULL)
++ return -EINVAL;
++ sclk = vega10_ps->performance_levels
++ [vega10_ps->performance_level_count - 1].gfx_clock;
++ mclk = vega10_ps->performance_levels
++ [vega10_ps->performance_level_count - 1].mem_clock;
++
+ for (i = 0; i < sclk_table->count; i++) {
+ if (sclk == sclk_table->dpm_levels[i].value)
+ break;
+@@ -3728,6 +3732,9 @@ static int vega10_generate_dpm_level_ena
+ cast_const_phw_vega10_power_state(states->pnew_state);
+ int i;
+
++ if (vega10_ps == NULL)
++ return -EINVAL;
++
+ PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
+ "Attempt to Trim DPM States Failed!",
+ return -1);
+@@ -4865,6 +4872,9 @@ static int vega10_check_states_equal(str
+
+ psa = cast_const_phw_vega10_power_state(pstate1);
+ psb = cast_const_phw_vega10_power_state(pstate2);
++ if (psa == NULL || psb == NULL)
++ return -EINVAL;
++
+ /* If the two states don't even have the same number of performance levels they cannot be the same state. */
+ if (psa->performance_level_count != psb->performance_level_count) {
+ *equal = false;
+@@ -4990,6 +5000,8 @@ static int vega10_set_sclk_od(struct pp_
+ return -EINVAL;
+
+ vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
++ if (vega10_ps == NULL)
++ return -EINVAL;
+
+ vega10_ps->performance_levels
+ [vega10_ps->performance_level_count - 1].gfx_clock =
+@@ -5041,6 +5053,8 @@ static int vega10_set_mclk_od(struct pp_
+ return -EINVAL;
+
+ vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
++ if (vega10_ps == NULL)
++ return -EINVAL;
+
+ vega10_ps->performance_levels
+ [vega10_ps->performance_level_count - 1].mem_clock =
+@@ -5278,6 +5292,9 @@ static void vega10_odn_update_power_stat
+ return;
+
+ vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
++ if (vega10_ps == NULL)
++ return;
++
+ max_level = vega10_ps->performance_level_count - 1;
+
+ if (vega10_ps->performance_levels[max_level].gfx_clock !=
+@@ -5300,6 +5317,9 @@ static void vega10_odn_update_power_stat
+
+ ps = (struct pp_power_state *)((unsigned long)(hwmgr->ps) + hwmgr->ps_size * (hwmgr->num_ps - 1));
+ vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
++ if (vega10_ps == NULL)
++ return;
++
+ max_level = vega10_ps->performance_level_count - 1;
+
+ if (vega10_ps->performance_levels[max_level].gfx_clock !=
+@@ -5490,6 +5510,8 @@ static int vega10_get_performance_level(
+ return -EINVAL;
+
+ ps = cast_const_phw_vega10_power_state(state);
++ if (ps == NULL)
++ return -EINVAL;
+
+ i = index > ps->performance_level_count - 1 ?
+ ps->performance_level_count - 1 : index;
--- /dev/null
+From 04e568a3b31cfbd545c04c8bfc35c20e5ccfce0f Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Wed, 7 Dec 2022 12:27:04 +0100
+Subject: ext4: handle redirtying in ext4_bio_write_page()
+
+From: Jan Kara <jack@suse.cz>
+
+commit 04e568a3b31cfbd545c04c8bfc35c20e5ccfce0f upstream.
+
+Since we want to transition transaction commits to use ext4_writepages()
+for writing back ordered data, add handling of page redirtying into
+ext4_bio_write_page(). Also move buffer dirty bit clearing into the same
+place as other buffer state handling.
+
+Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20221207112722.22220-1-jack@suse.cz
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/page-io.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -490,6 +490,13 @@ int ext4_bio_write_page(struct ext4_io_s
+ /* A hole? We can safely clear the dirty bit */
+ if (!buffer_mapped(bh))
+ clear_buffer_dirty(bh);
++ /*
++ * Keeping dirty some buffer we cannot write? Make
++ * sure to redirty the page. This happens e.g. when
++ * doing writeout for transaction commit.
++ */
++ if (buffer_dirty(bh) && !PageDirty(page))
++ redirty_page_for_writepage(wbc, page);
+ if (io->io_bio)
+ ext4_io_submit(io);
+ continue;
+@@ -497,6 +504,7 @@ int ext4_bio_write_page(struct ext4_io_s
+ if (buffer_new(bh))
+ clear_buffer_new(bh);
+ set_buffer_async_write(bh);
++ clear_buffer_dirty(bh);
+ nr_to_submit++;
+ } while ((bh = bh->b_this_page) != head);
+
+@@ -539,7 +547,10 @@ int ext4_bio_write_page(struct ext4_io_s
+ printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
+ redirty_page_for_writepage(wbc, page);
+ do {
+- clear_buffer_async_write(bh);
++ if (buffer_async_write(bh)) {
++ clear_buffer_async_write(bh);
++ set_buffer_dirty(bh);
++ }
+ bh = bh->b_this_page;
+ } while (bh != head);
+ goto unlock;
+@@ -552,7 +563,6 @@ int ext4_bio_write_page(struct ext4_io_s
+ continue;
+ io_submit_add_bh(io, inode, page, bounce_page, bh);
+ nr_submitted++;
+- clear_buffer_dirty(bh);
+ } while ((bh = bh->b_this_page) != head);
+
+ unlock:
--- /dev/null
+From 8216776ccff6fcd40e3fdaa109aa4150ebe760b3 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Mon, 14 Aug 2023 11:29:01 -0700
+Subject: ext4: reject casefold inode flag without casefold feature
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 8216776ccff6fcd40e3fdaa109aa4150ebe760b3 upstream.
+
+It is invalid for the casefold inode flag to be set without the casefold
+superblock feature flag also being set. e2fsck already considers this
+case to be invalid and handles it by offering to clear the casefold flag
+on the inode. __ext4_iget() also already considered this to be invalid,
+sort of, but it only got so far as logging an error message; it didn't
+actually reject the inode. Make it reject the inode so that other code
+doesn't have to handle this case. This matches what f2fs does.
+
+Note: we could check 's_encoding != NULL' instead of
+ext4_has_feature_casefold(). This would make the check robust against
+the casefold feature being enabled by userspace writing to the page
+cache of the mounted block device. However, it's unsolvable in general
+for filesystems to be robust against concurrent writes to the page cache
+of the mounted block device. Though this very particular scenario
+involving the casefold feature is solvable, we should not pretend that
+we can support this model, so let's just check the casefold feature.
+tune2fs already forbids enabling casefold on a mounted filesystem.
+
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Link: https://lore.kernel.org/r/20230814182903.37267-2-ebiggers@kernel.org
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/inode.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4921,9 +4921,12 @@ struct inode *__ext4_iget(struct super_b
+ "iget: bogus i_mode (%o)", inode->i_mode);
+ goto bad_inode;
+ }
+- if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb))
++ if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb)) {
+ ext4_error_inode(inode, function, line, 0,
+ "casefold flag without casefold feature");
++ ret = -EFSCORRUPTED;
++ goto bad_inode;
++ }
+ if ((err_str = check_igot_inode(inode, flags)) != NULL) {
+ ext4_error_inode(inode, function, line, 0, err_str);
+ ret = -EFSCORRUPTED;
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
- drivers/media/usb/uvc/uvc_driver.c | 18 ++++++++++++++----
+ drivers/media/usb/uvc/uvc_driver.c | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
-diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
-index b19c75a6f595..ef5788899503 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
-@@ -936,16 +936,26 @@ static int uvc_parse_streaming(struct uvc_device *dev,
+@@ -936,16 +936,26 @@ static int uvc_parse_streaming(struct uv
goto error;
}
streaming->format = format;
streaming->nformats = 0;
---
-2.43.0
-
--- /dev/null
+From cc5645fddb0ce28492b15520306d092730dffa48 Mon Sep 17 00:00:00 2001
+From: Nikita Kiryushin <kiryushin@ancud.ru>
+Date: Wed, 27 Mar 2024 20:47:47 +0300
+Subject: rcu-tasks: Fix show_rcu_tasks_trace_gp_kthread buffer overflow
+
+From: Nikita Kiryushin <kiryushin@ancud.ru>
+
+commit cc5645fddb0ce28492b15520306d092730dffa48 upstream.
+
+There is a possibility of buffer overflow in
+show_rcu_tasks_trace_gp_kthread() if counters, passed
+to sprintf() are huge. Counter numbers, needed for this
+are unrealistically high, but buffer overflow is still
+possible.
+
+Use snprintf() with buffer size instead of sprintf().
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: edf3775f0ad6 ("rcu-tasks: Add count for idle tasks on offline CPUs")
+Signed-off-by: Nikita Kiryushin <kiryushin@ancud.ru>
+Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Vamsi Krishna Brahmajosyula <vamsi-krishna.brahmajosyula@broadcom.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/rcu/tasks.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -1323,7 +1323,7 @@ void show_rcu_tasks_trace_gp_kthread(voi
+ {
+ char buf[64];
+
+- sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
++ snprintf(buf, sizeof(buf), "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
+ data_race(n_heavy_reader_ofl_updates),
+ data_race(n_heavy_reader_updates),
+ data_race(n_heavy_reader_attempts));
block-remove-the-blk_flush_integrity-call-in-blk_int.patch
drm-amd-display-skip-wbscl_set_scaler_filter-if-filt.patch
media-uvcvideo-enforce-alignment-of-frame-and-interv.patch
+drm-amd-pm-fix-the-null-pointer-dereference-for-vega10_hwmgr.patch
+virtio_net-fix-napi_skb_cache_put-warning.patch
+rcu-tasks-fix-show_rcu_tasks_trace_gp_kthread-buffer-overflow.patch
+ext4-reject-casefold-inode-flag-without-casefold-feature.patch
+udf-limit-file-size-to-4tb.patch
+ext4-handle-redirtying-in-ext4_bio_write_page.patch
--- /dev/null
+From c2efd13a2ed4f29bf9ef14ac2fbb7474084655f8 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Wed, 25 Jan 2023 17:56:06 +0100
+Subject: udf: Limit file size to 4TB
+
+From: Jan Kara <jack@suse.cz>
+
+commit c2efd13a2ed4f29bf9ef14ac2fbb7474084655f8 upstream.
+
+UDF disk format supports in principle file sizes up to 1<<64-1. However
+the file space (including holes) is described by a linked list of
+extents, each of which can have at most 1GB. Thus the creation and
+handling of extents gets unusably slow beyond certain point. Limit the
+file size to 4TB to avoid locking up the kernel too easily.
+
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/udf/super.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -86,6 +86,13 @@ enum {
+ #define UDF_MAX_LVID_NESTING 1000
+
+ enum { UDF_MAX_LINKS = 0xffff };
++/*
++ * We limit filesize to 4TB. This is arbitrary as the on-disk format supports
++ * more but because the file space is described by a linked list of extents,
++ * each of which can have at most 1GB, the creation and handling of extents
++ * gets unusably slow beyond certain point...
++ */
++#define UDF_MAX_FILESIZE (1ULL << 42)
+
+ /* These are the "meat" - everything else is stuffing */
+ static int udf_fill_super(struct super_block *, void *, int);
+@@ -2302,7 +2309,7 @@ static int udf_fill_super(struct super_b
+ ret = -ENOMEM;
+ goto error_out;
+ }
+- sb->s_maxbytes = MAX_LFS_FILESIZE;
++ sb->s_maxbytes = UDF_MAX_FILESIZE;
+ sb->s_max_links = UDF_MAX_LINKS;
+ return 0;
+
--- /dev/null
+From f8321fa75102246d7415a6af441872f6637c93ab Mon Sep 17 00:00:00 2001
+From: Breno Leitao <leitao@debian.org>
+Date: Fri, 12 Jul 2024 04:53:25 -0700
+Subject: virtio_net: Fix napi_skb_cache_put warning
+
+From: Breno Leitao <leitao@debian.org>
+
+commit f8321fa75102246d7415a6af441872f6637c93ab upstream.
+
+After the commit bdacf3e34945 ("net: Use nested-BH locking for
+napi_alloc_cache.") was merged, the following warning began to appear:
+
+ WARNING: CPU: 5 PID: 1 at net/core/skbuff.c:1451 napi_skb_cache_put+0x82/0x4b0
+
+ __warn+0x12f/0x340
+ napi_skb_cache_put+0x82/0x4b0
+ napi_skb_cache_put+0x82/0x4b0
+ report_bug+0x165/0x370
+ handle_bug+0x3d/0x80
+ exc_invalid_op+0x1a/0x50
+ asm_exc_invalid_op+0x1a/0x20
+ __free_old_xmit+0x1c8/0x510
+ napi_skb_cache_put+0x82/0x4b0
+ __free_old_xmit+0x1c8/0x510
+ __free_old_xmit+0x1c8/0x510
+ __pfx___free_old_xmit+0x10/0x10
+
+The issue arises because virtio is assuming it's running in NAPI context
+even when it's not, such as in the netpoll case.
+
+To resolve this, modify virtnet_poll_tx() to only set NAPI when budget
+is available. Same for virtnet_poll_cleantx(), which always assumed that
+it was in a NAPI context.
+
+Fixes: df133f3f9625 ("virtio_net: bulk free tx skbs")
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Reviewed-by: Heng Qi <hengqi@linux.alibaba.com>
+Link: https://patch.msgid.link/20240712115325.54175-1-leitao@debian.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[Shivani: Modified to apply on v6.6.y]
+Signed-off-by: Shivani Agarwal <shivani.agarwal@broadcom.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/virtio_net.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1548,7 +1548,7 @@ static bool is_xdp_raw_buffer_queue(stru
+ return false;
+ }
+
+-static void virtnet_poll_cleantx(struct receive_queue *rq)
++static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
+ {
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ unsigned int index = vq2rxq(rq->vq);
+@@ -1561,7 +1561,7 @@ static void virtnet_poll_cleantx(struct
+ if (__netif_tx_trylock(txq)) {
+ do {
+ virtqueue_disable_cb(sq->vq);
+- free_old_xmit_skbs(sq, true);
++ free_old_xmit_skbs(sq, !!budget);
+ } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
+
+ if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+@@ -1580,7 +1580,7 @@ static int virtnet_poll(struct napi_stru
+ unsigned int received;
+ unsigned int xdp_xmit = 0;
+
+- virtnet_poll_cleantx(rq);
++ virtnet_poll_cleantx(rq, budget);
+
+ received = virtnet_receive(rq, budget, &xdp_xmit);
+
+@@ -1683,7 +1683,7 @@ static int virtnet_poll_tx(struct napi_s
+ txq = netdev_get_tx_queue(vi->dev, index);
+ __netif_tx_lock(txq, raw_smp_processor_id());
+ virtqueue_disable_cb(sq->vq);
+- free_old_xmit_skbs(sq, true);
++ free_old_xmit_skbs(sq, !!budget);
+
+ if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+ netif_tx_wake_queue(txq);