--- /dev/null
+From af1c89ddb74f170eccd5a57001d7317560b638ea Mon Sep 17 00:00:00 2001
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Date: Wed, 25 Jan 2023 10:45:05 +0100
+Subject: ARM: dts: exynos: correct HDMI phy compatible in Exynos4
+
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+commit af1c89ddb74f170eccd5a57001d7317560b638ea upstream.
+
+The HDMI PHY compatible string was missing the vendor prefix.
+
+Fixes: ed80d4cab772 ("ARM: dts: add hdmi related nodes for exynos4 SoCs")
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20230125094513.155063-1-krzysztof.kozlowski@linaro.org
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/boot/dts/exynos4.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/exynos4.dtsi
++++ b/arch/arm/boot/dts/exynos4.dtsi
+@@ -605,7 +605,7 @@
+ status = "disabled";
+
+ hdmi_i2c_phy: hdmiphy@38 {
+- compatible = "exynos4210-hdmiphy";
++ compatible = "samsung,exynos4210-hdmiphy";
+ reg = <0x38>;
+ };
+ };
--- /dev/null
+From 2b5463fcbdfb24e898916bcae2b1359042d26963 Mon Sep 17 00:00:00 2001
+From: Boris Burkov <boris@bur.io>
+Date: Thu, 12 Jan 2023 16:05:11 -0800
+Subject: btrfs: hold block group refcount during async discard
+
+From: Boris Burkov <boris@bur.io>
+
+commit 2b5463fcbdfb24e898916bcae2b1359042d26963 upstream.
+
+Async discard does not take a reference on the block group while the
+block group sits on a discard list. This is generally OK, as the paths
+which destroy block groups tend to try to synchronize on cancelling
+async discard work. However, relying on cancelling work requires
+careful analysis to be sure it is safe from races in which unpinning
+schedules more work.
+
+While I am unable to find a race with unpinning in the current code for
+either the unused bgs or relocation paths, I believe we have one in an
+older version of auto relocation in a Meta internal build. This suggests
+that this is in fact an error-prone model, which could be fragile to
+future changes to these bg deletion paths.
+
+To make this ownership more clear, add a refcount for async discard. If
+work is queued for a block group, its refcount should be incremented,
+and when work is completed or canceled, it should be decremented.
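+
+To make the pattern concrete, here is a purely illustrative userspace
+sketch (the toy_* types and helpers are made up; the real helpers are
+btrfs_get_block_group() and btrfs_put_block_group(), and the real lists
+are protected by discard_ctl->lock):
+
+ #include <assert.h>
+ #include <stdbool.h>
+ #include <stdio.h>
+
+ /* Toy stand-in for struct btrfs_block_group. */
+ struct toy_bg {
+         int refs;               /* protected by the (elided) lock */
+         bool on_discard_list;   /* i.e. !list_empty(&discard_list) */
+ };
+
+ static void toy_get(struct toy_bg *bg) { bg->refs++; }
+ static void toy_put(struct toy_bg *bg) { assert(bg->refs > 0); bg->refs--; }
+
+ /* Queueing takes a reference only on the empty -> queued transition. */
+ static void toy_queue_discard(struct toy_bg *bg)
+ {
+         if (!bg->on_discard_list) {
+                 toy_get(bg);
+                 bg->on_discard_list = true;
+         }
+ }
+
+ /*
+  * Dequeueing drops that reference, unless the discard workfn is still
+  * using this block group; the workfn then drops it when it finishes.
+  */
+ static void toy_dequeue_discard(struct toy_bg *bg, bool running_in_workfn)
+ {
+         if (bg->on_discard_list) {
+                 bg->on_discard_list = false;
+                 if (!running_in_workfn)
+                         toy_put(bg);
+         }
+ }
+
+ int main(void)
+ {
+         struct toy_bg bg = { .refs = 1 };       /* caller's reference */
+
+         toy_queue_discard(&bg);                 /* refs == 2 */
+         toy_dequeue_discard(&bg, false);        /* refs == 1 */
+         printf("refs = %d\n", bg.refs);
+         return 0;
+ }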
+
+CC: stable@vger.kernel.org # 5.15+
+Signed-off-by: Boris Burkov <boris@bur.io>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/discard.c | 41 ++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 38 insertions(+), 3 deletions(-)
+
+--- a/fs/btrfs/discard.c
++++ b/fs/btrfs/discard.c
+@@ -78,6 +78,7 @@ static struct list_head *get_discard_lis
+ static void __add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
+ struct btrfs_block_group *block_group)
+ {
++ lockdep_assert_held(&discard_ctl->lock);
+ if (!btrfs_run_discard_work(discard_ctl))
+ return;
+
+@@ -89,6 +90,8 @@ static void __add_to_discard_list(struct
+ BTRFS_DISCARD_DELAY);
+ block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR;
+ }
++ if (list_empty(&block_group->discard_list))
++ btrfs_get_block_group(block_group);
+
+ list_move_tail(&block_group->discard_list,
+ get_discard_list(discard_ctl, block_group));
+@@ -108,8 +111,12 @@ static void add_to_discard_list(struct b
+ static void add_to_discard_unused_list(struct btrfs_discard_ctl *discard_ctl,
+ struct btrfs_block_group *block_group)
+ {
++ bool queued;
++
+ spin_lock(&discard_ctl->lock);
+
++ queued = !list_empty(&block_group->discard_list);
++
+ if (!btrfs_run_discard_work(discard_ctl)) {
+ spin_unlock(&discard_ctl->lock);
+ return;
+@@ -121,6 +128,8 @@ static void add_to_discard_unused_list(s
+ block_group->discard_eligible_time = (ktime_get_ns() +
+ BTRFS_DISCARD_UNUSED_DELAY);
+ block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR;
++ if (!queued)
++ btrfs_get_block_group(block_group);
+ list_add_tail(&block_group->discard_list,
+ &discard_ctl->discard_list[BTRFS_DISCARD_INDEX_UNUSED]);
+
+@@ -131,6 +140,7 @@ static bool remove_from_discard_list(str
+ struct btrfs_block_group *block_group)
+ {
+ bool running = false;
++ bool queued = false;
+
+ spin_lock(&discard_ctl->lock);
+
+@@ -140,7 +150,16 @@ static bool remove_from_discard_list(str
+ }
+
+ block_group->discard_eligible_time = 0;
++ queued = !list_empty(&block_group->discard_list);
+ list_del_init(&block_group->discard_list);
++ /*
++ * If the block group is currently running in the discard workfn, we
++ * don't want to deref it, since it's still being used by the workfn.
++ * The workfn will notice this case and deref the block group when it is
++ * finished.
++ */
++ if (queued && !running)
++ btrfs_put_block_group(block_group);
+
+ spin_unlock(&discard_ctl->lock);
+
+@@ -214,10 +233,12 @@ again:
+ if (block_group && now >= block_group->discard_eligible_time) {
+ if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED &&
+ block_group->used != 0) {
+- if (btrfs_is_block_group_data_only(block_group))
++ if (btrfs_is_block_group_data_only(block_group)) {
+ __add_to_discard_list(discard_ctl, block_group);
+- else
++ } else {
+ list_del_init(&block_group->discard_list);
++ btrfs_put_block_group(block_group);
++ }
+ goto again;
+ }
+ if (block_group->discard_state == BTRFS_DISCARD_RESET_CURSOR) {
+@@ -511,6 +532,15 @@ static void btrfs_discard_workfn(struct
+ spin_lock(&discard_ctl->lock);
+ discard_ctl->prev_discard = trimmed;
+ discard_ctl->prev_discard_time = now;
++ /*
++ * If the block group was removed from the discard list while it was
++ * running in this workfn, then we didn't deref it, since this function
++ * still owned that reference. But we set the discard_ctl->block_group
++ * back to NULL, so we can use that condition to know that now we need
++ * to deref the block_group.
++ */
++ if (discard_ctl->block_group == NULL)
++ btrfs_put_block_group(block_group);
+ discard_ctl->block_group = NULL;
+ __btrfs_discard_schedule_work(discard_ctl, now, false);
+ spin_unlock(&discard_ctl->lock);
+@@ -651,8 +681,12 @@ void btrfs_discard_punt_unused_bgs_list(
+ list_for_each_entry_safe(block_group, next, &fs_info->unused_bgs,
+ bg_list) {
+ list_del_init(&block_group->bg_list);
+- btrfs_put_block_group(block_group);
+ btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
++ /*
++ * This put is for the get done by btrfs_mark_bg_unused.
++ * Queueing discard incremented it for discard's reference.
++ */
++ btrfs_put_block_group(block_group);
+ }
+ spin_unlock(&fs_info->unused_bgs_lock);
+ }
+@@ -683,6 +717,7 @@ static void btrfs_discard_purge_list(str
+ if (block_group->used == 0)
+ btrfs_mark_bg_unused(block_group);
+ spin_lock(&discard_ctl->lock);
++ btrfs_put_block_group(block_group);
+ }
+ }
+ spin_unlock(&discard_ctl->lock);
--- /dev/null
+From b7625f461da6734a21c38ba6e7558538a116a2e3 Mon Sep 17 00:00:00 2001
+From: Qu Wenruo <wqu@suse.com>
+Date: Fri, 13 Jan 2023 19:11:39 +0800
+Subject: btrfs: sysfs: update fs features directory asynchronously
+
+From: Qu Wenruo <wqu@suse.com>
+
+commit b7625f461da6734a21c38ba6e7558538a116a2e3 upstream.
+
+[BUG]
+Since the introduction of the per-fs feature sysfs interface
+(/sys/fs/btrfs/<UUID>/features/), the content of that directory has
+never been updated.
+
+Thus for the following case, that directory will not show the new
+features like RAID56:
+
+ # mkfs.btrfs -f $dev1 $dev2 $dev3
+ # mount $dev1 $mnt
+ # btrfs balance start -f -mconvert=raid5 $mnt
+ # ls /sys/fs/btrfs/$uuid/features/
+ extended_iref free_space_tree no_holes skinny_metadata
+
+While after an unmount and mount cycle, we get the correct features:
+
+ # umount $mnt
+ # mount $dev1 $mnt
+ # ls /sys/fs/btrfs/$uuid/features/
+ extended_iref free_space_tree no_holes raid56 skinny_metadata
+
+[CAUSE]
+Because we never really try to update the content of the per-fs
+features/ directory.
+
+We had an attempt to update the features directory dynamically in commit
+14e46e04958d ("btrfs: synchronize incompat feature bits with sysfs
+files"), but unfortunately it got reverted in commit e410e34fad91
+("Revert "btrfs: synchronize incompat feature bits with sysfs files"").
+The problem with the original patch is that, in the context of
+btrfs_create_chunk(), we cannot afford to update the sysfs group.
+
+The exported but never utilized function btrfs_sysfs_feature_update()
+is the leftover of that attempt. Even if we went with
+sysfs_update_group(), new files would need extra memory allocation, and
+we have no way to force the sysfs update to use GFP_NOFS.
+
+[FIX]
+This patch addresses the old problem by doing the sysfs update
+asynchronously in the cleaner thread.
+
+This involves the following changes:
+
+- Make the __btrfs_(set|clear)_fs_(incompat|compat_ro) helpers set the
+  BTRFS_FS_FEATURE_CHANGED flag when needed
+
+- Update btrfs_sysfs_feature_update() to use sysfs_update_group() and
+  drop the unnecessary arguments
+
+- Call btrfs_sysfs_feature_update() in cleaner_kthread if the
+  BTRFS_FS_FEATURE_CHANGED flag is set
+
+- Wake up cleaner_kthread in btrfs_commit_transaction if the
+  BTRFS_FS_FEATURE_CHANGED flag is set
+
+With this, all the previously dangerous call sites like
+btrfs_create_chunk() need no new changes, as the above helpers will
+have already set the BTRFS_FS_FEATURE_CHANGED flag.
+
+The real work happens in cleaner_kthread, thus we pay the cost of
+delaying the update to the sysfs directory, but the delay should be
+small enough that end users cannot notice it, though it may grow if the
+cleaner thread is busy removing subvolumes or doing defrag.
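+
+As a purely illustrative sketch of the resulting pattern (userspace C,
+made-up toy_* names, with an atomic flag standing in for
+BTRFS_FS_FEATURE_CHANGED and printf() standing in for the real work):
+
+ #include <stdatomic.h>
+ #include <stdio.h>
+
+ static atomic_bool feature_changed = false;
+
+ /* Called from contexts where the real sysfs update would be unsafe. */
+ static void toy_set_feature(const char *name)
+ {
+         printf("feature %s set in the superblock\n", name);
+         atomic_store(&feature_changed, true);   /* cheap, never allocates */
+ }
+
+ /* Called from a cleaner-thread-like context where sleeping is fine. */
+ static void toy_cleaner_iteration(void)
+ {
+         /* mirrors test_and_clear_bit() + btrfs_sysfs_feature_update() */
+         if (atomic_exchange(&feature_changed, false))
+                 printf("updating the sysfs features directory\n");
+ }
+
+ int main(void)
+ {
+         toy_set_feature("raid56");
+         toy_cleaner_iteration();        /* performs the deferred update */
+         toy_cleaner_iteration();        /* nothing left to do */
+         return 0;
+ }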
+
+CC: stable@vger.kernel.org # 4.14+
+Reviewed-by: Anand Jain <anand.jain@oracle.com>
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/disk-io.c | 3 +++
+ fs/btrfs/fs.c | 4 ++++
+ fs/btrfs/fs.h | 6 ++++++
+ fs/btrfs/sysfs.c | 29 ++++++++---------------------
+ fs/btrfs/sysfs.h | 3 +--
+ fs/btrfs/transaction.c | 5 +++++
+ 6 files changed, 27 insertions(+), 23 deletions(-)
+
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1910,6 +1910,9 @@ static int cleaner_kthread(void *arg)
+ goto sleep;
+ }
+
++ if (test_and_clear_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags))
++ btrfs_sysfs_feature_update(fs_info);
++
+ btrfs_run_delayed_iputs(fs_info);
+
+ again = btrfs_clean_one_deleted_snapshot(fs_info);
+--- a/fs/btrfs/fs.c
++++ b/fs/btrfs/fs.c
+@@ -24,6 +24,7 @@ void __btrfs_set_fs_incompat(struct btrf
+ name, flag);
+ }
+ spin_unlock(&fs_info->super_lock);
++ set_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags);
+ }
+ }
+
+@@ -46,6 +47,7 @@ void __btrfs_clear_fs_incompat(struct bt
+ name, flag);
+ }
+ spin_unlock(&fs_info->super_lock);
++ set_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags);
+ }
+ }
+
+@@ -68,6 +70,7 @@ void __btrfs_set_fs_compat_ro(struct btr
+ name, flag);
+ }
+ spin_unlock(&fs_info->super_lock);
++ set_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags);
+ }
+ }
+
+@@ -90,5 +93,6 @@ void __btrfs_clear_fs_compat_ro(struct b
+ name, flag);
+ }
+ spin_unlock(&fs_info->super_lock);
++ set_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags);
+ }
+ }
+--- a/fs/btrfs/fs.h
++++ b/fs/btrfs/fs.h
+@@ -125,6 +125,12 @@ enum {
+ */
+ BTRFS_FS_NO_OVERCOMMIT,
+
++ /*
++ * Indicate if we have some features changed, this is mostly for
++ * cleaner thread to update the sysfs interface.
++ */
++ BTRFS_FS_FEATURE_CHANGED,
++
+ #if BITS_PER_LONG == 32
+ /* Indicate if we have error/warn message printed on 32bit systems */
+ BTRFS_FS_32BIT_ERROR,
+--- a/fs/btrfs/sysfs.c
++++ b/fs/btrfs/sysfs.c
+@@ -2272,36 +2272,23 @@ void btrfs_sysfs_del_one_qgroup(struct b
+ * Change per-fs features in /sys/fs/btrfs/UUID/features to match current
+ * values in superblock. Call after any changes to incompat/compat_ro flags
+ */
+-void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info,
+- u64 bit, enum btrfs_feature_set set)
++void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info)
+ {
+- struct btrfs_fs_devices *fs_devs;
+ struct kobject *fsid_kobj;
+- u64 __maybe_unused features;
+- int __maybe_unused ret;
++ int ret;
+
+ if (!fs_info)
+ return;
+
+- /*
+- * See 14e46e04958df74 and e410e34fad913dd, feature bit updates are not
+- * safe when called from some contexts (eg. balance)
+- */
+- features = get_features(fs_info, set);
+- ASSERT(bit & supported_feature_masks[set]);
+-
+- fs_devs = fs_info->fs_devices;
+- fsid_kobj = &fs_devs->fsid_kobj;
+-
++ fsid_kobj = &fs_info->fs_devices->fsid_kobj;
+ if (!fsid_kobj->state_initialized)
+ return;
+
+- /*
+- * FIXME: this is too heavy to update just one value, ideally we'd like
+- * to use sysfs_update_group but some refactoring is needed first.
+- */
+- sysfs_remove_group(fsid_kobj, &btrfs_feature_attr_group);
+- ret = sysfs_create_group(fsid_kobj, &btrfs_feature_attr_group);
++ ret = sysfs_update_group(fsid_kobj, &btrfs_feature_attr_group);
++ if (ret < 0)
++ btrfs_warn(fs_info,
++ "failed to update /sys/fs/btrfs/%pU/features: %d",
++ fs_info->fs_devices->fsid, ret);
+ }
+
+ int __init btrfs_init_sysfs(void)
+--- a/fs/btrfs/sysfs.h
++++ b/fs/btrfs/sysfs.h
+@@ -19,8 +19,7 @@ void btrfs_sysfs_remove_device(struct bt
+ int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs);
+ void btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs);
+ void btrfs_sysfs_update_sprout_fsid(struct btrfs_fs_devices *fs_devices);
+-void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info,
+- u64 bit, enum btrfs_feature_set set);
++void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info);
+ void btrfs_kobject_uevent(struct block_device *bdev, enum kobject_action action);
+
+ int __init btrfs_init_sysfs(void);
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -2464,6 +2464,11 @@ int btrfs_commit_transaction(struct btrf
+ wake_up(&fs_info->transaction_wait);
+ btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
+
++ /* If we have features changed, wake up the cleaner to update sysfs. */
++ if (test_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags) &&
++ fs_info->cleaner_kthread)
++ wake_up_process(fs_info->cleaner_kthread);
++
+ ret = btrfs_write_and_wait_transaction(trans);
+ if (ret) {
+ btrfs_handle_fs_error(fs_info, ret,
--- /dev/null
+From fb533473d1595fe79ecb528fda1de33552b07178 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Sat, 11 Feb 2023 00:27:34 +0900
+Subject: ksmbd: do not allow the actual frame length to be smaller than the rfc1002 length
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit fb533473d1595fe79ecb528fda1de33552b07178 upstream.
+
+ksmbd allowed the actual frame length to be smaller than the rfc1002
+length. If this is allowed, it is possible to allocate a large amount
+of memory that should be limited by credit management, which can
+eventually cause a memory exhaustion problem. This patch does not allow
+it, except for the SMB2 Negotiate request, which will be validated when
+message handling proceeds. Also, allow a message that is padded to an
+8-byte boundary.
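+
+As a self-contained illustration of the new rule (toy code, not the
+ksmbd implementation; in ksmbd the values correspond to "len", the
+rfc1002-announced length, and "clc_len", the calculated frame length):
+
+ #include <stdbool.h>
+ #include <stdio.h>
+
+ /*
+  * Allow the calculated frame length to fall short of the announced
+  * length only by the 0-7 bytes that 8-byte alignment padding can add.
+  */
+ static bool toy_frame_len_ok(unsigned int len, unsigned int clc_len)
+ {
+         if (clc_len >= len)
+                 return true;    /* not the case this patch tightens */
+         return (len - clc_len) < 8;
+ }
+
+ int main(void)
+ {
+         printf("%d\n", toy_frame_len_ok(88, 88));       /* 1: exact */
+         printf("%d\n", toy_frame_len_ok(96, 90));       /* 1: padding */
+         printf("%d\n", toy_frame_len_ok(65536, 88));    /* 0: rejected */
+         return 0;
+ }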
+
+Fixes: e2f34481b24d ("cifsd: add server-side procedures for SMB3")
+Cc: stable@vger.kernel.org
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/smb2misc.c | 21 ++++++++++-----------
+ 1 file changed, 10 insertions(+), 11 deletions(-)
+
+--- a/fs/ksmbd/smb2misc.c
++++ b/fs/ksmbd/smb2misc.c
+@@ -408,20 +408,19 @@ int ksmbd_smb2_check_message(struct ksmb
+ goto validate_credit;
+
+ /*
+- * windows client also pad up to 8 bytes when compounding.
+- * If pad is longer than eight bytes, log the server behavior
+- * (once), since may indicate a problem but allow it and
+- * continue since the frame is parseable.
++ * SMB2 NEGOTIATE request will be validated when message
++ * handling proceeds.
+ */
+- if (clc_len < len) {
+- ksmbd_debug(SMB,
+- "cli req padded more than expected. Length %d not %d for cmd:%d mid:%llu\n",
+- len, clc_len, command,
+- le64_to_cpu(hdr->MessageId));
++ if (command == SMB2_NEGOTIATE_HE)
+ goto validate_credit;
+- }
+
+- ksmbd_debug(SMB,
++ /*
++ * Allow a message that padded to 8byte boundary.
++ */
++ if (clc_len < len && (len - clc_len) < 8)
++ goto validate_credit;
++
++ pr_err_ratelimited(
+ "cli req too short, len %d not %d. cmd:%d mid:%llu\n",
+ len, clc_len, command,
+ le64_to_cpu(hdr->MessageId));
--- /dev/null
+From d3ca9f7aeba793d74361d88a8800b2f205c9236b Mon Sep 17 00:00:00 2001
+From: Hangyu Hua <hbh25y@gmail.com>
+Date: Fri, 17 Feb 2023 22:29:34 +0900
+Subject: ksmbd: fix possible memory leak in smb2_lock()
+
+From: Hangyu Hua <hbh25y@gmail.com>
+
+commit d3ca9f7aeba793d74361d88a8800b2f205c9236b upstream.
+
+argv needs to be freed when setup_async_work() fails or when the
+current process is woken up.
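+
+In rough outline (illustrative userspace C only, made-up toy_* names,
+not the ksmbd code), the ownership rule the fix enforces looks like
+this: free the buffer on the registration failure path, and otherwise
+free it exactly once after the woken-up waiter has unregistered its
+cancel callback:
+
+ #include <stdbool.h>
+ #include <stdlib.h>
+
+ struct toy_work {
+         void (*cancel_fn)(void *);
+         void *cancel_argv;
+ };
+
+ /* Pretend registration of a cancel callback; may fail. */
+ static bool toy_setup_async_work(struct toy_work *w,
+                                  void (*fn)(void *), void *argv)
+ {
+         w->cancel_fn = fn;
+         w->cancel_argv = argv;
+         return fn != NULL;
+ }
+
+ static void toy_cancel(void *argv) { (void)argv; }
+
+ static int toy_blocked_lock(struct toy_work *w)
+ {
+         void *argv = calloc(1, sizeof(void *));
+
+         if (!argv)
+                 return -1;
+         if (!toy_setup_async_work(w, toy_cancel, argv)) {
+                 free(argv);             /* error path frees argv */
+                 return -1;
+         }
+         /* ... sleep until the lock is granted or cancelled ... */
+         w->cancel_fn = NULL;            /* unregister first ... */
+         free(argv);                     /* ... then free exactly once */
+         return 0;
+ }
+
+ int main(void)
+ {
+         struct toy_work w = { 0 };
+
+         return toy_blocked_lock(&w);
+ }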
+
+Fixes: e2f34481b24d ("cifsd: add server-side procedures for SMB3")
+Cc: stable@vger.kernel.org
+Signed-off-by: Hangyu Hua <hbh25y@gmail.com>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/smb2pdu.c | 28 +++++++++++++---------------
+ fs/ksmbd/vfs_cache.c | 5 ++---
+ 2 files changed, 15 insertions(+), 18 deletions(-)
+
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -6644,7 +6644,7 @@ int smb2_cancel(struct ksmbd_work *work)
+ struct ksmbd_conn *conn = work->conn;
+ struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
+ struct smb2_hdr *chdr;
+- struct ksmbd_work *cancel_work = NULL, *iter;
++ struct ksmbd_work *iter;
+ struct list_head *command_list;
+
+ ksmbd_debug(SMB, "smb2 cancel called on mid %llu, async flags 0x%x\n",
+@@ -6666,7 +6666,9 @@ int smb2_cancel(struct ksmbd_work *work)
+ "smb2 with AsyncId %llu cancelled command = 0x%x\n",
+ le64_to_cpu(hdr->Id.AsyncId),
+ le16_to_cpu(chdr->Command));
+- cancel_work = iter;
++ iter->state = KSMBD_WORK_CANCELLED;
++ if (iter->cancel_fn)
++ iter->cancel_fn(iter->cancel_argv);
+ break;
+ }
+ spin_unlock(&conn->request_lock);
+@@ -6685,18 +6687,12 @@ int smb2_cancel(struct ksmbd_work *work)
+ "smb2 with mid %llu cancelled command = 0x%x\n",
+ le64_to_cpu(hdr->MessageId),
+ le16_to_cpu(chdr->Command));
+- cancel_work = iter;
++ iter->state = KSMBD_WORK_CANCELLED;
+ break;
+ }
+ spin_unlock(&conn->request_lock);
+ }
+
+- if (cancel_work) {
+- cancel_work->state = KSMBD_WORK_CANCELLED;
+- if (cancel_work->cancel_fn)
+- cancel_work->cancel_fn(cancel_work->cancel_argv);
+- }
+-
+ /* For SMB2_CANCEL command itself send no response*/
+ work->send_no_response = 1;
+ return 0;
+@@ -7061,6 +7057,14 @@ skip:
+
+ ksmbd_vfs_posix_lock_wait(flock);
+
++ spin_lock(&work->conn->request_lock);
++ spin_lock(&fp->f_lock);
++ list_del(&work->fp_entry);
++ work->cancel_fn = NULL;
++ kfree(argv);
++ spin_unlock(&fp->f_lock);
++ spin_unlock(&work->conn->request_lock);
++
+ if (work->state != KSMBD_WORK_ACTIVE) {
+ list_del(&smb_lock->llist);
+ spin_lock(&work->conn->llist_lock);
+@@ -7069,9 +7073,6 @@ skip:
+ locks_free_lock(flock);
+
+ if (work->state == KSMBD_WORK_CANCELLED) {
+- spin_lock(&fp->f_lock);
+- list_del(&work->fp_entry);
+- spin_unlock(&fp->f_lock);
+ rsp->hdr.Status =
+ STATUS_CANCELLED;
+ kfree(smb_lock);
+@@ -7093,9 +7094,6 @@ skip:
+ list_del(&smb_lock->clist);
+ spin_unlock(&work->conn->llist_lock);
+
+- spin_lock(&fp->f_lock);
+- list_del(&work->fp_entry);
+- spin_unlock(&fp->f_lock);
+ goto retry;
+ } else if (!rc) {
+ spin_lock(&work->conn->llist_lock);
+--- a/fs/ksmbd/vfs_cache.c
++++ b/fs/ksmbd/vfs_cache.c
+@@ -364,12 +364,11 @@ static void __put_fd_final(struct ksmbd_
+
+ static void set_close_state_blocked_works(struct ksmbd_file *fp)
+ {
+- struct ksmbd_work *cancel_work, *ctmp;
++ struct ksmbd_work *cancel_work;
+
+ spin_lock(&fp->f_lock);
+- list_for_each_entry_safe(cancel_work, ctmp, &fp->blocked_works,
++ list_for_each_entry(cancel_work, &fp->blocked_works,
+ fp_entry) {
+- list_del(&cancel_work->fp_entry);
+ cancel_work->state = KSMBD_WORK_CLOSED;
+ cancel_work->cancel_fn(cancel_work->cancel_argv);
+ }
--- /dev/null
+From 8f8c43b125882ac14372f8dca0c8e50a59e78d79 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 8 Feb 2023 09:50:46 +0900
+Subject: ksmbd: fix wrong data area length for smb2 lock request
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 8f8c43b125882ac14372f8dca0c8e50a59e78d79 upstream.
+
+With debug mode turned on, the following error message shows up from
+ksmbd_smb2_check_message():
+
+ksmbd: cli req padded more than expected. Length 112 not 88 for cmd:10 mid:14
+
+The data area length calculation for the smb2 lock request in
+smb2_get_data_area_len() is incorrect.
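+
+As a self-contained illustration of the corrected calculation (the
+toy_* struct layouts below are simplified stand-ins, not the real SMB2
+wire format), the offset now comes from offsetof() on the request
+structure and the length no longer subtracts one from LockCount:
+
+ #include <stddef.h>
+ #include <stdint.h>
+ #include <stdio.h>
+
+ struct toy_lock_element {
+         uint64_t Offset;
+         uint64_t Length;
+         uint32_t Flags;
+         uint32_t Reserved;
+ };
+
+ struct toy_lock_req {
+         unsigned char hdr[64];          /* stands in for the SMB2 header */
+         uint16_t StructureSize;
+         uint16_t LockCount;
+         uint32_t LockSequence;
+         unsigned char FileId[16];
+         struct toy_lock_element locks[1];
+ };
+
+ static void toy_data_area(const struct toy_lock_req *req,
+                           unsigned int *off, unsigned int *len)
+ {
+         unsigned short lock_count = req->LockCount;     /* no "- 1" */
+
+         *off = 0;
+         *len = 0;
+         if (lock_count > 0) {
+                 *off = offsetof(struct toy_lock_req, locks);
+                 *len = sizeof(struct toy_lock_element) * lock_count;
+         }
+ }
+
+ int main(void)
+ {
+         struct toy_lock_req req = { .LockCount = 2 };
+         unsigned int off, len;
+
+         toy_data_area(&req, &off, &len);
+         printf("off=%u len=%u\n", off, len);
+         return 0;
+ }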
+
+Fixes: e2f34481b24d ("cifsd: add server-side procedures for SMB3")
+Cc: stable@vger.kernel.org
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/smb2misc.c | 10 +++-------
+ 1 file changed, 3 insertions(+), 7 deletions(-)
+
+--- a/fs/ksmbd/smb2misc.c
++++ b/fs/ksmbd/smb2misc.c
+@@ -149,15 +149,11 @@ static int smb2_get_data_area_len(unsign
+ break;
+ case SMB2_LOCK:
+ {
+- int lock_count;
++ unsigned short lock_count;
+
+- /*
+- * smb2_lock request size is 48 included single
+- * smb2_lock_element structure size.
+- */
+- lock_count = le16_to_cpu(((struct smb2_lock_req *)hdr)->LockCount) - 1;
++ lock_count = le16_to_cpu(((struct smb2_lock_req *)hdr)->LockCount);
+ if (lock_count > 0) {
+- *off = __SMB2_HEADER_STRUCTURE_SIZE + 48;
++ *off = offsetof(struct smb2_lock_req, locks);
+ *len = sizeof(struct smb2_lock_element) * lock_count;
+ }
+ break;
--- /dev/null
+From b613c7f31476c44316bfac1af7cac714b7d6bef9 Mon Sep 17 00:00:00 2001
+From: Waiman Long <longman@redhat.com>
+Date: Wed, 25 Jan 2023 19:36:25 -0500
+Subject: locking/rwsem: Prevent non-first waiter from spinning in down_write() slowpath
+
+From: Waiman Long <longman@redhat.com>
+
+commit b613c7f31476c44316bfac1af7cac714b7d6bef9 upstream.
+
+A non-first waiter can potentially spin in the for loop of
+rwsem_down_write_slowpath() without sleeping but still fail to acquire
+the lock even if the rwsem is free, if the following sequence happens:
+
+  Non-first RT waiter    First waiter      Lock holder
+  -------------------    ------------      -----------
+  Acquire wait_lock
+  rwsem_try_write_lock():
+    Set handoff bit if RT or
+      wait too long
+    Set waiter->handoff_set
+  Release wait_lock
+                         Acquire wait_lock
+                         Inherit waiter->handoff_set
+                         Release wait_lock
+                                           Clear owner
+                                           Release lock
+  if (waiter.handoff_set) {
+    rwsem_spin_on_owner();
+    if (OWNER_NULL)
+      goto trylock_again;
+  }
+  trylock_again:
+  Acquire wait_lock
+  rwsem_try_write_lock():
+     if (first->handoff_set && (waiter != first))
+        return false;
+  Release wait_lock
+
+A non-first waiter cannot really acquire the rwsem even if it mistakenly
+believes that it can spin on the OWNER_NULL value. If that waiter happens
+to be an RT task running on the same CPU as the first waiter, it can
+block the first waiter from acquiring the rwsem, leading to a livelock.
+Fix this problem by making sure that a non-first waiter cannot spin in
+the slowpath loop without sleeping.
+
+Fixes: d257cc8cb8d5 ("locking/rwsem: Make handoff bit handling more consistent")
+Signed-off-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Tested-by: Mukesh Ojha <quic_mojha@quicinc.com>
+Reviewed-by: Mukesh Ojha <quic_mojha@quicinc.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230126003628.365092-2-longman@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/locking/rwsem.c | 19 +++++++++----------
+ 1 file changed, 9 insertions(+), 10 deletions(-)
+
+--- a/kernel/locking/rwsem.c
++++ b/kernel/locking/rwsem.c
+@@ -624,18 +624,16 @@ static inline bool rwsem_try_write_lock(
+ */
+ if (first->handoff_set && (waiter != first))
+ return false;
+-
+- /*
+- * First waiter can inherit a previously set handoff
+- * bit and spin on rwsem if lock acquisition fails.
+- */
+- if (waiter == first)
+- waiter->handoff_set = true;
+ }
+
+ new = count;
+
+ if (count & RWSEM_LOCK_MASK) {
++ /*
++ * A waiter (first or not) can set the handoff bit
++ * if it is an RT task or wait in the wait queue
++ * for too long.
++ */
+ if (has_handoff || (!rt_task(waiter->task) &&
+ !time_after(jiffies, waiter->timeout)))
+ return false;
+@@ -651,11 +649,12 @@ static inline bool rwsem_try_write_lock(
+ } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
+
+ /*
+- * We have either acquired the lock with handoff bit cleared or
+- * set the handoff bit.
++ * We have either acquired the lock with handoff bit cleared or set
++ * the handoff bit. Only the first waiter can have its handoff_set
++ * set here to enable optimistic spinning in slowpath loop.
+ */
+ if (new & RWSEM_FLAG_HANDOFF) {
+- waiter->handoff_set = true;
++ first->handoff_set = true;
+ lockevent_inc(rwsem_wlock_handoff);
+ return false;
+ }
scsi-mpi3mr-fix-missing-mrioc-evtack_cmds-initialization.patch
scsi-mpi3mr-fix-issues-in-mpi3mr_get_all_tgt_info.patch
scsi-mpi3mr-remove-unnecessary-memcpy-to-alltgt_info-dmi.patch
+btrfs-hold-block-group-refcount-during-async-discard.patch
+btrfs-sysfs-update-fs-features-directory-asynchronously.patch
+locking-rwsem-prevent-non-first-waiter-from-spinning-in-down_write-slowpath.patch
+ksmbd-fix-wrong-data-area-length-for-smb2-lock-request.patch
+ksmbd-do-not-allow-the-actual-frame-length-to-be-smaller-than-the-rfc1002-length.patch
+ksmbd-fix-possible-memory-leak-in-smb2_lock.patch
+torture-fix-hang-during-kthread-shutdown-phase.patch
+arm-dts-exynos-correct-hdmi-phy-compatible-in-exynos4.patch
--- /dev/null
+From d52d3a2bf408ff86f3a79560b5cce80efb340239 Mon Sep 17 00:00:00 2001
+From: "Joel Fernandes (Google)" <joel@joelfernandes.org>
+Date: Sun, 1 Jan 2023 06:15:55 +0000
+Subject: torture: Fix hang during kthread shutdown phase
+
+From: Joel Fernandes (Google) <joel@joelfernandes.org>
+
+commit d52d3a2bf408ff86f3a79560b5cce80efb340239 upstream.
+
+During rcutorture shutdown, the rcu_torture_cleanup() function calls
+torture_cleanup_begin(), which sets the fullstop global variable to
+FULLSTOP_RMMOD. This causes the rcutorture threads for readers and
+fakewriters to exit all of their "while" loops and start shutting down.
+
+They then call torture_kthread_stopping(), which in turn waits for
+kthread_stop() to be called. However, rcu_torture_cleanup() has
+not yet called kthread_stop() on those threads, and before it gets a
+chance to do so, multiple instances of torture_kthread_stopping() invoke
+schedule_timeout_uninterruptible(1) in a tight loop. Tracing confirms that
+TIMER_SOFTIRQ can then continuously execute timer callbacks. If that
+TIMER_SOFTIRQ preempts the task executing rcu_torture_cleanup(), that
+task might never invoke kthread_stop().
+
+This commit improves this situation by increasing the timeout passed to
+schedule_timeout_uninterruptible() from one jiffy to 1/20th of a second.
+This change prevents TIMER_SOFTIRQ from monopolizing its CPU, thus
+allowing rcu_torture_cleanup() to carry out the needed kthread_stop()
+invocations. Testing has shown 100 runs of TREE07 passing reliably,
+as opposed to the tens-of-percent failure rates seen beforehand.
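+
+For a rough sense of the numbers (assuming common HZ settings): one
+jiffy is 1 ms at HZ=1000 or 4 ms at HZ=250, so each stopping kthread
+could re-arm its timer up to HZ times per second, while HZ / 20 jiffies
+is about 50 ms regardless of HZ, i.e. at most 20 wakeups per second per
+kthread.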
+
+Cc: Paul McKenney <paulmck@kernel.org>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Zhouyi Zhou <zhouzhouyi@gmail.com>
+Cc: <stable@vger.kernel.org> # 6.0.x
+Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
+Tested-by: Zhouyi Zhou <zhouzhouyi@gmail.com>
+Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/torture.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/torture.c
++++ b/kernel/torture.c
+@@ -915,7 +915,7 @@ void torture_kthread_stopping(char *titl
+ VERBOSE_TOROUT_STRING(buf);
+ while (!kthread_should_stop()) {
+ torture_shutdown_absorb(title);
+- schedule_timeout_uninterruptible(1);
++ schedule_timeout_uninterruptible(HZ / 20);
+ }
+ }
+ EXPORT_SYMBOL_GPL(torture_kthread_stopping);