--- /dev/null
+From d413eabff18d640031fc955d107ad9c03c3bf9f1 Mon Sep 17 00:00:00 2001
+From: Ralph Boehme <slow@samba.org>
+Date: Thu, 14 Nov 2024 11:05:13 +0100
+Subject: fs/smb/client: implement chmod() for SMB3 POSIX Extensions
+
+From: Ralph Boehme <slow@samba.org>
+
+commit d413eabff18d640031fc955d107ad9c03c3bf9f1 upstream.
+
+The NT ACL format for an SMB3 POSIX Extensions chmod() is a single ACE with the
+magic S-1-5-88-3-mode SID:
+
+ NT Security Descriptor
+ Revision: 1
+ Type: 0x8004, Self Relative, DACL Present
+ Offset to owner SID: 56
+ Offset to group SID: 124
+ Offset to SACL: 0
+ Offset to DACL: 20
+ Owner: S-1-5-21-3177838999-3893657415-1037673384-1000
+ Group: S-1-22-2-1000
+ NT User (DACL) ACL
+ Revision: NT4 (2)
+ Size: 36
+ Num ACEs: 1
+ NT ACE: S-1-5-88-3-438, flags 0x00, Access Allowed, mask 0x00000000
+ Type: Access Allowed
+ NT ACE Flags: 0x00
+ Size: 28
+ Access required: 0x00000000
+ SID: S-1-5-88-3-438
+
+Owner and Group should be NULL, but the server is not required to fail the
+request if they are present.
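+
+(Worked example, not part of the original change description: the final
+subauthority of the magic S-1-5-88-3-<mode> SID carries the requested mode in
+decimal, so the S-1-5-88-3-438 ACE above encodes a chmod to 0666, since octal
+0666 = 6*64 + 6*8 + 6 = 438.)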
+
+Signed-off-by: Ralph Boehme <slow@samba.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cifsacl.c | 50 +++++++++++++++++++++++++++-------------------
+ fs/smb/client/cifsproto.h | 4 ++-
+ fs/smb/client/inode.c | 4 ++-
+ fs/smb/client/smb2pdu.c | 2 -
+ 4 files changed, 37 insertions(+), 23 deletions(-)
+
+--- a/fs/smb/client/cifsacl.c
++++ b/fs/smb/client/cifsacl.c
+@@ -885,12 +885,17 @@ unsigned int setup_authusers_ACE(struct
+ * Fill in the special SID based on the mode. See
+ * https://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx
+ */
+-unsigned int setup_special_mode_ACE(struct smb_ace *pntace, __u64 nmode)
++unsigned int setup_special_mode_ACE(struct smb_ace *pntace,
++ bool posix,
++ __u64 nmode)
+ {
+ int i;
+ unsigned int ace_size = 28;
+
+- pntace->type = ACCESS_DENIED_ACE_TYPE;
++ if (posix)
++ pntace->type = ACCESS_ALLOWED_ACE_TYPE;
++ else
++ pntace->type = ACCESS_DENIED_ACE_TYPE;
+ pntace->flags = 0x0;
+ pntace->access_req = 0;
+ pntace->sid.num_subauth = 3;
+@@ -933,7 +938,8 @@ static void populate_new_aces(char *nacl
+ struct smb_sid *pownersid,
+ struct smb_sid *pgrpsid,
+ __u64 *pnmode, u32 *pnum_aces, u16 *pnsize,
+- bool modefromsid)
++ bool modefromsid,
++ bool posix)
+ {
+ __u64 nmode;
+ u32 num_aces = 0;
+@@ -950,13 +956,15 @@ static void populate_new_aces(char *nacl
+ num_aces = *pnum_aces;
+ nsize = *pnsize;
+
+- if (modefromsid) {
+- pnntace = (struct smb_ace *) (nacl_base + nsize);
+- nsize += setup_special_mode_ACE(pnntace, nmode);
+- num_aces++;
++ if (modefromsid || posix) {
+ pnntace = (struct smb_ace *) (nacl_base + nsize);
+- nsize += setup_authusers_ACE(pnntace);
++ nsize += setup_special_mode_ACE(pnntace, posix, nmode);
+ num_aces++;
++ if (modefromsid) {
++ pnntace = (struct smb_ace *) (nacl_base + nsize);
++ nsize += setup_authusers_ACE(pnntace);
++ num_aces++;
++ }
+ goto set_size;
+ }
+
+@@ -1076,7 +1084,7 @@ static __u16 replace_sids_and_copy_aces(
+
+ static int set_chmod_dacl(struct smb_acl *pdacl, struct smb_acl *pndacl,
+ struct smb_sid *pownersid, struct smb_sid *pgrpsid,
+- __u64 *pnmode, bool mode_from_sid)
++ __u64 *pnmode, bool mode_from_sid, bool posix)
+ {
+ int i;
+ u16 size = 0;
+@@ -1094,11 +1102,11 @@ static int set_chmod_dacl(struct smb_acl
+ nsize = sizeof(struct smb_acl);
+
+ /* If pdacl is NULL, we don't have a src. Simply populate new ACL. */
+- if (!pdacl) {
++ if (!pdacl || posix) {
+ populate_new_aces(nacl_base,
+ pownersid, pgrpsid,
+ pnmode, &num_aces, &nsize,
+- mode_from_sid);
++ mode_from_sid, posix);
+ goto finalize_dacl;
+ }
+
+@@ -1115,7 +1123,7 @@ static int set_chmod_dacl(struct smb_acl
+ populate_new_aces(nacl_base,
+ pownersid, pgrpsid,
+ pnmode, &num_aces, &nsize,
+- mode_from_sid);
++ mode_from_sid, posix);
+
+ new_aces_set = true;
+ }
+@@ -1144,7 +1152,7 @@ next_ace:
+ populate_new_aces(nacl_base,
+ pownersid, pgrpsid,
+ pnmode, &num_aces, &nsize,
+- mode_from_sid);
++ mode_from_sid, posix);
+
+ new_aces_set = true;
+ }
+@@ -1251,7 +1259,7 @@ static int parse_sec_desc(struct cifs_sb
+ /* Convert permission bits from mode to equivalent CIFS ACL */
+ static int build_sec_desc(struct smb_ntsd *pntsd, struct smb_ntsd *pnntsd,
+ __u32 secdesclen, __u32 *pnsecdesclen, __u64 *pnmode, kuid_t uid, kgid_t gid,
+- bool mode_from_sid, bool id_from_sid, int *aclflag)
++ bool mode_from_sid, bool id_from_sid, bool posix, int *aclflag)
+ {
+ int rc = 0;
+ __u32 dacloffset;
+@@ -1288,7 +1296,7 @@ static int build_sec_desc(struct smb_nts
+ ndacl_ptr->num_aces = cpu_to_le32(0);
+
+ rc = set_chmod_dacl(dacl_ptr, ndacl_ptr, owner_sid_ptr, group_sid_ptr,
+- pnmode, mode_from_sid);
++ pnmode, mode_from_sid, posix);
+
+ sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
+ /* copy the non-dacl portion of secdesc */
+@@ -1587,6 +1595,7 @@ id_mode_to_cifs_acl(struct inode *inode,
+ struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+ struct smb_version_operations *ops;
+ bool mode_from_sid, id_from_sid;
++ bool posix = tlink_tcon(tlink)->posix_extensions;
+ const u32 info = 0;
+
+ if (IS_ERR(tlink))
+@@ -1622,12 +1631,13 @@ id_mode_to_cifs_acl(struct inode *inode,
+ id_from_sid = false;
+
+ /* Potentially, five new ACEs can be added to the ACL for U,G,O mapping */
+- nsecdesclen = secdesclen;
+ if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */
+- if (mode_from_sid)
+- nsecdesclen += 2 * sizeof(struct smb_ace);
++ if (posix)
++ nsecdesclen = 1 * sizeof(struct smb_ace);
++ else if (mode_from_sid)
++ nsecdesclen = secdesclen + (2 * sizeof(struct smb_ace));
+ else /* cifsacl */
+- nsecdesclen += 5 * sizeof(struct smb_ace);
++ nsecdesclen = secdesclen + (5 * sizeof(struct smb_ace));
+ } else { /* chown */
+ /* When ownership changes, changes new owner sid length could be different */
+ nsecdesclen = sizeof(struct smb_ntsd) + (sizeof(struct smb_sid) * 2);
+@@ -1657,7 +1667,7 @@ id_mode_to_cifs_acl(struct inode *inode,
+ }
+
+ rc = build_sec_desc(pntsd, pnntsd, secdesclen, &nsecdesclen, pnmode, uid, gid,
+- mode_from_sid, id_from_sid, &aclflag);
++ mode_from_sid, id_from_sid, posix, &aclflag);
+
+ cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc);
+
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -244,7 +244,9 @@ extern int cifs_set_acl(struct mnt_idmap
+ extern int set_cifs_acl(struct smb_ntsd *pntsd, __u32 len, struct inode *ino,
+ const char *path, int flag);
+ extern unsigned int setup_authusers_ACE(struct smb_ace *pace);
+-extern unsigned int setup_special_mode_ACE(struct smb_ace *pace, __u64 nmode);
++extern unsigned int setup_special_mode_ACE(struct smb_ace *pace,
++ bool posix,
++ __u64 nmode);
+ extern unsigned int setup_special_user_owner_ACE(struct smb_ace *pace);
+
+ extern void dequeue_mid(struct mid_q_entry *mid, bool malformed);
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -3062,6 +3062,7 @@ cifs_setattr_nounix(struct dentry *diren
+ int rc = -EACCES;
+ __u32 dosattr = 0;
+ __u64 mode = NO_CHANGE_64;
++ bool posix = cifs_sb_master_tcon(cifs_sb)->posix_extensions;
+
+ xid = get_xid();
+
+@@ -3152,7 +3153,8 @@ cifs_setattr_nounix(struct dentry *diren
+ mode = attrs->ia_mode;
+ rc = 0;
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) ||
+- (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)) {
++ (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) ||
++ posix) {
+ rc = id_mode_to_cifs_acl(inode, full_path, &mode,
+ INVALID_UID, INVALID_GID);
+ if (rc) {
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -2685,7 +2685,7 @@ create_sd_buf(umode_t mode, bool set_own
+ ptr += sizeof(struct smb3_acl);
+
+ /* create one ACE to hold the mode embedded in reserved special SID */
+- acelen = setup_special_mode_ACE((struct smb_ace *)ptr, (__u64)mode);
++ acelen = setup_special_mode_ACE((struct smb_ace *)ptr, false, (__u64)mode);
+ ptr += acelen;
+ acl_size = acelen + sizeof(struct smb3_acl);
+ ace_count = 1;
alsa-hda-realtek-fix-internal-speaker-and-mic-boost-of-infinix-y4-max.patch
alsa-hda-realtek-fix-mute-micmute-leds-don-t-work-for-elitebook-x-g1i.patch
alsa-hda-realtek-apply-quirk-for-medion-e15433.patch
+fs-smb-client-implement-chmod-for-smb3-posix-extensions.patch
+smb-client-fix-use-after-free-of-signing-key.patch
+smb3-request-handle-caching-when-caching-directories.patch
+smb-client-handle-max-length-for-smb-symlinks.patch
+smb-don-t-leak-cfid-when-reconnect-races-with-open_cached_dir.patch
+smb-prevent-use-after-free-due-to-open_cached_dir-error-paths.patch
+smb-during-unmount-ensure-all-cached-dir-instances-drop-their-dentry.patch
+usb-misc-ljca-set-small-runtime-autosuspend-delay.patch
+usb-misc-ljca-move-usb_autopm_put_interface-after-wait-for-response.patch
+usb-dwc3-ep0-don-t-clear-ep0-dwc3_ep_transfer_started.patch
+usb-musb-fix-hardware-lockup-on-first-rx-endpoint-request.patch
+usb-dwc3-gadget-add-missing-check-for-single-port-ram-in-txfifo-resizing-logic.patch
+usb-dwc3-gadget-fix-checking-for-number-of-trbs-left.patch
+usb-dwc3-gadget-fix-looping-of-queued-sg-entries.patch
--- /dev/null
+From 343d7fe6df9e247671440a932b6a73af4fa86d95 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Mon, 11 Nov 2024 10:40:55 -0300
+Subject: smb: client: fix use-after-free of signing key
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit 343d7fe6df9e247671440a932b6a73af4fa86d95 upstream.
+
+Customers have reported a use-after-free in @ses->auth_key.response
+with SMB2.1 + sign mounts, which occurs due to the following race:
+
+task A task B
+cifs_mount()
+ dfs_mount_share()
+ get_session()
+ cifs_mount_get_session() cifs_send_recv()
+ cifs_get_smb_ses() compound_send_recv()
+ cifs_setup_session() smb2_setup_request()
+ kfree_sensitive() smb2_calc_signature()
+ crypto_shash_setkey() *UAF*
+
+Fix this by ensuring that we have a valid @ses->auth_key.response by
+checking whether @ses->ses_status is SES_GOOD or SES_EXITING with
+@ses->ses_lock held. After commit 24a9799aa8ef ("smb: client: fix UAF
+in smb2_reconnect_server()"), we made sure to call ->logoff() only
+when @ses was known to be good (e.g. valid ->auth_key.response), so
+it's safe to access the signing key when @ses->ses_status == SES_EXITING.
+
+Cc: stable@vger.kernel.org
+Reported-by: Jay Shin <jaeshin@redhat.com>
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/smb2proto.h | 2 -
+ fs/smb/client/smb2transport.c | 56 ++++++++++++++++++++++++++++++------------
+ 2 files changed, 40 insertions(+), 18 deletions(-)
+
+--- a/fs/smb/client/smb2proto.h
++++ b/fs/smb/client/smb2proto.h
+@@ -37,8 +37,6 @@ extern struct mid_q_entry *smb2_setup_re
+ struct smb_rqst *rqst);
+ extern struct mid_q_entry *smb2_setup_async_request(
+ struct TCP_Server_Info *server, struct smb_rqst *rqst);
+-extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
+- __u64 ses_id);
+ extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
+ __u64 ses_id, __u32 tid);
+ extern int smb2_calc_signature(struct smb_rqst *rqst,
+--- a/fs/smb/client/smb2transport.c
++++ b/fs/smb/client/smb2transport.c
+@@ -74,7 +74,7 @@ err:
+
+
+ static
+-int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
++int smb3_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
+ {
+ struct cifs_chan *chan;
+ struct TCP_Server_Info *pserver;
+@@ -168,16 +168,41 @@ smb2_find_smb_ses_unlocked(struct TCP_Se
+ return NULL;
+ }
+
+-struct cifs_ses *
+-smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
++static int smb2_get_sign_key(struct TCP_Server_Info *server,
++ __u64 ses_id, u8 *key)
+ {
+ struct cifs_ses *ses;
++ int rc = -ENOENT;
++
++ if (SERVER_IS_CHAN(server))
++ server = server->primary_server;
+
+ spin_lock(&cifs_tcp_ses_lock);
+- ses = smb2_find_smb_ses_unlocked(server, ses_id);
+- spin_unlock(&cifs_tcp_ses_lock);
++ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++ if (ses->Suid != ses_id)
++ continue;
+
+- return ses;
++ rc = 0;
++ spin_lock(&ses->ses_lock);
++ switch (ses->ses_status) {
++ case SES_EXITING: /* SMB2_LOGOFF */
++ case SES_GOOD:
++ if (likely(ses->auth_key.response)) {
++ memcpy(key, ses->auth_key.response,
++ SMB2_NTLMV2_SESSKEY_SIZE);
++ } else {
++ rc = -EIO;
++ }
++ break;
++ default:
++ rc = -EAGAIN;
++ break;
++ }
++ spin_unlock(&ses->ses_lock);
++ break;
++ }
++ spin_unlock(&cifs_tcp_ses_lock);
++ return rc;
+ }
+
+ static struct cifs_tcon *
+@@ -236,14 +261,16 @@ smb2_calc_signature(struct smb_rqst *rqs
+ unsigned char *sigptr = smb2_signature;
+ struct kvec *iov = rqst->rq_iov;
+ struct smb2_hdr *shdr = (struct smb2_hdr *)iov[0].iov_base;
+- struct cifs_ses *ses;
+ struct shash_desc *shash = NULL;
+ struct smb_rqst drqst;
++ __u64 sid = le64_to_cpu(shdr->SessionId);
++ u8 key[SMB2_NTLMV2_SESSKEY_SIZE];
+
+- ses = smb2_find_smb_ses(server, le64_to_cpu(shdr->SessionId));
+- if (unlikely(!ses)) {
+- cifs_server_dbg(FYI, "%s: Could not find session\n", __func__);
+- return -ENOENT;
++ rc = smb2_get_sign_key(server, sid, key);
++ if (unlikely(rc)) {
++ cifs_server_dbg(FYI, "%s: [sesid=0x%llx] couldn't find signing key: %d\n",
++ __func__, sid, rc);
++ return rc;
+ }
+
+ memset(smb2_signature, 0x0, SMB2_HMACSHA256_SIZE);
+@@ -260,8 +287,7 @@ smb2_calc_signature(struct smb_rqst *rqs
+ shash = server->secmech.hmacsha256;
+ }
+
+- rc = crypto_shash_setkey(shash->tfm, ses->auth_key.response,
+- SMB2_NTLMV2_SESSKEY_SIZE);
++ rc = crypto_shash_setkey(shash->tfm, key, sizeof(key));
+ if (rc) {
+ cifs_server_dbg(VFS,
+ "%s: Could not update with response\n",
+@@ -303,8 +329,6 @@ smb2_calc_signature(struct smb_rqst *rqs
+ out:
+ if (allocate_crypto)
+ cifs_free_hash(&shash);
+- if (ses)
+- cifs_put_smb_ses(ses);
+ return rc;
+ }
+
+@@ -570,7 +594,7 @@ smb3_calc_signature(struct smb_rqst *rqs
+ struct smb_rqst drqst;
+ u8 key[SMB3_SIGN_KEY_SIZE];
+
+- rc = smb2_get_sign_key(le64_to_cpu(shdr->SessionId), server, key);
++ rc = smb3_get_sign_key(le64_to_cpu(shdr->SessionId), server, key);
+ if (unlikely(rc)) {
+ cifs_server_dbg(FYI, "%s: Could not get signing key\n", __func__);
+ return rc;
--- /dev/null
+From 0812340811e45ec4039d409049be53056182a552 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@manguebit.com>
+Date: Mon, 18 Nov 2024 12:35:16 -0300
+Subject: smb: client: handle max length for SMB symlinks
+
+From: Paulo Alcantara <pc@manguebit.com>
+
+commit 0812340811e45ec4039d409049be53056182a552 upstream.
+
+We can't use PATH_MAX for SMB symlinks because
+
+  (1) Windows Server will fail FSCTL_SET_REPARSE_POINT with
+      STATUS_IO_REPARSE_DATA_INVALID when the input buffer is larger
+      than 16K, as specified in MS-FSA 2.1.5.10.37.
+
+  (2) The client won't be able to parse large SMB responses that
+      include the SMB symlink path within SMB2_CREATE or SMB2_IOCTL
+      responses.
+
+Fix this by defining a maximum length value (4060) for SMB symlinks
+that both client and server can handle.
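+
+(Rough arithmetic behind the limit, added here as an assumption rather than
+taken from the original text: the symlink target is stored as UTF-16 in both
+the SubstituteName and PrintName fields of the reparse buffer, so 4060
+characters amount to roughly 4060 * 2 * 2 = 16240 bytes of path data, which
+keeps the whole FSCTL_SET_REPARSE_POINT input buffer under the 16K limit once
+the reparse point header is added.)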
+
+Cc: David Howells <dhowells@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/reparse.c | 5 ++++-
+ fs/smb/client/reparse.h | 2 ++
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+--- a/fs/smb/client/reparse.c
++++ b/fs/smb/client/reparse.c
+@@ -35,6 +35,9 @@ int smb2_create_reparse_symlink(const un
+ u16 len, plen;
+ int rc = 0;
+
++ if (strlen(symname) > REPARSE_SYM_PATH_MAX)
++ return -ENAMETOOLONG;
++
+ sym = kstrdup(symname, GFP_KERNEL);
+ if (!sym)
+ return -ENOMEM;
+@@ -64,7 +67,7 @@ int smb2_create_reparse_symlink(const un
+ if (rc < 0)
+ goto out;
+
+- plen = 2 * UniStrnlen((wchar_t *)path, PATH_MAX);
++ plen = 2 * UniStrnlen((wchar_t *)path, REPARSE_SYM_PATH_MAX);
+ len = sizeof(*buf) + plen * 2;
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf) {
+--- a/fs/smb/client/reparse.h
++++ b/fs/smb/client/reparse.h
+@@ -12,6 +12,8 @@
+ #include "fs_context.h"
+ #include "cifsglob.h"
+
++#define REPARSE_SYM_PATH_MAX 4060
++
+ /*
+ * Used only by cifs.ko to ignore reparse points from files when client or
+ * server doesn't support FSCTL_GET_REPARSE_POINT.
--- /dev/null
+From 7afb86733685c64c604d32faf00fa4a1f22c2ab1 Mon Sep 17 00:00:00 2001
+From: Paul Aurich <paul@darkrain42.org>
+Date: Mon, 18 Nov 2024 13:50:26 -0800
+Subject: smb: Don't leak cfid when reconnect races with open_cached_dir
+
+From: Paul Aurich <paul@darkrain42.org>
+
+commit 7afb86733685c64c604d32faf00fa4a1f22c2ab1 upstream.
+
+open_cached_dir() may either race with the tcon reconnection even before
+compound_send_recv() or directly trigger a reconnection via
+SMB2_open_init() or SMB2_query_info_init().
+
+The reconnection process invokes invalidate_all_cached_dirs() via
+cifs_mark_open_files_invalid(), which removes all cfids from the
+cfids->entries list but doesn't drop a ref if has_lease isn't true. This
+results in the currently-being-constructed cfid not being on the list,
+but still having a refcount of 2. It leaks if returned from
+open_cached_dir().
+
+Fix this by setting cfid->has_lease when the ref is actually taken; the
+cfid will not be used by other threads until it has a valid time.
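+
+(Reference-count sketch, inferred from the change rather than stated in it:
+find_or_create_cached_dir() hands back the cfid with a refcount of 2, one for
+the caller and one that belongs to the potential lease. Setting has_lease at
+that point means the lease reference always has an owner, so either
+cached_dir_lease_break() or the open_cached_dir() error path can drop it,
+rather than it leaking once a reconnect has unlinked the cfid from
+cfids->entries.)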
+
+Addresses these kmemleaks:
+
+unreferenced object 0xffff8881090c4000 (size 1024):
+ comm "bash", pid 1860, jiffies 4295126592
+ hex dump (first 32 bytes):
+ 00 01 00 00 00 00 ad de 22 01 00 00 00 00 ad de ........".......
+ 00 ca 45 22 81 88 ff ff f8 dc 4f 04 81 88 ff ff ..E"......O.....
+ backtrace (crc 6f58c20f):
+ [<ffffffff8b895a1e>] __kmalloc_cache_noprof+0x2be/0x350
+ [<ffffffff8bda06e3>] open_cached_dir+0x993/0x1fb0
+ [<ffffffff8bdaa750>] cifs_readdir+0x15a0/0x1d50
+ [<ffffffff8b9a853f>] iterate_dir+0x28f/0x4b0
+ [<ffffffff8b9a9aed>] __x64_sys_getdents64+0xfd/0x200
+ [<ffffffff8cf6da05>] do_syscall_64+0x95/0x1a0
+ [<ffffffff8d00012f>] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+unreferenced object 0xffff8881044fdcf8 (size 8):
+ comm "bash", pid 1860, jiffies 4295126592
+ hex dump (first 8 bytes):
+ 00 cc cc cc cc cc cc cc ........
+ backtrace (crc 10c106a9):
+ [<ffffffff8b89a3d3>] __kmalloc_node_track_caller_noprof+0x363/0x480
+ [<ffffffff8b7d7256>] kstrdup+0x36/0x60
+ [<ffffffff8bda0700>] open_cached_dir+0x9b0/0x1fb0
+ [<ffffffff8bdaa750>] cifs_readdir+0x15a0/0x1d50
+ [<ffffffff8b9a853f>] iterate_dir+0x28f/0x4b0
+ [<ffffffff8b9a9aed>] __x64_sys_getdents64+0xfd/0x200
+ [<ffffffff8cf6da05>] do_syscall_64+0x95/0x1a0
+ [<ffffffff8d00012f>] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+And addresses these BUG splats when unmounting the SMB filesystem:
+
+BUG: Dentry ffff888140590ba0{i=1000000000080,n=/} still in use (2) [unmount of cifs cifs]
+WARNING: CPU: 3 PID: 3433 at fs/dcache.c:1536 umount_check+0xd0/0x100
+Modules linked in:
+CPU: 3 UID: 0 PID: 3433 Comm: bash Not tainted 6.12.0-rc4-g850925a8133c-dirty #49
+Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 11/12/2020
+RIP: 0010:umount_check+0xd0/0x100
+Code: 8d 7c 24 40 e8 31 5a f4 ff 49 8b 54 24 40 41 56 49 89 e9 45 89 e8 48 89 d9 41 57 48 89 de 48 c7 c7 80 e7 db ac e8 f0 72 9a ff <0f> 0b 58 31 c0 5a 5b 5d 41 5c 41 5d 41 5e 41 5f e9 2b e5 5d 01 41
+RSP: 0018:ffff88811cc27978 EFLAGS: 00010286
+RAX: 0000000000000000 RBX: ffff888140590ba0 RCX: ffffffffaaf20bae
+RDX: dffffc0000000000 RSI: 0000000000000008 RDI: ffff8881f6fb6f40
+RBP: ffff8881462ec000 R08: 0000000000000001 R09: ffffed1023984ee3
+R10: ffff88811cc2771f R11: 00000000016cfcc0 R12: ffff888134383e08
+R13: 0000000000000002 R14: ffff8881462ec668 R15: ffffffffaceab4c0
+FS: 00007f23bfa98740(0000) GS:ffff8881f6f80000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000556de4a6f808 CR3: 0000000123c80000 CR4: 0000000000350ef0
+Call Trace:
+ <TASK>
+ d_walk+0x6a/0x530
+ shrink_dcache_for_umount+0x6a/0x200
+ generic_shutdown_super+0x52/0x2a0
+ kill_anon_super+0x22/0x40
+ cifs_kill_sb+0x159/0x1e0
+ deactivate_locked_super+0x66/0xe0
+ cleanup_mnt+0x140/0x210
+ task_work_run+0xfb/0x170
+ syscall_exit_to_user_mode+0x29f/0x2b0
+ do_syscall_64+0xa1/0x1a0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+RIP: 0033:0x7f23bfb93ae7
+Code: ff ff ff ff c3 66 0f 1f 44 00 00 48 8b 0d 11 93 0d 00 f7 d8 64 89 01 b8 ff ff ff ff eb bf 0f 1f 44 00 00 b8 50 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d e9 92 0d 00 f7 d8 64 89 01 48
+RSP: 002b:00007ffee9138598 EFLAGS: 00000246 ORIG_RAX: 0000000000000050
+RAX: 0000000000000000 RBX: 0000558f1803e9a0 RCX: 00007f23bfb93ae7
+RDX: 0000000000000000 RSI: 0000000000000004 RDI: 0000558f1803e9a0
+RBP: 0000558f1803e600 R08: 0000000000000007 R09: 0000558f17fab610
+R10: d91d5ec34ab757b0 R11: 0000000000000246 R12: 0000000000000001
+R13: 0000000000000000 R14: 0000000000000015 R15: 0000000000000000
+ </TASK>
+irq event stamp: 1163486
+hardirqs last enabled at (1163485): [<ffffffffac98d344>] _raw_spin_unlock_irqrestore+0x34/0x60
+hardirqs last disabled at (1163486): [<ffffffffac97dcfc>] __schedule+0xc7c/0x19a0
+softirqs last enabled at (1163482): [<ffffffffab79a3ee>] __smb_send_rqst+0x3de/0x990
+softirqs last disabled at (1163480): [<ffffffffac2314f1>] release_sock+0x21/0xf0
+---[ end trace 0000000000000000 ]---
+
+VFS: Busy inodes after unmount of cifs (cifs)
+------------[ cut here ]------------
+kernel BUG at fs/super.c:661!
+Oops: invalid opcode: 0000 [#1] PREEMPT SMP KASAN NOPTI
+CPU: 1 UID: 0 PID: 3433 Comm: bash Tainted: G W 6.12.0-rc4-g850925a8133c-dirty #49
+Tainted: [W]=WARN
+Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 11/12/2020
+RIP: 0010:generic_shutdown_super+0x290/0x2a0
+Code: e8 15 7c f7 ff 48 8b 5d 28 48 89 df e8 09 7c f7 ff 48 8b 0b 48 89 ee 48 8d 95 68 06 00 00 48 c7 c7 80 7f db ac e8 00 69 af ff <0f> 0b 66 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 00 90 90 90 90 90 90
+RSP: 0018:ffff88811cc27a50 EFLAGS: 00010246
+RAX: 000000000000003e RBX: ffffffffae994420 RCX: 0000000000000027
+RDX: 0000000000000000 RSI: ffffffffab06180e RDI: ffff8881f6eb18c8
+RBP: ffff8881462ec000 R08: 0000000000000001 R09: ffffed103edd6319
+R10: ffff8881f6eb18cb R11: 00000000016d3158 R12: ffff8881462ec9c0
+R13: ffff8881462ec050 R14: 0000000000000001 R15: 0000000000000000
+FS: 00007f23bfa98740(0000) GS:ffff8881f6e80000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f8364005d68 CR3: 0000000123c80000 CR4: 0000000000350ef0
+Call Trace:
+ <TASK>
+ kill_anon_super+0x22/0x40
+ cifs_kill_sb+0x159/0x1e0
+ deactivate_locked_super+0x66/0xe0
+ cleanup_mnt+0x140/0x210
+ task_work_run+0xfb/0x170
+ syscall_exit_to_user_mode+0x29f/0x2b0
+ do_syscall_64+0xa1/0x1a0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+RIP: 0033:0x7f23bfb93ae7
+ </TASK>
+Modules linked in:
+---[ end trace 0000000000000000 ]---
+RIP: 0010:generic_shutdown_super+0x290/0x2a0
+Code: e8 15 7c f7 ff 48 8b 5d 28 48 89 df e8 09 7c f7 ff 48 8b 0b 48 89 ee 48 8d 95 68 06 00 00 48 c7 c7 80 7f db ac e8 00 69 af ff <0f> 0b 66 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 00 90 90 90 90 90 90
+RSP: 0018:ffff88811cc27a50 EFLAGS: 00010246
+RAX: 000000000000003e RBX: ffffffffae994420 RCX: 0000000000000027
+RDX: 0000000000000000 RSI: ffffffffab06180e RDI: ffff8881f6eb18c8
+RBP: ffff8881462ec000 R08: 0000000000000001 R09: ffffed103edd6319
+R10: ffff8881f6eb18cb R11: 00000000016d3158 R12: ffff8881462ec9c0
+R13: ffff8881462ec050 R14: 0000000000000001 R15: 0000000000000000
+FS: 00007f23bfa98740(0000) GS:ffff8881f6e80000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f8364005d68 CR3: 0000000123c80000 CR4: 0000000000350ef0
+
+This reproduces eventually with an SMB mount and two shells running
+these loops concurrently
+
+- while true; do
+ cd ~; sleep 1;
+ for i in {1..3}; do cd /mnt/test/subdir;
+ echo $PWD; sleep 1; cd ..; echo $PWD; sleep 1;
+ done;
+ echo ...;
+ done
+- while true; do
+ iptables -F OUTPUT; mount -t cifs -a;
+ for _ in {0..2}; do ls /mnt/test/subdir/ | wc -l; done;
+ iptables -I OUTPUT -p tcp --dport 445 -j DROP;
+ sleep 10
+ echo "unmounting"; umount -l -t cifs -a; echo "done unmounting";
+ sleep 20
+ echo "recovering"; iptables -F OUTPUT;
+ sleep 10;
+ done
+
+Fixes: ebe98f1447bb ("cifs: enable caching of directories for which a lease is held")
+Fixes: 5c86919455c1 ("smb: client: fix use-after-free in smb2_query_info_compound()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paul Aurich <paul@darkrain42.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cached_dir.c | 27 ++++++++++++++-------------
+ 1 file changed, 14 insertions(+), 13 deletions(-)
+
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -59,6 +59,16 @@ static struct cached_fid *find_or_create
+ list_add(&cfid->entry, &cfids->entries);
+ cfid->on_list = true;
+ kref_get(&cfid->refcount);
++ /*
++ * Set @cfid->has_lease to true during construction so that the lease
++ * reference can be put in cached_dir_lease_break() due to a potential
++ * lease break right after the request is sent or while @cfid is still
++ * being cached, or if a reconnection is triggered during construction.
++	 * Concurrent processes won't be able to use it yet due to @cfid->time
++	 * being zero.
++ */
++ cfid->has_lease = true;
++
+ spin_unlock(&cfids->cfid_list_lock);
+ return cfid;
+ }
+@@ -176,12 +186,12 @@ replay_again:
+ return -ENOENT;
+ }
+ /*
+- * Return cached fid if it has a lease. Otherwise, it is either a new
+- * entry or laundromat worker removed it from @cfids->entries. Caller
+- * will put last reference if the latter.
++ * Return cached fid if it is valid (has a lease and has a time).
++ * Otherwise, it is either a new entry or laundromat worker removed it
++ * from @cfids->entries. Caller will put last reference if the latter.
+ */
+ spin_lock(&cfids->cfid_list_lock);
+- if (cfid->has_lease) {
++ if (cfid->has_lease && cfid->time) {
+ spin_unlock(&cfids->cfid_list_lock);
+ *ret_cfid = cfid;
+ kfree(utf16_path);
+@@ -267,15 +277,6 @@ replay_again:
+
+ smb2_set_related(&rqst[1]);
+
+- /*
+- * Set @cfid->has_lease to true before sending out compounded request so
+- * its lease reference can be put in cached_dir_lease_break() due to a
+- * potential lease break right after the request is sent or while @cfid
+- * is still being cached. Concurrent processes won't be to use it yet
+- * due to @cfid->time being zero.
+- */
+- cfid->has_lease = true;
+-
+ if (retries) {
+ smb2_set_replay(server, &rqst[0]);
+ smb2_set_replay(server, &rqst[1]);
--- /dev/null
+From 3fa640d035e5ae526769615c35cb9ed4be6e3662 Mon Sep 17 00:00:00 2001
+From: Paul Aurich <paul@darkrain42.org>
+Date: Mon, 18 Nov 2024 13:50:28 -0800
+Subject: smb: During unmount, ensure all cached dir instances drop their dentry
+
+From: Paul Aurich <paul@darkrain42.org>
+
+commit 3fa640d035e5ae526769615c35cb9ed4be6e3662 upstream.
+
+The unmount process (cifs_kill_sb() calling close_all_cached_dirs()) can
+race with various cached directory operations, which ultimately results
+in dentries not being dropped and these kernel BUGs:
+
+BUG: Dentry ffff88814f37e358{i=1000000000080,n=/} still in use (2) [unmount of cifs cifs]
+VFS: Busy inodes after unmount of cifs (cifs)
+------------[ cut here ]------------
+kernel BUG at fs/super.c:661!
+
+This happens when a cfid is in the process of being cleaned up and has
+already been removed from the cfids->entries list, including when:
+
+- Receiving a lease break from the server
+- Server reconnection triggers invalidate_all_cached_dirs(), which
+ removes all the cfids from the list
+- The laundromat thread decides to expire an old cfid.
+
+To solve these problems, dropping the dentry is deferred to work queued
+on a newly-added cfid_put_wq workqueue, and close_all_cached_dirs()
+flushes that workqueue after it drops all the dentries of which it's
+aware. This is a global workqueue (rather than scoped to a mount), but
+the queued work is minimal.
+
+The final cleanup of a cfid is performed via work queued on the
+serverclose_wq workqueue; this is kept separate from dropping the
+dentries so that close_all_cached_dirs() doesn't block on any server
+operations.
+
+Both of these queued works expect to be invoked with a cfid reference
+and a tcon reference, so that those objects are not freed while the
+work is ongoing.
+
+While we're here, add proper locking to close_all_cached_dirs(), and
+locking around the freeing of cfid->dentry.
+
+Fixes: ebe98f1447bb ("cifs: enable caching of directories for which a lease is held")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paul Aurich <paul@darkrain42.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cached_dir.c | 156 ++++++++++++++++++++++++++++++++++++---------
+ fs/smb/client/cached_dir.h | 6 +
+ fs/smb/client/cifsfs.c | 12 +++
+ fs/smb/client/cifsglob.h | 3
+ fs/smb/client/inode.c | 3
+ fs/smb/client/trace.h | 3
+ 6 files changed, 147 insertions(+), 36 deletions(-)
+
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -17,6 +17,11 @@ static void free_cached_dir(struct cache
+ static void smb2_close_cached_fid(struct kref *ref);
+ static void cfids_laundromat_worker(struct work_struct *work);
+
++struct cached_dir_dentry {
++ struct list_head entry;
++ struct dentry *dentry;
++};
++
+ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+ const char *path,
+ bool lookup_only,
+@@ -470,7 +475,10 @@ void close_all_cached_dirs(struct cifs_s
+ struct cifs_tcon *tcon;
+ struct tcon_link *tlink;
+ struct cached_fids *cfids;
++ struct cached_dir_dentry *tmp_list, *q;
++ LIST_HEAD(entry);
+
++ spin_lock(&cifs_sb->tlink_tree_lock);
+ for (node = rb_first(root); node; node = rb_next(node)) {
+ tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+ tcon = tlink_tcon(tlink);
+@@ -479,11 +487,30 @@ void close_all_cached_dirs(struct cifs_s
+ cfids = tcon->cfids;
+ if (cfids == NULL)
+ continue;
++ spin_lock(&cfids->cfid_list_lock);
+ list_for_each_entry(cfid, &cfids->entries, entry) {
+- dput(cfid->dentry);
++ tmp_list = kmalloc(sizeof(*tmp_list), GFP_ATOMIC);
++ if (tmp_list == NULL)
++ break;
++ spin_lock(&cfid->fid_lock);
++ tmp_list->dentry = cfid->dentry;
+ cfid->dentry = NULL;
++ spin_unlock(&cfid->fid_lock);
++
++ list_add_tail(&tmp_list->entry, &entry);
+ }
++ spin_unlock(&cfids->cfid_list_lock);
++ }
++ spin_unlock(&cifs_sb->tlink_tree_lock);
++
++ list_for_each_entry_safe(tmp_list, q, &entry, entry) {
++ list_del(&tmp_list->entry);
++ dput(tmp_list->dentry);
++ kfree(tmp_list);
+ }
++
++ /* Flush any pending work that will drop dentries */
++ flush_workqueue(cfid_put_wq);
+ }
+
+ /*
+@@ -494,14 +521,18 @@ void invalidate_all_cached_dirs(struct c
+ {
+ struct cached_fids *cfids = tcon->cfids;
+ struct cached_fid *cfid, *q;
+- LIST_HEAD(entry);
+
+ if (cfids == NULL)
+ return;
+
++ /*
++ * Mark all the cfids as closed, and move them to the cfids->dying list.
++ * They'll be cleaned up later by cfids_invalidation_worker. Take
++ * a reference to each cfid during this process.
++ */
+ spin_lock(&cfids->cfid_list_lock);
+ list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+- list_move(&cfid->entry, &entry);
++ list_move(&cfid->entry, &cfids->dying);
+ cfids->num_entries--;
+ cfid->is_open = false;
+ cfid->on_list = false;
+@@ -514,26 +545,47 @@ void invalidate_all_cached_dirs(struct c
+ } else
+ kref_get(&cfid->refcount);
+ }
++ /*
++ * Queue dropping of the dentries once locks have been dropped
++ */
++ if (!list_empty(&cfids->dying))
++ queue_work(cfid_put_wq, &cfids->invalidation_work);
+ spin_unlock(&cfids->cfid_list_lock);
+-
+- list_for_each_entry_safe(cfid, q, &entry, entry) {
+- list_del(&cfid->entry);
+- cancel_work_sync(&cfid->lease_break);
+- /*
+- * Drop the ref-count from above, either the lease-ref (if there
+- * was one) or the extra one acquired.
+- */
+- kref_put(&cfid->refcount, smb2_close_cached_fid);
+- }
+ }
+
+ static void
+-smb2_cached_lease_break(struct work_struct *work)
++cached_dir_offload_close(struct work_struct *work)
+ {
+ struct cached_fid *cfid = container_of(work,
+- struct cached_fid, lease_break);
++ struct cached_fid, close_work);
++ struct cifs_tcon *tcon = cfid->tcon;
++
++ WARN_ON(cfid->on_list);
+
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
++ cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
++}
++
++/*
++ * Release the cached directory's dentry, and then queue work to drop cached
++ * directory itself (closing on server if needed).
++ *
++ * Must be called with a reference to the cached_fid and a reference to the
++ * tcon.
++ */
++static void cached_dir_put_work(struct work_struct *work)
++{
++ struct cached_fid *cfid = container_of(work, struct cached_fid,
++ put_work);
++ struct dentry *dentry;
++
++ spin_lock(&cfid->fid_lock);
++ dentry = cfid->dentry;
++ cfid->dentry = NULL;
++ spin_unlock(&cfid->fid_lock);
++
++ dput(dentry);
++ queue_work(serverclose_wq, &cfid->close_work);
+ }
+
+ int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
+@@ -560,8 +612,10 @@ int cached_dir_lease_break(struct cifs_t
+ cfid->on_list = false;
+ cfids->num_entries--;
+
+- queue_work(cifsiod_wq,
+- &cfid->lease_break);
++ ++tcon->tc_count;
++ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
++ netfs_trace_tcon_ref_get_cached_lease_break);
++ queue_work(cfid_put_wq, &cfid->put_work);
+ spin_unlock(&cfids->cfid_list_lock);
+ return true;
+ }
+@@ -583,7 +637,8 @@ static struct cached_fid *init_cached_di
+ return NULL;
+ }
+
+- INIT_WORK(&cfid->lease_break, smb2_cached_lease_break);
++ INIT_WORK(&cfid->close_work, cached_dir_offload_close);
++ INIT_WORK(&cfid->put_work, cached_dir_put_work);
+ INIT_LIST_HEAD(&cfid->entry);
+ INIT_LIST_HEAD(&cfid->dirents.entries);
+ mutex_init(&cfid->dirents.de_mutex);
+@@ -596,6 +651,9 @@ static void free_cached_dir(struct cache
+ {
+ struct cached_dirent *dirent, *q;
+
++ WARN_ON(work_pending(&cfid->close_work));
++ WARN_ON(work_pending(&cfid->put_work));
++
+ dput(cfid->dentry);
+ cfid->dentry = NULL;
+
+@@ -613,10 +671,30 @@ static void free_cached_dir(struct cache
+ kfree(cfid);
+ }
+
++static void cfids_invalidation_worker(struct work_struct *work)
++{
++ struct cached_fids *cfids = container_of(work, struct cached_fids,
++ invalidation_work);
++ struct cached_fid *cfid, *q;
++ LIST_HEAD(entry);
++
++ spin_lock(&cfids->cfid_list_lock);
++ /* move cfids->dying to the local list */
++ list_cut_before(&entry, &cfids->dying, &cfids->dying);
++ spin_unlock(&cfids->cfid_list_lock);
++
++ list_for_each_entry_safe(cfid, q, &entry, entry) {
++ list_del(&cfid->entry);
++ /* Drop the ref-count acquired in invalidate_all_cached_dirs */
++ kref_put(&cfid->refcount, smb2_close_cached_fid);
++ }
++}
++
+ static void cfids_laundromat_worker(struct work_struct *work)
+ {
+ struct cached_fids *cfids;
+ struct cached_fid *cfid, *q;
++ struct dentry *dentry;
+ LIST_HEAD(entry);
+
+ cfids = container_of(work, struct cached_fids, laundromat_work.work);
+@@ -642,18 +720,28 @@ static void cfids_laundromat_worker(stru
+
+ list_for_each_entry_safe(cfid, q, &entry, entry) {
+ list_del(&cfid->entry);
+- /*
+- * Cancel and wait for the work to finish in case we are racing
+- * with it.
+- */
+- cancel_work_sync(&cfid->lease_break);
+- /*
+- * Drop the ref-count from above, either the lease-ref (if there
+- * was one) or the extra one acquired.
+- */
+- kref_put(&cfid->refcount, smb2_close_cached_fid);
++
++ spin_lock(&cfid->fid_lock);
++ dentry = cfid->dentry;
++ cfid->dentry = NULL;
++ spin_unlock(&cfid->fid_lock);
++
++ dput(dentry);
++ if (cfid->is_open) {
++ spin_lock(&cifs_tcp_ses_lock);
++ ++cfid->tcon->tc_count;
++ trace_smb3_tcon_ref(cfid->tcon->debug_id, cfid->tcon->tc_count,
++ netfs_trace_tcon_ref_get_cached_laundromat);
++ spin_unlock(&cifs_tcp_ses_lock);
++ queue_work(serverclose_wq, &cfid->close_work);
++ } else
++ /*
++ * Drop the ref-count from above, either the lease-ref (if there
++ * was one) or the extra one acquired.
++ */
++ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ }
+- queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
++ queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
+ dir_cache_timeout * HZ);
+ }
+
+@@ -666,9 +754,11 @@ struct cached_fids *init_cached_dirs(voi
+ return NULL;
+ spin_lock_init(&cfids->cfid_list_lock);
+ INIT_LIST_HEAD(&cfids->entries);
++ INIT_LIST_HEAD(&cfids->dying);
+
++ INIT_WORK(&cfids->invalidation_work, cfids_invalidation_worker);
+ INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
+- queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
++ queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
+ dir_cache_timeout * HZ);
+
+ return cfids;
+@@ -687,12 +777,18 @@ void free_cached_dirs(struct cached_fids
+ return;
+
+ cancel_delayed_work_sync(&cfids->laundromat_work);
++ cancel_work_sync(&cfids->invalidation_work);
+
+ spin_lock(&cfids->cfid_list_lock);
+ list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+ cfid->on_list = false;
+ cfid->is_open = false;
+ list_move(&cfid->entry, &entry);
++ }
++ list_for_each_entry_safe(cfid, q, &cfids->dying, entry) {
++ cfid->on_list = false;
++ cfid->is_open = false;
++ list_move(&cfid->entry, &entry);
+ }
+ spin_unlock(&cfids->cfid_list_lock);
+
+--- a/fs/smb/client/cached_dir.h
++++ b/fs/smb/client/cached_dir.h
+@@ -44,7 +44,8 @@ struct cached_fid {
+ spinlock_t fid_lock;
+ struct cifs_tcon *tcon;
+ struct dentry *dentry;
+- struct work_struct lease_break;
++ struct work_struct put_work;
++ struct work_struct close_work;
+ struct smb2_file_all_info file_all_info;
+ struct cached_dirents dirents;
+ };
+@@ -53,10 +54,13 @@ struct cached_fid {
+ struct cached_fids {
+ /* Must be held when:
+ * - accessing the cfids->entries list
++ * - accessing the cfids->dying list
+ */
+ spinlock_t cfid_list_lock;
+ int num_entries;
+ struct list_head entries;
++ struct list_head dying;
++ struct work_struct invalidation_work;
+ struct delayed_work laundromat_work;
+ };
+
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -157,6 +157,7 @@ struct workqueue_struct *fileinfo_put_wq
+ struct workqueue_struct *cifsoplockd_wq;
+ struct workqueue_struct *deferredclose_wq;
+ struct workqueue_struct *serverclose_wq;
++struct workqueue_struct *cfid_put_wq;
+ __u32 cifs_lock_secret;
+
+ /*
+@@ -1895,9 +1896,16 @@ init_cifs(void)
+ goto out_destroy_deferredclose_wq;
+ }
+
++ cfid_put_wq = alloc_workqueue("cfid_put_wq",
++ WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
++ if (!cfid_put_wq) {
++ rc = -ENOMEM;
++ goto out_destroy_serverclose_wq;
++ }
++
+ rc = cifs_init_inodecache();
+ if (rc)
+- goto out_destroy_serverclose_wq;
++ goto out_destroy_cfid_put_wq;
+
+ rc = cifs_init_netfs();
+ if (rc)
+@@ -1965,6 +1973,8 @@ out_destroy_netfs:
+ cifs_destroy_netfs();
+ out_destroy_inodecache:
+ cifs_destroy_inodecache();
++out_destroy_cfid_put_wq:
++ destroy_workqueue(cfid_put_wq);
+ out_destroy_serverclose_wq:
+ destroy_workqueue(serverclose_wq);
+ out_destroy_deferredclose_wq:
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -1983,7 +1983,7 @@ require use of the stronger protocol */
+ * cifsInodeInfo->lock_sem cifsInodeInfo->llist cifs_init_once
+ * ->can_cache_brlcks
+ * cifsInodeInfo->deferred_lock cifsInodeInfo->deferred_closes cifsInodeInfo_alloc
+- * cached_fid->fid_mutex cifs_tcon->crfid tcon_info_alloc
++ * cached_fids->cfid_list_lock cifs_tcon->cfids->entries init_cached_dirs
+ * cifsFileInfo->fh_mutex cifsFileInfo cifs_new_fileinfo
+ * cifsFileInfo->file_info_lock cifsFileInfo->count cifs_new_fileinfo
+ * ->invalidHandle initiate_cifs_search
+@@ -2071,6 +2071,7 @@ extern struct workqueue_struct *fileinfo
+ extern struct workqueue_struct *cifsoplockd_wq;
+ extern struct workqueue_struct *deferredclose_wq;
+ extern struct workqueue_struct *serverclose_wq;
++extern struct workqueue_struct *cfid_put_wq;
+ extern __u32 cifs_lock_secret;
+
+ extern mempool_t *cifs_sm_req_poolp;
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -2473,13 +2473,10 @@ cifs_dentry_needs_reval(struct dentry *d
+ return true;
+
+ if (!open_cached_dir_by_dentry(tcon, dentry->d_parent, &cfid)) {
+- spin_lock(&cfid->fid_lock);
+ if (cfid->time && cifs_i->time > cfid->time) {
+- spin_unlock(&cfid->fid_lock);
+ close_cached_dir(cfid);
+ return false;
+ }
+- spin_unlock(&cfid->fid_lock);
+ close_cached_dir(cfid);
+ }
+ /*
+--- a/fs/smb/client/trace.h
++++ b/fs/smb/client/trace.h
+@@ -44,6 +44,8 @@
+ EM(netfs_trace_tcon_ref_free_ipc, "FRE Ipc ") \
+ EM(netfs_trace_tcon_ref_free_ipc_fail, "FRE Ipc-F ") \
+ EM(netfs_trace_tcon_ref_free_reconnect_server, "FRE Reconn") \
++ EM(netfs_trace_tcon_ref_get_cached_laundromat, "GET Ch-Lau") \
++ EM(netfs_trace_tcon_ref_get_cached_lease_break, "GET Ch-Lea") \
+ EM(netfs_trace_tcon_ref_get_cancelled_close, "GET Cn-Cls") \
+ EM(netfs_trace_tcon_ref_get_dfs_refer, "GET DfsRef") \
+ EM(netfs_trace_tcon_ref_get_find, "GET Find ") \
+@@ -52,6 +54,7 @@
+ EM(netfs_trace_tcon_ref_new, "NEW ") \
+ EM(netfs_trace_tcon_ref_new_ipc, "NEW Ipc ") \
+ EM(netfs_trace_tcon_ref_new_reconnect_server, "NEW Reconn") \
++ EM(netfs_trace_tcon_ref_put_cached_close, "PUT Ch-Cls") \
+ EM(netfs_trace_tcon_ref_put_cancelled_close, "PUT Cn-Cls") \
+ EM(netfs_trace_tcon_ref_put_cancelled_close_fid, "PUT Cn-Fid") \
+ EM(netfs_trace_tcon_ref_put_cancelled_mid, "PUT Cn-Mid") \
--- /dev/null
+From a9685b409a03b73d2980bbfa53eb47555802d0a9 Mon Sep 17 00:00:00 2001
+From: Paul Aurich <paul@darkrain42.org>
+Date: Mon, 18 Nov 2024 13:50:27 -0800
+Subject: smb: prevent use-after-free due to open_cached_dir error paths
+
+From: Paul Aurich <paul@darkrain42.org>
+
+commit a9685b409a03b73d2980bbfa53eb47555802d0a9 upstream.
+
+If open_cached_dir() encounters an error parsing the lease from the
+server, the error handling may race with receiving a lease break,
+resulting in open_cached_dir() freeing the cfid while the queued work is
+pending.
+
+Update open_cached_dir() to drop refs rather than directly freeing the
+cfid.
+
+Have cached_dir_lease_break(), cfids_laundromat_worker(), and
+invalidate_all_cached_dirs() clear has_lease immediately while still
+holding cfids->cfid_list_lock, and then use this to also simplify the
+reference counting in cfids_laundromat_worker() and
+invalidate_all_cached_dirs().
+
+Fixes this KASAN splat (reproduced by manually injecting an error and a
+lease break in open_cached_dir()):
+
+==================================================================
+BUG: KASAN: slab-use-after-free in smb2_cached_lease_break+0x27/0xb0
+Read of size 8 at addr ffff88811cc24c10 by task kworker/3:1/65
+
+CPU: 3 UID: 0 PID: 65 Comm: kworker/3:1 Not tainted 6.12.0-rc6-g255cf264e6e5-dirty #87
+Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 11/12/2020
+Workqueue: cifsiod smb2_cached_lease_break
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0x77/0xb0
+ print_report+0xce/0x660
+ kasan_report+0xd3/0x110
+ smb2_cached_lease_break+0x27/0xb0
+ process_one_work+0x50a/0xc50
+ worker_thread+0x2ba/0x530
+ kthread+0x17c/0x1c0
+ ret_from_fork+0x34/0x60
+ ret_from_fork_asm+0x1a/0x30
+ </TASK>
+
+Allocated by task 2464:
+ kasan_save_stack+0x33/0x60
+ kasan_save_track+0x14/0x30
+ __kasan_kmalloc+0xaa/0xb0
+ open_cached_dir+0xa7d/0x1fb0
+ smb2_query_path_info+0x43c/0x6e0
+ cifs_get_fattr+0x346/0xf10
+ cifs_get_inode_info+0x157/0x210
+ cifs_revalidate_dentry_attr+0x2d1/0x460
+ cifs_getattr+0x173/0x470
+ vfs_statx_path+0x10f/0x160
+ vfs_statx+0xe9/0x150
+ vfs_fstatat+0x5e/0xc0
+ __do_sys_newfstatat+0x91/0xf0
+ do_syscall_64+0x95/0x1a0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+Freed by task 2464:
+ kasan_save_stack+0x33/0x60
+ kasan_save_track+0x14/0x30
+ kasan_save_free_info+0x3b/0x60
+ __kasan_slab_free+0x51/0x70
+ kfree+0x174/0x520
+ open_cached_dir+0x97f/0x1fb0
+ smb2_query_path_info+0x43c/0x6e0
+ cifs_get_fattr+0x346/0xf10
+ cifs_get_inode_info+0x157/0x210
+ cifs_revalidate_dentry_attr+0x2d1/0x460
+ cifs_getattr+0x173/0x470
+ vfs_statx_path+0x10f/0x160
+ vfs_statx+0xe9/0x150
+ vfs_fstatat+0x5e/0xc0
+ __do_sys_newfstatat+0x91/0xf0
+ do_syscall_64+0x95/0x1a0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+Last potentially related work creation:
+ kasan_save_stack+0x33/0x60
+ __kasan_record_aux_stack+0xad/0xc0
+ insert_work+0x32/0x100
+ __queue_work+0x5c9/0x870
+ queue_work_on+0x82/0x90
+ open_cached_dir+0x1369/0x1fb0
+ smb2_query_path_info+0x43c/0x6e0
+ cifs_get_fattr+0x346/0xf10
+ cifs_get_inode_info+0x157/0x210
+ cifs_revalidate_dentry_attr+0x2d1/0x460
+ cifs_getattr+0x173/0x470
+ vfs_statx_path+0x10f/0x160
+ vfs_statx+0xe9/0x150
+ vfs_fstatat+0x5e/0xc0
+ __do_sys_newfstatat+0x91/0xf0
+ do_syscall_64+0x95/0x1a0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+The buggy address belongs to the object at ffff88811cc24c00
+ which belongs to the cache kmalloc-1k of size 1024
+The buggy address is located 16 bytes inside of
+ freed 1024-byte region [ffff88811cc24c00, ffff88811cc25000)
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Paul Aurich <paul@darkrain42.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cached_dir.c | 70 ++++++++++++++++++---------------------------
+ 1 file changed, 29 insertions(+), 41 deletions(-)
+
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -348,6 +348,7 @@ oshr_free:
+ SMB2_query_info_free(&rqst[1]);
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
++out:
+ if (rc) {
+ spin_lock(&cfids->cfid_list_lock);
+ if (cfid->on_list) {
+@@ -359,23 +360,14 @@ oshr_free:
+ /*
+ * We are guaranteed to have two references at this
+ * point. One for the caller and one for a potential
+- * lease. Release the Lease-ref so that the directory
+- * will be closed when the caller closes the cached
+- * handle.
++ * lease. Release one here, and the second below.
+ */
+ cfid->has_lease = false;
+- spin_unlock(&cfids->cfid_list_lock);
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+- goto out;
+ }
+ spin_unlock(&cfids->cfid_list_lock);
+- }
+-out:
+- if (rc) {
+- if (cfid->is_open)
+- SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+- cfid->fid.volatile_fid);
+- free_cached_dir(cfid);
++
++ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ } else {
+ *ret_cfid = cfid;
+ atomic_inc(&tcon->num_remote_opens);
+@@ -513,25 +505,24 @@ void invalidate_all_cached_dirs(struct c
+ cfids->num_entries--;
+ cfid->is_open = false;
+ cfid->on_list = false;
+- /* To prevent race with smb2_cached_lease_break() */
+- kref_get(&cfid->refcount);
++ if (cfid->has_lease) {
++ /*
++ * The lease was never cancelled from the server,
++ * so steal that reference.
++ */
++ cfid->has_lease = false;
++ } else
++ kref_get(&cfid->refcount);
+ }
+ spin_unlock(&cfids->cfid_list_lock);
+
+ list_for_each_entry_safe(cfid, q, &entry, entry) {
+ list_del(&cfid->entry);
+ cancel_work_sync(&cfid->lease_break);
+- if (cfid->has_lease) {
+- /*
+- * We lease was never cancelled from the server so we
+- * need to drop the reference.
+- */
+- spin_lock(&cfids->cfid_list_lock);
+- cfid->has_lease = false;
+- spin_unlock(&cfids->cfid_list_lock);
+- kref_put(&cfid->refcount, smb2_close_cached_fid);
+- }
+- /* Drop the extra reference opened above*/
++ /*
++ * Drop the ref-count from above, either the lease-ref (if there
++ * was one) or the extra one acquired.
++ */
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ }
+ }
+@@ -542,9 +533,6 @@ smb2_cached_lease_break(struct work_stru
+ struct cached_fid *cfid = container_of(work,
+ struct cached_fid, lease_break);
+
+- spin_lock(&cfid->cfids->cfid_list_lock);
+- cfid->has_lease = false;
+- spin_unlock(&cfid->cfids->cfid_list_lock);
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ }
+
+@@ -562,6 +550,7 @@ int cached_dir_lease_break(struct cifs_t
+ !memcmp(lease_key,
+ cfid->fid.lease_key,
+ SMB2_LEASE_KEY_SIZE)) {
++ cfid->has_lease = false;
+ cfid->time = 0;
+ /*
+ * We found a lease remove it from the list
+@@ -639,8 +628,14 @@ static void cfids_laundromat_worker(stru
+ cfid->on_list = false;
+ list_move(&cfid->entry, &entry);
+ cfids->num_entries--;
+- /* To prevent race with smb2_cached_lease_break() */
+- kref_get(&cfid->refcount);
++ if (cfid->has_lease) {
++ /*
++ * Our lease has not yet been cancelled from the
++ * server. Steal that reference.
++ */
++ cfid->has_lease = false;
++ } else
++ kref_get(&cfid->refcount);
+ }
+ }
+ spin_unlock(&cfids->cfid_list_lock);
+@@ -652,17 +647,10 @@ static void cfids_laundromat_worker(stru
+ * with it.
+ */
+ cancel_work_sync(&cfid->lease_break);
+- if (cfid->has_lease) {
+- /*
+- * Our lease has not yet been cancelled from the server
+- * so we need to drop the reference.
+- */
+- spin_lock(&cfids->cfid_list_lock);
+- cfid->has_lease = false;
+- spin_unlock(&cfids->cfid_list_lock);
+- kref_put(&cfid->refcount, smb2_close_cached_fid);
+- }
+- /* Drop the extra reference opened above */
++ /*
++ * Drop the ref-count from above, either the lease-ref (if there
++ * was one) or the extra one acquired.
++ */
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ }
+ queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
--- /dev/null
+From 9ed9d83a51a9636d367c796252409e7b2f4de4d4 Mon Sep 17 00:00:00 2001
+From: Steve French <stfrench@microsoft.com>
+Date: Mon, 18 Nov 2024 12:19:46 -0600
+Subject: smb3: request handle caching when caching directories
+
+From: Steve French <stfrench@microsoft.com>
+
+commit 9ed9d83a51a9636d367c796252409e7b2f4de4d4 upstream.
+
+This client was only requesting READ caching, not READ and HANDLE caching
+in the LeaseState on the open requests we send for directories. To
+delay closing a handle (e.g. for caching directory contents) we should
+be requesting HANDLE as well as READ (as we already do for deferred
+close of files). See MS-SMB2 3.3.1.4 e.g.
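+
+(Background note, not part of the original change text: SMB2 lease states are
+bit combinations of READ (R), HANDLE (H) and WRITE (W) caching. Mapping a
+level II oplock to R|H instead of plain R lets the server grant an "RH" lease,
+which is what allows the client to keep the directory handle, and therefore
+the cached directory contents, open after the enumeration completes.)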
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/smb2ops.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -4080,7 +4080,7 @@ map_oplock_to_lease(u8 oplock)
+ if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
+ return SMB2_LEASE_WRITE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE;
+ else if (oplock == SMB2_OPLOCK_LEVEL_II)
+- return SMB2_LEASE_READ_CACHING_LE;
++ return SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE;
+ else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
+ return SMB2_LEASE_HANDLE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE |
+ SMB2_LEASE_WRITE_CACHING_LE;
--- /dev/null
+From 5d2fb074dea289c41f5aaf2c3f68286bee370634 Mon Sep 17 00:00:00 2001
+From: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Date: Thu, 14 Nov 2024 01:02:06 +0000
+Subject: usb: dwc3: ep0: Don't clear ep0 DWC3_EP_TRANSFER_STARTED
+
+From: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+
+commit 5d2fb074dea289c41f5aaf2c3f68286bee370634 upstream.
+
+The driver cannot issue the End Transfer command for the SETUP transfer.
+Don't clear the DWC3_EP_TRANSFER_STARTED flag, to make sure that the
+driver won't send the Start Transfer command again, which can cause a
+no-resource error. For example, this can occur if the host issues a
+reset to the device.
+
+Cc: stable@vger.kernel.org
+Fixes: 76cb323f80ac ("usb: dwc3: ep0: clear all EP0 flags")
+Signed-off-by: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Link: https://lore.kernel.org/r/d3d618185fd614bb7426352a9fc1199641d3b5f5.1731545781.git.Thinh.Nguyen@synopsys.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/dwc3/ep0.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -232,7 +232,7 @@ void dwc3_ep0_stall_and_restart(struct d
+ /* stall is always issued on EP0 */
+ dep = dwc->eps[0];
+ __dwc3_gadget_ep_set_halt(dep, 1, false);
+- dep->flags &= DWC3_EP_RESOURCE_ALLOCATED;
++ dep->flags &= DWC3_EP_RESOURCE_ALLOCATED | DWC3_EP_TRANSFER_STARTED;
+ dep->flags |= DWC3_EP_ENABLED;
+ dwc->delayed_status = false;
+
--- /dev/null
+From 61eb055cd3048ee01ca43d1be924167d33e16fdc Mon Sep 17 00:00:00 2001
+From: Selvarasu Ganesan <selvarasu.g@samsung.com>
+Date: Tue, 12 Nov 2024 10:18:02 +0530
+Subject: usb: dwc3: gadget: Add missing check for single port RAM in TxFIFO resizing logic
+
+From: Selvarasu Ganesan <selvarasu.g@samsung.com>
+
+commit 61eb055cd3048ee01ca43d1be924167d33e16fdc upstream.
+
+The existing implementation of the TxFIFO resizing logic only supports
+scenarios where more than one port RAM is used. However, there is a need
+to resize the TxFIFO in USB2.0-only mode where only a single port RAM is
+available. This commit introduces the necessary changes to support
+TxFIFO resizing in such scenarios by adding a missing check for single
+port RAM.
+
+This fix addresses certain platform configurations where the existing
+TxFIFO resizing logic does not work properly due to the absence of
+support for single port RAM. By adding this missing check, we ensure
+that the TxFIFO resizing logic works correctly in all scenarios,
+including those with a single port RAM.
+
+Fixes: 9f607a309fbe ("usb: dwc3: Resize TX FIFOs to meet EP bursting requirements")
+Cc: stable@vger.kernel.org # 6.12.x: fad16c82: usb: dwc3: gadget: Refine the logic for resizing Tx FIFOs
+Signed-off-by: Selvarasu Ganesan <selvarasu.g@samsung.com>
+Acked-by: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Link: https://lore.kernel.org/r/20241112044807.623-1-selvarasu.g@samsung.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/dwc3/core.h | 4 +++
+ drivers/usb/dwc3/gadget.c | 54 +++++++++++++++++++++++++++++++++++++++-------
+ 2 files changed, 50 insertions(+), 8 deletions(-)
+
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -915,6 +915,7 @@ struct dwc3_hwparams {
+ #define DWC3_MODE(n) ((n) & 0x7)
+
+ /* HWPARAMS1 */
++#define DWC3_SPRAM_TYPE(n) (((n) >> 23) & 1)
+ #define DWC3_NUM_INT(n) (((n) & (0x3f << 15)) >> 15)
+
+ /* HWPARAMS3 */
+@@ -925,6 +926,9 @@ struct dwc3_hwparams {
+ #define DWC3_NUM_IN_EPS(p) (((p)->hwparams3 & \
+ (DWC3_NUM_IN_EPS_MASK)) >> 18)
+
++/* HWPARAMS6 */
++#define DWC3_RAM0_DEPTH(n) (((n) & (0xffff0000)) >> 16)
++
+ /* HWPARAMS7 */
+ #define DWC3_RAM1_DEPTH(n) ((n) & 0xffff)
+
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -688,6 +688,44 @@ static int dwc3_gadget_calc_tx_fifo_size
+ }
+
+ /**
++ * dwc3_gadget_calc_ram_depth - calculates the ram depth for txfifo
++ * @dwc: pointer to the DWC3 context
++ */
++static int dwc3_gadget_calc_ram_depth(struct dwc3 *dwc)
++{
++ int ram_depth;
++ int fifo_0_start;
++ bool is_single_port_ram;
++
++ /* Check supporting RAM type by HW */
++ is_single_port_ram = DWC3_SPRAM_TYPE(dwc->hwparams.hwparams1);
++
++ /*
++ * If a single port RAM is utilized, then allocate TxFIFOs from
++	 * RAM0. Otherwise, allocate them from RAM1.
++ */
++ ram_depth = is_single_port_ram ? DWC3_RAM0_DEPTH(dwc->hwparams.hwparams6) :
++ DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
++
++ /*
++ * In a single port RAM configuration, the available RAM is shared
++ * between the RX and TX FIFOs. This means that the txfifo can begin
++ * at a non-zero address.
++ */
++ if (is_single_port_ram) {
++ u32 reg;
++
++ /* Check if TXFIFOs start at non-zero addr */
++ reg = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
++ fifo_0_start = DWC3_GTXFIFOSIZ_TXFSTADDR(reg);
++
++ ram_depth -= (fifo_0_start >> 16);
++ }
++
++ return ram_depth;
++}
++
++/**
+ * dwc3_gadget_clear_tx_fifos - Clears txfifo allocation
+ * @dwc: pointer to the DWC3 context
+ *
+@@ -753,7 +791,7 @@ static int dwc3_gadget_resize_tx_fifos(s
+ {
+ struct dwc3 *dwc = dep->dwc;
+ int fifo_0_start;
+- int ram1_depth;
++ int ram_depth;
+ int fifo_size;
+ int min_depth;
+ int num_in_ep;
+@@ -773,7 +811,7 @@ static int dwc3_gadget_resize_tx_fifos(s
+ if (dep->flags & DWC3_EP_TXFIFO_RESIZED)
+ return 0;
+
+- ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
++ ram_depth = dwc3_gadget_calc_ram_depth(dwc);
+
+ if ((dep->endpoint.maxburst > 1 &&
+ usb_endpoint_xfer_bulk(dep->endpoint.desc)) ||
+@@ -794,7 +832,7 @@ static int dwc3_gadget_resize_tx_fifos(s
+
+ /* Reserve at least one FIFO for the number of IN EPs */
+ min_depth = num_in_ep * (fifo + 1);
+- remaining = ram1_depth - min_depth - dwc->last_fifo_depth;
++ remaining = ram_depth - min_depth - dwc->last_fifo_depth;
+ remaining = max_t(int, 0, remaining);
+ /*
+ * We've already reserved 1 FIFO per EP, so check what we can fit in
+@@ -820,9 +858,9 @@ static int dwc3_gadget_resize_tx_fifos(s
+ dwc->last_fifo_depth += DWC31_GTXFIFOSIZ_TXFDEP(fifo_size);
+
+ /* Check fifo size allocation doesn't exceed available RAM size. */
+- if (dwc->last_fifo_depth >= ram1_depth) {
++ if (dwc->last_fifo_depth >= ram_depth) {
+ dev_err(dwc->dev, "Fifosize(%d) > RAM size(%d) %s depth:%d\n",
+- dwc->last_fifo_depth, ram1_depth,
++ dwc->last_fifo_depth, ram_depth,
+ dep->endpoint.name, fifo_size);
+ if (DWC3_IP_IS(DWC3))
+ fifo_size = DWC3_GTXFIFOSIZ_TXFDEP(fifo_size);
+@@ -3075,7 +3113,7 @@ static int dwc3_gadget_check_config(stru
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ struct usb_ep *ep;
+ int fifo_size = 0;
+- int ram1_depth;
++ int ram_depth;
+ int ep_num = 0;
+
+ if (!dwc->do_fifo_resize)
+@@ -3098,8 +3136,8 @@ static int dwc3_gadget_check_config(stru
+ fifo_size += dwc->max_cfg_eps;
+
+ /* Check if we can fit a single fifo per endpoint */
+- ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
+- if (fifo_size > ram1_depth)
++ ram_depth = dwc3_gadget_calc_ram_depth(dwc);
++ if (fifo_size > ram_depth)
+ return -ENOMEM;
+
+ return 0;
--- /dev/null
+From 02a6982b0ccfcdc39e20016f5fc9a1b7826a6ee7 Mon Sep 17 00:00:00 2001
+From: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Date: Thu, 14 Nov 2024 01:02:12 +0000
+Subject: usb: dwc3: gadget: Fix checking for number of TRBs left
+
+From: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+
+commit 02a6982b0ccfcdc39e20016f5fc9a1b7826a6ee7 upstream.
+
+The check whether the TRB ring is full or empty in dwc3_calc_trbs_left()
+is insufficient. It assumes there are active TRBs if there's any request
+in the started_list. However, that's not the case for requests with a
+large SG list.
+
+That is, if we have a single USB request that requires more TRBs than
+the total number of TRBs in the ring, the queued TRBs become available
+again once all the TRBs in the ring are completed. But the request is
+only partially completed and remains in the started_list. With the
+current logic the ring is in fact empty, yet dwc3_calc_trbs_left()
+returns 0 as if no TRB were free.
+
+Fix this by additionally checking request->num_trbs to determine the
+active TRB count.
+
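+For illustration only (not part of the patch): the ambiguity being fixed
+is that trb_enqueue == trb_dequeue can mean either "ring empty" or "ring
+full". The hypothetical demo_trbs_left() below models the corrected
+decision: treat the ring as full only if the oldest started request still
+owns TRBs, mirroring the next_request()/req->num_trbs check in the hunk.
+
+#include <stdio.h>
+
+#define DEMO_TRB_NUM 256 /* TRBs per ring, like DWC3_TRB_NUM */
+
+struct demo_req { unsigned int num_trbs; };
+
+static unsigned int demo_trbs_left(const struct demo_req *oldest_started,
+				   unsigned int enq, unsigned int deq)
+{
+	if (enq == deq) {
+		/* Full only if the oldest started request still owns TRBs */
+		if (oldest_started && oldest_started->num_trbs)
+			return 0;
+		return DEMO_TRB_NUM - 1;	/* ring is actually empty */
+	}
+	return (deq - enq) & (DEMO_TRB_NUM - 1);
+}
+
+int main(void)
+{
+	/* Partially completed request whose queued TRBs have all completed */
+	struct demo_req partial = { .num_trbs = 0 };
+
+	/* The old logic returned 0 here just because a request was started */
+	printf("%u TRBs left\n", demo_trbs_left(&partial, 7, 7));
+	return 0;
+}
+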
+Cc: stable@vger.kernel.org
+Fixes: 51f1954ad853 ("usb: dwc3: gadget: Fix dwc3_calc_trbs_left()")
+Signed-off-by: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Link: https://lore.kernel.org/r/708dc62b56b77da1f704cc2ae9b6ddb1f2dbef1f.1731545781.git.Thinh.Nguyen@synopsys.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/dwc3/gadget.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1215,11 +1215,14 @@ static u32 dwc3_calc_trbs_left(struct dw
+ * pending to be processed by the driver.
+ */
+ if (dep->trb_enqueue == dep->trb_dequeue) {
++ struct dwc3_request *req;
++
+ /*
+- * If there is any request remained in the started_list at
+- * this point, that means there is no TRB available.
++ * If there is any request remained in the started_list with
++ * active TRBs at this point, then there is no TRB available.
+ */
+- if (!list_empty(&dep->started_list))
++ req = next_request(&dep->started_list);
++ if (req && req->num_trbs)
+ return 0;
+
+ return DWC3_TRB_NUM - 1;
--- /dev/null
+From b7fc65f5141c24785dc8c19249ca4efcf71b3524 Mon Sep 17 00:00:00 2001
+From: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Date: Thu, 14 Nov 2024 01:02:18 +0000
+Subject: usb: dwc3: gadget: Fix looping of queued SG entries
+
+From: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+
+commit b7fc65f5141c24785dc8c19249ca4efcf71b3524 upstream.
+
+The dwc3_request->num_queued_sgs is decremented on completion. If a
+partially completed request is handled, then
+dwc3_request->num_queued_sgs no longer reflects the total number of
+queued SG entries (it would be cleared).
+
+Correctly check the number of request SG entries that remain to be
+prepared and queued. Failure to do this may cause a NULL pointer
+dereference when accessing a non-existent SG entry.
+
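+For illustration only (not part of the patch): with hypothetical numbers,
+a request mapped over 8 SG entries that still has 3 entries pending has
+already queued 5, regardless of how far num_queued_sgs has been
+decremented by completions. That is the accounting the fix switches to:
+
+#include <stdio.h>
+
+int main(void)
+{
+	/* Hypothetical state while resuming a partially queued SG list */
+	unsigned int num_mapped_sgs = 8;   /* req->request.num_mapped_sgs */
+	unsigned int num_pending_sgs = 3;  /* req->num_pending_sgs */
+
+	/* Entries already prepared/queued, independent of completions */
+	unsigned int num_queued_sgs = num_mapped_sgs - num_pending_sgs;
+
+	printf("skip %u queued entries, prepare %u pending entries\n",
+	       num_queued_sgs, num_pending_sgs);
+	return 0;
+}
+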
+Cc: stable@vger.kernel.org
+Fixes: c96e6725db9d ("usb: dwc3: gadget: Correct the logic for queuing sgs")
+Signed-off-by: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Link: https://lore.kernel.org/r/d07a7c4aa0fcf746cdca0515150dbe5c52000af7.1731545781.git.Thinh.Nguyen@synopsys.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/dwc3/gadget.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1455,8 +1455,8 @@ static int dwc3_prepare_trbs_sg(struct d
+ struct scatterlist *s;
+ int i;
+ unsigned int length = req->request.length;
+- unsigned int remaining = req->request.num_mapped_sgs
+- - req->num_queued_sgs;
++ unsigned int remaining = req->num_pending_sgs;
++ unsigned int num_queued_sgs = req->request.num_mapped_sgs - remaining;
+ unsigned int num_trbs = req->num_trbs;
+ bool needs_extra_trb = dwc3_needs_extra_trb(dep, req);
+
+@@ -1464,7 +1464,7 @@ static int dwc3_prepare_trbs_sg(struct d
+ * If we resume preparing the request, then get the remaining length of
+ * the request and resume where we left off.
+ */
+- for_each_sg(req->request.sg, s, req->num_queued_sgs, i)
++ for_each_sg(req->request.sg, s, num_queued_sgs, i)
+ length -= sg_dma_len(s);
+
+ for_each_sg(sg, s, remaining, i) {
--- /dev/null
+From 5c5d8eb8af06df615e8b1dc88e5847196c846045 Mon Sep 17 00:00:00 2001
+From: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
+Date: Tue, 12 Nov 2024 08:55:12 +0100
+Subject: usb: misc: ljca: move usb_autopm_put_interface() after wait for response
+
+From: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
+
+commit 5c5d8eb8af06df615e8b1dc88e5847196c846045 upstream.
+
+Do not mark the interface as ready to suspend while we are still
+waiting for response messages from the device.
+
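+For illustration only (not part of the patch): a hypothetical helper,
+demo_send_and_wait(), sketching the intended runtime-PM pairing. The
+point is that the PM reference taken with usb_autopm_get_interface()
+must be held across both the bulk-out transfer and the wait for the
+device's answer; the actual change simply moves the existing
+usb_autopm_put_interface() call, as the hunk below shows.
+
+#include <linux/usb.h>
+#include <linux/wait.h>
+#include <linux/jiffies.h>
+#include <linux/errno.h>
+
+static int demo_send_and_wait(struct usb_interface *intf,
+			      struct usb_device *udev, unsigned int pipe,
+			      void *buf, int len,
+			      wait_queue_head_t *wq, bool *done)
+{
+	int transferred, ret;
+
+	ret = usb_autopm_get_interface(intf);
+	if (ret)
+		return ret;
+
+	ret = usb_bulk_msg(udev, pipe, buf, len, &transferred, 200);
+	if (!ret)
+		/* Keep the PM reference until the device has answered */
+		ret = wait_event_timeout(*wq, *done, HZ) ? 0 : -ETIMEDOUT;
+
+	/* Only now may the interface be considered idle for runtime PM */
+	usb_autopm_put_interface(intf);
+	return ret;
+}
+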
+Fixes: acd6199f195d ("usb: Add support for Intel LJCA device")
+Cc: stable@vger.kernel.org
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Tested-by: Hans de Goede <hdegoede@redhat.com> # ThinkPad X1 Yoga Gen 8, ov2740
+Acked-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
+Link: https://lore.kernel.org/r/20241112075514.680712-1-stanislaw.gruszka@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/misc/usb-ljca.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/usb/misc/usb-ljca.c
++++ b/drivers/usb/misc/usb-ljca.c
+@@ -332,14 +332,11 @@ static int ljca_send(struct ljca_adapter
+
+ ret = usb_bulk_msg(adap->usb_dev, adap->tx_pipe, header,
+ msg_len, &transferred, LJCA_WRITE_TIMEOUT_MS);
+-
+- usb_autopm_put_interface(adap->intf);
+-
+ if (ret < 0)
+- goto out;
++ goto out_put;
+ if (transferred != msg_len) {
+ ret = -EIO;
+- goto out;
++ goto out_put;
+ }
+
+ if (ack) {
+@@ -347,11 +344,14 @@ static int ljca_send(struct ljca_adapter
+ timeout);
+ if (!ret) {
+ ret = -ETIMEDOUT;
+- goto out;
++ goto out_put;
+ }
+ }
+ ret = adap->actual_length;
+
++out_put:
++ usb_autopm_put_interface(adap->intf);
++
+ out:
+ spin_lock_irqsave(&adap->lock, flags);
+ adap->ex_buf = NULL;
--- /dev/null
+From 2481af79671a6603fce201cbbc48f31e488e9fae Mon Sep 17 00:00:00 2001
+From: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
+Date: Tue, 12 Nov 2024 08:55:13 +0100
+Subject: usb: misc: ljca: set small runtime autosuspend delay
+
+From: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
+
+commit 2481af79671a6603fce201cbbc48f31e488e9fae upstream.
+
+On some Lenovo platforms, the patch works around problems with ov2740
+sensor initialization, which manifest themselves as below:
+
+[ 4.540476] ov2740 i2c-INT3474:01: error -EIO: failed to find sensor
+[ 4.542066] ov2740 i2c-INT3474:01: probe with driver ov2740 failed with error -5
+
+or
+
+[ 7.742633] ov2740 i2c-INT3474:01: chip id mismatch: 2740 != 0
+[ 7.742638] ov2740 i2c-INT3474:01: error -ENXIO: failed to find sensor
+
+and also by random failures of video stream start.
+
+Issue can be reproduced by this script:
+
+n=0
+k=0
+while [ $n -lt 50 ] ; do
+ sudo modprobe -r ov2740
+ sleep `expr $RANDOM % 5`
+ sudo modprobe ov2740
+ if media-ctl -p | grep -q ov2740 ; then
+ let k++
+ fi
+ let n++
+done
+echo Success rate $k/$n
+
+Without the patch, the success rate is approximately 15 of 50 tries.
+With the patch, it does not fail.
+
+This problem is some hardware or firmware malfunction that cannot be
+easily debugged and fixed. While setting a small autosuspend delay is
+not a perfect workaround, as the user can configure it to any value, it
+will prevent the failures by default.
+
+Additionally, setting a small autosuspend delay should have a positive
+effect on power consumption, as for most ljca workloads the device is
+used for just a few milliseconds followed by long periods of at least
+100 ms of inactivity (usually more).
+
+Fixes: acd6199f195d ("usb: Add support for Intel LJCA device")
+Cc: stable@vger.kernel.org
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Tested-by: Hans de Goede <hdegoede@redhat.com> # ThinkPad X1 Yoga Gen 8, ov2740
+Acked-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
+Link: https://lore.kernel.org/r/20241112075514.680712-2-stanislaw.gruszka@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/misc/usb-ljca.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/usb/misc/usb-ljca.c
++++ b/drivers/usb/misc/usb-ljca.c
+@@ -811,6 +811,14 @@ static int ljca_probe(struct usb_interfa
+ if (ret)
+ goto err_free;
+
++ /*
++ * This works around problems with ov2740 initialization on some
++ * Lenovo platforms. The autosuspend delay, has to be smaller than
++ * the delay after setting the reset_gpio line in ov2740_resume().
++ * Otherwise the sensor randomly fails to initialize.
++ */
++ pm_runtime_set_autosuspend_delay(&usb_dev->dev, 10);
++
+ usb_enable_autosuspend(usb_dev);
+
+ return 0;
--- /dev/null
+From 3fc137386c4620305bbc2a216868c53f9245670a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Hubert=20Wi=C5=9Bniewski?=
+ <hubert.wisniewski.25632@gmail.com>
+Date: Sun, 10 Nov 2024 18:21:48 +0100
+Subject: usb: musb: Fix hardware lockup on first Rx endpoint request
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Hubert Wiśniewski <hubert.wisniewski.25632@gmail.com>
+
+commit 3fc137386c4620305bbc2a216868c53f9245670a upstream.
+
+There is a possibility that a request's callback could be invoked from
+usb_ep_queue() (call trace below, supplemented with missing calls):
+
+req->complete from usb_gadget_giveback_request
+ (drivers/usb/gadget/udc/core.c:999)
+usb_gadget_giveback_request from musb_g_giveback
+ (drivers/usb/musb/musb_gadget.c:147)
+musb_g_giveback from rxstate
+ (drivers/usb/musb/musb_gadget.c:784)
+rxstate from musb_ep_restart
+ (drivers/usb/musb/musb_gadget.c:1169)
+musb_ep_restart from musb_ep_restart_resume_work
+ (drivers/usb/musb/musb_gadget.c:1176)
+musb_ep_restart_resume_work from musb_queue_resume_work
+ (drivers/usb/musb/musb_core.c:2279)
+musb_queue_resume_work from musb_gadget_queue
+ (drivers/usb/musb/musb_gadget.c:1241)
+musb_gadget_queue from usb_ep_queue
+ (drivers/usb/gadget/udc/core.c:300)
+
+According to the docstring of usb_ep_queue(), this should not happen:
+
+"Note that @req's ->complete() callback must never be called from within
+usb_ep_queue() as that can create deadlock situations."
+
+In fact, a hardware lockup might occur in the following sequence:
+
+1. The gadget is initialized using musb_gadget_enable().
+2. Meanwhile, a packet arrives, and the RXPKTRDY flag is set, raising an
+ interrupt.
+3. If IRQs are enabled, the interrupt is handled, but musb_g_rx() finds an
+ empty queue (next_request() returns NULL). The interrupt flag has
+ already been cleared by the glue layer handler, but the RXPKTRDY flag
+ remains set.
+4. The first request is enqueued using usb_ep_queue(), leading to the call
+ of req->complete(), as shown in the call trace above.
+5. If the callback enables IRQs and another packet is waiting, step (3)
+   repeats. The request queue is empty because musb_g_giveback() removes the
+   request before invoking the callback.
+6. The endpoint remains locked up, as the interrupt triggered by hardware
+ setting the RXPKTRDY flag has been handled, but the flag itself remains
+ set.
+
+For this scenario to occur, it is only necessary for IRQs to be enabled at
+some point during the complete callback. This happens with the USB Ethernet
+gadget, whose rx_complete() callback calls netif_rx(). If called in the
+task context, netif_rx() disables the bottom halves (BHs). When the BHs are
+re-enabled, IRQs are also enabled to allow soft IRQs to be processed. The
+gadget itself is initialized at module load (or at boot if built-in), but
+the first request is enqueued when the network interface is brought up,
+triggering rx_complete() in the task context via ioctl(). If a packet
+arrives while the interface is down, it can prevent the interface from
+receiving any further packets from the USB host.
+
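+For reference, netif_rx() after the commit named in the Fixes tag below
+is roughly the following (simplified from net/core/dev.c); the
+local_bh_enable() at the end may process pending softirqs, which is the
+point at which the scenario described above says IRQs become enabled:
+
+int netif_rx(struct sk_buff *skb)
+{
+	int ret;
+
+	local_bh_disable();
+	ret = __netif_rx(skb);
+	local_bh_enable();	/* pending softirqs may run here */
+	return ret;
+}
+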
+The situation is quite complicated with many parties involved. This
+particular issue can be resolved in several possible ways:
+
+1. Ensure that callbacks never enable IRQs. This would be difficult to
+ enforce, as discovering how netif_rx() interacts with interrupts was
+ already quite challenging and u_ether is not the only function driver.
+ Similar "bugs" could be hidden in other drivers as well.
+2. Disable MUSB interrupts in musb_g_giveback() before calling the callback
+   and re-enable them afterwards (by calling musb_{dis,en}able_interrupts(),
+ for example). This would ensure that MUSB interrupts are not handled
+ during the callback, even if IRQs are enabled. In fact, it would allow
+ IRQs to be enabled when releasing the lock. However, this feels like an
+ inelegant hack.
+3. Modify the interrupt handler to clear the RXPKTRDY flag if the request
+ queue is empty. While this approach also feels like a hack, it wastes
+ CPU time by attempting to handle incoming packets when the software is
+ not ready to process them.
+4. Flush the Rx FIFO instead of calling rxstate() in musb_ep_restart().
+ This ensures that the hardware can receive packets when there is at
+ least one request in the queue. Once IRQs are enabled, the interrupt
+ handler will be able to correctly process the next incoming packet
+ (eventually calling rxstate()). This approach may cause one or two
+ packets to be dropped (two if double buffering is enabled), but this
+ seems to be a minor issue, as packet loss can occur when the software is
+ not yet ready to process them. Additionally, this solution makes the
+ gadget driver compliant with the rule mentioned in the docstring of
+ usb_ep_queue().
+
+There may be additional solutions, but from these four, the last one has
+been chosen as it seems to be the most appropriate, as it addresses the
+"bad" behavior of the driver.
+
+Fixes: baebdf48c360 ("net: dev: Makes sure netif_rx() can be invoked in any context.")
+Cc: stable@vger.kernel.org
+Signed-off-by: Hubert Wiśniewski <hubert.wisniewski.25632@gmail.com>
+Link: https://lore.kernel.org/r/4ee1ead4525f78fb5909a8cbf99513ad0082ad21.camel@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/musb/musb_gadget.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+--- a/drivers/usb/musb/musb_gadget.c
++++ b/drivers/usb/musb/musb_gadget.c
+@@ -1161,12 +1161,19 @@ void musb_free_request(struct usb_ep *ep
+ */
+ void musb_ep_restart(struct musb *musb, struct musb_request *req)
+ {
++ u16 csr;
++ void __iomem *epio = req->ep->hw_ep->regs;
++
+ trace_musb_req_start(req);
+ musb_ep_select(musb->mregs, req->epnum);
+- if (req->tx)
++ if (req->tx) {
+ txstate(musb, req);
+- else
+- rxstate(musb, req);
++ } else {
++ csr = musb_readw(epio, MUSB_RXCSR);
++ csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
++ musb_writew(epio, MUSB_RXCSR, csr);
++ musb_writew(epio, MUSB_RXCSR, csr);
++ }
+ }
+
+ static int musb_ep_restart_resume_work(struct musb *musb, void *data)