--- /dev/null
+From stable+bounces-15503-greg=kroah.com@vger.kernel.org Tue Jan 23 03:40:04 2024
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Tue, 23 Jan 2024 20:38:54 +0900
+Subject: ksmbd: Add missing set_freezable() for freezable kthread
+To: gregkh@linuxfoundation.org, sashal@kernel.org
+Cc: stable@vger.kernel.org, Kevin Hao <haokexin@gmail.com>, Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>
+Message-ID: <20240123113854.194887-6-linkinjeon@kernel.org>
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+From: Kevin Hao <haokexin@gmail.com>
+
+[ Upstream commit 8fb7b723924cc9306bc161f45496497aec733904 ]
+
+The kernel thread function ksmbd_conn_handler_loop() invokes
+try_to_freeze() in its loop. But all kernel threads are non-freezable
+by default, so if we want to make a kernel thread freezable, we have
+to invoke set_freezable() explicitly.
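+
+For reference, the usual shape of a freezable kthread loop is roughly the
+sketch below; example_loop() and its body are illustrative only, the real
+change is the one-line diff further down:
+
+	#include <linux/freezer.h>
+	#include <linux/kthread.h>
+
+	static int example_loop(void *data)
+	{
+		set_freezable();		/* opt in to the freezer */
+
+		while (!kthread_should_stop()) {
+			if (try_to_freeze())	/* park here while the system is frozen */
+				continue;
+			/* ... service one unit of work ... */
+		}
+		return 0;
+	}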
+
+Signed-off-by: Kevin Hao <haokexin@gmail.com>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/connection.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -284,6 +284,7 @@ int ksmbd_conn_handler_loop(void *p)
+ goto out;
+
+ conn->last_active = jiffies;
++ set_freezable();
+ while (ksmbd_conn_alive(conn)) {
+ if (try_to_freeze())
+ continue;
--- /dev/null
+From stable+bounces-15501-greg=kroah.com@vger.kernel.org Tue Jan 23 03:39:55 2024
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Tue, 23 Jan 2024 20:38:52 +0900
+Subject: ksmbd: don't increment epoch if current state and request state are same
+To: gregkh@linuxfoundation.org, sashal@kernel.org
+Cc: stable@vger.kernel.org, Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>
+Message-ID: <20240123113854.194887-4-linkinjeon@kernel.org>
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit b6e9a44e99603fe10e1d78901fdd97681a539612 ]
+
+If the existing lease state and the requested state are the same, don't
+increment the epoch in the create context.
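+
+Schematically, the epoch policy after this change is (an illustrative
+sketch only, using the names from the diff below, not the exact code):
+
+	/* Bump the epoch only when the granted lease state changes ... */
+	if (lctx->req_state != lease->state)
+		lease->epoch++;
+
+	/* ... and report the current value as-is in the create context. */
+	buf->lcontext.Epoch = cpu_to_le16(lease->epoch);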
+
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/oplock.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -105,7 +105,7 @@ static int alloc_lease(struct oplock_inf
+ lease->is_dir = lctx->is_dir;
+ memcpy(lease->parent_lease_key, lctx->parent_lease_key, SMB2_LEASE_KEY_SIZE);
+ lease->version = lctx->version;
+- lease->epoch = le16_to_cpu(lctx->epoch);
++ lease->epoch = le16_to_cpu(lctx->epoch) + 1;
+ INIT_LIST_HEAD(&opinfo->lease_entry);
+ opinfo->o_lease = lease;
+
+@@ -541,6 +541,9 @@ static struct oplock_info *same_client_h
+ continue;
+ }
+
++ if (lctx->req_state != lease->state)
++ lease->epoch++;
++
+ /* upgrading lease */
+ if ((atomic_read(&ci->op_count) +
+ atomic_read(&ci->sop_count)) == 1) {
+@@ -1035,7 +1038,7 @@ static void copy_lease(struct oplock_inf
+ SMB2_LEASE_KEY_SIZE);
+ lease2->duration = lease1->duration;
+ lease2->flags = lease1->flags;
+- lease2->epoch = lease1->epoch++;
++ lease2->epoch = lease1->epoch;
+ lease2->version = lease1->version;
+ }
+
+@@ -1454,7 +1457,7 @@ void create_lease_buf(u8 *rbuf, struct l
+ memcpy(buf->lcontext.LeaseKey, lease->lease_key,
+ SMB2_LEASE_KEY_SIZE);
+ buf->lcontext.LeaseFlags = lease->flags;
+- buf->lcontext.Epoch = cpu_to_le16(++lease->epoch);
++ buf->lcontext.Epoch = cpu_to_le16(lease->epoch);
+ buf->lcontext.LeaseState = lease->state;
+ memcpy(buf->lcontext.ParentLeaseKey, lease->parent_lease_key,
+ SMB2_LEASE_KEY_SIZE);
--- /dev/null
+From stable+bounces-15500-greg=kroah.com@vger.kernel.org Tue Jan 23 03:39:52 2024
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Tue, 23 Jan 2024 20:38:51 +0900
+Subject: ksmbd: fix potential circular locking issue in smb2_set_ea()
+To: gregkh@linuxfoundation.org, sashal@kernel.org
+Cc: stable@vger.kernel.org, Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>
+Message-ID: <20240123113854.194887-3-linkinjeon@kernel.org>
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit 6fc0a265e1b932e5e97a038f99e29400a93baad0 ]
+
+smb2_set_ea() can be called while the parent inode lock is held. So add
+a get_write argument to smb2_set_ea() so that it does not call
+mnt_want_write() in a nested manner.
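+
+For illustration only, the get_write flag lets the caller decide who takes
+the mount write reference, so a caller that already holds it can pass
+false. The helper name below is hypothetical; only the flag handling is
+sketched:
+
+	static int example_set_ea(const struct path *path, bool get_write)
+	{
+		int rc = 0;
+
+		if (get_write) {
+			rc = mnt_want_write(path->mnt);	/* take write access ourselves */
+			if (rc)
+				return rc;
+		}
+		/* ... set the extended attributes on path->dentry ... */
+		if (get_write)
+			mnt_drop_write(path->mnt);	/* pair with mnt_want_write() */
+		return rc;
+	}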
+
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/smb2pdu.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -2321,11 +2321,12 @@ out:
+ * @eabuf: set info command buffer
+ * @buf_len: set info command buffer length
+ * @path: dentry path for get ea
++ * @get_write: get write access to a mount
+ *
+ * Return: 0 on success, otherwise error
+ */
+ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
+- const struct path *path)
++ const struct path *path, bool get_write)
+ {
+ struct user_namespace *user_ns = mnt_user_ns(path->mnt);
+ char *attr_name = NULL, *value;
+@@ -3013,7 +3014,7 @@ int smb2_open(struct ksmbd_work *work)
+
+ rc = smb2_set_ea(&ea_buf->ea,
+ le32_to_cpu(ea_buf->ccontext.DataLength),
+- &path);
++ &path, false);
+ if (rc == -EOPNOTSUPP)
+ rc = 0;
+ else if (rc)
+@@ -5990,7 +5991,7 @@ static int smb2_set_info_file(struct ksm
+ return -EINVAL;
+
+ return smb2_set_ea((struct smb2_ea_info *)req->Buffer,
+- buf_len, &fp->filp->f_path);
++ buf_len, &fp->filp->f_path, true);
+ }
+ case FILE_POSITION_INFORMATION:
+ {
--- /dev/null
+From stable+bounces-15502-greg=kroah.com@vger.kernel.org Tue Jan 23 03:39:57 2024
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Tue, 23 Jan 2024 20:38:53 +0900
+Subject: ksmbd: send lease break notification on FILE_RENAME_INFORMATION
+To: gregkh@linuxfoundation.org, sashal@kernel.org
+Cc: stable@vger.kernel.org, Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>
+Message-ID: <20240123113854.194887-5-linkinjeon@kernel.org>
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit 3fc74c65b367476874da5fe6f633398674b78e5a ]
+
+Send a lease break notification on a FILE_RENAME_INFORMATION request.
+This patch fixes the smb2.lease.v2_epoch2 test failure.
+
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/oplock.c | 12 +++++++-----
+ fs/smb/server/smb2pdu.c | 1 +
+ 2 files changed, 8 insertions(+), 5 deletions(-)
+
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -541,14 +541,12 @@ static struct oplock_info *same_client_h
+ continue;
+ }
+
+- if (lctx->req_state != lease->state)
+- lease->epoch++;
+-
+ /* upgrading lease */
+ if ((atomic_read(&ci->op_count) +
+ atomic_read(&ci->sop_count)) == 1) {
+ if (lease->state != SMB2_LEASE_NONE_LE &&
+ lease->state == (lctx->req_state & lease->state)) {
++ lease->epoch++;
+ lease->state |= lctx->req_state;
+ if (lctx->req_state &
+ SMB2_LEASE_WRITE_CACHING_LE)
+@@ -559,13 +557,17 @@ static struct oplock_info *same_client_h
+ atomic_read(&ci->sop_count)) > 1) {
+ if (lctx->req_state ==
+ (SMB2_LEASE_READ_CACHING_LE |
+- SMB2_LEASE_HANDLE_CACHING_LE))
++ SMB2_LEASE_HANDLE_CACHING_LE)) {
++ lease->epoch++;
+ lease->state = lctx->req_state;
++ }
+ }
+
+ if (lctx->req_state && lease->state ==
+- SMB2_LEASE_NONE_LE)
++ SMB2_LEASE_NONE_LE) {
++ lease->epoch++;
+ lease_none_upgrade(opinfo, lctx->req_state);
++ }
+ }
+ read_lock(&ci->m_lock);
+ }
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -5579,6 +5579,7 @@ static int smb2_rename(struct ksmbd_work
+ if (!file_info->ReplaceIfExists)
+ flags = RENAME_NOREPLACE;
+
++ smb_break_all_levII_oplock(work, fp, 0);
+ rc = ksmbd_vfs_rename(work, &fp->filp->f_path, new_name, flags);
+ out:
+ kfree(new_name);
--- /dev/null
+From stable+bounces-15499-greg=kroah.com@vger.kernel.org Tue Jan 23 03:39:45 2024
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Tue, 23 Jan 2024 20:38:50 +0900
+Subject: ksmbd: set v2 lease version on lease upgrade
+To: gregkh@linuxfoundation.org, sashal@kernel.org
+Cc: stable@vger.kernel.org, Namjae Jeon <linkinjeon@kernel.org>, Tom Talpey <tom@talpey.com>, Steve French <stfrench@microsoft.com>
+Message-ID: <20240123113854.194887-2-linkinjeon@kernel.org>
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit bb05367a66a9990d2c561282f5620bb1dbe40c28 ]
+
+If a file opened with a v2 lease is upgraded with a v1 lease, the smb
+server should respond with a v2 lease create context to the client.
+This patch fixes the smb2.lease.v2_epoch2 test failure.
+
+This test case assumes the following scenario:
+ 1. smb2 create with a v2 lease (R, LEASE1 key)
+ 2. smb server returns an smb2 create response with a v2 lease
+    context (R, LEASE1 key, epoch + 1)
+ 3. smb2 create with a v1 lease (RH, LEASE1 key)
+ 4. smb server returns an smb2 create response with a v2 lease
+    context (RH, LEASE1 key, epoch + 2)
+
+i.e. if the same client (same lease key) tries to open a file that is
+already open with a v2 lease using a v1 lease, the smb server should
+still return a v2 lease.
+
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Acked-by: Tom Talpey <tom@talpey.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/oplock.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -1036,6 +1036,7 @@ static void copy_lease(struct oplock_inf
+ lease2->duration = lease1->duration;
+ lease2->flags = lease1->flags;
+ lease2->epoch = lease1->epoch++;
++ lease2->version = lease1->version;
+ }
+
+ static int add_lease_global_list(struct oplock_info *opinfo)
--- /dev/null
+From a2ccf46333d7b2cf9658f0d82ac74097c1542fae Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Wed, 8 Nov 2023 14:12:15 +0800
+Subject: LoongArch/smp: Call rcutree_report_cpu_starting() earlier
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+commit a2ccf46333d7b2cf9658f0d82ac74097c1542fae upstream.
+
+rcutree_report_cpu_starting() must be called before cpu_probe() to avoid
+the following lockdep splat that is triggered by calling __alloc_pages()
+when CONFIG_PROVE_RCU_LIST=y:
+
+ =============================
+ WARNING: suspicious RCU usage
+ 6.6.0+ #980 Not tainted
+ -----------------------------
+ kernel/locking/lockdep.c:3761 RCU-list traversed in non-reader section!!
+ other info that might help us debug this:
+ RCU used illegally from offline CPU!
+ rcu_scheduler_active = 1, debug_locks = 1
+ 1 lock held by swapper/1/0:
+ #0: 900000000c82ef98 (&pcp->lock){+.+.}-{2:2}, at: get_page_from_freelist+0x894/0x1790
+ CPU: 1 PID: 0 Comm: swapper/1 Not tainted 6.6.0+ #980
+ Stack : 0000000000000001 9000000004f79508 9000000004893670 9000000100310000
+ 90000001003137d0 0000000000000000 90000001003137d8 9000000004f79508
+ 0000000000000000 0000000000000001 0000000000000000 90000000048a3384
+ 203a656d616e2065 ca43677b3687e616 90000001002c3480 0000000000000008
+ 000000000000009d 0000000000000000 0000000000000001 80000000ffffe0b8
+ 000000000000000d 0000000000000033 0000000007ec0000 13bbf50562dad831
+ 9000000005140748 0000000000000000 9000000004f79508 0000000000000004
+ 0000000000000000 9000000005140748 90000001002bad40 0000000000000000
+ 90000001002ba400 0000000000000000 9000000003573ec8 0000000000000000
+ 00000000000000b0 0000000000000004 0000000000000000 0000000000070000
+ ...
+ Call Trace:
+ [<9000000003573ec8>] show_stack+0x38/0x150
+ [<9000000004893670>] dump_stack_lvl+0x74/0xa8
+ [<900000000360d2bc>] lockdep_rcu_suspicious+0x14c/0x190
+ [<900000000361235c>] __lock_acquire+0xd0c/0x2740
+ [<90000000036146f4>] lock_acquire+0x104/0x2c0
+ [<90000000048a955c>] _raw_spin_lock_irqsave+0x5c/0x90
+ [<900000000381cd5c>] rmqueue_bulk+0x6c/0x950
+ [<900000000381fc0c>] get_page_from_freelist+0xd4c/0x1790
+ [<9000000003821c6c>] __alloc_pages+0x1bc/0x3e0
+ [<9000000003583b40>] tlb_init+0x150/0x2a0
+ [<90000000035742a0>] per_cpu_trap_init+0xf0/0x110
+ [<90000000035712fc>] cpu_probe+0x3dc/0x7a0
+ [<900000000357ed20>] start_secondary+0x40/0xb0
+ [<9000000004897138>] smpboot_entry+0x54/0x58
+
+raw_smp_processor_id() is required in order to avoid calling into lockdep
+before RCU has declared the CPU to be watched for readers.
+
+See also commit 29368e093921 ("x86/smpboot: Move rcu_cpu_starting() earlier"),
+commit de5d9dae150c ("s390/smp: move rcu_cpu_starting() earlier") and commit
+99f070b62322 ("powerpc/smp: Call rcu_cpu_starting() earlier").
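+
+In other words, the secondary-CPU bring-up ordering that the diff below
+establishes is roughly as follows (the comments are explanatory and not
+part of the patch):
+
+	cpu = raw_smp_processor_id();		/* safe before RCU watches this CPU */
+	set_my_cpu_offset(per_cpu_offset(cpu));
+	rcutree_report_cpu_starting(cpu);	/* make RCU watch this CPU ... */
+	cpu_probe();				/* ... before anything that may allocate */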
+
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/kernel/smp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/loongarch/kernel/smp.c
++++ b/arch/loongarch/kernel/smp.c
+@@ -471,8 +471,9 @@ asmlinkage void start_secondary(void)
+ unsigned int cpu;
+
+ sync_counter();
+- cpu = smp_processor_id();
++ cpu = raw_smp_processor_id();
+ set_my_cpu_offset(per_cpu_offset(cpu));
++ rcutree_report_cpu_starting(cpu);
+
+ cpu_probe();
+ constant_clockevent_init();
--- /dev/null
+From ac3f3b0a55518056bc80ed32a41931c99e1f7d81 Mon Sep 17 00:00:00 2001
+From: Charan Teja Kalla <quic_charante@quicinc.com>
+Date: Fri, 24 Nov 2023 16:27:25 +0530
+Subject: mm: page_alloc: unreserve highatomic page blocks before oom
+
+From: Charan Teja Kalla <quic_charante@quicinc.com>
+
+commit ac3f3b0a55518056bc80ed32a41931c99e1f7d81 upstream.
+
+__alloc_pages_direct_reclaim() is called from the slowpath allocation where
+high atomic reserves can be unreserved after there is progress in reclaim
+and yet no suitable page is found. Later should_reclaim_retry() gets called
+from the slowpath allocation to decide if the reclaim needs to be retried
+before the OOM kill path is taken.
+
+should_reclaim_retry() checks the available (reclaimable + free pages)
+memory against the min wmark levels of a zone and returns:
+
+a) true, if it is above the min wmark, so that the slowpath allocation
+   will do the reclaim retries.
+
+b) false, so the slowpath allocation takes the OOM kill path.
+
+should_reclaim_retry() can also unreserve the high atomic reserves **but
+only after all the reclaim retries are exhausted.**
+
+In a case where there is almost no reclaimable memory and the free pages
+consist mostly of the high atomic reserves, but the allocation context
+can't use these high atomic reserves, the available memory falls below
+the min wmark levels, hence false is returned from should_reclaim_retry()
+and the allocation request takes the OOM kill path. This can turn into an
+early OOM kill if the high atomic reserves are holding a lot of free
+memory and unreserving them is not attempted.
+
+An (early) OOM is encountered on a VM with the below state:
+[ 295.998653] Normal free:7728kB boost:0kB min:804kB low:1004kB
+high:1204kB reserved_highatomic:8192KB active_anon:4kB inactive_anon:0kB
+active_file:24kB inactive_file:24kB unevictable:1220kB writepending:0kB
+present:70732kB managed:49224kB mlocked:0kB bounce:0kB free_pcp:688kB
+local_pcp:492kB free_cma:0kB
+[ 295.998656] lowmem_reserve[]: 0 32
+[ 295.998659] Normal: 508*4kB (UMEH) 241*8kB (UMEH) 143*16kB (UMEH)
+33*32kB (UH) 7*64kB (UH) 0*128kB 0*256kB 0*512kB 0*1024kB 0*2048kB
+0*4096kB = 7752kB
+
+Per the above log, the ~7MB of free memory that exists in the high atomic
+reserves is not freed up before falling back to the OOM kill path.
+
+Fix it by trying to unreserve the high atomic reserves in
+should_reclaim_retry() before __alloc_pages_direct_reclaim() can fall
+back to the OOM kill path.
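+
+Schematically, the decision in should_reclaim_retry() then becomes (a
+simplified sketch of the diff below, not the exact mm/page_alloc.c code):
+
+	if (*no_progress_loops > MAX_RECLAIM_RETRIES)
+		goto out;			/* retries exhausted */
+
+	/* ... ret = true if free + reclaimable pages clear the min wmark ... */
+
+out:
+	/* Before giving up (and thus before OOM), try to release the
+	 * highatomic reserve; a successful unreserve earns one more retry.
+	 */
+	if (!ret)
+		ret = unreserve_highatomic_pageblock(ac, true);
+	return ret;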
+
+Link: https://lkml.kernel.org/r/1700823445-27531-1-git-send-email-quic_charante@quicinc.com
+Fixes: 0aaa29a56e4f ("mm, page_alloc: reserve pageblocks for high-order atomic allocations on demand")
+Signed-off-by: Charan Teja Kalla <quic_charante@quicinc.com>
+Reported-by: Chris Goldsworthy <quic_cgoldswo@quicinc.com>
+Suggested-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: David Rientjes <rientjes@google.com>
+Cc: Chris Goldsworthy <quic_cgoldswo@quicinc.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Pavankumar Kondeti <quic_pkondeti@quicinc.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Joakim Tjernlund <Joakim.Tjernlund@infinera.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_alloc.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4921,14 +4921,9 @@ should_reclaim_retry(gfp_t gfp_mask, uns
+ else
+ (*no_progress_loops)++;
+
+- /*
+- * Make sure we converge to OOM if we cannot make any progress
+- * several times in the row.
+- */
+- if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
+- /* Before OOM, exhaust highatomic_reserve */
+- return unreserve_highatomic_pageblock(ac, true);
+- }
++ if (*no_progress_loops > MAX_RECLAIM_RETRIES)
++ goto out;
++
+
+ /*
+ * Keep reclaiming pages while there is a chance this will lead
+@@ -4971,6 +4966,11 @@ should_reclaim_retry(gfp_t gfp_mask, uns
+ schedule_timeout_uninterruptible(1);
+ else
+ cond_resched();
++out:
++ /* Before OOM, exhaust highatomic_reserve */
++ if (!ret)
++ return unreserve_highatomic_pageblock(ac, true);
++
+ return ret;
+ }
+
serial-sc16is7xx-fix-invalid-sc16is7xx_lines-bitfield-in-case-of-probe-error.patch
serial-sc16is7xx-remove-obsolete-loop-in-sc16is7xx_port_irq.patch
serial-sc16is7xx-improve-do-while-loop-in-sc16is7xx_irq.patch
+loongarch-smp-call-rcutree_report_cpu_starting-earlier.patch
+mm-page_alloc-unreserve-highatomic-page-blocks-before-oom.patch
+ksmbd-set-v2-lease-version-on-lease-upgrade.patch
+ksmbd-fix-potential-circular-locking-issue-in-smb2_set_ea.patch
+ksmbd-don-t-increment-epoch-if-current-state-and-request-state-are-same.patch
+ksmbd-send-lease-break-notification-on-file_rename_information.patch
+ksmbd-add-missing-set_freezable-for-freezable-kthread.patch