--- /dev/null
+From ecfa23c8df7ef3ea2a429dfe039341bf792e95b4 Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Thu, 11 Jul 2024 16:19:54 -0400
+Subject: drm/amdgpu/vcn: identify unified queue in sw init
+
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+
+commit ecfa23c8df7ef3ea2a429dfe039341bf792e95b4 upstream.
+
+Determine whether VCN is using a unified queue once in sw_init, instead
+of calling a helper function on every submission later on.
+
+v2: fix coding style
+
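+As a minimal sketch of the pattern (the names below are taken from the
+hunks in this patch), the per-ring helper call at each submission site:
+
+	bool sq = amdgpu_vcn_using_unified_queue(ring);
+
+is replaced by a flag computed once at init time and read directly:
+
+	/* in amdgpu_vcn_sw_init(): vcn4 and above only use the unified queue */
+	adev->vcn.using_unified_queue =
+		amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);
+
+	/* at a submission site */
+	if (adev->vcn.using_unified_queue)
+		ib_size_dw += 8;
+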
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Ruijing Dong <ruijing.dong@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 39 ++++++++++++--------------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 1
+ 2 files changed, 16 insertions(+), 24 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -151,6 +151,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_dev
+ }
+ }
+
++ /* from vcn4 and above, only unified queue is used */
++ adev->vcn.using_unified_queue =
++ amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);
++
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data;
+ adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
+
+@@ -279,18 +283,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_dev
+ return 0;
+ }
+
+-/* from vcn4 and above, only unified queue is used */
+-static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
+-{
+- struct amdgpu_device *adev = ring->adev;
+- bool ret = false;
+-
+- if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0))
+- ret = true;
+-
+- return ret;
+-}
+-
+ bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
+ {
+ bool ret = false;
+@@ -728,12 +720,11 @@ static int amdgpu_vcn_dec_sw_send_msg(st
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
+- bool sq = amdgpu_vcn_using_unified_queue(ring);
+ uint32_t *ib_checksum;
+ uint32_t ib_pack_in_dw;
+ int i, r;
+
+- if (sq)
++ if (adev->vcn.using_unified_queue)
+ ib_size_dw += 8;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+@@ -746,7 +737,7 @@ static int amdgpu_vcn_dec_sw_send_msg(st
+ ib->length_dw = 0;
+
+ /* single queue headers */
+- if (sq) {
++ if (adev->vcn.using_unified_queue) {
+ ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
+ + 4 + 2; /* engine info + decoding ib in dw */
+ ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
+@@ -765,7 +756,7 @@ static int amdgpu_vcn_dec_sw_send_msg(st
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+- if (sq)
++ if (adev->vcn.using_unified_queue)
+ amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
+
+ r = amdgpu_job_submit_direct(job, ring, &f);
+@@ -855,15 +846,15 @@ static int amdgpu_vcn_enc_get_create_msg
+ struct dma_fence **fence)
+ {
+ unsigned int ib_size_dw = 16;
++ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+ uint32_t *ib_checksum = NULL;
+ uint64_t addr;
+- bool sq = amdgpu_vcn_using_unified_queue(ring);
+ int i, r;
+
+- if (sq)
++ if (adev->vcn.using_unified_queue)
+ ib_size_dw += 8;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+@@ -877,7 +868,7 @@ static int amdgpu_vcn_enc_get_create_msg
+
+ ib->length_dw = 0;
+
+- if (sq)
++ if (adev->vcn.using_unified_queue)
+ ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
+
+ ib->ptr[ib->length_dw++] = 0x00000018;
+@@ -899,7 +890,7 @@ static int amdgpu_vcn_enc_get_create_msg
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+- if (sq)
++ if (adev->vcn.using_unified_queue)
+ amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
+
+ r = amdgpu_job_submit_direct(job, ring, &f);
+@@ -922,15 +913,15 @@ static int amdgpu_vcn_enc_get_destroy_ms
+ struct dma_fence **fence)
+ {
+ unsigned int ib_size_dw = 16;
++ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+ uint32_t *ib_checksum = NULL;
+ uint64_t addr;
+- bool sq = amdgpu_vcn_using_unified_queue(ring);
+ int i, r;
+
+- if (sq)
++ if (adev->vcn.using_unified_queue)
+ ib_size_dw += 8;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+@@ -944,7 +935,7 @@ static int amdgpu_vcn_enc_get_destroy_ms
+
+ ib->length_dw = 0;
+
+- if (sq)
++ if (adev->vcn.using_unified_queue)
+ ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
+
+ ib->ptr[ib->length_dw++] = 0x00000018;
+@@ -966,7 +957,7 @@ static int amdgpu_vcn_enc_get_destroy_ms
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+- if (sq)
++ if (adev->vcn.using_unified_queue)
+ amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
+
+ r = amdgpu_job_submit_direct(job, ring, &f);
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+@@ -329,6 +329,7 @@ struct amdgpu_vcn {
+
+ uint16_t inst_mask;
+ uint8_t num_inst_per_aid;
++ bool using_unified_queue;
+ };
+
+ struct amdgpu_fw_shared_rb_ptrs_struct {
--- /dev/null
+From 7d75ef3736a025db441be652c8cc8e84044a215f Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Wed, 10 Jul 2024 16:17:12 -0400
+Subject: drm/amdgpu/vcn: not pause dpg for unified queue
+
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+
+commit 7d75ef3736a025db441be652c8cc8e84044a215f upstream.
+
+For the unified queue, the DPG pause for encoding is done inside the
+VCN firmware, so there is no need to pause DPG based on ring type in
+the kernel.
+
+For VCN3 and below, pausing DPG for encoding in the kernel is still
+needed.
+
+v2: add more comments
+v3: update commit message
+
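+As a sketch, each DPG pause site gains the same extra condition (taken
+from the hunks below):
+
+	/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+	    !adev->vcn.using_unified_queue) {
+		struct dpg_pause_state new_state;
+		...
+	}
+
+This relies on the using_unified_queue flag introduced by the previous
+patch in this series.
+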
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Ruijing Dong <ruijing.dong@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -393,7 +393,9 @@ static void amdgpu_vcn_idle_work_handler
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
+
+- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
++ /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
++ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
++ !adev->vcn.using_unified_queue) {
+ struct dpg_pause_state new_state;
+
+ if (fence[j] ||
+@@ -439,7 +441,9 @@ void amdgpu_vcn_ring_begin_use(struct am
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_UNGATE);
+
+- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
++ /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
++ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
++ !adev->vcn.using_unified_queue) {
+ struct dpg_pause_state new_state;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
+@@ -465,8 +469,12 @@ void amdgpu_vcn_ring_begin_use(struct am
+
+ void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
+ {
++ struct amdgpu_device *adev = ring->adev;
++
++ /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
+ if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+- ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
++ ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
++ !adev->vcn.using_unified_queue)
+ atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
+
+ atomic_dec(&ring->adev->vcn.total_submission_cnt);
--- /dev/null
+From 9e0a3d39a36f80fd39b5ec2f943b9514bba1e9bd Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Tue, 27 Aug 2024 09:27:56 +0900
+Subject: ksmbd: fix race condition between destroy_previous_session() and smb2 operations()
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit 76e98a158b207771a6c9a0de0a60522a446a3447 ]
+
+If there is a ->PreviousSessionId field in the session setup request,
+the session of the previous connection should be destroyed. While this
+is happening, if smb2 operation requests in the previous session are
+still being processed, a race can occur with ksmbd_destroy_file_table().
+This patch sets conn->status to KSMBD_SESS_NEED_RECONNECT to block
+incoming operations and waits until in-flight operations are complete
+(i.e. the connection is idle) before destroying the previous session.
+
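+As a sketch, destroy_previous_session() now brackets the teardown like
+this (matching the mgmt/user_session.c hunk below):
+
+	/* block new smb2 operations on the old session */
+	ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_RECONNECT);
+	/* wait (with timeout) until in-flight requests drain */
+	err = ksmbd_conn_wait_idle_sess_id(conn, id);
+	if (err) {
+		ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE);
+		goto out;
+	}
+	ksmbd_destroy_file_table(&prev_sess->file_table);
+	prev_sess->state = SMB2_SESSION_EXPIRED;
+	ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE);
+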
+Fixes: c8efcc786146 ("ksmbd: add support for durable handles v1/v2")
+Cc: stable@vger.kernel.org # v6.6+
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-25040
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/connection.c | 34 +++++++++++++++++++++++++++++++++-
+ fs/smb/server/connection.h | 3 ++-
+ fs/smb/server/mgmt/user_session.c | 8 ++++++++
+ fs/smb/server/smb2pdu.c | 2 +-
+ 4 files changed, 44 insertions(+), 3 deletions(-)
+
+--- a/fs/smb/server/connection.c
++++ b/fs/smb/server/connection.c
+@@ -165,11 +165,43 @@ void ksmbd_all_conn_set_status(u64 sess_
+ up_read(&conn_list_lock);
+ }
+
+-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id)
++void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
+ {
+ wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
+ }
+
++int ksmbd_conn_wait_idle_sess_id(struct ksmbd_conn *curr_conn, u64 sess_id)
++{
++ struct ksmbd_conn *conn;
++ int rc, retry_count = 0, max_timeout = 120;
++ int rcount = 1;
++
++retry_idle:
++ if (retry_count >= max_timeout)
++ return -EIO;
++
++ down_read(&conn_list_lock);
++ list_for_each_entry(conn, &conn_list, conns_list) {
++ if (conn->binding || xa_load(&conn->sessions, sess_id)) {
++ if (conn == curr_conn)
++ rcount = 2;
++ if (atomic_read(&conn->req_running) >= rcount) {
++ rc = wait_event_timeout(conn->req_running_q,
++ atomic_read(&conn->req_running) < rcount,
++ HZ);
++ if (!rc) {
++ up_read(&conn_list_lock);
++ retry_count++;
++ goto retry_idle;
++ }
++ }
++ }
++ }
++ up_read(&conn_list_lock);
++
++ return 0;
++}
++
+ int ksmbd_conn_write(struct ksmbd_work *work)
+ {
+ struct ksmbd_conn *conn = work->conn;
+--- a/fs/smb/server/connection.h
++++ b/fs/smb/server/connection.h
+@@ -145,7 +145,8 @@ extern struct list_head conn_list;
+ extern struct rw_semaphore conn_list_lock;
+
+ bool ksmbd_conn_alive(struct ksmbd_conn *conn);
+-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id);
++void ksmbd_conn_wait_idle(struct ksmbd_conn *conn);
++int ksmbd_conn_wait_idle_sess_id(struct ksmbd_conn *curr_conn, u64 sess_id);
+ struct ksmbd_conn *ksmbd_conn_alloc(void);
+ void ksmbd_conn_free(struct ksmbd_conn *conn);
+ bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c);
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -310,6 +310,7 @@ void destroy_previous_session(struct ksm
+ {
+ struct ksmbd_session *prev_sess;
+ struct ksmbd_user *prev_user;
++ int err;
+
+ down_write(&sessions_table_lock);
+ down_write(&conn->session_lock);
+@@ -324,8 +325,15 @@ void destroy_previous_session(struct ksm
+ memcmp(user->passkey, prev_user->passkey, user->passkey_sz))
+ goto out;
+
++ ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_RECONNECT);
++ err = ksmbd_conn_wait_idle_sess_id(conn, id);
++ if (err) {
++ ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE);
++ goto out;
++ }
+ ksmbd_destroy_file_table(&prev_sess->file_table);
+ prev_sess->state = SMB2_SESSION_EXPIRED;
++ ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE);
+ out:
+ up_write(&conn->session_lock);
+ up_write(&sessions_table_lock);
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -2210,7 +2210,7 @@ int smb2_session_logoff(struct ksmbd_wor
+ ksmbd_conn_unlock(conn);
+
+ ksmbd_close_session_fds(work);
+- ksmbd_conn_wait_idle(conn, sess_id);
++ ksmbd_conn_wait_idle(conn);
+
+ /*
+ * Re-lookup session to validate if session is deleted
--- /dev/null
+From 232590ea7fc125986a526e03081b98e5783f70d2 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <brauner@kernel.org>
+Date: Mon, 19 Aug 2024 10:38:23 +0200
+Subject: Revert "pidfd: prevent creation of pidfds for kthreads"
+
+From: Christian Brauner <brauner@kernel.org>
+
+commit 232590ea7fc125986a526e03081b98e5783f70d2 upstream.
+
+This reverts commit 3b5bbe798b2451820e74243b738268f51901e7d0.
+
+Eric reported that systemd-shutdown is broken by blocking the creation
+of pidfds for kthreads, as older versions seem to rely on being able to
+create a pidfd for any process in /proc.
+
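+After the revert, pidfd_prepare() is back to the simple check (as in
+the kernel/fork.c hunk below), with no PF_KTHREAD filtering:
+
+	bool thread = flags & PIDFD_THREAD;
+
+	if (!pid || !pid_has_task(pid, thread ? PIDTYPE_PID : PIDTYPE_TGID))
+		return -EINVAL;
+
+	return __pidfd_prepare(pid, flags, ret);
+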
+Reported-by: Eric Biggers <ebiggers@kernel.org>
+Link: https://lore.kernel.org/r/20240818035818.GA1929@sol.localdomain
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/fork.c | 25 +++----------------------
+ 1 file changed, 3 insertions(+), 22 deletions(-)
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -2069,23 +2069,10 @@ static int __pidfd_prepare(struct pid *p
+ */
+ int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret)
+ {
+- if (!pid)
+- return -EINVAL;
+-
+- scoped_guard(rcu) {
+- struct task_struct *tsk;
++ bool thread = flags & PIDFD_THREAD;
+
+- if (flags & PIDFD_THREAD)
+- tsk = pid_task(pid, PIDTYPE_PID);
+- else
+- tsk = pid_task(pid, PIDTYPE_TGID);
+- if (!tsk)
+- return -EINVAL;
+-
+- /* Don't create pidfds for kernel threads for now. */
+- if (tsk->flags & PF_KTHREAD)
+- return -EINVAL;
+- }
++ if (!pid || !pid_has_task(pid, thread ? PIDTYPE_PID : PIDTYPE_TGID))
++ return -EINVAL;
+
+ return __pidfd_prepare(pid, flags, ret);
+ }
+@@ -2432,12 +2419,6 @@ __latent_entropy struct task_struct *cop
+ if (clone_flags & CLONE_PIDFD) {
+ int flags = (clone_flags & CLONE_THREAD) ? PIDFD_THREAD : 0;
+
+- /* Don't create pidfds for kernel threads for now. */
+- if (args->kthread) {
+- retval = -EINVAL;
+- goto bad_fork_free_pid;
+- }
+-
+ /* Note that no task has been attached to @pid yet. */
+ retval = __pidfd_prepare(pid, flags, &pidfile);
+ if (retval < 0)
--- /dev/null
+From 662c3e2db00f92e50c26e9dc4fe47c52223d9982 Mon Sep 17 00:00:00 2001
+From: Yonghong Song <yonghong.song@linux.dev>
+Date: Mon, 12 Aug 2024 14:48:52 -0700
+Subject: selftests/bpf: Add a test to verify previous stacksafe() fix
+
+From: Yonghong Song <yonghong.song@linux.dev>
+
+commit 662c3e2db00f92e50c26e9dc4fe47c52223d9982 upstream.
+
+A selftest is added such that, without the previous patch,
+a crash can happen. With the previous patch, the test
+runs successfully. The new test is written in a way that
+mimics the original crash case:
+ main_prog
+  static_prog_1
+   static_prog_2
+where static_prog_1 has different paths to static_prog_2:
+some paths have stack allocated and others do
+not. The stacksafe() check in static_prog_2()
+triggered the crash.
+
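+The call shape maps onto the test below: nest_1() is static_prog_1
+(case 0 allocates a 16-byte stack buffer, case 1 does not) and nest_2()
+is static_prog_2. Assuming the usual bpf selftest workflow, the new
+program can be exercised with the iters test runner, e.g.:
+
+	cd tools/testing/selftests/bpf
+	make
+	./test_progs -t iters
+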
+Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
+Link: https://lore.kernel.org/r/20240812214852.214037-1-yonghong.song@linux.dev
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/bpf/progs/iters.c | 54 ++++++++++++++++++++++++++++++
+ 1 file changed, 54 insertions(+)
+
+--- a/tools/testing/selftests/bpf/progs/iters.c
++++ b/tools/testing/selftests/bpf/progs/iters.c
+@@ -1434,4 +1434,58 @@ int iter_arr_with_actual_elem_count(cons
+ return sum;
+ }
+
++__u32 upper, select_n, result;
++__u64 global;
++
++static __noinline bool nest_2(char *str)
++{
++ /* some insns (including branch insns) to ensure stacksafe() is triggered
++ * in nest_2(). This way, stacksafe() can compare frame associated with nest_1().
++ */
++ if (str[0] == 't')
++ return true;
++ if (str[1] == 'e')
++ return true;
++ if (str[2] == 's')
++ return true;
++ if (str[3] == 't')
++ return true;
++ return false;
++}
++
++static __noinline bool nest_1(int n)
++{
++ /* case 0: allocate stack, case 1: no allocate stack */
++ switch (n) {
++ case 0: {
++ char comm[16];
++
++ if (bpf_get_current_comm(comm, 16))
++ return false;
++ return nest_2(comm);
++ }
++ case 1:
++ return nest_2((char *)&global);
++ default:
++ return false;
++ }
++}
++
++SEC("raw_tp")
++__success
++int iter_subprog_check_stacksafe(const void *ctx)
++{
++ long i;
++
++ bpf_for(i, 0, upper) {
++ if (!nest_1(select_n)) {
++ result = 1;
++ return 0;
++ }
++ }
++
++ result = 2;
++ return 0;
++}
++
+ char _license[] SEC("license") = "GPL";
selftests-mptcp-join-validate-fullmesh-endp-on-1st-sf.patch
selftests-mptcp-join-check-re-using-id-of-closed-subflow.patch
drm-xe-do-not-dereference-null-job-fence-in-trace-points.patch
+revert-pidfd-prevent-creation-of-pidfds-for-kthreads.patch
+drm-amdgpu-vcn-identify-unified-queue-in-sw-init.patch
+drm-amdgpu-vcn-not-pause-dpg-for-unified-queue.patch
+selftests-bpf-add-a-test-to-verify-previous-stacksafe-fix.patch
+ksmbd-fix-race-condition-between-destroy_previous_session-and-smb2-operations.patch