--- /dev/null
+From 135ffc7becc82cfb84936ae133da7969220b43b2 Mon Sep 17 00:00:00 2001
+From: Michal Luczaj <mhal@rbox.co>
+Date: Mon, 18 Nov 2024 22:03:43 +0100
+Subject: bpf, vsock: Invoke proto::close on close()
+
+From: Michal Luczaj <mhal@rbox.co>
+
+commit 135ffc7becc82cfb84936ae133da7969220b43b2 upstream.
+
+vsock defines a BPF callback to be invoked when close() is called. However,
+this callback is never actually executed. As a result, a closed vsock
+socket is not automatically removed from the sockmap/sockhash.
+
+Introduce a dummy vsock_close() and make vsock_release() call proto::close.
+
+Note: the changes in __vsock_release() look messy, but they are only due
+to the reduced indentation level and the reverse-xmas-tree reordering of
+the variable declarations.
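+
+Why the dummy is needed: when a socket is added to a sockmap, sockmap
+saves the protocol's close callback and later invokes it unconditionally
+on close(). A minimal sketch of that calling side, assuming the shape of
+the upstream sock_map_close() in net/core/sock_map.c:
+
+    static void sock_map_close(struct sock *sk, long timeout)
+    {
+            void (*saved_close)(struct sock *sk, long timeout);
+            struct sk_psock *psock;
+
+            lock_sock(sk);
+            psock = sk_psock(sk);
+            /* saved_close was copied from sk->sk_prot->close when the
+             * socket entered the map; without vsock_close() it would
+             * be NULL here.
+             */
+            saved_close = psock->saved_close;
+            release_sock(sk);
+            saved_close(sk, timeout);       /* called unconditionally */
+    }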
+
+Fixes: 634f1a7110b4 ("vsock: support sockmap")
+Signed-off-by: Michal Luczaj <mhal@rbox.co>
+Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
+Reviewed-by: Luigi Leonardi <leonardi@redhat.com>
+Link: https://lore.kernel.org/r/20241118-vsock-bpf-poll-close-v1-3-f1b9669cacdc@rbox.co
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+[LL: There is no sockmap support for this kernel version. This patch has
+been backported because it helps reduce conflicts on future backports]
+Signed-off-by: Luigi Leonardi <leonardi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/vmw_vsock/af_vsock.c | 71 +++++++++++++++++++++++++++--------------------
+ 1 file changed, 42 insertions(+), 29 deletions(-)
+
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -113,12 +113,14 @@
+ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
+ static void vsock_sk_destruct(struct sock *sk);
+ static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
++static void vsock_close(struct sock *sk, long timeout);
+
+ /* Protocol family. */
+ static struct proto vsock_proto = {
+ .name = "AF_VSOCK",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct vsock_sock),
++ .close = vsock_close,
+ };
+
+ /* The default peer timeout indicates how long we will wait for a peer response
+@@ -767,39 +769,37 @@ static struct sock *__vsock_create(struc
+
+ static void __vsock_release(struct sock *sk, int level)
+ {
+- if (sk) {
+- struct sock *pending;
+- struct vsock_sock *vsk;
++ struct vsock_sock *vsk;
++ struct sock *pending;
+
+- vsk = vsock_sk(sk);
+- pending = NULL; /* Compiler warning. */
++ vsk = vsock_sk(sk);
++ pending = NULL; /* Compiler warning. */
+
+- /* When "level" is SINGLE_DEPTH_NESTING, use the nested
+- * version to avoid the warning "possible recursive locking
+- * detected". When "level" is 0, lock_sock_nested(sk, level)
+- * is the same as lock_sock(sk).
+- */
+- lock_sock_nested(sk, level);
++ /* When "level" is SINGLE_DEPTH_NESTING, use the nested
++ * version to avoid the warning "possible recursive locking
++ * detected". When "level" is 0, lock_sock_nested(sk, level)
++ * is the same as lock_sock(sk).
++ */
++ lock_sock_nested(sk, level);
+
+- if (vsk->transport)
+- vsk->transport->release(vsk);
+- else if (sk->sk_type == SOCK_STREAM)
+- vsock_remove_sock(vsk);
+-
+- sock_orphan(sk);
+- sk->sk_shutdown = SHUTDOWN_MASK;
+-
+- skb_queue_purge(&sk->sk_receive_queue);
+-
+- /* Clean up any sockets that never were accepted. */
+- while ((pending = vsock_dequeue_accept(sk)) != NULL) {
+- __vsock_release(pending, SINGLE_DEPTH_NESTING);
+- sock_put(pending);
+- }
++ if (vsk->transport)
++ vsk->transport->release(vsk);
++ else if (sk->sk_type == SOCK_STREAM)
++ vsock_remove_sock(vsk);
+
+- release_sock(sk);
+- sock_put(sk);
++ sock_orphan(sk);
++ sk->sk_shutdown = SHUTDOWN_MASK;
++
++ skb_queue_purge(&sk->sk_receive_queue);
++
++ /* Clean up any sockets that never were accepted. */
++ while ((pending = vsock_dequeue_accept(sk)) != NULL) {
++ __vsock_release(pending, SINGLE_DEPTH_NESTING);
++ sock_put(pending);
+ }
++
++ release_sock(sk);
++ sock_put(sk);
+ }
+
+ static void vsock_sk_destruct(struct sock *sk)
+@@ -853,9 +853,22 @@ s64 vsock_stream_has_space(struct vsock_
+ }
+ EXPORT_SYMBOL_GPL(vsock_stream_has_space);
+
++/* Dummy callback required by sockmap.
++ * See unconditional call of saved_close() in sock_map_close().
++ */
++static void vsock_close(struct sock *sk, long timeout)
++{
++}
++
+ static int vsock_release(struct socket *sock)
+ {
+- __vsock_release(sock->sk, 0);
++ struct sock *sk = sock->sk;
++
++ if (!sk)
++ return 0;
++
++ sk->sk_prot->close(sk, 0);
++ __vsock_release(sk, 0);
+ sock->sk = NULL;
+ sock->state = SS_FREE;
+
--- /dev/null
+From d9fecd096f67a4469536e040a8a10bbfb665918b Mon Sep 17 00:00:00 2001
+From: Ricardo Ribalda <ribalda@chromium.org>
+Date: Tue, 3 Dec 2024 21:20:08 +0000
+Subject: media: uvcvideo: Only save async fh if success
+
+From: Ricardo Ribalda <ribalda@chromium.org>
+
+commit d9fecd096f67a4469536e040a8a10bbfb665918b upstream.
+
+Now we keep a reference to the active fh for any call to uvc_ctrl_set(),
+regardless of whether it is an actual set, just a try, or an operation
+the device refused.
+
+We should only keep the file handle if the device actually accepted
+applying the operation.
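+
+Condensed, the intended pattern in uvc_ctrl_commit_entity() (see the
+hunks below; apply_ctrl() is a hypothetical name for the write step):
+
+    ret = apply_ctrl(dev, ctrl);        /* push the value to the device */
+    if (ret < 0)
+            return ret;                 /* refused: keep no file handle */
+
+    if (!rollback && handle &&
+        ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
+            ctrl->handle = handle;      /* accepted: an event will follow */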
+
+Cc: stable@vger.kernel.org
+Fixes: e5225c820c05 ("media: uvcvideo: Send a control event when a Control Change interrupt arrives")
+Suggested-by: Hans de Goede <hdegoede@redhat.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Ricardo Ribalda <ribalda@chromium.org>
+Link: https://lore.kernel.org/r/20241203-uvc-fix-async-v6-1-26c867231118@chromium.org
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/usb/uvc/uvc_ctrl.c | 16 ++++++++++------
+ 1 file changed, 10 insertions(+), 6 deletions(-)
+
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -1526,7 +1526,9 @@ int uvc_ctrl_begin(struct uvc_video_chai
+ }
+
+ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
+- struct uvc_entity *entity, int rollback)
++ struct uvc_fh *handle,
++ struct uvc_entity *entity,
++ int rollback)
+ {
+ struct uvc_control *ctrl;
+ unsigned int i;
+@@ -1570,6 +1572,10 @@ static int uvc_ctrl_commit_entity(struct
+
+ if (ret < 0)
+ return ret;
++
++ if (!rollback && handle &&
++ ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
++ ctrl->handle = handle;
+ }
+
+ return 0;
+@@ -1585,7 +1591,8 @@ int __uvc_ctrl_commit(struct uvc_fh *han
+
+ /* Find the control. */
+ list_for_each_entry(entity, &chain->entities, chain) {
+- ret = uvc_ctrl_commit_entity(chain->dev, entity, rollback);
++ ret = uvc_ctrl_commit_entity(chain->dev, handle, entity,
++ rollback);
+ if (ret < 0)
+ goto done;
+ }
+@@ -1709,9 +1716,6 @@ int uvc_ctrl_set(struct uvc_fh *handle,
+ mapping->set(mapping, value,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
+
+- if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
+- ctrl->handle = handle;
+-
+ ctrl->dirty = 1;
+ ctrl->modified = 1;
+ return 0;
+@@ -2040,7 +2044,7 @@ int uvc_ctrl_restore_values(struct uvc_d
+ ctrl->dirty = 1;
+ }
+
+- ret = uvc_ctrl_commit_entity(dev, entity, 0);
++ ret = uvc_ctrl_commit_entity(dev, NULL, entity, 0);
+ if (ret < 0)
+ return ret;
+ }
--- /dev/null
+From 221cd51efe4565501a3dbf04cc011b537dcce7fb Mon Sep 17 00:00:00 2001
+From: Ricardo Ribalda <ribalda@chromium.org>
+Date: Tue, 3 Dec 2024 21:20:10 +0000
+Subject: media: uvcvideo: Remove dangling pointers
+
+From: Ricardo Ribalda <ribalda@chromium.org>
+
+commit 221cd51efe4565501a3dbf04cc011b537dcce7fb upstream.
+
+When an async control is written, we copy a pointer to the file handle
+that started the operation. That pointer will be used when the device is
+done, which could be anytime in the future.
+
+If the user closes that file descriptor, its structure will be freed,
+leaving one dangling pointer per pending async control that the driver
+will then try to use.
+
+Clean all the dangling pointers during release().
+
+To avoid adding a performance penalty in the most common case (no async
+operation pending), a per-file-handle counter of pending async controls
+has been introduced, with logic to keep it consistent with the controls'
+handle pointers.
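+
+The invariant the counter maintains (see uvc_ctrl_set_handle() in the
+hunks below): for every file handle, pending_async_ctrls equals the
+number of controls whose ctrl->handle points at it, so release() can
+short-circuit the entity scan in the common case:
+
+    if (!handle->pending_async_ctrls) {
+            mutex_unlock(&handle->chain->ctrl_mutex);
+            return;         /* no async operation was ever pending */
+    }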
+
+Cc: stable@vger.kernel.org
+Fixes: e5225c820c05 ("media: uvcvideo: Send a control event when a Control Change interrupt arrives")
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Ricardo Ribalda <ribalda@chromium.org>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Link: https://lore.kernel.org/r/20241203-uvc-fix-async-v6-3-26c867231118@chromium.org
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/usb/uvc/uvc_ctrl.c | 63 +++++++++++++++++++++++++++++++++++++--
+ drivers/media/usb/uvc/uvc_v4l2.c | 2 +
+ drivers/media/usb/uvc/uvcvideo.h | 9 ++++-
+ 3 files changed, 71 insertions(+), 3 deletions(-)
+
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -1306,6 +1306,40 @@ static void uvc_ctrl_send_slave_event(st
+ uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes);
+ }
+
++static void uvc_ctrl_set_handle(struct uvc_fh *handle, struct uvc_control *ctrl,
++ struct uvc_fh *new_handle)
++{
++ lockdep_assert_held(&handle->chain->ctrl_mutex);
++
++ if (new_handle) {
++ if (ctrl->handle)
++ dev_warn_ratelimited(&handle->stream->dev->udev->dev,
++ "UVC non compliance: Setting an async control with a pending operation.");
++
++ if (new_handle == ctrl->handle)
++ return;
++
++ if (ctrl->handle) {
++ WARN_ON(!ctrl->handle->pending_async_ctrls);
++ if (ctrl->handle->pending_async_ctrls)
++ ctrl->handle->pending_async_ctrls--;
++ }
++
++ ctrl->handle = new_handle;
++ handle->pending_async_ctrls++;
++ return;
++ }
++
++ /* Cannot clear the handle for a control not owned by us.*/
++ if (WARN_ON(ctrl->handle != handle))
++ return;
++
++ ctrl->handle = NULL;
++ if (WARN_ON(!handle->pending_async_ctrls))
++ return;
++ handle->pending_async_ctrls--;
++}
++
+ void uvc_ctrl_status_event(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl, const u8 *data)
+ {
+@@ -1316,7 +1350,8 @@ void uvc_ctrl_status_event(struct uvc_vi
+ mutex_lock(&chain->ctrl_mutex);
+
+ handle = ctrl->handle;
+- ctrl->handle = NULL;
++ if (handle)
++ uvc_ctrl_set_handle(handle, ctrl, NULL);
+
+ list_for_each_entry(mapping, &ctrl->info.mappings, list) {
+ s32 value = __uvc_ctrl_get_value(mapping, data);
+@@ -1575,7 +1610,7 @@ static int uvc_ctrl_commit_entity(struct
+
+ if (!rollback && handle &&
+ ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
+- ctrl->handle = handle;
++ uvc_ctrl_set_handle(handle, ctrl, handle);
+ }
+
+ return 0;
+@@ -2369,6 +2404,30 @@ int uvc_ctrl_init_device(struct uvc_devi
+ return 0;
+ }
+
++void uvc_ctrl_cleanup_fh(struct uvc_fh *handle)
++{
++ struct uvc_entity *entity;
++
++ mutex_lock(&handle->chain->ctrl_mutex);
++
++ if (!handle->pending_async_ctrls) {
++ mutex_unlock(&handle->chain->ctrl_mutex);
++ return;
++ }
++
++ list_for_each_entry(entity, &handle->chain->dev->entities, list) {
++ unsigned int i;
++ for (i = 0; i < entity->ncontrols; ++i) {
++ if (entity->controls[i].handle != handle)
++ continue;
++ uvc_ctrl_set_handle(handle, &entity->controls[i], NULL);
++ }
++ }
++
++ WARN_ON(handle->pending_async_ctrls);
++ mutex_unlock(&handle->chain->ctrl_mutex);
++}
++
+ /*
+ * Cleanup device controls.
+ */
+--- a/drivers/media/usb/uvc/uvc_v4l2.c
++++ b/drivers/media/usb/uvc/uvc_v4l2.c
+@@ -593,6 +593,8 @@ static int uvc_v4l2_release(struct file
+
+ uvc_trace(UVC_TRACE_CALLS, "uvc_v4l2_release\n");
+
++ uvc_ctrl_cleanup_fh(handle);
++
+ /* Only free resources if this is a privileged handle. */
+ if (uvc_has_privileges(handle))
+ uvc_queue_release(&stream->queue);
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -453,7 +453,11 @@ struct uvc_video_chain {
+ struct uvc_entity *processing; /* Processing unit */
+ struct uvc_entity *selector; /* Selector unit */
+
+- struct mutex ctrl_mutex; /* Protects ctrl.info */
++ struct mutex ctrl_mutex; /*
++ * Protects ctrl.info,
++ * ctrl.handle and
++ * uvc_fh.pending_async_ctrls
++ */
+
+ struct v4l2_prio_state prio; /* V4L2 priority state */
+ u32 caps; /* V4L2 chain-wide caps */
+@@ -699,6 +703,7 @@ struct uvc_fh {
+ struct uvc_video_chain *chain;
+ struct uvc_streaming *stream;
+ enum uvc_handle_state state;
++ unsigned int pending_async_ctrls;
+ };
+
+ struct uvc_driver {
+@@ -871,6 +876,8 @@ int uvc_ctrl_set(struct uvc_fh *handle,
+ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
+ struct uvc_xu_control_query *xqry);
+
++void uvc_ctrl_cleanup_fh(struct uvc_fh *handle);
++
+ /* Utility functions */
+ void uvc_simplify_fraction(u32 *numerator, u32 *denominator,
+ unsigned int n_terms, unsigned int threshold);
--- /dev/null
+From 8cf57c6df818f58fdad16a909506be213623a88e Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Mon, 27 Nov 2023 23:30:21 +0900
+Subject: nilfs2: eliminate staggered calls to kunmap in nilfs_rename
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit 8cf57c6df818f58fdad16a909506be213623a88e upstream.
+
+In nilfs_rename(), the calls to nilfs_put_page() that release pages
+obtained with nilfs_find_entry() or nilfs_dotdot() are interleaved
+rather than nested in the normal path.
+
+When the kernel memory mapping method is switched from kmap to
+kmap_local_{page,folio}, this interleaving violates the constraint on
+the calling order of kunmap_local().
+
+Swap the order of the nilfs_put_page() calls where the kmap sections of
+multiple pages overlap so that they are nested, allowing nilfs_put_page()
+to be replaced directly with unmap_and_put_page().
+
+Without this reordering, that replacement will cause a kernel WARNING in
+kunmap_local_indexed() on architectures with high memory mapping.
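+
+Schematically, kmap_local mappings must be released in reverse (stack)
+order, so overlapping sections have to nest:
+
+    /* interleaved (invalid for kmap_local):    nested (valid):
+     *      kmap   A                                kmap   A
+     *      kmap   B                                kmap   B
+     *      kunmap A   <-- out of order             kunmap B
+     *      kunmap B                                kunmap A
+     */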
+
+Link: https://lkml.kernel.org/r/20231127143036.2425-3-konishi.ryusuke@gmail.com
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: ee70999a988b ("nilfs2: handle errors that nilfs_prepare_chunk() may return")
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/namei.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/nilfs2/namei.c
++++ b/fs/nilfs2/namei.c
+@@ -428,13 +428,14 @@ static int nilfs_rename(struct inode *ol
+ old_inode->i_ctime = current_time(old_inode);
+
+ nilfs_delete_entry(old_de, old_page);
+- nilfs_put_page(old_page);
+
+ if (dir_de) {
+ nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
+ nilfs_put_page(dir_page);
+ drop_nlink(old_dir);
+ }
++ nilfs_put_page(old_page);
++
+ nilfs_mark_inode_dirty(old_dir);
+ nilfs_mark_inode_dirty(old_inode);
+
--- /dev/null
+From ee70999a988b8abc3490609142f50ebaa8344432 Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Sat, 11 Jan 2025 23:26:35 +0900
+Subject: nilfs2: handle errors that nilfs_prepare_chunk() may return
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit ee70999a988b8abc3490609142f50ebaa8344432 upstream.
+
+Patch series "nilfs2: fix issues with rename operations".
+
+This series fixes BUG_ON check failures reported by syzbot around rename
+operations, and a minor behavioral issue where the mtime of a child
+directory changes when it is renamed instead of moved.
+
+
+This patch (of 2):
+
+The directory manipulation routines nilfs_set_link() and
+nilfs_delete_entry() rewrite the directory entry in the folio/page
+previously read by nilfs_find_entry(), so error handling is omitted on the
+assumption that nilfs_prepare_chunk(), which prepares the buffer for
+rewriting, will always succeed for these. And if an error is returned, it
+triggers the legacy BUG_ON() checks in each routine.
+
+This assumption is wrong, as proven by syzbot: the buffer layer called by
+nilfs_prepare_chunk() may call nilfs_get_block() if necessary, which may
+fail due to metadata corruption or other reasons. This has been there all
+along, but improved sanity checks and error handling may have made it more
+reproducible in fuzzing tests.
+
+Fix this issue by adding missing error paths in nilfs_set_link(),
+nilfs_delete_entry(), and their caller nilfs_rename().
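+
+The replacement pattern is the same in both routines (condensed from the
+hunks below; nilfs_delete_entry() branches to its cleanup label instead
+of returning directly):
+
+    err = nilfs_prepare_chunk(page, from, to);
+    if (unlikely(err)) {            /* was: BUG_ON(err); */
+            unlock_page(page);
+            return err;             /* propagated up to nilfs_rename() */
+    }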
+
+Link: https://lkml.kernel.org/r/20250111143518.7901-1-konishi.ryusuke@gmail.com
+Link: https://lkml.kernel.org/r/20250111143518.7901-2-konishi.ryusuke@gmail.com
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reported-by: syzbot+32c3706ebf5d95046ea1@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=32c3706ebf5d95046ea1
+Reported-by: syzbot+1097e95f134f37d9395c@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=1097e95f134f37d9395c
+Fixes: 2ba466d74ed7 ("nilfs2: directory entry operations")
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/dir.c | 13 ++++++++++---
+ fs/nilfs2/namei.c | 29 +++++++++++++++--------------
+ fs/nilfs2/nilfs.h | 4 ++--
+ 3 files changed, 27 insertions(+), 19 deletions(-)
+
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -444,7 +444,7 @@ int nilfs_inode_by_name(struct inode *di
+ return 0;
+ }
+
+-void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
++int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
+ struct page *page, struct inode *inode)
+ {
+ unsigned int from = (char *)de - (char *)page_address(page);
+@@ -454,11 +454,15 @@ void nilfs_set_link(struct inode *dir, s
+
+ lock_page(page);
+ err = nilfs_prepare_chunk(page, from, to);
+- BUG_ON(err);
++ if (unlikely(err)) {
++ unlock_page(page);
++ return err;
++ }
+ de->inode = cpu_to_le64(inode->i_ino);
+ nilfs_set_de_type(de, inode);
+ nilfs_commit_chunk(page, mapping, from, to);
+ dir->i_mtime = dir->i_ctime = current_time(dir);
++ return 0;
+ }
+
+ /*
+@@ -590,7 +594,10 @@ int nilfs_delete_entry(struct nilfs_dir_
+ from = (char *)pde - (char *)page_address(page);
+ lock_page(page);
+ err = nilfs_prepare_chunk(page, from, to);
+- BUG_ON(err);
++ if (unlikely(err)) {
++ unlock_page(page);
++ goto out;
++ }
+ if (pde)
+ pde->rec_len = nilfs_rec_len_to_disk(to - from);
+ dir->inode = 0;
+--- a/fs/nilfs2/namei.c
++++ b/fs/nilfs2/namei.c
+@@ -403,8 +403,10 @@ static int nilfs_rename(struct inode *ol
+ err = PTR_ERR(new_de);
+ goto out_dir;
+ }
+- nilfs_set_link(new_dir, new_de, new_page, old_inode);
++ err = nilfs_set_link(new_dir, new_de, new_page, old_inode);
+ nilfs_put_page(new_page);
++ if (unlikely(err))
++ goto out_dir;
+ nilfs_mark_inode_dirty(new_dir);
+ new_inode->i_ctime = current_time(new_inode);
+ if (dir_de)
+@@ -427,28 +429,27 @@ static int nilfs_rename(struct inode *ol
+ */
+ old_inode->i_ctime = current_time(old_inode);
+
+- nilfs_delete_entry(old_de, old_page);
+-
+- if (dir_de) {
+- nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
+- nilfs_put_page(dir_page);
+- drop_nlink(old_dir);
++ err = nilfs_delete_entry(old_de, old_page);
++ if (likely(!err)) {
++ if (dir_de) {
++ err = nilfs_set_link(old_inode, dir_de, dir_page,
++ new_dir);
++ drop_nlink(old_dir);
++ }
++ nilfs_mark_inode_dirty(old_dir);
+ }
+- nilfs_put_page(old_page);
+-
+- nilfs_mark_inode_dirty(old_dir);
+ nilfs_mark_inode_dirty(old_inode);
+
+- err = nilfs_transaction_commit(old_dir->i_sb);
+- return err;
+-
+ out_dir:
+ if (dir_de)
+ nilfs_put_page(dir_page);
+ out_old:
+ nilfs_put_page(old_page);
+ out:
+- nilfs_transaction_abort(old_dir->i_sb);
++ if (likely(!err))
++ err = nilfs_transaction_commit(old_dir->i_sb);
++ else
++ nilfs_transaction_abort(old_dir->i_sb);
+ return err;
+ }
+
+--- a/fs/nilfs2/nilfs.h
++++ b/fs/nilfs2/nilfs.h
+@@ -240,8 +240,8 @@ nilfs_find_entry(struct inode *, const s
+ extern int nilfs_delete_entry(struct nilfs_dir_entry *, struct page *);
+ extern int nilfs_empty_dir(struct inode *);
+ extern struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct page **);
+-extern void nilfs_set_link(struct inode *, struct nilfs_dir_entry *,
+- struct page *, struct inode *);
++int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
++ struct page *page, struct inode *inode);
+
+ static inline void nilfs_put_page(struct page *page)
+ {
--- /dev/null
+From 584db20c181f5e28c0386d7987406ace7fbd3e49 Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Mon, 27 Nov 2023 23:30:20 +0900
+Subject: nilfs2: move page release outside of nilfs_delete_entry and nilfs_set_link
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit 584db20c181f5e28c0386d7987406ace7fbd3e49 upstream.
+
+Patch series "nilfs2: Folio conversions for directory paths".
+
+This series applies page->folio conversions to nilfs2 directory
+operations. This reduces hidden compound_head() calls and also converts
+deprecated kmap calls to kmap_local in the directory code.
+
+Although nilfs2 does not yet support large folios, Matthew has done his
+best here to include support for large folios, which will be needed for
+devices with large block sizes.
+
+This series corresponds to the second half of the original post [1], but
+with two complementary patches inserted at the beginning and some
+adjustments, to prevent a kmap_local constraint violation found during
+testing with highmem mapping.
+
+[1] https://lkml.kernel.org/r/20231106173903.1734114-1-willy@infradead.org
+
+I have reviewed all changes and tested this for regular and small block
+sizes, both on machines with and without highmem mapping. No issues
+found.
+
+
+This patch (of 17):
+
+In a few directory operations, the call to nilfs_put_page() for a page
+obtained using nilfs_find_entry() or nilfs_dotdot() is hidden inside
+nilfs_set_link() and nilfs_delete_entry(), making it difficult to track
+page release and preventing the call position from being changed.
+
+Moving nilfs_put_page() out of these functions makes the page get/put
+correspondence clearer and makes it easier to reorder the
+nilfs_put_page() calls (and the kunmap calls within them) when modifying
+multiple directory entries simultaneously in nilfs_rename().
+
+Also, update comments for nilfs_set_link() and nilfs_delete_entry() to
+reflect changes in their behavior.
+
+To make nilfs_put_page() visible from namei.c, this moves its definition
+to nilfs.h and replaces existing equivalents to use it, but the exposure
+of that definition is temporary and will be removed on a later kmap ->
+kmap_local conversion.
+
+Link: https://lkml.kernel.org/r/20231127143036.2425-1-konishi.ryusuke@gmail.com
+Link: https://lkml.kernel.org/r/20231127143036.2425-2-konishi.ryusuke@gmail.com
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: ee70999a988b ("nilfs2: handle errors that nilfs_prepare_chunk() may return")
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/dir.c | 11 +----------
+ fs/nilfs2/namei.c | 13 +++++++------
+ fs/nilfs2/nilfs.h | 6 ++++++
+ 3 files changed, 14 insertions(+), 16 deletions(-)
+
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -64,12 +64,6 @@ static inline unsigned int nilfs_chunk_s
+ return inode->i_sb->s_blocksize;
+ }
+
+-static inline void nilfs_put_page(struct page *page)
+-{
+- kunmap(page);
+- put_page(page);
+-}
+-
+ /*
+ * Return the offset into page `page_nr' of the last valid
+ * byte in that page, plus one.
+@@ -450,7 +444,6 @@ int nilfs_inode_by_name(struct inode *di
+ return 0;
+ }
+
+-/* Releases the page */
+ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
+ struct page *page, struct inode *inode)
+ {
+@@ -465,7 +458,6 @@ void nilfs_set_link(struct inode *dir, s
+ de->inode = cpu_to_le64(inode->i_ino);
+ nilfs_set_de_type(de, inode);
+ nilfs_commit_chunk(page, mapping, from, to);
+- nilfs_put_page(page);
+ dir->i_mtime = dir->i_ctime = current_time(dir);
+ }
+
+@@ -569,7 +561,7 @@ out_unlock:
+
+ /*
+ * nilfs_delete_entry deletes a directory entry by merging it with the
+- * previous entry. Page is up-to-date. Releases the page.
++ * previous entry. Page is up-to-date.
+ */
+ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
+ {
+@@ -605,7 +597,6 @@ int nilfs_delete_entry(struct nilfs_dir_
+ nilfs_commit_chunk(page, mapping, from, to);
+ inode->i_ctime = inode->i_mtime = current_time(inode);
+ out:
+- nilfs_put_page(page);
+ return err;
+ }
+
+--- a/fs/nilfs2/namei.c
++++ b/fs/nilfs2/namei.c
+@@ -295,6 +295,7 @@ static int nilfs_do_unlink(struct inode
+ set_nlink(inode, 1);
+ }
+ err = nilfs_delete_entry(de, page);
++ nilfs_put_page(page);
+ if (err)
+ goto out;
+
+@@ -403,6 +404,7 @@ static int nilfs_rename(struct inode *ol
+ goto out_dir;
+ }
+ nilfs_set_link(new_dir, new_de, new_page, old_inode);
++ nilfs_put_page(new_page);
+ nilfs_mark_inode_dirty(new_dir);
+ new_inode->i_ctime = current_time(new_inode);
+ if (dir_de)
+@@ -426,9 +428,11 @@ static int nilfs_rename(struct inode *ol
+ old_inode->i_ctime = current_time(old_inode);
+
+ nilfs_delete_entry(old_de, old_page);
++ nilfs_put_page(old_page);
+
+ if (dir_de) {
+ nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
++ nilfs_put_page(dir_page);
+ drop_nlink(old_dir);
+ }
+ nilfs_mark_inode_dirty(old_dir);
+@@ -438,13 +442,10 @@ static int nilfs_rename(struct inode *ol
+ return err;
+
+ out_dir:
+- if (dir_de) {
+- kunmap(dir_page);
+- put_page(dir_page);
+- }
++ if (dir_de)
++ nilfs_put_page(dir_page);
+ out_old:
+- kunmap(old_page);
+- put_page(old_page);
++ nilfs_put_page(old_page);
+ out:
+ nilfs_transaction_abort(old_dir->i_sb);
+ return err;
+--- a/fs/nilfs2/nilfs.h
++++ b/fs/nilfs2/nilfs.h
+@@ -243,6 +243,12 @@ extern struct nilfs_dir_entry *nilfs_dot
+ extern void nilfs_set_link(struct inode *, struct nilfs_dir_entry *,
+ struct page *, struct inode *);
+
++static inline void nilfs_put_page(struct page *page)
++{
++ kunmap(page);
++ put_page(page);
++}
++
+ /* file.c */
+ extern int nilfs_sync_file(struct file *, loff_t, loff_t, int);
+
--- /dev/null
+From 8004d635f27bbccaa5c083c50d4d5302a6ffa00e Mon Sep 17 00:00:00 2001
+From: Thadeu Lima de Souza Cascardo <cascardo@igalia.com>
+Date: Tue, 14 Jan 2025 17:00:45 -0300
+Subject: Revert "media: uvcvideo: Require entities to have a non-zero unique ID"
+
+From: Thadeu Lima de Souza Cascardo <cascardo@igalia.com>
+
+commit 8004d635f27bbccaa5c083c50d4d5302a6ffa00e upstream.
+
+This reverts commit 3dd075fe8ebbc6fcbf998f81a75b8c4b159a6195.
+
+Tomasz has reported that his device, Generalplus Technology Inc. 808 Camera,
+with ID 1b3f:2002, stopped being detected:
+
+$ ls -l /dev/video*
+zsh: no matches found: /dev/video*
+[ 7.230599] usb 3-2: Found multiple Units with ID 5
+
+This particular device is non-compliant, having both the Output Terminal
+and the Processing Unit with ID 5. uvc_scan_fallback() is nevertheless
+able to build a chain. However, when media elements are added and
+uvc_mc_create_links() calls uvc_entity_by_id(), it gets the wrong
+entity, media_create_pad_link() WARNs, and the entities fail to be
+registered.
+
+In order to reinstate support for such devices in a timely fashion,
+reverting the fix for these warnings is appropriate. A proper fix that
+considers the existence of such non-compliant devices will be submitted in
+a later development cycle.
+
+Reported-by: Tomasz Sikora <sikora.tomus@gmail.com>
+Fixes: 3dd075fe8ebb ("media: uvcvideo: Require entities to have a non-zero unique ID")
+Cc: stable@vger.kernel.org
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@igalia.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Reviewed-by: Ricardo Ribalda <ribalda@chromium.org>
+Link: https://lore.kernel.org/r/20250114200045.1401644-1-cascardo@igalia.com
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/usb/uvc/uvc_driver.c | 63 ++++++++++++++-----------------------
+ 1 file changed, 24 insertions(+), 39 deletions(-)
+
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -1029,27 +1029,14 @@ error:
+ return ret;
+ }
+
+-static struct uvc_entity *uvc_alloc_new_entity(struct uvc_device *dev, u16 type,
+- u16 id, unsigned int num_pads,
+- unsigned int extra_size)
++static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
++ unsigned int num_pads, unsigned int extra_size)
+ {
+ struct uvc_entity *entity;
+ unsigned int num_inputs;
+ unsigned int size;
+ unsigned int i;
+
+- /* Per UVC 1.1+ spec 3.7.2, the ID should be non-zero. */
+- if (id == 0) {
+- dev_err(&dev->udev->dev, "Found Unit with invalid ID 0.\n");
+- return ERR_PTR(-EINVAL);
+- }
+-
+- /* Per UVC 1.1+ spec 3.7.2, the ID is unique. */
+- if (uvc_entity_by_id(dev, id)) {
+- dev_err(&dev->udev->dev, "Found multiple Units with ID %u\n", id);
+- return ERR_PTR(-EINVAL);
+- }
+-
+ extra_size = roundup(extra_size, sizeof(*entity->pads));
+ if (num_pads)
+ num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1;
+@@ -1059,7 +1046,7 @@ static struct uvc_entity *uvc_alloc_new_
+ + num_inputs;
+ entity = kzalloc(size, GFP_KERNEL);
+ if (entity == NULL)
+- return ERR_PTR(-ENOMEM);
++ return NULL;
+
+ entity->id = id;
+ entity->type = type;
+@@ -1130,10 +1117,10 @@ static int uvc_parse_vendor_control(stru
+ break;
+ }
+
+- unit = uvc_alloc_new_entity(dev, UVC_VC_EXTENSION_UNIT,
+- buffer[3], p + 1, 2 * n);
+- if (IS_ERR(unit))
+- return PTR_ERR(unit);
++ unit = uvc_alloc_entity(UVC_VC_EXTENSION_UNIT, buffer[3],
++ p + 1, 2*n);
++ if (unit == NULL)
++ return -ENOMEM;
+
+ memcpy(unit->extension.guidExtensionCode, &buffer[4], 16);
+ unit->extension.bNumControls = buffer[20];
+@@ -1244,10 +1231,10 @@ static int uvc_parse_standard_control(st
+ return -EINVAL;
+ }
+
+- term = uvc_alloc_new_entity(dev, type | UVC_TERM_INPUT,
+- buffer[3], 1, n + p);
+- if (IS_ERR(term))
+- return PTR_ERR(term);
++ term = uvc_alloc_entity(type | UVC_TERM_INPUT, buffer[3],
++ 1, n + p);
++ if (term == NULL)
++ return -ENOMEM;
+
+ if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) {
+ term->camera.bControlSize = n;
+@@ -1303,10 +1290,10 @@ static int uvc_parse_standard_control(st
+ return 0;
+ }
+
+- term = uvc_alloc_new_entity(dev, type | UVC_TERM_OUTPUT,
+- buffer[3], 1, 0);
+- if (IS_ERR(term))
+- return PTR_ERR(term);
++ term = uvc_alloc_entity(type | UVC_TERM_OUTPUT, buffer[3],
++ 1, 0);
++ if (term == NULL)
++ return -ENOMEM;
+
+ memcpy(term->baSourceID, &buffer[7], 1);
+
+@@ -1327,10 +1314,9 @@ static int uvc_parse_standard_control(st
+ return -EINVAL;
+ }
+
+- unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
+- p + 1, 0);
+- if (IS_ERR(unit))
+- return PTR_ERR(unit);
++ unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, 0);
++ if (unit == NULL)
++ return -ENOMEM;
+
+ memcpy(unit->baSourceID, &buffer[5], p);
+
+@@ -1352,9 +1338,9 @@ static int uvc_parse_standard_control(st
+ return -EINVAL;
+ }
+
+- unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3], 2, n);
+- if (IS_ERR(unit))
+- return PTR_ERR(unit);
++ unit = uvc_alloc_entity(buffer[2], buffer[3], 2, n);
++ if (unit == NULL)
++ return -ENOMEM;
+
+ memcpy(unit->baSourceID, &buffer[4], 1);
+ unit->processing.wMaxMultiplier =
+@@ -1383,10 +1369,9 @@ static int uvc_parse_standard_control(st
+ return -EINVAL;
+ }
+
+- unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
+- p + 1, n);
+- if (IS_ERR(unit))
+- return PTR_ERR(unit);
++ unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, n);
++ if (unit == NULL)
++ return -ENOMEM;
+
+ memcpy(unit->extension.guidExtensionCode, &buffer[4], 16);
+ unit->extension.bNumControls = buffer[20];
--- /dev/null
+From 737d4d91d35b5f7fa5bb442651472277318b0bfd Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@redhat.com>
+Date: Tue, 7 Jan 2025 13:01:05 +0100
+Subject: sched: sch_cake: add bounds checks to host bulk flow fairness counts
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Toke Høiland-Jørgensen <toke@redhat.com>
+
+commit 737d4d91d35b5f7fa5bb442651472277318b0bfd upstream.
+
+Even though we fixed a logic error in the commit cited below, syzbot
+still managed to trigger an underflow of the per-host bulk flow
+counters, leading to an out-of-bounds memory access.
+
+To avoid any such logic errors causing out-of-bounds memory accesses,
+this commit factors out all accesses to the per-host bulk flow counters
+to a series of helpers that perform bounds-checking before any
+increments and decrements. This also has the benefit of improving
+readability by moving the conditional checks for the flow mode into
+these helpers, instead of having them spread out throughout the
+code (which was the cause of the original logic error).
+
+As part of this change, the flow quantum calculation is consolidated
+into a helper function, which means that the dithering applied to the
+host load scaling is now applied both in the DRR rotation and when a
+sparse flow's quantum is first initiated. The only user-visible effect
+of this is that the maximum packet size that can be sent while a flow
+stays sparse will now vary with +/- one byte in some cases. This should
+not make a noticeable difference in practice, and thus it's not worth
+complicating the code to preserve the old behaviour.
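+
+For reference, the consolidated calculation is 16.16 fixed point:
+quantum_div[i] is roughly 65535 / i, and the random 16-bit fraction
+dithers the truncation (see cake_get_flow_quantum() below):
+
+    /* (flow_quantum * quantum_div[host_load] +
+     *  (prandom_u32() >> 16)) >> 16
+     *          ~= flow_quantum / host_load, rounded up or down at random,
+     * so each of host_load bulk flows averages an exact 1/host_load
+     * share without accumulating roundoff error.
+     */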
+
+Fixes: 546ea84d07e3 ("sched: sch_cake: fix bulk flow accounting logic for host fairness")
+Reported-by: syzbot+f63600d288bfb7057424@syzkaller.appspotmail.com
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Acked-by: Dave Taht <dave.taht@gmail.com>
+Link: https://patch.msgid.link/20250107120105.70685-1-toke@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[Hagar: needed contextual fixes due to missing commit 7e3cf0843fe5]
+Signed-off-by: Hagar Hemdan <hagarhem@amazon.com>
+Reviewed-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_cake.c | 140 +++++++++++++++++++++++++++------------------------
+ 1 file changed, 75 insertions(+), 65 deletions(-)
+
+--- a/net/sched/sch_cake.c
++++ b/net/sched/sch_cake.c
+@@ -643,6 +643,63 @@ static bool cake_ddst(int flow_mode)
+ return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
+ }
+
++static void cake_dec_srchost_bulk_flow_count(struct cake_tin_data *q,
++ struct cake_flow *flow,
++ int flow_mode)
++{
++ if (likely(cake_dsrc(flow_mode) &&
++ q->hosts[flow->srchost].srchost_bulk_flow_count))
++ q->hosts[flow->srchost].srchost_bulk_flow_count--;
++}
++
++static void cake_inc_srchost_bulk_flow_count(struct cake_tin_data *q,
++ struct cake_flow *flow,
++ int flow_mode)
++{
++ if (likely(cake_dsrc(flow_mode) &&
++ q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES))
++ q->hosts[flow->srchost].srchost_bulk_flow_count++;
++}
++
++static void cake_dec_dsthost_bulk_flow_count(struct cake_tin_data *q,
++ struct cake_flow *flow,
++ int flow_mode)
++{
++ if (likely(cake_ddst(flow_mode) &&
++ q->hosts[flow->dsthost].dsthost_bulk_flow_count))
++ q->hosts[flow->dsthost].dsthost_bulk_flow_count--;
++}
++
++static void cake_inc_dsthost_bulk_flow_count(struct cake_tin_data *q,
++ struct cake_flow *flow,
++ int flow_mode)
++{
++ if (likely(cake_ddst(flow_mode) &&
++ q->hosts[flow->dsthost].dsthost_bulk_flow_count < CAKE_QUEUES))
++ q->hosts[flow->dsthost].dsthost_bulk_flow_count++;
++}
++
++static u16 cake_get_flow_quantum(struct cake_tin_data *q,
++ struct cake_flow *flow,
++ int flow_mode)
++{
++ u16 host_load = 1;
++
++ if (cake_dsrc(flow_mode))
++ host_load = max(host_load,
++ q->hosts[flow->srchost].srchost_bulk_flow_count);
++
++ if (cake_ddst(flow_mode))
++ host_load = max(host_load,
++ q->hosts[flow->dsthost].dsthost_bulk_flow_count);
++
++ /* The shifted prandom_u32() is a way to apply dithering to avoid
++ * accumulating roundoff errors
++ */
++ return (q->flow_quantum * quantum_div[host_load] +
++ (prandom_u32() >> 16)) >> 16;
++}
++
+ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
+ int flow_mode, u16 flow_override, u16 host_override)
+ {
+@@ -789,10 +846,8 @@ skip_hash:
+ allocate_dst = cake_ddst(flow_mode);
+
+ if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
+- if (allocate_src)
+- q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
+- if (allocate_dst)
+- q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
++ cake_dec_srchost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
++ cake_dec_dsthost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
+ }
+ found:
+ /* reserve queue for future packets in same flow */
+@@ -817,9 +872,10 @@ found:
+ q->hosts[outer_hash + k].srchost_tag = srchost_hash;
+ found_src:
+ srchost_idx = outer_hash + k;
+- if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+- q->hosts[srchost_idx].srchost_bulk_flow_count++;
+ q->flows[reduced_hash].srchost = srchost_idx;
++
++ if (q->flows[reduced_hash].set == CAKE_SET_BULK)
++ cake_inc_srchost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
+ }
+
+ if (allocate_dst) {
+@@ -840,9 +896,10 @@ found_src:
+ q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
+ found_dst:
+ dsthost_idx = outer_hash + k;
+- if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+- q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
+ q->flows[reduced_hash].dsthost = dsthost_idx;
++
++ if (q->flows[reduced_hash].set == CAKE_SET_BULK)
++ cake_inc_dsthost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
+ }
+ }
+
+@@ -1855,10 +1912,6 @@ static s32 cake_enqueue(struct sk_buff *
+
+ /* flowchain */
+ if (!flow->set || flow->set == CAKE_SET_DECAYING) {
+- struct cake_host *srchost = &b->hosts[flow->srchost];
+- struct cake_host *dsthost = &b->hosts[flow->dsthost];
+- u16 host_load = 1;
+-
+ if (!flow->set) {
+ list_add_tail(&flow->flowchain, &b->new_flows);
+ } else {
+@@ -1868,18 +1921,8 @@ static s32 cake_enqueue(struct sk_buff *
+ flow->set = CAKE_SET_SPARSE;
+ b->sparse_flow_count++;
+
+- if (cake_dsrc(q->flow_mode))
+- host_load = max(host_load, srchost->srchost_bulk_flow_count);
+-
+- if (cake_ddst(q->flow_mode))
+- host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
+-
+- flow->deficit = (b->flow_quantum *
+- quantum_div[host_load]) >> 16;
++ flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode);
+ } else if (flow->set == CAKE_SET_SPARSE_WAIT) {
+- struct cake_host *srchost = &b->hosts[flow->srchost];
+- struct cake_host *dsthost = &b->hosts[flow->dsthost];
+-
+ /* this flow was empty, accounted as a sparse flow, but actually
+ * in the bulk rotation.
+ */
+@@ -1887,12 +1930,8 @@ static s32 cake_enqueue(struct sk_buff *
+ b->sparse_flow_count--;
+ b->bulk_flow_count++;
+
+- if (cake_dsrc(q->flow_mode))
+- srchost->srchost_bulk_flow_count++;
+-
+- if (cake_ddst(q->flow_mode))
+- dsthost->dsthost_bulk_flow_count++;
+-
++ cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
++ cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
+ }
+
+ if (q->buffer_used > q->buffer_max_used)
+@@ -1949,13 +1988,11 @@ static struct sk_buff *cake_dequeue(stru
+ {
+ struct cake_sched_data *q = qdisc_priv(sch);
+ struct cake_tin_data *b = &q->tins[q->cur_tin];
+- struct cake_host *srchost, *dsthost;
+ ktime_t now = ktime_get();
+ struct cake_flow *flow;
+ struct list_head *head;
+ bool first_flow = true;
+ struct sk_buff *skb;
+- u16 host_load;
+ u64 delay;
+ u32 len;
+
+@@ -2055,11 +2092,6 @@ retry:
+ q->cur_flow = flow - b->flows;
+ first_flow = false;
+
+- /* triple isolation (modified DRR++) */
+- srchost = &b->hosts[flow->srchost];
+- dsthost = &b->hosts[flow->dsthost];
+- host_load = 1;
+-
+ /* flow isolation (DRR++) */
+ if (flow->deficit <= 0) {
+ /* Keep all flows with deficits out of the sparse and decaying
+@@ -2071,11 +2103,8 @@ retry:
+ b->sparse_flow_count--;
+ b->bulk_flow_count++;
+
+- if (cake_dsrc(q->flow_mode))
+- srchost->srchost_bulk_flow_count++;
+-
+- if (cake_ddst(q->flow_mode))
+- dsthost->dsthost_bulk_flow_count++;
++ cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
++ cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
+
+ flow->set = CAKE_SET_BULK;
+ } else {
+@@ -2087,19 +2116,7 @@ retry:
+ }
+ }
+
+- if (cake_dsrc(q->flow_mode))
+- host_load = max(host_load, srchost->srchost_bulk_flow_count);
+-
+- if (cake_ddst(q->flow_mode))
+- host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
+-
+- WARN_ON(host_load > CAKE_QUEUES);
+-
+- /* The shifted prandom_u32() is a way to apply dithering to
+- * avoid accumulating roundoff errors
+- */
+- flow->deficit += (b->flow_quantum * quantum_div[host_load] +
+- (prandom_u32() >> 16)) >> 16;
++ flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode);
+ list_move_tail(&flow->flowchain, &b->old_flows);
+
+ goto retry;
+@@ -2123,11 +2140,8 @@ retry:
+ if (flow->set == CAKE_SET_BULK) {
+ b->bulk_flow_count--;
+
+- if (cake_dsrc(q->flow_mode))
+- srchost->srchost_bulk_flow_count--;
+-
+- if (cake_ddst(q->flow_mode))
+- dsthost->dsthost_bulk_flow_count--;
++ cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
++ cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
+
+ b->decaying_flow_count++;
+ } else if (flow->set == CAKE_SET_SPARSE ||
+@@ -2145,12 +2159,8 @@ retry:
+ else if (flow->set == CAKE_SET_BULK) {
+ b->bulk_flow_count--;
+
+- if (cake_dsrc(q->flow_mode))
+- srchost->srchost_bulk_flow_count--;
+-
+- if (cake_ddst(q->flow_mode))
+- dsthost->dsthost_bulk_flow_count--;
+-
++ cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
++ cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
+ } else
+ b->decaying_flow_count--;
+
slimbus-messaging-free-transaction-id-in-delayed-interrupt-scenario.patch
eeprom-digsy_mtc-make-gpio-lookup-table-match-the-device.patch
mtd-rawnand-cadence-fix-unchecked-dereference.patch
+spi-mxs-fix-chipselect-glitch.patch
+nilfs2-move-page-release-outside-of-nilfs_delete_entry-and-nilfs_set_link.patch
+nilfs2-eliminate-staggered-calls-to-kunmap-in-nilfs_rename.patch
+nilfs2-handle-errors-that-nilfs_prepare_chunk-may-return.patch
+media-uvcvideo-only-save-async-fh-if-success.patch
+media-uvcvideo-remove-dangling-pointers.patch
+revert-media-uvcvideo-require-entities-to-have-a-non-zero-unique-id.patch
+bpf-vsock-invoke-proto-close-on-close.patch
+vsock-keep-the-binding-until-socket-destruction.patch
+vsock-orphan-socket-after-transport-release.patch
+sched-sch_cake-add-bounds-checks-to-host-bulk-flow-fairness-counts.patch
--- /dev/null
+From 269e31aecdd0b70f53a05def79480f15cbcc0fd6 Mon Sep 17 00:00:00 2001
+From: Ralf Schlatterbeck <rsc@runtux.com>
+Date: Fri, 2 Feb 2024 12:53:30 +0100
+Subject: spi-mxs: Fix chipselect glitch
+
+From: Ralf Schlatterbeck <rsc@runtux.com>
+
+commit 269e31aecdd0b70f53a05def79480f15cbcc0fd6 upstream.
+
+The mxs-dma engine was changed to use a new custom flag, but the change
+was never applied to the mxs SPI driver. As a result, chipselect is
+deasserted too early. Fix the chipselect glitch by passing the new flag
+in the mxs-spi driver.
+
+Fixes: ceeeb99cd821 ("dmaengine: mxs: rename custom flag")
+Signed-off-by: Ralf Schlatterbeck <rsc@runtux.com>
+Link: https://msgid.link/r/20240202115330.wxkbfmvd76sy3a6a@runtux.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Cc: Stefan Wahren <wahrenst@gmx.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/spi-mxs.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/spi/spi-mxs.c
++++ b/drivers/spi/spi-mxs.c
+@@ -39,6 +39,7 @@
+ #include <linux/spi/spi.h>
+ #include <linux/spi/mxs-spi.h>
+ #include <trace/events/spi.h>
++#include <linux/dma/mxs-dma.h>
+
+ #define DRIVER_NAME "mxs-spi"
+
+@@ -252,7 +253,7 @@ static int mxs_spi_txrx_dma(struct mxs_s
+ desc = dmaengine_prep_slave_sg(ssp->dmach,
+ &dma_xfer[sg_count].sg, 1,
+ (flags & TXRX_WRITE) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
++ DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END);
+
+ if (!desc) {
+ dev_err(ssp->dev,
--- /dev/null
+From fcdd2242c0231032fc84e1404315c245ae56322a Mon Sep 17 00:00:00 2001
+From: Michal Luczaj <mhal@rbox.co>
+Date: Tue, 28 Jan 2025 14:15:27 +0100
+Subject: vsock: Keep the binding until socket destruction
+
+From: Michal Luczaj <mhal@rbox.co>
+
+commit fcdd2242c0231032fc84e1404315c245ae56322a upstream.
+
+Preserve socket bindings; this includes both bindings resulting from an
+explicit bind() and those implicitly created by autobind during
+connect().
+
+This prevents the socket from being unbound during a transport
+reassignment, which fixes a use-after-free:
+
+ 1. vsock_create() (refcnt=1) calls vsock_insert_unbound() (refcnt=2)
+ 2. transport->release() calls vsock_remove_bound() without checking if
+ sk was bound and moved to bound list (refcnt=1)
+ 3. vsock_bind() assumes sk is in unbound list and before
+ __vsock_insert_bound(vsock_bound_sockets()) calls
+ __vsock_remove_bound() which does:
+ list_del_init(&vsk->bound_table); // nop
+ sock_put(&vsk->sk); // refcnt=0
+
+BUG: KASAN: slab-use-after-free in __vsock_bind+0x62e/0x730
+Read of size 4 at addr ffff88816b46a74c by task a.out/2057
+ dump_stack_lvl+0x68/0x90
+ print_report+0x174/0x4f6
+ kasan_report+0xb9/0x190
+ __vsock_bind+0x62e/0x730
+ vsock_bind+0x97/0xe0
+ __sys_bind+0x154/0x1f0
+ __x64_sys_bind+0x6e/0xb0
+ do_syscall_64+0x93/0x1b0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+Allocated by task 2057:
+ kasan_save_stack+0x1e/0x40
+ kasan_save_track+0x10/0x30
+ __kasan_slab_alloc+0x85/0x90
+ kmem_cache_alloc_noprof+0x131/0x450
+ sk_prot_alloc+0x5b/0x220
+ sk_alloc+0x2c/0x870
+ __vsock_create.constprop.0+0x2e/0xb60
+ vsock_create+0xe4/0x420
+ __sock_create+0x241/0x650
+ __sys_socket+0xf2/0x1a0
+ __x64_sys_socket+0x6e/0xb0
+ do_syscall_64+0x93/0x1b0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+Freed by task 2057:
+ kasan_save_stack+0x1e/0x40
+ kasan_save_track+0x10/0x30
+ kasan_save_free_info+0x37/0x60
+ __kasan_slab_free+0x4b/0x70
+ kmem_cache_free+0x1a1/0x590
+ __sk_destruct+0x388/0x5a0
+ __vsock_bind+0x5e1/0x730
+ vsock_bind+0x97/0xe0
+ __sys_bind+0x154/0x1f0
+ __x64_sys_bind+0x6e/0xb0
+ do_syscall_64+0x93/0x1b0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+refcount_t: addition on 0; use-after-free.
+WARNING: CPU: 7 PID: 2057 at lib/refcount.c:25 refcount_warn_saturate+0xce/0x150
+RIP: 0010:refcount_warn_saturate+0xce/0x150
+ __vsock_bind+0x66d/0x730
+ vsock_bind+0x97/0xe0
+ __sys_bind+0x154/0x1f0
+ __x64_sys_bind+0x6e/0xb0
+ do_syscall_64+0x93/0x1b0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+refcount_t: underflow; use-after-free.
+WARNING: CPU: 7 PID: 2057 at lib/refcount.c:28 refcount_warn_saturate+0xee/0x150
+RIP: 0010:refcount_warn_saturate+0xee/0x150
+ vsock_remove_bound+0x187/0x1e0
+ __vsock_release+0x383/0x4a0
+ vsock_release+0x90/0x120
+ __sock_release+0xa3/0x250
+ sock_close+0x14/0x20
+ __fput+0x359/0xa80
+ task_work_run+0x107/0x1d0
+ do_exit+0x847/0x2560
+ do_group_exit+0xb8/0x250
+ __x64_sys_exit_group+0x3a/0x50
+ x64_sys_call+0xfec/0x14f0
+ do_syscall_64+0x93/0x1b0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+Fixes: c0cfa2d8a788 ("vsock: add multi-transports support")
+Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
+Signed-off-by: Michal Luczaj <mhal@rbox.co>
+Link: https://patch.msgid.link/20250128-vsock-transport-vs-autobind-v3-1-1cf57065b770@rbox.co
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Luigi Leonardi <leonardi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/vmw_vsock/af_vsock.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -330,7 +330,10 @@ EXPORT_SYMBOL_GPL(vsock_find_connected_s
+
+ void vsock_remove_sock(struct vsock_sock *vsk)
+ {
+- vsock_remove_bound(vsk);
++ /* Transport reassignment must not remove the binding. */
++ if (sock_flag(sk_vsock(vsk), SOCK_DEAD))
++ vsock_remove_bound(vsk);
++
+ vsock_remove_connected(vsk);
+ }
+ EXPORT_SYMBOL_GPL(vsock_remove_sock);
+@@ -782,12 +785,13 @@ static void __vsock_release(struct sock
+ */
+ lock_sock_nested(sk, level);
+
++ sock_orphan(sk);
++
+ if (vsk->transport)
+ vsk->transport->release(vsk);
+ else if (sk->sk_type == SOCK_STREAM)
+ vsock_remove_sock(vsk);
+
+- sock_orphan(sk);
+ sk->sk_shutdown = SHUTDOWN_MASK;
+
+ skb_queue_purge(&sk->sk_receive_queue);
--- /dev/null
+From 78dafe1cf3afa02ed71084b350713b07e72a18fb Mon Sep 17 00:00:00 2001
+From: Michal Luczaj <mhal@rbox.co>
+Date: Mon, 10 Feb 2025 13:15:00 +0100
+Subject: vsock: Orphan socket after transport release
+
+From: Michal Luczaj <mhal@rbox.co>
+
+commit 78dafe1cf3afa02ed71084b350713b07e72a18fb upstream.
+
+During socket release, sock_orphan() is called without considering that
+it sets sk->sk_wq to NULL. Later, if SO_LINGER is enabled, this leads to
+a null pointer dereference in virtio_transport_wait_close().
+
+Orphan the socket only after transport release.
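+
+The NULL comes from sock_orphan() itself; abridged from
+include/net/sock.h:
+
+    static inline void sock_orphan(struct sock *sk)
+    {
+            sock_set_flag(sk, SOCK_DEAD);
+            sk_set_socket(sk, NULL);
+            sk->sk_wq = NULL;
+    }
+
+A lingering close then sleeps via add_wait_queue(sk_sleep(sk), ...), and
+sk_sleep() reads sk->sk_wq, hence the dereference in the 0x18-0x1f range
+shown below.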
+
+Partially reverts the 'Fixes:' commit.
+
+KASAN: null-ptr-deref in range [0x0000000000000018-0x000000000000001f]
+ lock_acquire+0x19e/0x500
+ _raw_spin_lock_irqsave+0x47/0x70
+ add_wait_queue+0x46/0x230
+ virtio_transport_release+0x4e7/0x7f0
+ __vsock_release+0xfd/0x490
+ vsock_release+0x90/0x120
+ __sock_release+0xa3/0x250
+ sock_close+0x14/0x20
+ __fput+0x35e/0xa90
+ __x64_sys_close+0x78/0xd0
+ do_syscall_64+0x93/0x1b0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+Reported-by: syzbot+9d55b199192a4be7d02c@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=9d55b199192a4be7d02c
+Fixes: fcdd2242c023 ("vsock: Keep the binding until socket destruction")
+Tested-by: Luigi Leonardi <leonardi@redhat.com>
+Reviewed-by: Luigi Leonardi <leonardi@redhat.com>
+Signed-off-by: Michal Luczaj <mhal@rbox.co>
+Link: https://patch.msgid.link/20250210-vsock-linger-nullderef-v3-1-ef6244d02b54@rbox.co
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Luigi Leonardi <leonardi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/vmw_vsock/af_vsock.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -785,13 +785,19 @@ static void __vsock_release(struct sock
+ */
+ lock_sock_nested(sk, level);
+
+- sock_orphan(sk);
++ /* Indicate to vsock_remove_sock() that the socket is being released and
++ * can be removed from the bound_table. Unlike transport reassignment
++ * case, where the socket must remain bound despite vsock_remove_sock()
++ * being called from the transport release() callback.
++ */
++ sock_set_flag(sk, SOCK_DEAD);
+
+ if (vsk->transport)
+ vsk->transport->release(vsk);
+ else if (sk->sk_type == SOCK_STREAM)
+ vsock_remove_sock(vsk);
+
++ sock_orphan(sk);
+ sk->sk_shutdown = SHUTDOWN_MASK;
+
+ skb_queue_purge(&sk->sk_receive_queue);