git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 10 Mar 2025 16:34:04 +0000 (17:34 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 10 Mar 2025 16:34:04 +0000 (17:34 +0100)
added patches:
media-uvcvideo-only-save-async-fh-if-success.patch
media-uvcvideo-remove-dangling-pointers.patch
nilfs2-eliminate-staggered-calls-to-kunmap-in-nilfs_rename.patch
nilfs2-handle-errors-that-nilfs_prepare_chunk-may-return.patch
nilfs2-move-page-release-outside-of-nilfs_delete_entry-and-nilfs_set_link.patch
sched-sch_cake-add-bounds-checks-to-host-bulk-flow-fairness-counts.patch
spi-mxs-fix-chipselect-glitch.patch

queue-5.4/media-uvcvideo-only-save-async-fh-if-success.patch [new file with mode: 0644]
queue-5.4/media-uvcvideo-remove-dangling-pointers.patch [new file with mode: 0644]
queue-5.4/nilfs2-eliminate-staggered-calls-to-kunmap-in-nilfs_rename.patch [new file with mode: 0644]
queue-5.4/nilfs2-handle-errors-that-nilfs_prepare_chunk-may-return.patch [new file with mode: 0644]
queue-5.4/nilfs2-move-page-release-outside-of-nilfs_delete_entry-and-nilfs_set_link.patch [new file with mode: 0644]
queue-5.4/sched-sch_cake-add-bounds-checks-to-host-bulk-flow-fairness-counts.patch [new file with mode: 0644]
queue-5.4/series
queue-5.4/spi-mxs-fix-chipselect-glitch.patch [new file with mode: 0644]

diff --git a/queue-5.4/media-uvcvideo-only-save-async-fh-if-success.patch b/queue-5.4/media-uvcvideo-only-save-async-fh-if-success.patch
new file mode 100644 (file)
index 0000000..f18bc4d
--- /dev/null
@@ -0,0 +1,85 @@
+From d9fecd096f67a4469536e040a8a10bbfb665918b Mon Sep 17 00:00:00 2001
+From: Ricardo Ribalda <ribalda@chromium.org>
+Date: Tue, 3 Dec 2024 21:20:08 +0000
+Subject: media: uvcvideo: Only save async fh if success
+
+From: Ricardo Ribalda <ribalda@chromium.org>
+
+commit d9fecd096f67a4469536e040a8a10bbfb665918b upstream.
+
+Currently we keep a reference to the active fh for any call to
+uvc_ctrl_set(), regardless of whether it is an actual set, just a try,
+or an operation that the device refused.
+
+We should only keep the file handle if the device actually accepted
+the operation.
+
+Cc: stable@vger.kernel.org
+Fixes: e5225c820c05 ("media: uvcvideo: Send a control event when a Control Change interrupt arrives")
+Suggested-by: Hans de Goede <hdegoede@redhat.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Ricardo Ribalda <ribalda@chromium.org>
+Link: https://lore.kernel.org/r/20241203-uvc-fix-async-v6-1-26c867231118@chromium.org
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+
+---
+ drivers/media/usb/uvc/uvc_ctrl.c |   16 ++++++++++------
+ 1 file changed, 10 insertions(+), 6 deletions(-)
+
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -1528,7 +1528,9 @@ int uvc_ctrl_begin(struct uvc_video_chai
+ }
+ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
+-      struct uvc_entity *entity, int rollback)
++                                struct uvc_fh *handle,
++                                struct uvc_entity *entity,
++                                int rollback)
+ {
+       struct uvc_control *ctrl;
+       unsigned int i;
+@@ -1572,6 +1574,10 @@ static int uvc_ctrl_commit_entity(struct
+               if (ret < 0)
+                       return ret;
++
++              if (!rollback && handle &&
++                  ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
++                      ctrl->handle = handle;
+       }
+       return 0;
+@@ -1587,7 +1593,8 @@ int __uvc_ctrl_commit(struct uvc_fh *han
+       /* Find the control. */
+       list_for_each_entry(entity, &chain->entities, chain) {
+-              ret = uvc_ctrl_commit_entity(chain->dev, entity, rollback);
++              ret = uvc_ctrl_commit_entity(chain->dev, handle, entity,
++                                           rollback);
+               if (ret < 0)
+                       goto done;
+       }
+@@ -1711,9 +1718,6 @@ int uvc_ctrl_set(struct uvc_fh *handle,
+       mapping->set(mapping, value,
+               uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
+-      if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
+-              ctrl->handle = handle;
+-
+       ctrl->dirty = 1;
+       ctrl->modified = 1;
+       return 0;
+@@ -2042,7 +2046,7 @@ int uvc_ctrl_restore_values(struct uvc_d
+                       ctrl->dirty = 1;
+               }
+-              ret = uvc_ctrl_commit_entity(dev, entity, 0);
++              ret = uvc_ctrl_commit_entity(dev, NULL, entity, 0);
+               if (ret < 0)
+                       return ret;
+       }
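
[Editorial note] The pattern in the patch above is worth calling out: the
back-reference (ctrl->handle) is recorded only once the hardware has
actually accepted the set, never on a try or a failed commit. A minimal
userspace sketch of that ordering, using hypothetical names rather than
the driver's real API:

    #include <stdio.h>

    struct ctrl {
            const void *owner;   /* models ctrl->handle                */
            int async;           /* models UVC_CTRL_FLAG_ASYNCHRONOUS  */
    };

    /* Record the owner only after the device accepted the operation. */
    static int commit_ctrl(struct ctrl *c, const void *owner,
                           int rollback, int hw_err)
    {
            if (hw_err)
                    return hw_err;        /* refused: keep no reference */
            if (!rollback && owner && c->async)
                    c->owner = owner;     /* accepted: reference is justified */
            return 0;
    }

    int main(void)
    {
            struct ctrl c = { .owner = NULL, .async = 1 };
            int fh;

            commit_ctrl(&c, &fh, 0, -5);  /* failure: owner stays NULL */
            printf("after failed set: %p\n", c.owner);
            commit_ctrl(&c, &fh, 0, 0);   /* success: owner recorded   */
            printf("after good set:   %p\n", c.owner);
            return 0;
    }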
diff --git a/queue-5.4/media-uvcvideo-remove-dangling-pointers.patch b/queue-5.4/media-uvcvideo-remove-dangling-pointers.patch
new file mode 100644 (file)
index 0000000..b7bbfa6
--- /dev/null
@@ -0,0 +1,174 @@
+From 221cd51efe4565501a3dbf04cc011b537dcce7fb Mon Sep 17 00:00:00 2001
+From: Ricardo Ribalda <ribalda@chromium.org>
+Date: Tue, 3 Dec 2024 21:20:10 +0000
+Subject: media: uvcvideo: Remove dangling pointers
+
+From: Ricardo Ribalda <ribalda@chromium.org>
+
+commit 221cd51efe4565501a3dbf04cc011b537dcce7fb upstream.
+
+When an async control is written, we copy a pointer to the file handle
+that started the operation. That pointer will be used when the device
+is done, which could be anytime in the future.
+
+If the user closes that file descriptor, its structure will be freed,
+leaving one dangling pointer per pending async control that the driver
+will later try to use.
+
+Clean up all the dangling pointers during release().
+
+To avoid a performance penalty in the most common case (no pending
+async operation), a counter of pending async controls is introduced so
+the cleanup walk can be skipped when there is nothing to release.
+
+Cc: stable@vger.kernel.org
+Fixes: e5225c820c05 ("media: uvcvideo: Send a control event when a Control Change interrupt arrives")
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Ricardo Ribalda <ribalda@chromium.org>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Link: https://lore.kernel.org/r/20241203-uvc-fix-async-v6-3-26c867231118@chromium.org
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/usb/uvc/uvc_ctrl.c |   63 +++++++++++++++++++++++++++++++++++++--
+ drivers/media/usb/uvc/uvc_v4l2.c |    2 +
+ drivers/media/usb/uvc/uvcvideo.h |    9 ++++-
+ 3 files changed, 71 insertions(+), 3 deletions(-)
+
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -1306,6 +1306,40 @@ static void uvc_ctrl_send_slave_event(st
+       uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes);
+ }
++static void uvc_ctrl_set_handle(struct uvc_fh *handle, struct uvc_control *ctrl,
++                              struct uvc_fh *new_handle)
++{
++      lockdep_assert_held(&handle->chain->ctrl_mutex);
++
++      if (new_handle) {
++              if (ctrl->handle)
++                      dev_warn_ratelimited(&handle->stream->dev->udev->dev,
++                                           "UVC non compliance: Setting an async control with a pending operation.");
++
++              if (new_handle == ctrl->handle)
++                      return;
++
++              if (ctrl->handle) {
++                      WARN_ON(!ctrl->handle->pending_async_ctrls);
++                      if (ctrl->handle->pending_async_ctrls)
++                              ctrl->handle->pending_async_ctrls--;
++              }
++
++              ctrl->handle = new_handle;
++              handle->pending_async_ctrls++;
++              return;
++      }
++
++      /* Cannot clear the handle for a control not owned by us. */
++      if (WARN_ON(ctrl->handle != handle))
++              return;
++
++      ctrl->handle = NULL;
++      if (WARN_ON(!handle->pending_async_ctrls))
++              return;
++      handle->pending_async_ctrls--;
++}
++
+ void uvc_ctrl_status_event(struct uvc_video_chain *chain,
+                          struct uvc_control *ctrl, const u8 *data)
+ {
+@@ -1316,7 +1350,8 @@ void uvc_ctrl_status_event(struct uvc_vi
+       mutex_lock(&chain->ctrl_mutex);
+       handle = ctrl->handle;
+-      ctrl->handle = NULL;
++      if (handle)
++              uvc_ctrl_set_handle(handle, ctrl, NULL);
+       list_for_each_entry(mapping, &ctrl->info.mappings, list) {
+               s32 value = __uvc_ctrl_get_value(mapping, data);
+@@ -1577,7 +1612,7 @@ static int uvc_ctrl_commit_entity(struct
+               if (!rollback && handle &&
+                   ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
+-                      ctrl->handle = handle;
++                      uvc_ctrl_set_handle(handle, ctrl, handle);
+       }
+       return 0;
+@@ -2378,6 +2413,30 @@ int uvc_ctrl_init_device(struct uvc_devi
+       return 0;
+ }
++void uvc_ctrl_cleanup_fh(struct uvc_fh *handle)
++{
++      struct uvc_entity *entity;
++
++      mutex_lock(&handle->chain->ctrl_mutex);
++
++      if (!handle->pending_async_ctrls) {
++              mutex_unlock(&handle->chain->ctrl_mutex);
++              return;
++      }
++
++      list_for_each_entry(entity, &handle->chain->dev->entities, list) {
++              unsigned int i;
++              for (i = 0; i < entity->ncontrols; ++i) {
++                      if (entity->controls[i].handle != handle)
++                              continue;
++                      uvc_ctrl_set_handle(handle, &entity->controls[i], NULL);
++              }
++      }
++
++      WARN_ON(handle->pending_async_ctrls);
++      mutex_unlock(&handle->chain->ctrl_mutex);
++}
++
+ /*
+  * Cleanup device controls.
+  */
+--- a/drivers/media/usb/uvc/uvc_v4l2.c
++++ b/drivers/media/usb/uvc/uvc_v4l2.c
+@@ -589,6 +589,8 @@ static int uvc_v4l2_release(struct file
+       uvc_trace(UVC_TRACE_CALLS, "uvc_v4l2_release\n");
++      uvc_ctrl_cleanup_fh(handle);
++
+       /* Only free resources if this is a privileged handle. */
+       if (uvc_has_privileges(handle))
+               uvc_queue_release(&stream->queue);
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -447,7 +447,11 @@ struct uvc_video_chain {
+       struct uvc_entity *processing;          /* Processing unit */
+       struct uvc_entity *selector;            /* Selector unit */
+-      struct mutex ctrl_mutex;                /* Protects ctrl.info */
++      struct mutex ctrl_mutex;                /*
++                                               * Protects ctrl.info,
++                                               * ctrl.handle and
++                                               * uvc_fh.pending_async_ctrls
++                                               */
+       struct v4l2_prio_state prio;            /* V4L2 priority state */
+       u32 caps;                               /* V4L2 chain-wide caps */
+@@ -693,6 +697,7 @@ struct uvc_fh {
+       struct uvc_video_chain *chain;
+       struct uvc_streaming *stream;
+       enum uvc_handle_state state;
++      unsigned int pending_async_ctrls;
+ };
+ struct uvc_driver {
+@@ -865,6 +870,8 @@ int uvc_ctrl_set(struct uvc_fh *handle,
+ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
+                     struct uvc_xu_control_query *xqry);
++void uvc_ctrl_cleanup_fh(struct uvc_fh *handle);
++
+ /* Utility functions */
+ void uvc_simplify_fraction(u32 *numerator, u32 *denominator,
+                          unsigned int n_terms, unsigned int threshold);
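
[Editorial note] The counter-plus-walk scheme introduced here generalizes
well: keep a per-owner count of outstanding back-pointers so the common
close path stays O(1), and only scan and clear when something is actually
pending. A self-contained sketch under those assumptions (names are
hypothetical, not the uvcvideo API):

    #include <assert.h>
    #include <stddef.h>

    enum { NCTRLS = 8 };

    struct handle { unsigned int pending; };   /* models uvc_fh      */
    struct ctrl   { struct handle *owner; };   /* models uvc_control */

    /* Called at release(): no control may keep a pointer to a freed
     * handle.
     */
    static void cleanup_handle(struct handle *h, struct ctrl ctrls[NCTRLS])
    {
            if (!h->pending)                   /* fast path: no async work */
                    return;
            for (size_t i = 0; i < NCTRLS; i++) {
                    if (ctrls[i].owner != h)
                            continue;
                    ctrls[i].owner = NULL;     /* would otherwise dangle */
                    h->pending--;
            }
            assert(h->pending == 0);           /* mirrors the WARN_ON() */
    }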
diff --git a/queue-5.4/nilfs2-eliminate-staggered-calls-to-kunmap-in-nilfs_rename.patch b/queue-5.4/nilfs2-eliminate-staggered-calls-to-kunmap-in-nilfs_rename.patch
new file mode 100644 (file)
index 0000000..4e74116
--- /dev/null
@@ -0,0 +1,52 @@
+From 8cf57c6df818f58fdad16a909506be213623a88e Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Mon, 27 Nov 2023 23:30:21 +0900
+Subject: nilfs2: eliminate staggered calls to kunmap in nilfs_rename
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit 8cf57c6df818f58fdad16a909506be213623a88e upstream.
+
+In nilfs_rename(), the calls to nilfs_put_page() that release pages
+obtained with nilfs_find_entry() or nilfs_dotdot() are staggered rather
+than nested in the normal path.
+
+When the kernel memory mapping method is switched from kmap to
+kmap_local_{page,folio}, this violates the constraint that
+kunmap_local() calls must be made in the reverse order of the mappings.
+
+Swap the order of nilfs_put_page calls where the kmap sections of multiple
+pages overlap so that they are nested, allowing direct replacement of
+nilfs_put_page() -> unmap_and_put_page().
+
+Without this reordering, that replacement will cause a kernel WARNING in
+kunmap_local_indexed() on architectures with high memory mapping.
+
+Link: https://lkml.kernel.org/r/20231127143036.2425-3-konishi.ryusuke@gmail.com
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: ee70999a988b ("nilfs2: handle errors that nilfs_prepare_chunk() may return")
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/namei.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/nilfs2/namei.c
++++ b/fs/nilfs2/namei.c
+@@ -428,13 +428,14 @@ static int nilfs_rename(struct inode *ol
+       old_inode->i_ctime = current_time(old_inode);
+       nilfs_delete_entry(old_de, old_page);
+-      nilfs_put_page(old_page);
+       if (dir_de) {
+               nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
+               nilfs_put_page(dir_page);
+               drop_nlink(old_dir);
+       }
++      nilfs_put_page(old_page);
++
+       nilfs_mark_inode_dirty(old_dir);
+       nilfs_mark_inode_dirty(old_inode);
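
[Editorial note] The constraint driving this reordering is that
kmap_local mappings are stack-like: they must be released in the reverse
order of creation. An illustrative fragment (not compilable on its own,
though the two mapping calls are the real kernel API):

    void *a = kmap_local_page(old_page);   /* outer mapping              */
    void *b = kmap_local_page(dir_page);   /* inner mapping              */
    /* ... modify both directory pages ... */
    kunmap_local(b);                       /* inner must be undone first */
    kunmap_local(a);                       /* then outer; swapping these
                                            * WARNs on highmem builds    */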
diff --git a/queue-5.4/nilfs2-handle-errors-that-nilfs_prepare_chunk-may-return.patch b/queue-5.4/nilfs2-handle-errors-that-nilfs_prepare_chunk-may-return.patch
new file mode 100644 (file)
index 0000000..696e476
--- /dev/null
@@ -0,0 +1,158 @@
+From ee70999a988b8abc3490609142f50ebaa8344432 Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Sat, 11 Jan 2025 23:26:35 +0900
+Subject: nilfs2: handle errors that nilfs_prepare_chunk() may return
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit ee70999a988b8abc3490609142f50ebaa8344432 upstream.
+
+Patch series "nilfs2: fix issues with rename operations".
+
+This series fixes BUG_ON check failures reported by syzbot around rename
+operations, and a minor behavioral issue where the mtime of a child
+directory changes when it is renamed instead of moved.
+
+
+This patch (of 2):
+
+The directory manipulation routines nilfs_set_link() and
+nilfs_delete_entry() rewrite the directory entry in the folio/page
+previously read by nilfs_find_entry(), so error handling is omitted on the
+assumption that nilfs_prepare_chunk(), which prepares the buffer for
+rewriting, will always succeed for these.  And if an error is returned, it
+triggers the legacy BUG_ON() checks in each routine.
+
+This assumption is wrong, as proven by syzbot: the buffer layer called by
+nilfs_prepare_chunk() may call nilfs_get_block() if necessary, which may
+fail due to metadata corruption or other reasons.  This has been there all
+along, but improved sanity checks and error handling may have made it more
+reproducible in fuzzing tests.
+
+Fix this issue by adding missing error paths in nilfs_set_link(),
+nilfs_delete_entry(), and their caller nilfs_rename().
+
+Link: https://lkml.kernel.org/r/20250111143518.7901-1-konishi.ryusuke@gmail.com
+Link: https://lkml.kernel.org/r/20250111143518.7901-2-konishi.ryusuke@gmail.com
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reported-by: syzbot+32c3706ebf5d95046ea1@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=32c3706ebf5d95046ea1
+Reported-by: syzbot+1097e95f134f37d9395c@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=1097e95f134f37d9395c
+Fixes: 2ba466d74ed7 ("nilfs2: directory entry operations")
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/dir.c   |   13 ++++++++++---
+ fs/nilfs2/namei.c |   29 +++++++++++++++--------------
+ fs/nilfs2/nilfs.h |    4 ++--
+ 3 files changed, 27 insertions(+), 19 deletions(-)
+
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -444,7 +444,7 @@ int nilfs_inode_by_name(struct inode *di
+       return 0;
+ }
+-void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
++int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
+                   struct page *page, struct inode *inode)
+ {
+       unsigned int from = (char *)de - (char *)page_address(page);
+@@ -454,11 +454,15 @@ void nilfs_set_link(struct inode *dir, s
+       lock_page(page);
+       err = nilfs_prepare_chunk(page, from, to);
+-      BUG_ON(err);
++      if (unlikely(err)) {
++              unlock_page(page);
++              return err;
++      }
+       de->inode = cpu_to_le64(inode->i_ino);
+       nilfs_set_de_type(de, inode);
+       nilfs_commit_chunk(page, mapping, from, to);
+       dir->i_mtime = dir->i_ctime = current_time(dir);
++      return 0;
+ }
+ /*
+@@ -590,7 +594,10 @@ int nilfs_delete_entry(struct nilfs_dir_
+               from = (char *)pde - (char *)page_address(page);
+       lock_page(page);
+       err = nilfs_prepare_chunk(page, from, to);
+-      BUG_ON(err);
++      if (unlikely(err)) {
++              unlock_page(page);
++              goto out;
++      }
+       if (pde)
+               pde->rec_len = nilfs_rec_len_to_disk(to - from);
+       dir->inode = 0;
+--- a/fs/nilfs2/namei.c
++++ b/fs/nilfs2/namei.c
+@@ -403,8 +403,10 @@ static int nilfs_rename(struct inode *ol
+                       err = PTR_ERR(new_de);
+                       goto out_dir;
+               }
+-              nilfs_set_link(new_dir, new_de, new_page, old_inode);
++              err = nilfs_set_link(new_dir, new_de, new_page, old_inode);
+               nilfs_put_page(new_page);
++              if (unlikely(err))
++                      goto out_dir;
+               nilfs_mark_inode_dirty(new_dir);
+               new_inode->i_ctime = current_time(new_inode);
+               if (dir_de)
+@@ -427,28 +429,27 @@ static int nilfs_rename(struct inode *ol
+        */
+       old_inode->i_ctime = current_time(old_inode);
+-      nilfs_delete_entry(old_de, old_page);
+-
+-      if (dir_de) {
+-              nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
+-              nilfs_put_page(dir_page);
+-              drop_nlink(old_dir);
++      err = nilfs_delete_entry(old_de, old_page);
++      if (likely(!err)) {
++              if (dir_de) {
++                      err = nilfs_set_link(old_inode, dir_de, dir_page,
++                                           new_dir);
++                      drop_nlink(old_dir);
++              }
++              nilfs_mark_inode_dirty(old_dir);
+       }
+-      nilfs_put_page(old_page);
+-
+-      nilfs_mark_inode_dirty(old_dir);
+       nilfs_mark_inode_dirty(old_inode);
+-      err = nilfs_transaction_commit(old_dir->i_sb);
+-      return err;
+-
+ out_dir:
+       if (dir_de)
+               nilfs_put_page(dir_page);
+ out_old:
+       nilfs_put_page(old_page);
+ out:
+-      nilfs_transaction_abort(old_dir->i_sb);
++      if (likely(!err))
++              err = nilfs_transaction_commit(old_dir->i_sb);
++      else
++              nilfs_transaction_abort(old_dir->i_sb);
+       return err;
+ }
+--- a/fs/nilfs2/nilfs.h
++++ b/fs/nilfs2/nilfs.h
+@@ -240,8 +240,8 @@ nilfs_find_entry(struct inode *, const s
+ extern int nilfs_delete_entry(struct nilfs_dir_entry *, struct page *);
+ extern int nilfs_empty_dir(struct inode *);
+ extern struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct page **);
+-extern void nilfs_set_link(struct inode *, struct nilfs_dir_entry *,
+-                         struct page *, struct inode *);
++int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
++                 struct page *page, struct inode *inode);
+ static inline void nilfs_put_page(struct page *page)
+ {
diff --git a/queue-5.4/nilfs2-move-page-release-outside-of-nilfs_delete_entry-and-nilfs_set_link.patch b/queue-5.4/nilfs2-move-page-release-outside-of-nilfs_delete_entry-and-nilfs_set_link.patch
new file mode 100644 (file)
index 0000000..e21b792
--- /dev/null
@@ -0,0 +1,174 @@
+From 584db20c181f5e28c0386d7987406ace7fbd3e49 Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Mon, 27 Nov 2023 23:30:20 +0900
+Subject: nilfs2: move page release outside of nilfs_delete_entry and nilfs_set_link
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit 584db20c181f5e28c0386d7987406ace7fbd3e49 upstream.
+
+Patch series "nilfs2: Folio conversions for directory paths".
+
+This series applies page->folio conversions to nilfs2 directory
+operations.  This reduces hidden compound_head() calls and also converts
+deprecated kmap calls to kmap_local in the directory code.
+
+Although nilfs2 does not yet support large folios, Matthew has done his
+best here to include support for large folios, which will be needed for
+devices with large block sizes.
+
+This series corresponds to the second half of the original post [1], but
+with two complementary patches inserted at the beginning and some
+adjustments, to prevent a kmap_local constraint violation found during
+testing with highmem mapping.
+
+[1] https://lkml.kernel.org/r/20231106173903.1734114-1-willy@infradead.org
+
+I have reviewed all changes and tested this for regular and small block
+sizes, both on machines with and without highmem mapping.  No issues
+found.
+
+
+This patch (of 17):
+
+In a few directory operations, the call to nilfs_put_page() for a page
+obtained using nilfs_find_entry() or nilfs_dotdot() is hidden in
+nilfs_set_link() and nilfs_delete_entry(), making it difficult to track
+page release and preventing change of its call position.
+
+By moving nilfs_put_page() out of these functions, this makes the page
+get/put correspondence clearer and makes it easier to swap
+nilfs_put_page() calls (and kunmap calls within them) when modifying
+multiple directory entries simultaneously in nilfs_rename().
+
+Also, update comments for nilfs_set_link() and nilfs_delete_entry() to
+reflect changes in their behavior.
+
+To make nilfs_put_page() visible from namei.c, this moves its definition
+to nilfs.h and replaces existing equivalents to use it, but the exposure
+of that definition is temporary and will be removed on a later kmap ->
+kmap_local conversion.
+
+Link: https://lkml.kernel.org/r/20231127143036.2425-1-konishi.ryusuke@gmail.com
+Link: https://lkml.kernel.org/r/20231127143036.2425-2-konishi.ryusuke@gmail.com
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: ee70999a988b ("nilfs2: handle errors that nilfs_prepare_chunk() may return")
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/dir.c   |   11 +----------
+ fs/nilfs2/namei.c |   13 +++++++------
+ fs/nilfs2/nilfs.h |    6 ++++++
+ 3 files changed, 14 insertions(+), 16 deletions(-)
+
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -64,12 +64,6 @@ static inline unsigned int nilfs_chunk_s
+       return inode->i_sb->s_blocksize;
+ }
+-static inline void nilfs_put_page(struct page *page)
+-{
+-      kunmap(page);
+-      put_page(page);
+-}
+-
+ /*
+  * Return the offset into page `page_nr' of the last valid
+  * byte in that page, plus one.
+@@ -450,7 +444,6 @@ int nilfs_inode_by_name(struct inode *di
+       return 0;
+ }
+-/* Releases the page */
+ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
+                   struct page *page, struct inode *inode)
+ {
+@@ -465,7 +458,6 @@ void nilfs_set_link(struct inode *dir, s
+       de->inode = cpu_to_le64(inode->i_ino);
+       nilfs_set_de_type(de, inode);
+       nilfs_commit_chunk(page, mapping, from, to);
+-      nilfs_put_page(page);
+       dir->i_mtime = dir->i_ctime = current_time(dir);
+ }
+@@ -569,7 +561,7 @@ out_unlock:
+ /*
+  * nilfs_delete_entry deletes a directory entry by merging it with the
+- * previous entry. Page is up-to-date. Releases the page.
++ * previous entry. Page is up-to-date.
+  */
+ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
+ {
+@@ -605,7 +597,6 @@ int nilfs_delete_entry(struct nilfs_dir_
+       nilfs_commit_chunk(page, mapping, from, to);
+       inode->i_ctime = inode->i_mtime = current_time(inode);
+ out:
+-      nilfs_put_page(page);
+       return err;
+ }
+--- a/fs/nilfs2/namei.c
++++ b/fs/nilfs2/namei.c
+@@ -295,6 +295,7 @@ static int nilfs_do_unlink(struct inode
+               set_nlink(inode, 1);
+       }
+       err = nilfs_delete_entry(de, page);
++      nilfs_put_page(page);
+       if (err)
+               goto out;
+@@ -403,6 +404,7 @@ static int nilfs_rename(struct inode *ol
+                       goto out_dir;
+               }
+               nilfs_set_link(new_dir, new_de, new_page, old_inode);
++              nilfs_put_page(new_page);
+               nilfs_mark_inode_dirty(new_dir);
+               new_inode->i_ctime = current_time(new_inode);
+               if (dir_de)
+@@ -426,9 +428,11 @@ static int nilfs_rename(struct inode *ol
+       old_inode->i_ctime = current_time(old_inode);
+       nilfs_delete_entry(old_de, old_page);
++      nilfs_put_page(old_page);
+       if (dir_de) {
+               nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
++              nilfs_put_page(dir_page);
+               drop_nlink(old_dir);
+       }
+       nilfs_mark_inode_dirty(old_dir);
+@@ -438,13 +442,10 @@ static int nilfs_rename(struct inode *ol
+       return err;
+ out_dir:
+-      if (dir_de) {
+-              kunmap(dir_page);
+-              put_page(dir_page);
+-      }
++      if (dir_de)
++              nilfs_put_page(dir_page);
+ out_old:
+-      kunmap(old_page);
+-      put_page(old_page);
++      nilfs_put_page(old_page);
+ out:
+       nilfs_transaction_abort(old_dir->i_sb);
+       return err;
+--- a/fs/nilfs2/nilfs.h
++++ b/fs/nilfs2/nilfs.h
+@@ -243,6 +243,12 @@ extern struct nilfs_dir_entry *nilfs_dot
+ extern void nilfs_set_link(struct inode *, struct nilfs_dir_entry *,
+                          struct page *, struct inode *);
++static inline void nilfs_put_page(struct page *page)
++{
++      kunmap(page);
++      put_page(page);
++}
++
+ /* file.c */
+ extern int nilfs_sync_file(struct file *, loff_t, loff_t, int);
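
[Editorial note] Moving the release out of the callee is itself the point
of this patch: the function that obtains a resource is the one that frees
it, so get/put pairs can be audited at a glance. Schematically (an
illustrative fragment, with arguments elided):

    de = nilfs_find_entry(dir, qstr, &page); /* get_page() + kmap()     */
    err = nilfs_delete_entry(de, page);      /* no longer puts the page */
    nilfs_put_page(page);                    /* release, visible at the
                                              * call site               */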
diff --git a/queue-5.4/sched-sch_cake-add-bounds-checks-to-host-bulk-flow-fairness-counts.patch b/queue-5.4/sched-sch_cake-add-bounds-checks-to-host-bulk-flow-fairness-counts.patch
new file mode 100644 (file)
index 0000000..a4ad58d
--- /dev/null
@@ -0,0 +1,288 @@
+From 737d4d91d35b5f7fa5bb442651472277318b0bfd Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@redhat.com>
+Date: Tue, 7 Jan 2025 13:01:05 +0100
+Subject: sched: sch_cake: add bounds checks to host bulk flow fairness counts
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Toke Høiland-Jørgensen <toke@redhat.com>
+
+commit 737d4d91d35b5f7fa5bb442651472277318b0bfd upstream.
+
+Even though we fixed a logic error in the commit cited below, syzbot
+still managed to trigger an underflow of the per-host bulk flow
+counters, leading to an out of bounds memory access.
+
+To avoid any such logic errors causing out of bounds memory accesses,
+this commit factors out all accesses to the per-host bulk flow counters
+to a series of helpers that perform bounds-checking before any
+increments and decrements. This also has the benefit of improving
+readability by moving the conditional checks for the flow mode into
+these helpers, instead of having them spread out throughout the
+code (which was the cause of the original logic error).
+
+As part of this change, the flow quantum calculation is consolidated
+into a helper function, which means that the dithering applied to the
+host load scaling is now applied both in the DRR rotation and when a
+sparse flow's quantum is first initiated. The only user-visible effect
+of this is that the maximum packet size that can be sent while a flow
+stays sparse will now vary with +/- one byte in some cases. This should
+not make a noticeable difference in practice, and thus it's not worth
+complicating the code to preserve the old behaviour.
+
+Fixes: 546ea84d07e3 ("sched: sch_cake: fix bulk flow accounting logic for host fairness")
+Reported-by: syzbot+f63600d288bfb7057424@syzkaller.appspotmail.com
+Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Acked-by: Dave Taht <dave.taht@gmail.com>
+Link: https://patch.msgid.link/20250107120105.70685-1-toke@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[Hagar: needed contextual fixes due to missing commit 7e3cf0843fe5]
+Signed-off-by: Hagar Hemdan <hagarhem@amazon.com>
+Reviewed-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_cake.c |  140 +++++++++++++++++++++++++++------------------------
+ 1 file changed, 75 insertions(+), 65 deletions(-)
+
+--- a/net/sched/sch_cake.c
++++ b/net/sched/sch_cake.c
+@@ -622,6 +622,63 @@ static bool cake_ddst(int flow_mode)
+       return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
+ }
++static void cake_dec_srchost_bulk_flow_count(struct cake_tin_data *q,
++                                           struct cake_flow *flow,
++                                           int flow_mode)
++{
++      if (likely(cake_dsrc(flow_mode) &&
++                 q->hosts[flow->srchost].srchost_bulk_flow_count))
++              q->hosts[flow->srchost].srchost_bulk_flow_count--;
++}
++
++static void cake_inc_srchost_bulk_flow_count(struct cake_tin_data *q,
++                                           struct cake_flow *flow,
++                                           int flow_mode)
++{
++      if (likely(cake_dsrc(flow_mode) &&
++                 q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES))
++              q->hosts[flow->srchost].srchost_bulk_flow_count++;
++}
++
++static void cake_dec_dsthost_bulk_flow_count(struct cake_tin_data *q,
++                                           struct cake_flow *flow,
++                                           int flow_mode)
++{
++      if (likely(cake_ddst(flow_mode) &&
++                 q->hosts[flow->dsthost].dsthost_bulk_flow_count))
++              q->hosts[flow->dsthost].dsthost_bulk_flow_count--;
++}
++
++static void cake_inc_dsthost_bulk_flow_count(struct cake_tin_data *q,
++                                           struct cake_flow *flow,
++                                           int flow_mode)
++{
++      if (likely(cake_ddst(flow_mode) &&
++                 q->hosts[flow->dsthost].dsthost_bulk_flow_count < CAKE_QUEUES))
++              q->hosts[flow->dsthost].dsthost_bulk_flow_count++;
++}
++
++static u16 cake_get_flow_quantum(struct cake_tin_data *q,
++                               struct cake_flow *flow,
++                               int flow_mode)
++{
++      u16 host_load = 1;
++
++      if (cake_dsrc(flow_mode))
++              host_load = max(host_load,
++                              q->hosts[flow->srchost].srchost_bulk_flow_count);
++
++      if (cake_ddst(flow_mode))
++              host_load = max(host_load,
++                              q->hosts[flow->dsthost].dsthost_bulk_flow_count);
++
++      /* The shifted prandom_u32() is a way to apply dithering to avoid
++       * accumulating roundoff errors
++       */
++      return (q->flow_quantum * quantum_div[host_load] +
++              (prandom_u32() >> 16)) >> 16;
++}
++
+ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
+                    int flow_mode, u16 flow_override, u16 host_override)
+ {
+@@ -753,10 +810,8 @@ skip_hash:
+               allocate_dst = cake_ddst(flow_mode);
+               if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
+-                      if (allocate_src)
+-                              q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
+-                      if (allocate_dst)
+-                              q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
++                      cake_dec_srchost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
++                      cake_dec_dsthost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
+               }
+ found:
+               /* reserve queue for future packets in same flow */
+@@ -781,9 +836,10 @@ found:
+                       q->hosts[outer_hash + k].srchost_tag = srchost_hash;
+ found_src:
+                       srchost_idx = outer_hash + k;
+-                      if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+-                              q->hosts[srchost_idx].srchost_bulk_flow_count++;
+                       q->flows[reduced_hash].srchost = srchost_idx;
++
++                      if (q->flows[reduced_hash].set == CAKE_SET_BULK)
++                              cake_inc_srchost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
+               }
+               if (allocate_dst) {
+@@ -804,9 +860,10 @@ found_src:
+                       q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
+ found_dst:
+                       dsthost_idx = outer_hash + k;
+-                      if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+-                              q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
+                       q->flows[reduced_hash].dsthost = dsthost_idx;
++
++                      if (q->flows[reduced_hash].set == CAKE_SET_BULK)
++                              cake_inc_dsthost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
+               }
+       }
+@@ -1821,10 +1878,6 @@ static s32 cake_enqueue(struct sk_buff *
+       /* flowchain */
+       if (!flow->set || flow->set == CAKE_SET_DECAYING) {
+-              struct cake_host *srchost = &b->hosts[flow->srchost];
+-              struct cake_host *dsthost = &b->hosts[flow->dsthost];
+-              u16 host_load = 1;
+-
+               if (!flow->set) {
+                       list_add_tail(&flow->flowchain, &b->new_flows);
+               } else {
+@@ -1834,18 +1887,8 @@ static s32 cake_enqueue(struct sk_buff *
+               flow->set = CAKE_SET_SPARSE;
+               b->sparse_flow_count++;
+-              if (cake_dsrc(q->flow_mode))
+-                      host_load = max(host_load, srchost->srchost_bulk_flow_count);
+-
+-              if (cake_ddst(q->flow_mode))
+-                      host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
+-
+-              flow->deficit = (b->flow_quantum *
+-                               quantum_div[host_load]) >> 16;
++              flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode);
+       } else if (flow->set == CAKE_SET_SPARSE_WAIT) {
+-              struct cake_host *srchost = &b->hosts[flow->srchost];
+-              struct cake_host *dsthost = &b->hosts[flow->dsthost];
+-
+               /* this flow was empty, accounted as a sparse flow, but actually
+                * in the bulk rotation.
+                */
+@@ -1853,12 +1896,8 @@ static s32 cake_enqueue(struct sk_buff *
+               b->sparse_flow_count--;
+               b->bulk_flow_count++;
+-              if (cake_dsrc(q->flow_mode))
+-                      srchost->srchost_bulk_flow_count++;
+-
+-              if (cake_ddst(q->flow_mode))
+-                      dsthost->dsthost_bulk_flow_count++;
+-
++              cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
++              cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
+       }
+       if (q->buffer_used > q->buffer_max_used)
+@@ -1915,13 +1954,11 @@ static struct sk_buff *cake_dequeue(stru
+ {
+       struct cake_sched_data *q = qdisc_priv(sch);
+       struct cake_tin_data *b = &q->tins[q->cur_tin];
+-      struct cake_host *srchost, *dsthost;
+       ktime_t now = ktime_get();
+       struct cake_flow *flow;
+       struct list_head *head;
+       bool first_flow = true;
+       struct sk_buff *skb;
+-      u16 host_load;
+       u64 delay;
+       u32 len;
+@@ -2021,11 +2058,6 @@ retry:
+       q->cur_flow = flow - b->flows;
+       first_flow = false;
+-      /* triple isolation (modified DRR++) */
+-      srchost = &b->hosts[flow->srchost];
+-      dsthost = &b->hosts[flow->dsthost];
+-      host_load = 1;
+-
+       /* flow isolation (DRR++) */
+       if (flow->deficit <= 0) {
+               /* Keep all flows with deficits out of the sparse and decaying
+@@ -2037,11 +2069,8 @@ retry:
+                               b->sparse_flow_count--;
+                               b->bulk_flow_count++;
+-                              if (cake_dsrc(q->flow_mode))
+-                                      srchost->srchost_bulk_flow_count++;
+-
+-                              if (cake_ddst(q->flow_mode))
+-                                      dsthost->dsthost_bulk_flow_count++;
++                              cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
++                              cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
+                               flow->set = CAKE_SET_BULK;
+                       } else {
+@@ -2053,19 +2082,7 @@ retry:
+                       }
+               }
+-              if (cake_dsrc(q->flow_mode))
+-                      host_load = max(host_load, srchost->srchost_bulk_flow_count);
+-
+-              if (cake_ddst(q->flow_mode))
+-                      host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
+-
+-              WARN_ON(host_load > CAKE_QUEUES);
+-
+-              /* The shifted prandom_u32() is a way to apply dithering to
+-               * avoid accumulating roundoff errors
+-               */
+-              flow->deficit += (b->flow_quantum * quantum_div[host_load] +
+-                                (prandom_u32() >> 16)) >> 16;
++              flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode);
+               list_move_tail(&flow->flowchain, &b->old_flows);
+               goto retry;
+@@ -2089,11 +2106,8 @@ retry:
+                               if (flow->set == CAKE_SET_BULK) {
+                                       b->bulk_flow_count--;
+-                                      if (cake_dsrc(q->flow_mode))
+-                                              srchost->srchost_bulk_flow_count--;
+-
+-                                      if (cake_ddst(q->flow_mode))
+-                                              dsthost->dsthost_bulk_flow_count--;
++                                      cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
++                                      cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
+                                       b->decaying_flow_count++;
+                               } else if (flow->set == CAKE_SET_SPARSE ||
+@@ -2111,12 +2125,8 @@ retry:
+                               else if (flow->set == CAKE_SET_BULK) {
+                                       b->bulk_flow_count--;
+-                                      if (cake_dsrc(q->flow_mode))
+-                                              srchost->srchost_bulk_flow_count--;
+-
+-                                      if (cake_ddst(q->flow_mode))
+-                                              dsthost->dsthost_bulk_flow_count--;
+-
++                                      cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
++                                      cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
+                               } else
+                                       b->decaying_flow_count--;
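
[Editorial note] The helpers in this patch all follow one rule: validate
before mutating shared counters, clamping at both ends rather than
trusting the caller's bookkeeping. A runnable distillation of that rule
(CAKE_QUEUES matches the constant the patch checks against; everything
else is hypothetical):

    #include <stdio.h>

    #define CAKE_QUEUES 1024u

    static void bulk_count_inc(unsigned int *count)
    {
            if (*count < CAKE_QUEUES)   /* never index past the table */
                    (*count)++;
    }

    static void bulk_count_dec(unsigned int *count)
    {
            if (*count)                 /* never wrap below zero */
                    (*count)--;
    }

    int main(void)
    {
            unsigned int n = 0;

            bulk_count_dec(&n);         /* unchecked, this wraps to UINT_MAX */
            printf("dec at zero: %u\n", n);   /* stays 0 */

            n = CAKE_QUEUES;
            bulk_count_inc(&n);         /* unchecked, this overruns quantum_div[] */
            printf("inc at max:  %u\n", n);   /* stays 1024 */
            return 0;
    }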
diff --git a/queue-5.4/series b/queue-5.4/series
index ddd9a8294778b1f29b0fcd60680d95fa20602bc9..479903d30799bff0169e79c2a7d2b03e096bfb9c 100644 (file)
@@ -88,6 +88,13 @@ mfd-lpc_ich-add-another-gemini-lake-isa-bridge-pci-d.patch
 hid-wacom-add-pci-wacom-device-support.patch
 apei-ghes-have-ghes-honor-the-panic-setting.patch
 x86-mm-don-t-disable-pcid-when-invlpg-has-been-fixed-by-microcode.patch
+spi-mxs-fix-chipselect-glitch.patch
+nilfs2-move-page-release-outside-of-nilfs_delete_entry-and-nilfs_set_link.patch
+nilfs2-eliminate-staggered-calls-to-kunmap-in-nilfs_rename.patch
+nilfs2-handle-errors-that-nilfs_prepare_chunk-may-return.patch
+media-uvcvideo-only-save-async-fh-if-success.patch
+media-uvcvideo-remove-dangling-pointers.patch
+sched-sch_cake-add-bounds-checks-to-host-bulk-flow-fairness-counts.patch
 tasklet-introduce-new-initialization-api.patch
 net-usb-rtl8150-use-new-tasklet-api.patch
 net-usb-rtl8150-enable-basic-endpoint-checking.patch
diff --git a/queue-5.4/spi-mxs-fix-chipselect-glitch.patch b/queue-5.4/spi-mxs-fix-chipselect-glitch.patch
new file mode 100644 (file)
index 0000000..f6e66b8
--- /dev/null
@@ -0,0 +1,44 @@
+From 269e31aecdd0b70f53a05def79480f15cbcc0fd6 Mon Sep 17 00:00:00 2001
+From: Ralf Schlatterbeck <rsc@runtux.com>
+Date: Fri, 2 Feb 2024 12:53:30 +0100
+Subject: spi-mxs: Fix chipselect glitch
+
+From: Ralf Schlatterbeck <rsc@runtux.com>
+
+commit 269e31aecdd0b70f53a05def79480f15cbcc0fd6 upstream.
+
+A change in the mxs-dma engine introduced a new custom flag, but the
+change was never applied to the mxs-spi driver.
+As a result, chipselect is deasserted too early.
+Fix the chipselect problem by using the new flag in the mxs-spi
+driver.
+
+Fixes: ceeeb99cd821 ("dmaengine: mxs: rename custom flag")
+Signed-off-by: Ralf Schlatterbeck <rsc@runtux.com>
+Link: https://msgid.link/r/20240202115330.wxkbfmvd76sy3a6a@runtux.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Cc: Stefan Wahren <wahrenst@gmx.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/spi-mxs.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/spi/spi-mxs.c
++++ b/drivers/spi/spi-mxs.c
+@@ -40,6 +40,7 @@
+ #include <linux/spi/spi.h>
+ #include <linux/spi/mxs-spi.h>
+ #include <trace/events/spi.h>
++#include <linux/dma/mxs-dma.h>
+ #define DRIVER_NAME           "mxs-spi"
+@@ -253,7 +254,7 @@ static int mxs_spi_txrx_dma(struct mxs_s
+               desc = dmaengine_prep_slave_sg(ssp->dmach,
+                               &dma_xfer[sg_count].sg, 1,
+                               (flags & TXRX_WRITE) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+-                              DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
++                              DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END);
+               if (!desc) {
+                       dev_err(ssp->dev,