6.1-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 4 Feb 2026 10:07:15 +0000 (11:07 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 4 Feb 2026 10:07:15 +0000 (11:07 +0100)
added patches:
drm-imx-tve-fix-probe-device-leak.patch
ksmbd-fix-recursive-locking-in-rpc-handle-list-access.patch
mm-kfence-randomize-the-freelist-on-initialization.patch
mptcp-avoid-dup-sub_closed-events-after-disconnect.patch
pinctrl-lpass-lpi-implement-.get_direction-for-the-gpio-driver.patch
writeback-fix-100-cpu-usage-when-dirtytime_expire_interval-is-0.patch

queue-6.1/drm-imx-tve-fix-probe-device-leak.patch [new file with mode: 0644]
queue-6.1/ksmbd-fix-recursive-locking-in-rpc-handle-list-access.patch [new file with mode: 0644]
queue-6.1/mm-kfence-randomize-the-freelist-on-initialization.patch [new file with mode: 0644]
queue-6.1/mptcp-avoid-dup-sub_closed-events-after-disconnect.patch [new file with mode: 0644]
queue-6.1/pinctrl-lpass-lpi-implement-.get_direction-for-the-gpio-driver.patch [new file with mode: 0644]
queue-6.1/series
queue-6.1/writeback-fix-100-cpu-usage-when-dirtytime_expire_interval-is-0.patch [new file with mode: 0644]

diff --git a/queue-6.1/drm-imx-tve-fix-probe-device-leak.patch b/queue-6.1/drm-imx-tve-fix-probe-device-leak.patch
new file mode 100644 (file)
index 0000000..fa64a71
--- /dev/null
@@ -0,0 +1,57 @@
+From stable+bounces-213318-greg=kroah.com@vger.kernel.org Wed Feb  4 00:29:57 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue,  3 Feb 2026 18:29:47 -0500
+Subject: drm/imx/tve: fix probe device leak
+To: stable@vger.kernel.org
+Cc: Johan Hovold <johan@kernel.org>, Philipp Zabel <p.zabel@pengutronix.de>, Frank Li <Frank.Li@nxp.com>, Maxime Ripard <mripard@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260203232947.1436026-1-sashal@kernel.org>
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit e535c23513c63f02f67e3e09e0787907029efeaf ]
+
+Make sure to drop the reference taken to the DDC device during probe on
+probe failure (e.g. probe deferral) and on driver unbind.
+
+Fixes: fcbc51e54d2a ("staging: drm/imx: Add support for Television Encoder (TVEv2)")
+Cc: stable@vger.kernel.org     # 3.10
+Cc: Philipp Zabel <p.zabel@pengutronix.de>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20251030163456.15807-1-johan@kernel.org
+Signed-off-by: Maxime Ripard <mripard@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/imx/imx-tve.c |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/drivers/gpu/drm/imx/imx-tve.c
++++ b/drivers/gpu/drm/imx/imx-tve.c
+@@ -522,6 +522,13 @@ static const struct component_ops imx_tv
+       .bind   = imx_tve_bind,
+ };
++static void imx_tve_put_device(void *_dev)
++{
++      struct device *dev = _dev;
++
++      put_device(dev);
++}
++
+ static int imx_tve_probe(struct platform_device *pdev)
+ {
+       struct device *dev = &pdev->dev;
+@@ -543,6 +550,12 @@ static int imx_tve_probe(struct platform
+       if (ddc_node) {
+               tve->ddc = of_find_i2c_adapter_by_node(ddc_node);
+               of_node_put(ddc_node);
++              if (tve->ddc) {
++                      ret = devm_add_action_or_reset(dev, imx_tve_put_device,
++                                                     &tve->ddc->dev);
++                      if (ret)
++                              return ret;
++              }
+       }
+       tve->mode = of_get_tve_mode(np);
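
A note on the idiom used above: devm_add_action_or_reset() registers a cleanup callback that the driver core runs both on probe failure and on driver unbind, so one registration covers every leak path; if the registration itself fails, the action runs immediately and the error is returned. A minimal sketch of the pattern, with hypothetical names rather than the driver's own:

        static void example_put_device(void *data)
        {
                /* drop the reference taken earlier, e.g. by of_find_i2c_adapter_by_node() */
                put_device(data);
        }

        /* in probe(), right after taking the reference: */
        ret = devm_add_action_or_reset(dev, example_put_device, &adapter->dev);
        if (ret)
                return ret;     /* the action already ran, the reference is already dropped */
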
diff --git a/queue-6.1/ksmbd-fix-recursive-locking-in-rpc-handle-list-access.patch b/queue-6.1/ksmbd-fix-recursive-locking-in-rpc-handle-list-access.patch
new file mode 100644 (file)
index 0000000..3c8a3d6
--- /dev/null
@@ -0,0 +1,158 @@
+From 1468888505@139.com Wed Feb  4 03:22:44 2026
+From: Li hongliang <1468888505@139.com>
+Date: Wed,  4 Feb 2026 10:22:39 +0800
+Subject: ksmbd: fix recursive locking in RPC handle list access
+To: mmakassikis@freebox.fr, gregkh@linuxfoundation.org, stable@vger.kernel.org, ysk@kzalloc.com
+Cc: patches@lists.linux.dev, linux-kernel@vger.kernel.org, linkinjeon@kernel.org, sfrench@samba.org, senozhatsky@chromium.org, tom@talpey.com, akendo@akendo.eu, set_pte_at@outlook.com, linux-cifs@vger.kernel.org, stfrench@microsoft.com
+Message-ID: <20260204022239.3204377-1-1468888505@139.com>
+
+From: Marios Makassikis <mmakassikis@freebox.fr>
+
+[ Upstream commit 88f170814fea74911ceab798a43cbd7c5599bed4 ]
+
+Since commit 305853cce3794 ("ksmbd: Fix race condition in RPC handle list
+access"), ksmbd_session_rpc_method() attempts to lock sess->rpc_lock.
+
+This causes hung connections / tasks when a client attempts to open
+a named pipe. Using Samba's rpcclient tool:
+
+ $ rpcclient //192.168.1.254 -U user%password
+ $ rpcclient $> srvinfo
+ <connection hung here>
+
+Kernel side:
+  "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+  task:kworker/0:0 state:D stack:0 pid:5021 tgid:5021 ppid:2 flags:0x00200000
+  Workqueue: ksmbd-io handle_ksmbd_work
+  Call trace:
+  __schedule from schedule+0x3c/0x58
+  schedule from schedule_preempt_disabled+0xc/0x10
+  schedule_preempt_disabled from rwsem_down_read_slowpath+0x1b0/0x1d8
+  rwsem_down_read_slowpath from down_read+0x28/0x30
+  down_read from ksmbd_session_rpc_method+0x18/0x3c
+  ksmbd_session_rpc_method from ksmbd_rpc_open+0x34/0x68
+  ksmbd_rpc_open from ksmbd_session_rpc_open+0x194/0x228
+  ksmbd_session_rpc_open from create_smb2_pipe+0x8c/0x2c8
+  create_smb2_pipe from smb2_open+0x10c/0x27ac
+  smb2_open from handle_ksmbd_work+0x238/0x3dc
+  handle_ksmbd_work from process_scheduled_works+0x160/0x25c
+  process_scheduled_works from worker_thread+0x16c/0x1e8
+  worker_thread from kthread+0xa8/0xb8
+  kthread from ret_from_fork+0x14/0x38
+  Exception stack(0x8529ffb0 to 0x8529fff8)
+
+The task deadlocks because the lock is already held:
+  ksmbd_session_rpc_open
+    down_write(&sess->rpc_lock)
+    ksmbd_rpc_open
+      ksmbd_session_rpc_method
+        down_read(&sess->rpc_lock)   <-- deadlock
+
+Adjust ksmbd_session_rpc_method() callers to take the lock when necessary.
+
+Fixes: 305853cce3794 ("ksmbd: Fix race condition in RPC handle list access")
+Signed-off-by: Marios Makassikis <mmakassikis@freebox.fr>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Li hongliang <1468888505@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/mgmt/user_session.c |    7 ++-----
+ fs/smb/server/smb2pdu.c           |    9 ++++++++-
+ fs/smb/server/transport_ipc.c     |   12 ++++++++++++
+ 3 files changed, 22 insertions(+), 6 deletions(-)
+
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -147,14 +147,11 @@ void ksmbd_session_rpc_close(struct ksmb
+ int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id)
+ {
+       struct ksmbd_session_rpc *entry;
+-      int method;
+-      down_read(&sess->rpc_lock);
++      lockdep_assert_held(&sess->rpc_lock);
+       entry = xa_load(&sess->rpc_handle_list, id);
+-      method = entry ? entry->method : 0;
+-      up_read(&sess->rpc_lock);
+-      return method;
++      return entry ? entry->method : 0;
+ }
+ void ksmbd_session_destroy(struct ksmbd_session *sess)
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -4308,8 +4308,15 @@ static int smb2_get_info_file_pipe(struc
+        * pipe without opening it, checking error condition here
+        */
+       id = req->VolatileFileId;
+-      if (!ksmbd_session_rpc_method(sess, id))
++
++      lockdep_assert_not_held(&sess->rpc_lock);
++
++      down_read(&sess->rpc_lock);
++      if (!ksmbd_session_rpc_method(sess, id)) {
++              up_read(&sess->rpc_lock);
+               return -ENOENT;
++      }
++      up_read(&sess->rpc_lock);
+       ksmbd_debug(SMB, "FileInfoClass %u, FileId 0x%llx\n",
+                   req->FileInfoClass, req->VolatileFileId);
+--- a/fs/smb/server/transport_ipc.c
++++ b/fs/smb/server/transport_ipc.c
+@@ -775,6 +775,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_writ
+       if (!msg)
+               return NULL;
++      lockdep_assert_not_held(&sess->rpc_lock);
++
++      down_read(&sess->rpc_lock);
+       msg->type = KSMBD_EVENT_RPC_REQUEST;
+       req = (struct ksmbd_rpc_command *)msg->payload;
+       req->handle = handle;
+@@ -783,6 +786,7 @@ struct ksmbd_rpc_command *ksmbd_rpc_writ
+       req->flags |= KSMBD_RPC_WRITE_METHOD;
+       req->payload_sz = payload_sz;
+       memcpy(req->payload, payload, payload_sz);
++      up_read(&sess->rpc_lock);
+       resp = ipc_msg_send_request(msg, req->handle);
+       ipc_msg_free(msg);
+@@ -799,6 +803,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_read
+       if (!msg)
+               return NULL;
++      lockdep_assert_not_held(&sess->rpc_lock);
++
++      down_read(&sess->rpc_lock);
+       msg->type = KSMBD_EVENT_RPC_REQUEST;
+       req = (struct ksmbd_rpc_command *)msg->payload;
+       req->handle = handle;
+@@ -806,6 +813,7 @@ struct ksmbd_rpc_command *ksmbd_rpc_read
+       req->flags |= rpc_context_flags(sess);
+       req->flags |= KSMBD_RPC_READ_METHOD;
+       req->payload_sz = 0;
++      up_read(&sess->rpc_lock);
+       resp = ipc_msg_send_request(msg, req->handle);
+       ipc_msg_free(msg);
+@@ -826,6 +834,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_ioct
+       if (!msg)
+               return NULL;
++      lockdep_assert_not_held(&sess->rpc_lock);
++
++      down_read(&sess->rpc_lock);
+       msg->type = KSMBD_EVENT_RPC_REQUEST;
+       req = (struct ksmbd_rpc_command *)msg->payload;
+       req->handle = handle;
+@@ -834,6 +845,7 @@ struct ksmbd_rpc_command *ksmbd_rpc_ioct
+       req->flags |= KSMBD_RPC_IOCTL_METHOD;
+       req->payload_sz = payload_sz;
+       memcpy(req->payload, payload, payload_sz);
++      up_read(&sess->rpc_lock);
+       resp = ipc_msg_send_request(msg, req->handle);
+       ipc_msg_free(msg);
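
The underlying issue is that kernel rw_semaphores are not recursive: a task holding rpc_lock and then calling down_read() on the same lock in a callee blocks forever. The fix moves the locking to the callers and turns the callee into a plain lookup that only asserts the lock is held. A minimal sketch of this caller-locks convention, simplified from the patch:

        /* callee: documents and, under lockdep, verifies the locking contract */
        static int rpc_method_locked(struct ksmbd_session *sess, int id)
        {
                struct ksmbd_session_rpc *entry;

                lockdep_assert_held(&sess->rpc_lock);
                entry = xa_load(&sess->rpc_handle_list, id);
                return entry ? entry->method : 0;
        }

        /* caller: owns the lock for the duration of the lookup */
        down_read(&sess->rpc_lock);
        method = rpc_method_locked(sess, id);
        up_read(&sess->rpc_lock);
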
diff --git a/queue-6.1/mm-kfence-randomize-the-freelist-on-initialization.patch b/queue-6.1/mm-kfence-randomize-the-freelist-on-initialization.patch
new file mode 100644 (file)
index 0000000..44ced9b
--- /dev/null
@@ -0,0 +1,88 @@
+From sashal@kernel.org Tue Feb  3 21:13:46 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue,  3 Feb 2026 15:13:42 -0500
+Subject: mm/kfence: randomize the freelist on initialization
+To: stable@vger.kernel.org
+Cc: Pimyn Girgis <pimyn@google.com>, Alexander Potapenko <glider@google.com>, Dmitry Vyukov <dvyukov@google.com>, Marco Elver <elver@google.com>, Ernesto Martínez García <ernesto.martinezgarcia@tugraz.at>, Greg KH <gregkh@linuxfoundation.org>, Kees Cook <kees@kernel.org>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260203201342.1383528-1-sashal@kernel.org>
+
+From: Pimyn Girgis <pimyn@google.com>
+
+[ Upstream commit 870ff19251bf3910dda7a7245da826924045fedd ]
+
+Randomize the KFENCE freelist during pool initialization to make
+allocation patterns less predictable.  This is achieved by shuffling the
+order in which metadata objects are added to the freelist using
+get_random_u32_below().
+
+Additionally, ensure the error path correctly calculates the address range
+to be reset if initialization fails, as the address increment logic has
+been moved to a separate loop.
+
+Link: https://lkml.kernel.org/r/20260120161510.3289089-1-pimyn@google.com
+Fixes: 0ce20dd84089 ("mm: add Kernel Electric-Fence infrastructure")
+Signed-off-by: Pimyn Girgis <pimyn@google.com>
+Reviewed-by: Alexander Potapenko <glider@google.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Marco Elver <elver@google.com>
+Cc: Ernesto Martínez García <ernesto.martinezgarcia@tugraz.at>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Kees Cook <kees@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[ replaced kfence_metadata_init with kfence_metadata ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/kfence/core.c |   23 +++++++++++++++++++----
+ 1 file changed, 19 insertions(+), 4 deletions(-)
+
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -542,7 +542,7 @@ static unsigned long kfence_init_pool(vo
+ {
+       unsigned long addr = (unsigned long)__kfence_pool;
+       struct page *pages;
+-      int i;
++      int i, rand;
+       if (!arch_kfence_init_pool())
+               return addr;
+@@ -590,19 +590,34 @@ static unsigned long kfence_init_pool(vo
+               INIT_LIST_HEAD(&meta->list);
+               raw_spin_lock_init(&meta->lock);
+               meta->state = KFENCE_OBJECT_UNUSED;
+-              meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
+-              list_add_tail(&meta->list, &kfence_freelist);
++              /* Use addr to randomize the freelist. */
++              meta->addr = i;
+               /* Protect the right redzone. */
+-              if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
++              if (unlikely(!kfence_protect(addr + 2 * i * PAGE_SIZE + PAGE_SIZE)))
+                       goto reset_slab;
++      }
++
++      for (i = CONFIG_KFENCE_NUM_OBJECTS; i > 0; i--) {
++              rand = get_random_u32_below(i);
++              swap(kfence_metadata[i - 1].addr, kfence_metadata[rand].addr);
++      }
++      for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
++              struct kfence_metadata *meta_1 = &kfence_metadata[i];
++              struct kfence_metadata *meta_2 = &kfence_metadata[meta_1->addr];
++
++              list_add_tail(&meta_2->list, &kfence_freelist);
++      }
++      for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
++              kfence_metadata[i].addr = addr;
+               addr += 2 * PAGE_SIZE;
+       }
+       return 0;
+ reset_slab:
++      addr += 2 * i * PAGE_SIZE;
+       for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
+               struct slab *slab = page_slab(nth_page(pages, i));
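
The shuffle added above is a Fisher-Yates pass over the object indices, temporarily parked in meta->addr: walk i from the number of objects down to 1, pick a random j in [0, i) with get_random_u32_below(), and swap slots i - 1 and j; the freelist is then built in that shuffled order before the real guarded addresses are written back. The core of the algorithm in isolation, over a generic index array:

        /* Fisher-Yates: uniformly shuffle idx[0..n-1] in place */
        for (i = n; i > 0; i--) {
                j = get_random_u32_below(i);    /* random index in [0, i) */
                swap(idx[i - 1], idx[j]);
        }
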
diff --git a/queue-6.1/mptcp-avoid-dup-sub_closed-events-after-disconnect.patch b/queue-6.1/mptcp-avoid-dup-sub_closed-events-after-disconnect.patch
new file mode 100644 (file)
index 0000000..29cda24
--- /dev/null
@@ -0,0 +1,56 @@
+From stable+bounces-213302-greg=kroah.com@vger.kernel.org Tue Feb  3 21:12:58 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue,  3 Feb 2026 15:12:53 -0500
+Subject: mptcp: avoid dup SUB_CLOSED events after disconnect
+To: stable@vger.kernel.org
+Cc: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Marco Angaroni <marco.angaroni@italtel.com>, Geliang Tang <geliang@kernel.org>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260203201253.1382543-1-sashal@kernel.org>
+
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+
+[ Upstream commit 280d654324e33f8e6e3641f76764694c7b64c5db ]
+
+In case of subflow disconnect(), which can also happen with the first
+subflow in case of errors like timeout or reset, mptcp_subflow_ctx_reset
+will reset most fields from the mptcp_subflow_context structure,
+including close_event_done. Then, when another subflow is closed, yet
+another SUB_CLOSED event for the disconnected initial subflow is sent.
+Because of the previous reset, there are no source address and
+destination port.
+
+A solution is then to also check the subflow's local id: it shouldn't be
+negative anyway.
+
+Another solution would be not to reset subflow->close_event_done at
+disconnect time, but when reused. But then, probably the whole reset
+could be done when being reused. Let's not change this logic, similar
+to TCP with tcp_disconnect().
+
+Fixes: d82809b6c5f2 ("mptcp: avoid duplicated SUB_CLOSED events")
+Cc: stable@vger.kernel.org
+Reported-by: Marco Angaroni <marco.angaroni@italtel.com>
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/603
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20260127-net-mptcp-dup-nl-events-v1-1-7f71e1bc4feb@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ Adjust context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2569,8 +2569,8 @@ out:
+ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+                    struct mptcp_subflow_context *subflow)
+ {
+-      /* The first subflow can already be closed and still in the list */
+-      if (subflow->close_event_done)
++      /* The first subflow can already be closed or disconnected */
++      if (subflow->close_event_done || READ_ONCE(subflow->local_id) < 0)
+               return;
+       subflow->close_event_done = true;
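
For context: per the description above, mptcp_subflow_ctx_reset() returns a disconnected subflow to its pristine state, so its local address id is back to the unassigned (negative) value. The added check therefore filters out subflows that were already torn down and would otherwise produce a second, contentless SUB_CLOSED event, while close_event_done keeps covering the ordinary "already closed" case. Conceptually:

        /* skip subflows already reported (close_event_done) or reset by disconnect() (negative id) */
        if (subflow->close_event_done || READ_ONCE(subflow->local_id) < 0)
                return;
        subflow->close_event_done = true;
        /* ... the SUB_CLOSED event is then emitted at most once for this subflow ... */
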
diff --git a/queue-6.1/pinctrl-lpass-lpi-implement-.get_direction-for-the-gpio-driver.patch b/queue-6.1/pinctrl-lpass-lpi-implement-.get_direction-for-the-gpio-driver.patch
new file mode 100644 (file)
index 0000000..340c195
--- /dev/null
@@ -0,0 +1,64 @@
+From stable+bounces-213320-greg=kroah.com@vger.kernel.org Wed Feb  4 00:50:51 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue,  3 Feb 2026 18:48:50 -0500
+Subject: pinctrl: lpass-lpi: implement .get_direction() for the GPIO driver
+To: stable@vger.kernel.org
+Cc: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>, Abel Vesa <abelvesa@kernel.org>, Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>, Abel Vesa <abel.vesa@oss.qualcomm.com>, Linus Walleij <linusw@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260203234850.1453977-1-sashal@kernel.org>
+
+From: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
+
+[ Upstream commit 4f0d22ec60cee420125f4055af76caa0f373a3fe ]
+
+A GPIO controller driver should typically implement the .get_direction()
+callback, as GPIOLIB internals may try to use it to determine the state
+of a pin. Add it for the LPASS LPI driver.
+
+Reported-by: Abel Vesa <abelvesa@kernel.org>
+Cc: stable@vger.kernel.org
+Fixes: 6e261d1090d6 ("pinctrl: qcom: Add sm8250 lpass lpi pinctrl driver")
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Tested-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com> # X1E CRD
+Tested-by: Abel Vesa <abel.vesa@oss.qualcomm.com>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+[ PIN_CONFIG_LEVEL => PIN_CONFIG_OUTPUT ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pinctrl/qcom/pinctrl-lpass-lpi.c |   17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
++++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+@@ -248,6 +248,22 @@ static const struct pinconf_ops lpi_gpio
+       .pin_config_group_set           = lpi_config_set,
+ };
++static int lpi_gpio_get_direction(struct gpio_chip *chip, unsigned int pin)
++{
++      unsigned long config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, 0);
++      struct lpi_pinctrl *state = gpiochip_get_data(chip);
++      unsigned long arg;
++      int ret;
++
++      ret = lpi_config_get(state->ctrl, pin, &config);
++      if (ret)
++              return ret;
++
++      arg = pinconf_to_config_argument(config);
++
++      return arg ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
++}
++
+ static int lpi_gpio_direction_input(struct gpio_chip *chip, unsigned int pin)
+ {
+       struct lpi_pinctrl *state = gpiochip_get_data(chip);
+@@ -346,6 +362,7 @@ static void lpi_gpio_dbg_show(struct seq
+ #endif
+ static const struct gpio_chip lpi_gpio_template = {
++      .get_direction          = lpi_gpio_get_direction,
+       .direction_input        = lpi_gpio_direction_input,
+       .direction_output       = lpi_gpio_direction_output,
+       .get                    = lpi_gpio_get,
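
Without a .get_direction() callback, gpiolib cannot query the hardware and has to assume a line's direction; with it wired up, paths such as gpiod_get_direction() and the chip registration code report the real pad state. The new callback simply reuses the driver's pinconf machinery: it asks for PIN_CONFIG_OUTPUT via lpi_config_get() and maps a non-zero argument to GPIO_LINE_DIRECTION_OUT. A small consumer-side sketch of what this enables, with a hypothetical device and line name:

        /* "example" is a made-up connection id, dev a made-up consumer device */
        struct gpio_desc *desc = gpiod_get(dev, "example", GPIOD_ASIS);
        int dir;

        if (IS_ERR(desc))
                return PTR_ERR(desc);
        /* GPIO_LINE_DIRECTION_OUT, GPIO_LINE_DIRECTION_IN, or a negative errno */
        dir = gpiod_get_direction(desc);
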
diff --git a/queue-6.1/series b/queue-6.1/series
index d6b5df165d82a81e9a9295d7b31ba5c623814911..038eeb723f2e7cedc28b28d6fdb3e1e855e50750 100644 (file)
@@ -272,3 +272,9 @@ vhost-scsi-fix-handling-of-multiple-calls-to-vhost_scsi_set_endpoint.patch
 drm-radeon-delete-radeon_fence_process-in-is_signaled-no-deadlock.patch
 btrfs-prevent-use-after-free-on-page-private-data-in-btrfs_subpage_clear_uptodate.patch
 net-sched-act_ife-convert-comma-to-semicolon.patch
+mptcp-avoid-dup-sub_closed-events-after-disconnect.patch
+mm-kfence-randomize-the-freelist-on-initialization.patch
+writeback-fix-100-cpu-usage-when-dirtytime_expire_interval-is-0.patch
+drm-imx-tve-fix-probe-device-leak.patch
+pinctrl-lpass-lpi-implement-.get_direction-for-the-gpio-driver.patch
+ksmbd-fix-recursive-locking-in-rpc-handle-list-access.patch
diff --git a/queue-6.1/writeback-fix-100-cpu-usage-when-dirtytime_expire_interval-is-0.patch b/queue-6.1/writeback-fix-100-cpu-usage-when-dirtytime_expire_interval-is-0.patch
new file mode 100644 (file)
index 0000000..d8cff0d
--- /dev/null
@@ -0,0 +1,77 @@
+From stable+bounces-213304-greg=kroah.com@vger.kernel.org Tue Feb  3 21:19:40 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue,  3 Feb 2026 15:19:34 -0500
+Subject: writeback: fix 100% CPU usage when dirtytime_expire_interval is 0
+To: stable@vger.kernel.org
+Cc: Laveesh Bansal <laveeshb@laveeshbansal.com>, Jan Kara <jack@suse.cz>, Christian Brauner <brauner@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260203201934.1387179-1-sashal@kernel.org>
+
+From: Laveesh Bansal <laveeshb@laveeshbansal.com>
+
+[ Upstream commit 543467d6fe97e27e22a26e367fda972dbefebbff ]
+
+When vm.dirtytime_expire_seconds is set to 0, wakeup_dirtytime_writeback()
+schedules delayed work with a delay of 0, causing immediate execution.
+The function then reschedules itself with 0 delay again, creating an
+infinite busy loop that causes 100% kworker CPU usage.
+
+Fix by:
+- Only scheduling delayed work in wakeup_dirtytime_writeback() when
+  dirtytime_expire_interval is non-zero
+- Cancelling the delayed work in dirtytime_interval_handler() when
+  the interval is set to 0
+- Adding a guard in start_dirtytime_writeback() for defensive coding
+
+Tested by booting kernel in QEMU with virtme-ng:
+- Before fix: kworker CPU spikes to ~73%
+- After fix: CPU remains at normal levels
+- Setting interval back to non-zero correctly resumes writeback
+
+Fixes: a2f4870697a5 ("fs: make sure the timestamps for lazytime inodes eventually get written")
+Cc: stable@vger.kernel.org
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=220227
+Signed-off-by: Laveesh Bansal <laveeshb@laveeshbansal.com>
+Link: https://patch.msgid.link/20260106145059.543282-2-laveeshb@laveeshbansal.com
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+[ adapted system_percpu_wq to system_wq for the workqueue used in dirtytime_interval_handler() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fs-writeback.c |   14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -2360,12 +2360,14 @@ static void wakeup_dirtytime_writeback(s
+                               wb_wakeup(wb);
+       }
+       rcu_read_unlock();
+-      schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
++      if (dirtytime_expire_interval)
++              schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
+ }
+ static int __init start_dirtytime_writeback(void)
+ {
+-      schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
++      if (dirtytime_expire_interval)
++              schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
+       return 0;
+ }
+ __initcall(start_dirtytime_writeback);
+@@ -2376,8 +2378,12 @@ int dirtytime_interval_handler(struct ct
+       int ret;
+       ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+-      if (ret == 0 && write)
+-              mod_delayed_work(system_wq, &dirtytime_work, 0);
++      if (ret == 0 && write) {
++              if (dirtytime_expire_interval)
++                      mod_delayed_work(system_wq, &dirtytime_work, 0);
++              else
++                      cancel_delayed_work_sync(&dirtytime_work);
++      }
+       return ret;
+ }
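
The bug is the classic self-rearming delayed-work pitfall: the work function unconditionally reschedules itself with a delay of dirtytime_expire_interval * HZ, so a configured interval of 0 degenerates into schedule_delayed_work(..., 0) in a loop, i.e. a kworker spinning at full CPU. The fix treats 0 as "disabled" at every re-arm point and cancels the pending work when the sysctl is set to 0. The guarded re-arm pattern in isolation, with generic names:

        static unsigned int interval_secs = 600;        /* 0 disables the periodic pass */
        static void periodic_fn(struct work_struct *work);
        static DECLARE_DELAYED_WORK(periodic_work, periodic_fn);

        static void periodic_fn(struct work_struct *work)
        {
                /* ... do the periodic pass ... */

                /* interval == 0 means "disabled": never re-arm with a zero delay */
                if (interval_secs)
                        schedule_delayed_work(&periodic_work, interval_secs * HZ);
        }
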