--- /dev/null
+From c384481006476ac65478fa3584c7245782e52f34 Mon Sep 17 00:00:00 2001
+From: Nikolaus Voss <nv@vosn.de>
+Date: Thu, 19 Dec 2024 11:54:11 +0100
+Subject: clk: clk-imx8mp-audiomix: fix function signature
+
+From: Nikolaus Voss <nv@vosn.de>
+
+commit c384481006476ac65478fa3584c7245782e52f34 upstream.
+
+clk_imx8mp_audiomix_reset_controller_register() in the
+"if !CONFIG_RESET_CONTROLLER" branch was missing its first
+argument. It is an empty function in this branch, so the
+mismatch wasn't immediately apparent.
+
+Fixes: 6f0e817175c5 ("clk: imx: clk-audiomix: Add reset controller")
+Cc: <stable@vger.kernel.org> # 6.12.x
+Signed-off-by: Nikolaus Voss <nv@vosn.de>
+Link: https://lore.kernel.org/r/20241219105447.889CB11FE@mail.steuer-voss.de
+Reviewed-by: Daniel Baluta <daniel.baluta@nxp.com>
+Acked-by: Shengjiu Wang <shengjiu.wang@gmail.com>
+Reviewed-by: Peng Fan <peng.fan@nxp.com>
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/clk/imx/clk-imx8mp-audiomix.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/clk/imx/clk-imx8mp-audiomix.c
++++ b/drivers/clk/imx/clk-imx8mp-audiomix.c
+@@ -278,7 +278,8 @@ static int clk_imx8mp_audiomix_reset_con
+
+ #else /* !CONFIG_RESET_CONTROLLER */
+
+-static int clk_imx8mp_audiomix_reset_controller_register(struct clk_imx8mp_audiomix_priv *priv)
++static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
++ struct clk_imx8mp_audiomix_priv *priv)
+ {
+ return 0;
+ }
--- /dev/null
+From 79d67c499c3f886202a40c5cb27e747e4fa4d738 Mon Sep 17 00:00:00 2001
+From: Biju Das <biju.das.jz@bp.renesas.com>
+Date: Tue, 19 Nov 2024 19:20:31 +0000
+Subject: drm: adv7511: Drop dsi single lane support
+
+From: Biju Das <biju.das.jz@bp.renesas.com>
+
+commit 79d67c499c3f886202a40c5cb27e747e4fa4d738 upstream.
+
+As per [1] and [2], ADV7535/7533 supports only 2-, 3-, or 4-lane modes.
+Drop the unsupported 1-lane mode.
+
+[1] https://www.analog.com/media/en/technical-documentation/data-sheets/ADV7535.pdf
+[2] https://www.analog.com/media/en/technical-documentation/data-sheets/ADV7533.pdf
+
+Fixes: 1e4d58cd7f88 ("drm/bridge: adv7533: Create a MIPI DSI device")
+Reported-by: Hien Huynh <hien.huynh.px@renesas.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+Reviewed-by: Adam Ford <aford173@gmail.com>
+Signed-off-by: Biju Das <biju.das.jz@bp.renesas.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241119192040.152657-4-biju.das.jz@bp.renesas.com
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/bridge/adv7511/adv7533.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
+@@ -172,7 +172,7 @@ int adv7533_parse_dt(struct device_node
+
+ of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
+
+- if (num_lanes < 1 || num_lanes > 4)
++ if (num_lanes < 2 || num_lanes > 4)
+ return -EINVAL;
+
+ adv->num_dsi_lanes = num_lanes;
--- /dev/null
+From 81adbd3ff21c1182e06aa02c6be0bfd9ea02d8e8 Mon Sep 17 00:00:00 2001
+From: Biju Das <biju.das.jz@bp.renesas.com>
+Date: Tue, 19 Nov 2024 19:20:29 +0000
+Subject: drm: adv7511: Fix use-after-free in adv7533_attach_dsi()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Biju Das <biju.das.jz@bp.renesas.com>
+
+commit 81adbd3ff21c1182e06aa02c6be0bfd9ea02d8e8 upstream.
+
+The host_node pointer was assigned and freed in adv7533_parse_dt(), and
+adv7533_attach_dsi() later uses the same pointer. Fix this use-after-free
+issue by dropping the of_node_put() in adv7533_parse_dt() and calling
+of_node_put() in the error path of probe() and also in remove().
+
+Fixes: 1e4d58cd7f88 ("drm/bridge: adv7533: Create a MIPI DSI device")
+Cc: stable@vger.kernel.org
+Reviewed-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+Signed-off-by: Biju Das <biju.das.jz@bp.renesas.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241119192040.152657-2-biju.das.jz@bp.renesas.com
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 10 ++++++++--
+ drivers/gpu/drm/bridge/adv7511/adv7533.c | 2 --
+ 2 files changed, 8 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+@@ -1241,8 +1241,10 @@ static int adv7511_probe(struct i2c_clie
+ return ret;
+
+ ret = adv7511_init_regulators(adv7511);
+- if (ret)
+- return dev_err_probe(dev, ret, "failed to init regulators\n");
++ if (ret) {
++ dev_err_probe(dev, ret, "failed to init regulators\n");
++ goto err_of_node_put;
++ }
+
+ /*
+ * The power down GPIO is optional. If present, toggle it from active to
+@@ -1363,6 +1365,8 @@ err_i2c_unregister_edid:
+ i2c_unregister_device(adv7511->i2c_edid);
+ uninit_regulators:
+ adv7511_uninit_regulators(adv7511);
++err_of_node_put:
++ of_node_put(adv7511->host_node);
+
+ return ret;
+ }
+@@ -1371,6 +1375,8 @@ static void adv7511_remove(struct i2c_cl
+ {
+ struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+
++ of_node_put(adv7511->host_node);
++
+ adv7511_uninit_regulators(adv7511);
+
+ drm_bridge_remove(&adv7511->bridge);
+--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
+@@ -181,8 +181,6 @@ int adv7533_parse_dt(struct device_node
+ if (!adv->host_node)
+ return -ENODEV;
+
+- of_node_put(adv->host_node);
+-
+ adv->use_timing_gen = !of_property_read_bool(np,
+ "adi,disable-timing-generator");
+
--- /dev/null
+From ee8f9ed57a397605434caeef351bafa3ec4dfdd4 Mon Sep 17 00:00:00 2001
+From: Biju Das <biju.das.jz@bp.renesas.com>
+Date: Tue, 19 Nov 2024 19:20:30 +0000
+Subject: dt-bindings: display: adi,adv7533: Drop single lane support
+
+From: Biju Das <biju.das.jz@bp.renesas.com>
+
+commit ee8f9ed57a397605434caeef351bafa3ec4dfdd4 upstream.
+
+As per [1] and [2], ADV7535/7533 supports only 2-, 3-, or 4-lane modes.
+Drop the unsupported 1-lane mode from the bindings.
+
+[1] https://www.analog.com/media/en/technical-documentation/data-sheets/ADV7535.pdf
+[2] https://www.analog.com/media/en/technical-documentation/data-sheets/ADV7533.pdf
+
+Fixes: 1e4d58cd7f88 ("drm/bridge: adv7533: Create a MIPI DSI device")
+Cc: stable@vger.kernel.org
+Acked-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Reviewed-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+Signed-off-by: Biju Das <biju.das.jz@bp.renesas.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241119192040.152657-3-biju.das.jz@bp.renesas.com
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml
++++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml
+@@ -90,7 +90,7 @@ properties:
+ adi,dsi-lanes:
+ description: Number of DSI data lanes connected to the DSI host.
+ $ref: /schemas/types.yaml#/definitions/uint32
+- enum: [ 1, 2, 3, 4 ]
++ enum: [ 2, 3, 4 ]
+
+ "#sound-dai-cells":
+ const: 0
--- /dev/null
+From d65474033740ded0a4fe9a097fce72328655b41d Mon Sep 17 00:00:00 2001
+From: Zilin Guan <zilin@seu.edu.cn>
+Date: Tue, 31 Dec 2024 11:37:31 +0000
+Subject: fgraph: Add READ_ONCE() when accessing fgraph_array[]
+
+From: Zilin Guan <zilin@seu.edu.cn>
+
+commit d65474033740ded0a4fe9a097fce72328655b41d upstream.
+
+In __ftrace_return_to_handler(), a loop iterates over the fgraph_array[]
+elements, which are fgraph_ops. The loop checks if an element is a
+fgraph_stub to prevent using a fgraph_stub afterward.
+
+However, if the compiler reloads fgraph_array[] after this check, it might
+race with an update to fgraph_array[] that introduces a fgraph_stub. This
+could result in the stub being processed, but the stub contains a null
+"func_hash" field, leading to a NULL pointer dereference.
+
+To ensure that the gops compared against the fgraph_stub matches the gops
+processed later, add a READ_ONCE(). A similar patch appears in commit
+63a8dfb ("function_graph: Add READ_ONCE() when accessing fgraph_array[]").
+
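+A hedged illustration (not from the commit) of the reload hazard the
+annotation closes:
+
+	struct fgraph_ops *gops = fgraph_array[i];	/* plain load */
+	if (gops == &fgraph_stub)
+		continue;
+	/* nothing stops the compiler from re-reading fgraph_array[i] for
+	 * later uses of gops, and that re-read can observe a new stub */
+
+READ_ONCE() forces a single load, so the pointer that was checked is the
+pointer that gets used.
+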
+Cc: stable@vger.kernel.org
+Fixes: 37238abe3cb47 ("ftrace/function_graph: Pass fgraph_ops to function graph callbacks")
+Link: https://lore.kernel.org/20241231113731.277668-1-zilin@seu.edu.cn
+Signed-off-by: Zilin Guan <zilin@seu.edu.cn>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/fgraph.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/trace/fgraph.c
++++ b/kernel/trace/fgraph.c
+@@ -802,7 +802,7 @@ static unsigned long __ftrace_return_to_
+ #endif
+ {
+ for_each_set_bit(i, &bitmap, sizeof(bitmap) * BITS_PER_BYTE) {
+- struct fgraph_ops *gops = fgraph_array[i];
++ struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]);
+
+ if (gops == &fgraph_stub)
+ continue;
--- /dev/null
+From 3754137d263f52f4b507cf9ae913f8f0497d1b0e Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <david@redhat.com>
+Date: Tue, 17 Dec 2024 20:50:00 +0100
+Subject: fs/proc/task_mmu: fix pagemap flags with PMD THP entries on 32bit
+
+From: David Hildenbrand <david@redhat.com>
+
+commit 3754137d263f52f4b507cf9ae913f8f0497d1b0e upstream.
+
+Entries (including flags) are u64, even on 32bit. So right now we are
+cutting off the flags on 32bit. This way, for example, the cow selftest
+complains about:
+
+ # ./cow
+ ...
+ Bail Out! read and ioctl return unmatched results for populated: 0 1
+
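+As an illustration (not part of the upstream commit), the affected flag
+bits sit in the upper half of the 64-bit pagemap entry, so a 32-bit
+unsigned long cannot carry them:
+
+	u64 flags = PM_PRESENT | PM_MMAP_EXCLUSIVE;	/* bits 63 and 56      */
+	unsigned long truncated = flags;		/* loses both on 32bit */
+	u64 cur_flags = flags;				/* what the fix stores */
+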
+Link: https://lkml.kernel.org/r/20241217195000.1734039-1-david@redhat.com
+Fixes: 2c1f057e5be6 ("fs/proc/task_mmu: properly detect PM_MMAP_EXCLUSIVE per page of PMD-mapped THPs")
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/proc/task_mmu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1810,7 +1810,7 @@ static int pagemap_pmd_range(pmd_t *pmdp
+ }
+
+ for (; addr != end; addr += PAGE_SIZE, idx++) {
+- unsigned long cur_flags = flags;
++ u64 cur_flags = flags;
+ pagemap_entry_t pme;
+
+ if (folio && (flags & PM_PRESENT) &&
--- /dev/null
+From 38fc96a58ce40257aec79b32e9b310c86907c63c Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Sat, 28 Dec 2024 17:44:52 +0000
+Subject: io_uring/rw: fix downgraded mshot read
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+commit 38fc96a58ce40257aec79b32e9b310c86907c63c upstream.
+
+The io-wq path can downgrade a multishot request to oneshot mode,
+however io_read_mshot() doesn't handle that and would still post
+multiple CQEs. That's not allowed, because io_req_post_cqe() requires
+stricter context requirements.
+
+The described issue can only happen with pollable files that don't
+support FMODE_NOWAIT, which is an odd combination, so even if allowed it
+should be fairly rare.
+
+Cc: stable@vger.kernel.org
+Reported-by: chase xd <sl1589472800@gmail.com>
+Fixes: bee1d5becdf5b ("io_uring: disable io-wq execution of multishot NOWAIT requests")
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Link: https://lore.kernel.org/r/c5c8c4a50a882fd581257b81bf52eee260ac29fd.1735407848.git.asml.silence@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/rw.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/io_uring/rw.c
++++ b/io_uring/rw.c
+@@ -979,6 +979,8 @@ int io_read_mshot(struct io_kiocb *req,
+ io_kbuf_recycle(req, issue_flags);
+ if (ret < 0)
+ req_set_fail(req);
++ } else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
++ cflags = io_put_kbuf(req, ret, issue_flags);
+ } else {
+ /*
+ * Any successful return value will keep the multishot read
--- /dev/null
+From cb0ca08b326aa03f87fe94bb91872ce8d2ef1ed8 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Tue, 17 Dec 2024 08:18:10 +0100
+Subject: kcov: mark in_softirq_really() as __always_inline
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit cb0ca08b326aa03f87fe94bb91872ce8d2ef1ed8 upstream.
+
+If gcc decides not to inline in_softirq_really(), objtool warns about a
+function call with UACCESS enabled:
+
+kernel/kcov.o: warning: objtool: __sanitizer_cov_trace_pc+0x1e: call to in_softirq_really() with UACCESS enabled
+kernel/kcov.o: warning: objtool: check_kcov_mode+0x11: call to in_softirq_really() with UACCESS enabled
+
+Mark this as __always_inline to avoid the problem.
+
+Link: https://lkml.kernel.org/r/20241217071814.2261620-1-arnd@kernel.org
+Fixes: 7d4df2dad312 ("kcov: properly check for softirq context")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Reviewed-by: Marco Elver <elver@google.com>
+Cc: Aleksandr Nogikh <nogikh@google.com>
+Cc: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Josh Poimboeuf <jpoimboe@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/kcov.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/kcov.c
++++ b/kernel/kcov.c
+@@ -166,7 +166,7 @@ static void kcov_remote_area_put(struct
+ * Unlike in_serving_softirq(), this function returns false when called during
+ * a hardirq or an NMI that happened in the softirq context.
+ */
+-static inline bool in_softirq_really(void)
++static __always_inline bool in_softirq_really(void)
+ {
+ return in_serving_softirq() && !in_hardirq() && !in_nmi();
+ }
--- /dev/null
+From 1fd8bc7cd889bd73d07a83cb32d674ac68f99153 Mon Sep 17 00:00:00 2001
+From: Yang Erkun <yangerkun@huawei.com>
+Date: Sat, 14 Dec 2024 17:30:05 +0800
+Subject: maple_tree: reload mas before the second call for mas_empty_area
+
+From: Yang Erkun <yangerkun@huawei.com>
+
+commit 1fd8bc7cd889bd73d07a83cb32d674ac68f99153 upstream.
+
+Change the LONG_MAX in simple_offset_add() to 1024, and then do the following:
+
+[root@fedora ~]# mkdir /tmp/dir
+[root@fedora ~]# for i in {1..1024}; do touch /tmp/dir/$i; done
+touch: cannot touch '/tmp/dir/1024': Device or resource busy
+[root@fedora ~]# rm /tmp/dir/123
+[root@fedora ~]# touch /tmp/dir/1024
+[root@fedora ~]# rm /tmp/dir/100
+[root@fedora ~]# touch /tmp/dir/1025
+touch: cannot touch '/tmp/dir/1025': Device or resource busy
+
+After we delete file 100, that slot is actually an empty entry, but the
+later create fails unexpectedly.
+
+mas_alloc_cyclic() has two chances to find an empty entry. It first
+searches the range from range_lo to range_hi; if no empty entry exists
+there and range_lo > min, it retries with the range from min to range_hi.
+However, the first call to mas_empty_area() may mark mas as EBUSY, in
+which case the second call to mas_empty_area() fails immediately. Fix
+this by reloading mas before the second call to mas_empty_area().
+
+[Liam.Howlett@Oracle.com: fix mas_alloc_cyclic() second search]
+ Link: https://lore.kernel.org/all/20241216060600.287B4C4CED0@smtp.kernel.org/
+ Link: https://lkml.kernel.org/r/20241216190113.1226145-2-Liam.Howlett@oracle.com
+Link: https://lkml.kernel.org/r/20241214093005.72284-1-yangerkun@huaweicloud.com
+Fixes: 9b6713cc7522 ("maple_tree: Add mtree_alloc_cyclic()")
+Signed-off-by: Yang Erkun <yangerkun@huawei.com>
+Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Chuck Lever <chuck.lever@oracle.com>
+Cc: Liam R. Howlett <Liam.Howlett@Oracle.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/maple_tree.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -4367,6 +4367,7 @@ int mas_alloc_cyclic(struct ma_state *ma
+ ret = 1;
+ }
+ if (ret < 0 && range_lo > min) {
++ mas_reset(mas);
+ ret = mas_empty_area(mas, min, range_hi, 1);
+ if (ret == 0)
+ ret = 1;
--- /dev/null
+From 7d390b53067ef745e2d9bee5a9683df4c96b80a0 Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Sun, 22 Dec 2024 15:12:22 -0800
+Subject: mm/damon/core: fix ignored quota goals and filters of newly committed schemes
+
+From: SeongJae Park <sj@kernel.org>
+
+commit 7d390b53067ef745e2d9bee5a9683df4c96b80a0 upstream.
+
+damon_commit_schemes() ignores quota goals and filters of the newly
+committed schemes. This confuses users about the behavior.
+Correctly handle those inputs.
+
+Link: https://lkml.kernel.org/r/20241222231222.85060-3-sj@kernel.org
+Fixes: 9cb3d0b9dfce ("mm/damon/core: implement DAMON context commit function")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/core.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/mm/damon/core.c b/mm/damon/core.c
+index dc52361f1863..0776452a1abb 100644
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -868,6 +868,11 @@ static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
+ NUMA_NO_NODE);
+ if (!new_scheme)
+ return -ENOMEM;
++ err = damos_commit(new_scheme, src_scheme);
++ if (err) {
++ damon_destroy_scheme(new_scheme);
++ return err;
++ }
+ damon_add_scheme(dst, new_scheme);
+ }
+ return 0;
+--
+2.47.1
+
--- /dev/null
+From 8debfc5b1aa569d3d2ac836af2553da037611c61 Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Sun, 22 Dec 2024 15:12:21 -0800
+Subject: mm/damon/core: fix new damon_target objects leaks on damon_commit_targets()
+
+From: SeongJae Park <sj@kernel.org>
+
+commit 8debfc5b1aa569d3d2ac836af2553da037611c61 upstream.
+
+Patch series "mm/damon/core: fix memory leaks and ignored inputs from
+damon_commit_ctx()".
+
+Due to two bugs in damon_commit_targets() and damon_commit_schemes(),
+which are called from damon_commit_ctx(), some user inputs can be ignored,
+and some memory objects can be leaked. Fix those.
+
+Note that only DAMON sysfs interface users are affected. Other DAMON core
+API user modules that are focused more on simple and dedicated production
+usages, including DAMON_RECLAIM and DAMON_LRU_SORT, do not use the buggy
+function in that way, so they are not affected.
+
+
+This patch (of 2):
+
+When new DAMON targets are added via damon_commit_targets(), the newly
+created targets are not deallocated when updating the internal data
+(damon_commit_target()) fails. Worse yet, even if the setup succeeds,
+the new target is not linked to the context. Hence, the new targets are
+always leaked regardless of whether the internal data setup fails. Fix
+the leaks.
+
+Link: https://lkml.kernel.org/r/20241222231222.85060-2-sj@kernel.org
+Fixes: 9cb3d0b9dfce ("mm/damon/core: implement DAMON context commit function")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/core.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -966,8 +966,11 @@ static int damon_commit_targets(
+ return -ENOMEM;
+ err = damon_commit_target(new_target, false,
+ src_target, damon_target_has_pid(src));
+- if (err)
++ if (err) {
++ damon_destroy_target(new_target);
+ return err;
++ }
++ damon_add_target(dst, new_target);
+ }
+ return 0;
+ }
--- /dev/null
+From d0e6983a6d1719738cf8d13982a68094f0a1872a Mon Sep 17 00:00:00 2001
+From: Baolin Wang <baolin.wang@linux.alibaba.com>
+Date: Thu, 19 Dec 2024 15:30:08 +0800
+Subject: mm: shmem: fix incorrect index alignment for within_size policy
+
+From: Baolin Wang <baolin.wang@linux.alibaba.com>
+
+commit d0e6983a6d1719738cf8d13982a68094f0a1872a upstream.
+
+With the shmem per-size within_size policy enabled, using the raw
+'order' value to round_up() the index can lead to incorrect i_size
+checks, resulting in inappropriately large orders being returned.
+
+Change to using '1 << order' to round_up() the index to fix this issue.
+Additionally, add an 'aligned_index' variable to avoid affecting the
+index checks.
+
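+A small worked example (illustrative only, not from the commit): with
+order == 4 (a 16-page mTHP) and index == 20,
+
+	round_up(index + 1, order);	 /* round_up(21, 4)  == 24 */
+	round_up(index + 1, 1 << order); /* round_up(21, 16) == 32 */
+
+so the old rounding could report the order-4 folio as fitting within an
+i_size of 24 pages even though the folio would actually extend to page 32.
+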
+Link: https://lkml.kernel.org/r/77d8ef76a7d3d646e9225e9af88a76549a68aab1.1734593154.git.baolin.wang@linux.alibaba.com
+Fixes: e7a2ab7b3bb5 ("mm: shmem: add mTHP support for anonymous shmem")
+Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/shmem.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1664,6 +1664,7 @@ unsigned long shmem_allowable_huge_order
+ unsigned long mask = READ_ONCE(huge_shmem_orders_always);
+ unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
+ unsigned long vm_flags = vma ? vma->vm_flags : 0;
++ pgoff_t aligned_index;
+ bool global_huge;
+ loff_t i_size;
+ int order;
+@@ -1698,9 +1699,9 @@ unsigned long shmem_allowable_huge_order
+ /* Allow mTHP that will be fully within i_size. */
+ order = highest_order(within_size_orders);
+ while (within_size_orders) {
+- index = round_up(index + 1, order);
++ aligned_index = round_up(index + 1, 1 << order);
+ i_size = round_up(i_size_read(inode), PAGE_SIZE);
+- if (i_size >> PAGE_SHIFT >= index) {
++ if (i_size >> PAGE_SHIFT >= aligned_index) {
+ mask |= within_size_orders;
+ break;
+ }
--- /dev/null
+From d77b90d2b2642655b5f60953c36ad887257e1802 Mon Sep 17 00:00:00 2001
+From: Baolin Wang <baolin.wang@linux.alibaba.com>
+Date: Thu, 19 Dec 2024 15:30:09 +0800
+Subject: mm: shmem: fix the update of 'shmem_falloc->nr_unswapped'
+
+From: Baolin Wang <baolin.wang@linux.alibaba.com>
+
+commit d77b90d2b2642655b5f60953c36ad887257e1802 upstream.
+
+'shmem_falloc->nr_unswapped' is used to record how many pages writepage
+refused to swap out because fallocate() is allocating. However, since
+shmem gained support for swapping out large folios, the update of
+'shmem_falloc->nr_unswapped' does not use the correct number of pages in
+the large folio, which may lead to fallocate() not exiting as soon as
+possible.
+
+Anyway, this was found through code inspection, and I am not sure whether
+it would actually cause serious issues.
+
+Link: https://lkml.kernel.org/r/f66a0119d0564c2c37c84f045835b870d1b2196f.1734593154.git.baolin.wang@linux.alibaba.com
+Fixes: 809bc86517cc ("mm: shmem: support large folio swap out")
+Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/shmem.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1527,7 +1527,7 @@ try_split:
+ !shmem_falloc->waitq &&
+ index >= shmem_falloc->start &&
+ index < shmem_falloc->next)
+- shmem_falloc->nr_unswapped++;
++ shmem_falloc->nr_unswapped += nr_pages;
+ else
+ shmem_falloc = NULL;
+ spin_unlock(&inode->i_lock);
--- /dev/null
+From 4a4d38ace1fb0586bffd2aab03caaa05d6011748 Mon Sep 17 00:00:00 2001
+From: Siddharth Vadapalli <s-vadapalli@ti.com>
+Date: Fri, 20 Dec 2024 13:26:14 +0530
+Subject: net: ethernet: ti: am65-cpsw: default to round-robin for host port receive
+
+From: Siddharth Vadapalli <s-vadapalli@ti.com>
+
+commit 4a4d38ace1fb0586bffd2aab03caaa05d6011748 upstream.
+
+The Host Port (i.e. CPU facing port) of CPSW receives traffic from Linux
+via TX DMA Channels which are Hardware Queues consisting of traffic
+categorized according to their priority. The Host Port is configured to
+dequeue traffic from these Hardware Queues on the basis of priority i.e.
+as long as traffic exists on a Hardware Queue of a higher priority, the
+traffic on Hardware Queues of lower priority isn't dequeued. An alternate
+operation is also supported wherein traffic can be dequeued by the Host
+Port in a Round-Robin manner.
+
+Until the commit under Fixes, the am65-cpsw driver enabled a single TX DMA
+Channel, so unless modified by the user via "ethtool", all traffic
+from Linux was transmitted on DMA Channel 0. Therefore, configuring
+the Host Port for priority-based dequeuing or Round-Robin operation
+is identical since there is a single DMA Channel.
+
+Since the commit under Fixes, all 8 TX DMA Channels are enabled by default.
+Additionally, the default "tc mapping" doesn't take into account
+the possibility of different traffic profiles which various users
+might have. This results in traffic starvation at the Host Port
+due to the priority based dequeuing which has been enabled by default
+since the inception of the driver. The traffic starvation triggers
+NETDEV WATCHDOG timeout for all TX DMA Channels that haven't been serviced
+due to the presence of traffic on the higher priority TX DMA Channels.
+
+Fix this by defaulting to Round-Robin dequeuing at the Host Port, which
+shall ensure that traffic is dequeued from all TX DMA Channels irrespective
+of the traffic profile. This will address the NETDEV WATCHDOG timeouts.
+At the same time, users can still switch from Round-Robin to Priority
+based dequeuing at the Host Port with the help of the "p0-rx-ptype-rrobin"
+private flag of "ethtool". Users are expected to setup an appropriate
+"tc mapping" that suits their traffic profile when switching to priority
+based dequeuing at the Host Port.
+
+Fixes: be397ea3473d ("net: ethernet: am65-cpsw: Set default TX channels to maximum")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Siddharth Vadapalli <s-vadapalli@ti.com>
+Link: https://patch.msgid.link/20241220075618.228202-1-s-vadapalli@ti.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/ti/am65-cpsw-nuss.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -3525,7 +3525,7 @@ static int am65_cpsw_nuss_probe(struct p
+ init_completion(&common->tdown_complete);
+ common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
+ common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS;
+- common->pf_p0_rx_ptype_rrobin = false;
++ common->pf_p0_rx_ptype_rrobin = true;
+ common->default_vlan = 1;
+
+ common->ports = devm_kcalloc(dev, common->port_num,
--- /dev/null
+From 4e86729d1ff329815a6e8a920cb554a1d4cb5b8d Mon Sep 17 00:00:00 2001
+From: Nikolay Kuratov <kniv@yandex-team.ru>
+Date: Thu, 19 Dec 2024 19:21:14 +0300
+Subject: net/sctp: Prevent autoclose integer overflow in sctp_association_init()
+
+From: Nikolay Kuratov <kniv@yandex-team.ru>
+
+commit 4e86729d1ff329815a6e8a920cb554a1d4cb5b8d upstream.
+
+While by default max_autoclose equals INT_MAX / HZ, one may set
+net.sctp.max_autoclose to UINT_MAX. There is code in
+sctp_association_init() that can consequently trigger an overflow.
+
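+A minimal arithmetic sketch of the wrap being avoided (illustrative only,
+not part of the patch):
+
+	u32 autoclose = UINT_MAX;			  /* net.sctp.max_autoclose   */
+	unsigned long t1 = autoclose * HZ;		  /* product wraps in 32 bits */
+	unsigned long t2 = (unsigned long)autoclose * HZ; /* widened first, as patched */
+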
+Cc: stable@vger.kernel.org
+Fixes: 9f70f46bd4c7 ("sctp: properly latch and use autoclose value from sock to association")
+Signed-off-by: Nikolay Kuratov <kniv@yandex-team.ru>
+Acked-by: Xin Long <lucien.xin@gmail.com>
+Link: https://patch.msgid.link/20241219162114.2863827-1-kniv@yandex-team.ru
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/associola.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -137,7 +137,8 @@ static struct sctp_association *sctp_ass
+ = 5 * asoc->rto_max;
+
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
+- asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
++ asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
++ (unsigned long)sp->autoclose * HZ;
+
+ /* Initializes the timers */
+ for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
--- /dev/null
+From 5f3fd772d152229d94602bca243fbb658068a597 Mon Sep 17 00:00:00 2001
+From: Dennis Lam <dennis.lamerice@gmail.com>
+Date: Tue, 17 Dec 2024 21:39:25 -0500
+Subject: ocfs2: fix slab-use-after-free due to dangling pointer dqi_priv
+
+From: Dennis Lam <dennis.lamerice@gmail.com>
+
+commit 5f3fd772d152229d94602bca243fbb658068a597 upstream.
+
+When mounting ocfs2 and then remounting it as read-only, a
+slab-use-after-free occurs after the user issues a quota_getnextquota
+syscall. Specifically, sb_dqinfo(sb, type)->dqi_priv is the dangling
+pointer.
+
+During the remounting process, the pointer dqi_priv is freed but is never
+set to NULL, leaving it to be accessed. Additionally, the read-only option
+for remounting sets the DQUOT_SUSPENDED flag instead of setting the
+DQUOT_USAGE_ENABLED flag. Moreover, later in the process of getting the
+next quota, the function ocfs2_get_next_id() is called and only checks the
+quota usage flags and not the quota suspended flags.
+
+To fix this, I set dqi_priv to NULL when it is freed after remounting
+read-only and added a check for DQUOT_SUSPENDED in ocfs2_get_next_id().
+
+[akpm@linux-foundation.org: coding-style cleanups]
+Link: https://lkml.kernel.org/r/20241218023924.22821-2-dennis.lamerice@gmail.com
+Fixes: 8f9e8f5fcc05 ("ocfs2: Fix Q_GETNEXTQUOTA for filesystem without quotas")
+Signed-off-by: Dennis Lam <dennis.lamerice@gmail.com>
+Reported-by: syzbot+d173bf8a5a7faeede34c@syzkaller.appspotmail.com
+Tested-by: syzbot+d173bf8a5a7faeede34c@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/6731d26f.050a0220.1fb99c.014b.GAE@google.com/T/
+Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Cc: Mark Fasheh <mark@fasheh.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Changwei Ge <gechangwei@live.cn>
+Cc: Jun Piao <piaojun@huawei.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ocfs2/quota_global.c | 2 +-
+ fs/ocfs2/quota_local.c | 1 +
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/ocfs2/quota_global.c
++++ b/fs/ocfs2/quota_global.c
+@@ -893,7 +893,7 @@ static int ocfs2_get_next_id(struct supe
+ int status = 0;
+
+ trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
+- if (!sb_has_quota_loaded(sb, type)) {
++ if (!sb_has_quota_active(sb, type)) {
+ status = -ESRCH;
+ goto out;
+ }
+--- a/fs/ocfs2/quota_local.c
++++ b/fs/ocfs2/quota_local.c
+@@ -867,6 +867,7 @@ out:
+ brelse(oinfo->dqi_libh);
+ brelse(oinfo->dqi_lqi_bh);
+ kfree(oinfo);
++ info->dqi_priv = NULL;
+ return status;
+ }
+
--- /dev/null
+From a37eecb705f33726f1fb7cd2a67e514a15dfe693 Mon Sep 17 00:00:00 2001
+From: Evgenii Shatokhin <e.shatokhin@yadro.com>
+Date: Mon, 9 Dec 2024 10:46:59 +0300
+Subject: pinctrl: mcp23s08: Fix sleeping in atomic context due to regmap locking
+
+From: Evgenii Shatokhin <e.shatokhin@yadro.com>
+
+commit a37eecb705f33726f1fb7cd2a67e514a15dfe693 upstream.
+
+If a device uses MCP23xxx IO expander to receive IRQs, the following
+bug can happen:
+
+ BUG: sleeping function called from invalid context
+ at kernel/locking/mutex.c:283
+ in_atomic(): 1, irqs_disabled(): 1, non_block: 0, ...
+ preempt_count: 1, expected: 0
+ ...
+ Call Trace:
+ ...
+ __might_resched+0x104/0x10e
+ __might_sleep+0x3e/0x62
+ mutex_lock+0x20/0x4c
+ regmap_lock_mutex+0x10/0x18
+ regmap_update_bits_base+0x2c/0x66
+ mcp23s08_irq_set_type+0x1ae/0x1d6
+ __irq_set_trigger+0x56/0x172
+ __setup_irq+0x1e6/0x646
+ request_threaded_irq+0xb6/0x160
+ ...
+
+We observed the problem while experimenting with a touchscreen driver which
+used MCP23017 IO expander (I2C).
+
+The regmap in the pinctrl-mcp23s08 driver uses a mutex for protection from
+concurrent accesses, which is the default for regmaps without .fast_io,
+.disable_locking, etc.
+
+mcp23s08_irq_set_type() calls regmap_update_bits_base(), and the latter
+locks the mutex.
+
+However, __setup_irq() locks the desc->lock spinlock before calling these
+functions. As a result, the system tries to lock the mutex while holding
+the spinlock.
+
+It seems the internal regmap locks are not needed in this driver at all.
+mcp->lock already seems to protect the regmap from concurrent accesses,
+except, probably, in mcp_pinconf_get/set().
+
+mcp23s08_irq_set_type() and mcp23s08_irq_mask/unmask() are called under
+chip_bus_lock(), which calls mcp23s08_irq_bus_lock(). The latter takes
+mcp->lock and enables regmap caching, so that the potentially slow I2C
+accesses are deferred until chip_bus_unlock().
+
+The accesses to the regmap from mcp23s08_probe_one() do not need additional
+locking.
+
+In all remaining places where the regmap is accessed, except
+mcp_pinconf_get/set(), the driver already takes mcp->lock.
+
+This patch adds locking in mcp_pinconf_get/set() and disables internal
+locking in the regmap config. Among other things, it fixes the sleeping
+in atomic context described above.
+
+Fixes: 8f38910ba4f6 ("pinctrl: mcp23s08: switch to regmap caching")
+Cc: stable@vger.kernel.org
+Signed-off-by: Evgenii Shatokhin <e.shatokhin@yadro.com>
+Link: https://lore.kernel.org/20241209074659.1442898-1-e.shatokhin@yadro.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pinctrl/pinctrl-mcp23s08.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/pinctrl/pinctrl-mcp23s08.c
++++ b/drivers/pinctrl/pinctrl-mcp23s08.c
+@@ -86,6 +86,7 @@ const struct regmap_config mcp23x08_regm
+ .num_reg_defaults = ARRAY_SIZE(mcp23x08_defaults),
+ .cache_type = REGCACHE_FLAT,
+ .max_register = MCP_OLAT,
++ .disable_locking = true, /* mcp->lock protects the regmap */
+ };
+ EXPORT_SYMBOL_GPL(mcp23x08_regmap);
+
+@@ -132,6 +133,7 @@ const struct regmap_config mcp23x17_regm
+ .num_reg_defaults = ARRAY_SIZE(mcp23x17_defaults),
+ .cache_type = REGCACHE_FLAT,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
++ .disable_locking = true, /* mcp->lock protects the regmap */
+ };
+ EXPORT_SYMBOL_GPL(mcp23x17_regmap);
+
+@@ -228,7 +230,9 @@ static int mcp_pinconf_get(struct pinctr
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
++ mutex_lock(&mcp->lock);
+ ret = mcp_read(mcp, MCP_GPPU, &data);
++ mutex_unlock(&mcp->lock);
+ if (ret < 0)
+ return ret;
+ status = (data & BIT(pin)) ? 1 : 0;
+@@ -257,7 +261,9 @@ static int mcp_pinconf_set(struct pinctr
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
++ mutex_lock(&mcp->lock);
+ ret = mcp_set_bit(mcp, MCP_GPPU, pin, arg);
++ mutex_unlock(&mcp->lock);
+ break;
+ default:
+ dev_dbg(mcp->dev, "Invalid config param %04x\n", param);
--- /dev/null
+From d0257e089d1bbd35c69b6c97ff73e3690ab149a9 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@linaro.org>
+Date: Sat, 30 Nov 2024 13:06:41 +0300
+Subject: RDMA/uverbs: Prevent integer overflow issue
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+commit d0257e089d1bbd35c69b6c97ff73e3690ab149a9 upstream.
+
+In the expression "cmd.wqe_size * cmd.wr_count", both variables are u32
+values that come from the user so the multiplication can lead to integer
+wrapping. Then we pass the result to uverbs_request_next_ptr() which also
+could potentially wrap. The "cmd.sge_count * sizeof(struct ib_uverbs_sge)"
+multiplication can also overflow on 32bit systems although it's fine on
+64bit systems.
+
+This patch does two things. First, I've re-arranged the condition in
+uverbs_request_next_ptr() so that the user-controlled variable "len" is on
+one side of the comparison by itself without any math. Then I've modified
+all the callers to use size_mul() for the multiplications.
+
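+An illustrative sketch of the wrap (hypothetical values, not part of the
+patch):
+
+	u32 wqe_size = 0x10000, wr_count = 0x10000;	/* both user supplied	 */
+	u32 wrapped = wqe_size * wr_count;		/* 2^32 wraps to 0	 */
+	size_t len = size_mul(wqe_size, wr_count);	/* saturates on overflow */
+
+With the check rewritten as "len > iter->end - iter->cur", the
+user-controlled length also no longer feeds pointer arithmetic that could
+itself overflow.
+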
+Fixes: 67cdb40ca444 ("[IB] uverbs: Implement more commands")
+Cc: stable@vger.kernel.org
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://patch.msgid.link/b8765ab3-c2da-4611-aae0-ddd6ba173d23@stanley.mountain
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/core/uverbs_cmd.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -161,7 +161,7 @@ static const void __user *uverbs_request
+ {
+ const void __user *res = iter->cur;
+
+- if (iter->cur + len > iter->end)
++ if (len > iter->end - iter->cur)
+ return (void __force __user *)ERR_PTR(-ENOSPC);
+ iter->cur += len;
+ return res;
+@@ -2010,11 +2010,13 @@ static int ib_uverbs_post_send(struct uv
+ ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
+- wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
++ wqes = uverbs_request_next_ptr(&iter, size_mul(cmd.wqe_size,
++ cmd.wr_count));
+ if (IS_ERR(wqes))
+ return PTR_ERR(wqes);
+- sgls = uverbs_request_next_ptr(
+- &iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
++ sgls = uverbs_request_next_ptr(&iter,
++ size_mul(cmd.sge_count,
++ sizeof(struct ib_uverbs_sge)));
+ if (IS_ERR(sgls))
+ return PTR_ERR(sgls);
+ ret = uverbs_request_finish(&iter);
+@@ -2200,11 +2202,11 @@ ib_uverbs_unmarshall_recv(struct uverbs_
+ if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
+ return ERR_PTR(-EINVAL);
+
+- wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
++ wqes = uverbs_request_next_ptr(iter, size_mul(wqe_size, wr_count));
+ if (IS_ERR(wqes))
+ return ERR_CAST(wqes);
+- sgls = uverbs_request_next_ptr(
+- iter, sge_count * sizeof(struct ib_uverbs_sge));
++ sgls = uverbs_request_next_ptr(iter, size_mul(sge_count,
++ sizeof(struct ib_uverbs_sge)));
+ if (IS_ERR(sgls))
+ return ERR_CAST(sgls);
+ ret = uverbs_request_finish(iter);
--- /dev/null
+From 18b2093f4598d8ee67a8153badc93f0fa7686b8a Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Wed, 11 Dec 2024 11:01:51 -1000
+Subject: sched_ext: Fix invalid irq restore in scx_ops_bypass()
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 18b2093f4598d8ee67a8153badc93f0fa7686b8a upstream.
+
+While adding outer irqsave/restore locking, 0e7ffff1b811 ("scx: Fix raciness
+in scx_ops_bypass()") forgot to convert an inner rq_unlock_irqrestore() to
+rq_unlock() which could re-enable IRQ prematurely leading to the following
+warning:
+
+ raw_local_irq_restore() called with IRQs enabled
+ WARNING: CPU: 1 PID: 96 at kernel/locking/irqflag-debug.c:10 warn_bogus_irq_restore+0x30/0x40
+ ...
+ Sched_ext: create_dsq (enabling)
+ pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+ pc : warn_bogus_irq_restore+0x30/0x40
+ lr : warn_bogus_irq_restore+0x30/0x40
+ ...
+ Call trace:
+ warn_bogus_irq_restore+0x30/0x40 (P)
+ warn_bogus_irq_restore+0x30/0x40 (L)
+ scx_ops_bypass+0x224/0x3b8
+ scx_ops_enable.isra.0+0x2c8/0xaa8
+ bpf_scx_reg+0x18/0x30
+ ...
+ irq event stamp: 33739
+ hardirqs last enabled at (33739): [<ffff8000800b699c>] scx_ops_bypass+0x174/0x3b8
+ hardirqs last disabled at (33738): [<ffff800080d48ad4>] _raw_spin_lock_irqsave+0xb4/0xd8
+
+Drop the stray _irqrestore().
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Ihor Solodrai <ihor.solodrai@pm.me>
+Link: http://lkml.kernel.org/r/qC39k3UsonrBYD_SmuxHnZIQLsuuccoCrkiqb_BT7DvH945A1_LZwE4g-5Pu9FcCtqZt4lY1HhIPi0homRuNWxkgo1rgP3bkxa0donw8kV4=@pm.me
+Fixes: 0e7ffff1b811 ("scx: Fix raciness in scx_ops_bypass()")
+Cc: stable@vger.kernel.org # v6.12
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -4367,7 +4367,7 @@ static void scx_ops_bypass(bool bypass)
+ * sees scx_rq_bypassing() before moving tasks to SCX.
+ */
+ if (!scx_enabled()) {
+- rq_unlock_irqrestore(rq, &rf);
++ rq_unlock(rq, &rf);
+ continue;
+ }
+
--- /dev/null
+From 35bf430e08a18fdab6eb94492a06d9ad14c6179b Mon Sep 17 00:00:00 2001
+From: Henry Huang <henry.hj@antgroup.com>
+Date: Sun, 22 Dec 2024 23:43:16 +0800
+Subject: sched_ext: initialize kit->cursor.flags
+
+From: Henry Huang <henry.hj@antgroup.com>
+
+commit 35bf430e08a18fdab6eb94492a06d9ad14c6179b upstream.
+
+struct bpf_iter_scx_dsq *it may not be initialized.
+If we don't call scx_bpf_dsq_move_set_vtime() and scx_bpf_dsq_move_set_slice()
+before scx_bpf_dsq_move(), it causes unexpected behaviors:
+1. A huge slice is assigned to p->scx.slice
+2. An invalid vtime is assigned to p->scx.dsq_vtime
+
+Signed-off-by: Henry Huang <henry.hj@antgroup.com>
+Fixes: 6462dd53a260 ("sched_ext: Compact struct bpf_iter_scx_dsq_kern")
+Cc: stable@vger.kernel.org # v6.12
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -6637,7 +6637,7 @@ __bpf_kfunc int bpf_iter_scx_dsq_new(str
+ return -ENOENT;
+
+ INIT_LIST_HEAD(&kit->cursor.node);
+- kit->cursor.flags |= SCX_DSQ_LNODE_ITER_CURSOR | flags;
++ kit->cursor.flags = SCX_DSQ_LNODE_ITER_CURSOR | flags;
+ kit->cursor.priv = READ_ONCE(kit->dsq->seq);
+
+ return 0;
--- /dev/null
+From 0210d251162f4033350a94a43f95b1c39ec84a90 Mon Sep 17 00:00:00 2001
+From: Kuan-Wei Chiu <visitorckw@gmail.com>
+Date: Thu, 26 Dec 2024 22:03:32 +0800
+Subject: scripts/sorttable: fix orc_sort_cmp() to maintain symmetry and transitivity
+
+From: Kuan-Wei Chiu <visitorckw@gmail.com>
+
+commit 0210d251162f4033350a94a43f95b1c39ec84a90 upstream.
+
+The orc_sort_cmp() function, used with qsort(), previously violated the
+symmetry and transitivity rules required by the C standard. Specifically,
+when both entries are ORC_TYPE_UNDEFINED, it could result in both a < b
+and b < a, which breaks the required symmetry and transitivity. This can
+lead to undefined behavior and incorrect sorting results, potentially
+causing memory corruption in glibc implementations [1].
+
+Symmetry: If x < y, then y > x.
+Transitivity: If x < y and y < z, then x < z.
+
+Fix the comparison logic to return 0 when both entries are
+ORC_TYPE_UNDEFINED, ensuring compliance with qsort() requirements.
+
+Link: https://www.qualys.com/2024/01/30/qsort.txt [1]
+Link: https://lkml.kernel.org/r/20241226140332.2670689-1-visitorckw@gmail.com
+Fixes: 57fa18994285 ("scripts/sorttable: Implement build-time ORC unwind table sorting")
+Fixes: fb799447ae29 ("x86,objtool: Split UNWIND_HINT_EMPTY in two")
+Signed-off-by: Kuan-Wei Chiu <visitorckw@gmail.com>
+Cc: Ching-Chun (Jim) Huang <jserv@ccns.ncku.edu.tw>
+Cc: <chuang@cs.nycu.edu.tw>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Josh Poimboeuf <jpoimboe@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Shile Zhang <shile.zhang@linux.alibaba.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ scripts/sorttable.h | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/scripts/sorttable.h
++++ b/scripts/sorttable.h
+@@ -110,7 +110,7 @@ static inline unsigned long orc_ip(const
+
+ static int orc_sort_cmp(const void *_a, const void *_b)
+ {
+- struct orc_entry *orc_a;
++ struct orc_entry *orc_a, *orc_b;
+ const int *a = g_orc_ip_table + *(int *)_a;
+ const int *b = g_orc_ip_table + *(int *)_b;
+ unsigned long a_val = orc_ip(a);
+@@ -128,6 +128,9 @@ static int orc_sort_cmp(const void *_a,
+ * whitelisted .o files which didn't get objtool generation.
+ */
+ orc_a = g_orc_table + (a - g_orc_ip_table);
++ orc_b = g_orc_table + (b - g_orc_ip_table);
++ if (orc_a->type == ORC_TYPE_UNDEFINED && orc_b->type == ORC_TYPE_UNDEFINED)
++ return 0;
+ return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
+ }
+
drm-xe-wait-for-migration-job-before-unmapping-pages.patch
alsa-hda-realtek-add-quirk-for-framework-f111-000c.patch
alsa-seq-oss-fix-races-at-processing-sysex-messages.patch
+ocfs2-fix-slab-use-after-free-due-to-dangling-pointer-dqi_priv.patch
+kcov-mark-in_softirq_really-as-__always_inline.patch
+maple_tree-reload-mas-before-the-second-call-for-mas_empty_area.patch
+clk-clk-imx8mp-audiomix-fix-function-signature.patch
+scripts-sorttable-fix-orc_sort_cmp-to-maintain-symmetry-and-transitivity.patch
+vmstat-disable-vmstat_work-on-vmstat_cpu_down_prep.patch
+sched_ext-fix-invalid-irq-restore-in-scx_ops_bypass.patch
+rdma-uverbs-prevent-integer-overflow-issue.patch
+pinctrl-mcp23s08-fix-sleeping-in-atomic-context-due-to-regmap-locking.patch
+workqueue-do-not-warn-when-cancelling-wq_mem_reclaim-work-from-wq_mem_reclaim-worker.patch
+sky2-add-device-id-11ab-4373-for-marvell-88e8075.patch
+sched_ext-initialize-kit-cursor.flags.patch
+net-sctp-prevent-autoclose-integer-overflow-in-sctp_association_init.patch
+io_uring-rw-fix-downgraded-mshot-read.patch
+drm-adv7511-drop-dsi-single-lane-support.patch
+dt-bindings-display-adi-adv7533-drop-single-lane-support.patch
+drm-adv7511-fix-use-after-free-in-adv7533_attach_dsi.patch
+wifi-iwlwifi-mvm-fix-__counted_by-usage-in-cfg80211_wowlan_nd_.patch
+fgraph-add-read_once-when-accessing-fgraph_array.patch
+net-ethernet-ti-am65-cpsw-default-to-round-robin-for-host-port-receive.patch
+mm-damon-core-fix-ignored-quota-goals-and-filters-of-newly-committed-schemes.patch
+mm-damon-core-fix-new-damon_target-objects-leaks-on-damon_commit_targets.patch
+mm-shmem-fix-the-update-of-shmem_falloc-nr_unswapped.patch
+mm-shmem-fix-incorrect-index-alignment-for-within_size-policy.patch
+fs-proc-task_mmu-fix-pagemap-flags-with-pmd-thp-entries-on-32bit.patch
--- /dev/null
+From 03c8d0af2e409e15c16130b185e12b5efba0a6b9 Mon Sep 17 00:00:00 2001
+From: Pascal Hambourg <pascal@plouf.fr.eu.org>
+Date: Mon, 23 Dec 2024 17:44:01 +0100
+Subject: sky2: Add device ID 11ab:4373 for Marvell 88E8075
+
+From: Pascal Hambourg <pascal@plouf.fr.eu.org>
+
+commit 03c8d0af2e409e15c16130b185e12b5efba0a6b9 upstream.
+
+A Marvell 88E8075 ethernet controller has this device ID instead of
+11ab:4370 and works fine with the sky2 driver.
+
+Signed-off-by: Pascal Hambourg <pascal@plouf.fr.eu.org>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/10165a62-99fb-4be6-8c64-84afd6234085@plouf.fr.eu.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/sky2.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -130,6 +130,7 @@ static const struct pci_device_id sky2_i
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4373) }, /* 88E8075 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
--- /dev/null
+From adcfb264c3ed51fbbf5068ddf10d309a63683868 Mon Sep 17 00:00:00 2001
+From: Koichiro Den <koichiro.den@canonical.com>
+Date: Sat, 21 Dec 2024 12:33:20 +0900
+Subject: vmstat: disable vmstat_work on vmstat_cpu_down_prep()
+
+From: Koichiro Den <koichiro.den@canonical.com>
+
+commit adcfb264c3ed51fbbf5068ddf10d309a63683868 upstream.
+
+Even after mm/vmstat:online teardown, shepherd may still queue work for
+the dying cpu until the cpu is removed from the online mask. While it's
+quite rare, this means that after unbind_workers() unbinds a per-cpu
+kworker, it potentially runs vmstat_update for the dying CPU on an
+irrelevant cpu before entering atomic AP states. When
+CONFIG_DEBUG_PREEMPT=y, this results in the following error and backtrace.
+
+ BUG: using smp_processor_id() in preemptible [00000000] code: \
+ kworker/7:3/1702
+ caller is refresh_cpu_vm_stats+0x235/0x5f0
+ CPU: 0 UID: 0 PID: 1702 Comm: kworker/7:3 Tainted: G
+ Tainted: [N]=TEST
+ Workqueue: mm_percpu_wq vmstat_update
+ Call Trace:
+ <TASK>
+ dump_stack_lvl+0x8d/0xb0
+ check_preemption_disabled+0xce/0xe0
+ refresh_cpu_vm_stats+0x235/0x5f0
+ vmstat_update+0x17/0xa0
+ process_one_work+0x869/0x1aa0
+ worker_thread+0x5e5/0x1100
+ kthread+0x29e/0x380
+ ret_from_fork+0x2d/0x70
+ ret_from_fork_asm+0x1a/0x30
+ </TASK>
+
+So, for mm/vmstat:online, disable vmstat_work reliably on teardown and
+symmetrically enable it on startup.
+
+Link: https://lkml.kernel.org/r/20241221033321.4154409-1-koichiro.den@canonical.com
+Signed-off-by: Koichiro Den <koichiro.den@canonical.com>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/vmstat.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -2139,13 +2139,14 @@ static int vmstat_cpu_online(unsigned in
+ if (!node_state(cpu_to_node(cpu), N_CPU)) {
+ node_set_state(cpu_to_node(cpu), N_CPU);
+ }
++ enable_delayed_work(&per_cpu(vmstat_work, cpu));
+
+ return 0;
+ }
+
+ static int vmstat_cpu_down_prep(unsigned int cpu)
+ {
+- cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
++ disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
+ return 0;
+ }
+
--- /dev/null
+From cc0c53f4fac562efb3aca2bc493515e77642ae33 Mon Sep 17 00:00:00 2001
+From: Kees Cook <kees@kernel.org>
+Date: Wed, 19 Jun 2024 14:12:45 -0700
+Subject: wifi: iwlwifi: mvm: Fix __counted_by usage in cfg80211_wowlan_nd_*
+
+From: Kees Cook <kees@kernel.org>
+
+commit cc0c53f4fac562efb3aca2bc493515e77642ae33 upstream.
+
+Both struct cfg80211_wowlan_nd_match and struct cfg80211_wowlan_nd_info
+pre-allocate space for channels and matches, but then may end up using
+fewer than the full allocation. Shrink the associated counters
+(n_channels and n_matches) after counting the results. This avoids
+compile-time (and run-time) warnings from __counted_by. (The counter
+member needs to be updated _before_ accessing the array index.)
+
+Seen with the upcoming GCC 15:
+
+drivers/net/wireless/intel/iwlwifi/mvm/d3.c: In function 'iwl_mvm_query_set_freqs':
+drivers/net/wireless/intel/iwlwifi/mvm/d3.c:2877:66: warning: operation on 'match->n_channels' may be undefined [-Wsequence-point]
+ 2877 | match->channels[match->n_channels++] =
+ | ~~~~~~~~~~~~~~~~~^~
+drivers/net/wireless/intel/iwlwifi/mvm/d3.c:2885:66: warning: operation on 'match->n_channels' may be undefined [-Wsequence-point]
+ 2885 | match->channels[match->n_channels++] =
+ | ~~~~~~~~~~~~~~~~~^~
+drivers/net/wireless/intel/iwlwifi/mvm/d3.c: In function 'iwl_mvm_query_netdetect_reasons':
+drivers/net/wireless/intel/iwlwifi/mvm/d3.c:2982:58: warning: operation on 'net_detect->n_matches' may be undefined [-Wsequence-point]
+ 2982 | net_detect->matches[net_detect->n_matches++] = match;
+ | ~~~~~~~~~~~~~~~~~~~~~^~
+
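+An abbreviated sketch of the pattern the fix follows ('demo', 'wanted',
+'freqs', 'alloc' and 'used' are hypothetical placeholders, not driver
+code):
+
+	struct demo {
+		u32 n_channels;
+		u32 channels[] __counted_by(n_channels);
+	};
+
+	d = kzalloc(struct_size(d, channels, alloc), GFP_KERNEL);
+	d->n_channels = alloc;		/* counter covers the full allocation */
+	for (i = 0; i < alloc; i++)
+		if (wanted[i])
+			d->channels[used++] = freqs[i];
+	d->n_channels = used;		/* shrink to what was actually filled */
+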
+Cc: stable@vger.kernel.org
+Fixes: aa4ec06c455d ("wifi: cfg80211: use __counted_by where appropriate")
+Signed-off-by: Kees Cook <kees@kernel.org>
+Reviewed-by: Gustavo A. R. Silva <gustavoars@kernel.org>
+Link: https://patch.msgid.link/20240619211233.work.355-kees@kernel.org
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -2871,6 +2871,7 @@ static void iwl_mvm_query_set_freqs(stru
+ int idx)
+ {
+ int i;
++ int n_channels = 0;
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
+@@ -2879,7 +2880,7 @@ static void iwl_mvm_query_set_freqs(stru
+
+ for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++)
+ if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
+- match->channels[match->n_channels++] =
++ match->channels[n_channels++] =
+ mvm->nd_channels[i]->center_freq;
+ } else {
+ struct iwl_scan_offload_profile_match_v1 *matches =
+@@ -2887,9 +2888,11 @@ static void iwl_mvm_query_set_freqs(stru
+
+ for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++)
+ if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
+- match->channels[match->n_channels++] =
++ match->channels[n_channels++] =
+ mvm->nd_channels[i]->center_freq;
+ }
++ /* We may have ended up with fewer channels than we allocated. */
++ match->n_channels = n_channels;
+ }
+
+ /**
+@@ -2970,6 +2973,8 @@ static void iwl_mvm_query_netdetect_reas
+ GFP_KERNEL);
+ if (!net_detect || !n_matches)
+ goto out_report_nd;
++ net_detect->n_matches = n_matches;
++ n_matches = 0;
+
+ for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
+ struct cfg80211_wowlan_nd_match *match;
+@@ -2983,8 +2988,9 @@ static void iwl_mvm_query_netdetect_reas
+ GFP_KERNEL);
+ if (!match)
+ goto out_report_nd;
++ match->n_channels = n_channels;
+
+- net_detect->matches[net_detect->n_matches++] = match;
++ net_detect->matches[n_matches++] = match;
+
+ /* We inverted the order of the SSIDs in the scan
+ * request, so invert the index here.
+@@ -2999,6 +3005,8 @@ static void iwl_mvm_query_netdetect_reas
+
+ iwl_mvm_query_set_freqs(mvm, d3_data->nd_results, match, i);
+ }
++ /* We may have fewer matches than we allocated. */
++ net_detect->n_matches = n_matches;
+
+ out_report_nd:
+ wakeup.net_detect = net_detect;
--- /dev/null
+From de35994ecd2dd6148ab5a6c5050a1670a04dec77 Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Date: Thu, 19 Dec 2024 09:30:30 +0000
+Subject: workqueue: Do not warn when cancelling WQ_MEM_RECLAIM work from !WQ_MEM_RECLAIM worker
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+commit de35994ecd2dd6148ab5a6c5050a1670a04dec77 upstream.
+
+After commit
+746ae46c1113 ("drm/sched: Mark scheduler work queues with WQ_MEM_RECLAIM")
+amdgpu started seeing the following warning:
+
+ [ ] workqueue: WQ_MEM_RECLAIM sdma0:drm_sched_run_job_work [gpu_sched] is flushing !WQ_MEM_RECLAIM events:amdgpu_device_delay_enable_gfx_off [amdgpu]
+...
+ [ ] Workqueue: sdma0 drm_sched_run_job_work [gpu_sched]
+...
+ [ ] Call Trace:
+ [ ] <TASK>
+...
+ [ ] ? check_flush_dependency+0xf5/0x110
+...
+ [ ] cancel_delayed_work_sync+0x6e/0x80
+ [ ] amdgpu_gfx_off_ctrl+0xab/0x140 [amdgpu]
+ [ ] amdgpu_ring_alloc+0x40/0x50 [amdgpu]
+ [ ] amdgpu_ib_schedule+0xf4/0x810 [amdgpu]
+ [ ] ? drm_sched_run_job_work+0x22c/0x430 [gpu_sched]
+ [ ] amdgpu_job_run+0xaa/0x1f0 [amdgpu]
+ [ ] drm_sched_run_job_work+0x257/0x430 [gpu_sched]
+ [ ] process_one_work+0x217/0x720
+...
+ [ ] </TASK>
+
+The intent of the verification done in check_flush_dependency() is to
+ensure forward progress during memory reclaim, by flagging cases where
+either a memory reclaim process or a memory reclaim work item is flushed
+from a context not marked as memory-reclaim safe.
+
+This is correct when flushing, but when called from the
+cancel(_delayed)_work_sync() paths it is a false positive because the work
+is either already running or will not run at all. Therefore cancelling it
+is safe, and we can relax the warning criteria by letting the helper know
+of the calling context.
+
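+A hedged sketch of the behaviour after this change ('gfx_off_work' is a
+hypothetical work item, for illustration only):
+
+	/* from a WQ_MEM_RECLAIM worker, targeting work queued on an
+	 * ordinary (!WQ_MEM_RECLAIM) workqueue:
+	 */
+	cancel_delayed_work_sync(&gfx_off_work);	/* no longer warns */
+	flush_delayed_work(&gfx_off_work);		/* still warns	   */
+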
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Fixes: fca839c00a12 ("workqueue: warn if memory reclaim tries to flush !WQ_MEM_RECLAIM workqueue")
+References: 746ae46c1113 ("drm/sched: Mark scheduler work queues with WQ_MEM_RECLAIM")
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: Christian König <christian.koenig@amd.com>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: <stable@vger.kernel.org> # v4.5+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/workqueue.c | 22 +++++++++++++---------
+ 1 file changed, 13 insertions(+), 9 deletions(-)
+
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3680,23 +3680,27 @@ void workqueue_softirq_dead(unsigned int
+ * check_flush_dependency - check for flush dependency sanity
+ * @target_wq: workqueue being flushed
+ * @target_work: work item being flushed (NULL for workqueue flushes)
++ * @from_cancel: are we called from the work cancel path
+ *
+ * %current is trying to flush the whole @target_wq or @target_work on it.
+- * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
+- * reclaiming memory or running on a workqueue which doesn't have
+- * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
+- * a deadlock.
++ * If this is not the cancel path (which implies work being flushed is either
++ * already running, or will not be at all), check if @target_wq doesn't have
++ * %WQ_MEM_RECLAIM and verify that %current is not reclaiming memory or running
++ * on a workqueue which doesn't have %WQ_MEM_RECLAIM as that can break forward-
++ * progress guarantee leading to a deadlock.
+ */
+ static void check_flush_dependency(struct workqueue_struct *target_wq,
+- struct work_struct *target_work)
++ struct work_struct *target_work,
++ bool from_cancel)
+ {
+- work_func_t target_func = target_work ? target_work->func : NULL;
++ work_func_t target_func;
+ struct worker *worker;
+
+- if (target_wq->flags & WQ_MEM_RECLAIM)
++ if (from_cancel || target_wq->flags & WQ_MEM_RECLAIM)
+ return;
+
+ worker = current_wq_worker();
++ target_func = target_work ? target_work->func : NULL;
+
+ WARN_ONCE(current->flags & PF_MEMALLOC,
+ "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
+@@ -3966,7 +3970,7 @@ void __flush_workqueue(struct workqueue_
+ list_add_tail(&this_flusher.list, &wq->flusher_overflow);
+ }
+
+- check_flush_dependency(wq, NULL);
++ check_flush_dependency(wq, NULL, false);
+
+ mutex_unlock(&wq->mutex);
+
+@@ -4141,7 +4145,7 @@ static bool start_flush_work(struct work
+ }
+
+ wq = pwq->wq;
+- check_flush_dependency(wq, work);
++ check_flush_dependency(wq, work, from_cancel);
+
+ insert_wq_barrier(pwq, barr, work, worker);
+ raw_spin_unlock_irq(&pool->lock);