--- /dev/null
+From 0525064bb82e50d59543b62b9d41a606198a4a44 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Fri, 29 Nov 2024 12:25:30 +0000
+Subject: btrfs: fix race with memory mapped writes when activating swap file
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit 0525064bb82e50d59543b62b9d41a606198a4a44 upstream.
+
+When activating the swap file we flush all delalloc and wait for ordered
+extent completion, so that we don't miss any delalloc and extents before
+we check that the file's extent layout is usable for a swap file and
+activate the swap file. We are called with the inode's VFS lock acquired,
+so we won't race with buffered and direct IO writes, however we can still
+race with memory mapped writes since they don't acquire the inode's VFS
+lock. The race window is between flushing all delalloc and locking the
+whole file's extent range, since memory mapped writes lock an extent range
+with the length of a page.
+
+Fix this by acquiring the inode's mmap lock before we flush delalloc.
+
+CC: stable@vger.kernel.org # 5.4+
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/inode.c | 31 ++++++++++++++++++++++++-------
+ 1 file changed, 24 insertions(+), 7 deletions(-)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -9791,28 +9791,40 @@ static int btrfs_swap_activate(struct sw
+ u64 start;
+
+ /*
++ * Acquire the inode's mmap lock to prevent races with memory mapped
++ * writes, as they could happen after we flush delalloc below and before
++ * we lock the extent range further below. The inode was already locked
++ * up in the call chain.
++ */
++ btrfs_assert_inode_locked(BTRFS_I(inode));
++ down_write(&BTRFS_I(inode)->i_mmap_lock);
++
++ /*
+ * If the swap file was just created, make sure delalloc is done. If the
+ * file changes again after this, the user is doing something stupid and
+ * we don't really care.
+ */
+ ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
+ if (ret)
+- return ret;
++ goto out_unlock_mmap;
+
+ /*
+ * The inode is locked, so these flags won't change after we check them.
+ */
+ if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
+ btrfs_warn(fs_info, "swapfile must not be compressed");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto out_unlock_mmap;
+ }
+ if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
+ btrfs_warn(fs_info, "swapfile must not be copy-on-write");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto out_unlock_mmap;
+ }
+ if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
+ btrfs_warn(fs_info, "swapfile must not be checksummed");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto out_unlock_mmap;
+ }
+
+ /*
+@@ -9827,7 +9839,8 @@ static int btrfs_swap_activate(struct sw
+ if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
+ btrfs_warn(fs_info,
+ "cannot activate swapfile while exclusive operation is running");
+- return -EBUSY;
++ ret = -EBUSY;
++ goto out_unlock_mmap;
+ }
+
+ /*
+@@ -9841,7 +9854,8 @@ static int btrfs_swap_activate(struct sw
+ btrfs_exclop_finish(fs_info);
+ btrfs_warn(fs_info,
+ "cannot activate swapfile because snapshot creation is in progress");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto out_unlock_mmap;
+ }
+ /*
+ * Snapshots can create extents which require COW even if NODATACOW is
+@@ -9862,7 +9876,8 @@ static int btrfs_swap_activate(struct sw
+ btrfs_warn(fs_info,
+ "cannot activate swapfile because subvolume %llu is being deleted",
+ btrfs_root_id(root));
+- return -EPERM;
++ ret = -EPERM;
++ goto out_unlock_mmap;
+ }
+ atomic_inc(&root->nr_swapfiles);
+ spin_unlock(&root->root_item_lock);
+@@ -10017,6 +10032,8 @@ out:
+
+ btrfs_exclop_finish(fs_info);
+
++out_unlock_mmap:
++ up_write(&BTRFS_I(inode)->i_mmap_lock);
+ if (ret)
+ return ret;
+
--- /dev/null
+From e0cec363197e41af870613e8e17b30bf0e3d41b5 Mon Sep 17 00:00:00 2001
+From: Carlos Song <carlos.song@nxp.com>
+Date: Wed, 18 Dec 2024 12:42:38 +0800
+Subject: i2c: imx: add imx7d compatible string for applying erratum ERR007805
+
+From: Carlos Song <carlos.song@nxp.com>
+
+commit e0cec363197e41af870613e8e17b30bf0e3d41b5 upstream.
+
+The compatible string "fsl,imx7d-i2c" does not exist in the i2c-imx
+driver's compatible string table; as a result, "fsl,imx21-i2c" will be
+matched instead, which causes erratum ERR007805 to not be applied.
+
+So add the "fsl,imx7d-i2c" compatible string to the i2c-imx driver to apply
+the erratum ERR007805(https://www.nxp.com/docs/en/errata/IMX7DS_3N09P.pdf).
+
+"
+ERR007805 I2C: When the I2C clock speed is configured for 400 kHz,
+the SCL low period violates the I2C spec of 1.3 uS min
+
+Description: When the I2C module is programmed to operate at the
+maximum clock speed of 400 kHz (as defined by the I2C spec), the SCL
+clock low period violates the I2C spec of 1.3 uS min. The user must
+reduce the clock speed to obtain the SCL low time to meet the 1.3us
+I2C minimum required. This behavior means the SoC is not compliant
+to the I2C spec at 400kHz.
+
+Workaround: To meet the clock low period requirement in fast speed
+mode, SCL must be configured to 384KHz or less.
+"
+
+"fsl,imx7d-i2c" already is documented in binding doc. This erratum
+fix has been included in imx6_i2c_hwdata and it is the same in all
+I.MX6/7/8, so just reuse it.
+
+Fixes: 39c025721d70 ("i2c: imx: Implement errata ERR007805 or e7805 bus frequency limit")
+Cc: stable@vger.kernel.org # v5.18+
+Signed-off-by: Carlos Song <carlos.song@nxp.com>
+Signed-off-by: Haibo Chen <haibo.chen@nxp.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Fixes: 39c025721d70 ("i2c: imx: Implement errata ERR007805 or e7805 bus frequency limit")
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://lore.kernel.org/r/20241218044238.143414-1-carlos.song@nxp.com
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-imx.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -282,6 +282,7 @@ static const struct of_device_id i2c_imx
+ { .compatible = "fsl,imx6sll-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx6sx-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx6ul-i2c", .data = &imx6_i2c_hwdata, },
++ { .compatible = "fsl,imx7d-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx7s-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx8mm-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx8mn-i2c", .data = &imx6_i2c_hwdata, },
--- /dev/null
+From 9a8f9320d67b27ddd7f1ee88d91820197a0e908f Mon Sep 17 00:00:00 2001
+From: Conor Dooley <conor.dooley@microchip.com>
+Date: Wed, 18 Dec 2024 12:07:40 +0000
+Subject: i2c: microchip-core: actually use repeated sends
+
+From: Conor Dooley <conor.dooley@microchip.com>
+
+commit 9a8f9320d67b27ddd7f1ee88d91820197a0e908f upstream.
+
+At present, where repeated sends are intended to be used, the
+i2c-microchip-core driver sends a stop followed by a start. Lots of i2c
+devices must not malfunction in the face of this behaviour, because the
+driver has operated like this for years! Try to keep track of whether or
+not a repeated send is required, and suppress sending a stop in these
+cases.
+
+CC: stable@vger.kernel.org
+Fixes: 64a6f1c4987e ("i2c: add support for microchip fpga i2c controllers")
+Signed-off-by: Conor Dooley <conor.dooley@microchip.com>
+Reviewed-by: Andi Shyti <andi.shyti@kernel.org>
+Link: https://lore.kernel.org/r/20241218-football-composure-e56df2461461@spud
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-microchip-corei2c.c | 120 ++++++++++++++++++++++-------
+ 1 file changed, 94 insertions(+), 26 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-microchip-corei2c.c
++++ b/drivers/i2c/busses/i2c-microchip-corei2c.c
+@@ -93,27 +93,35 @@
+ * @base: pointer to register struct
+ * @dev: device reference
+ * @i2c_clk: clock reference for i2c input clock
++ * @msg_queue: pointer to the messages requiring sending
+ * @buf: pointer to msg buffer for easier use
+ * @msg_complete: xfer completion object
+ * @adapter: core i2c abstraction
+ * @msg_err: error code for completed message
+ * @bus_clk_rate: current i2c bus clock rate
+ * @isr_status: cached copy of local ISR status
++ * @total_num: total number of messages to be sent/received
++ * @current_num: index of the current message being sent/received
+ * @msg_len: number of bytes transferred in msg
+ * @addr: address of the current slave
++ * @restart_needed: whether or not a repeated start is required after current message
+ */
+ struct mchp_corei2c_dev {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *i2c_clk;
++ struct i2c_msg *msg_queue;
+ u8 *buf;
+ struct completion msg_complete;
+ struct i2c_adapter adapter;
+ int msg_err;
++ int total_num;
++ int current_num;
+ u32 bus_clk_rate;
+ u32 isr_status;
+ u16 msg_len;
+ u8 addr;
++ bool restart_needed;
+ };
+
+ static void mchp_corei2c_core_disable(struct mchp_corei2c_dev *idev)
+@@ -222,6 +230,47 @@ static int mchp_corei2c_fill_tx(struct m
+ return 0;
+ }
+
++static void mchp_corei2c_next_msg(struct mchp_corei2c_dev *idev)
++{
++ struct i2c_msg *this_msg;
++ u8 ctrl;
++
++ if (idev->current_num >= idev->total_num) {
++ complete(&idev->msg_complete);
++ return;
++ }
++
++ /*
++ * If there's been an error, the isr needs to return control
++ * to the "main" part of the driver, so as not to keep sending
++ * messages once it completes and clears the SI bit.
++ */
++ if (idev->msg_err) {
++ complete(&idev->msg_complete);
++ return;
++ }
++
++ this_msg = idev->msg_queue++;
++
++ if (idev->current_num < (idev->total_num - 1)) {
++ struct i2c_msg *next_msg = idev->msg_queue;
++
++ idev->restart_needed = next_msg->flags & I2C_M_RD;
++ } else {
++ idev->restart_needed = false;
++ }
++
++ idev->addr = i2c_8bit_addr_from_msg(this_msg);
++ idev->msg_len = this_msg->len;
++ idev->buf = this_msg->buf;
++
++ ctrl = readb(idev->base + CORE_I2C_CTRL);
++ ctrl |= CTRL_STA;
++ writeb(ctrl, idev->base + CORE_I2C_CTRL);
++
++ idev->current_num++;
++}
++
+ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
+ {
+ u32 status = idev->isr_status;
+@@ -247,10 +296,14 @@ static irqreturn_t mchp_corei2c_handle_i
+ break;
+ case STATUS_M_SLAW_ACK:
+ case STATUS_M_TX_DATA_ACK:
+- if (idev->msg_len > 0)
++ if (idev->msg_len > 0) {
+ mchp_corei2c_fill_tx(idev);
+- else
+- last_byte = true;
++ } else {
++ if (idev->restart_needed)
++ finished = true;
++ else
++ last_byte = true;
++ }
+ break;
+ case STATUS_M_TX_DATA_NACK:
+ case STATUS_M_SLAR_NACK:
+@@ -287,7 +340,7 @@ static irqreturn_t mchp_corei2c_handle_i
+ mchp_corei2c_stop(idev);
+
+ if (last_byte || finished)
+- complete(&idev->msg_complete);
++ mchp_corei2c_next_msg(idev);
+
+ return IRQ_HANDLED;
+ }
+@@ -311,21 +364,48 @@ static irqreturn_t mchp_corei2c_isr(int
+ return ret;
+ }
+
+-static int mchp_corei2c_xfer_msg(struct mchp_corei2c_dev *idev,
+- struct i2c_msg *msg)
++static int mchp_corei2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
++ int num)
+ {
+- u8 ctrl;
++ struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap);
++ struct i2c_msg *this_msg = msgs;
+ unsigned long time_left;
++ u8 ctrl;
+
+- idev->addr = i2c_8bit_addr_from_msg(msg);
+- idev->msg_len = msg->len;
+- idev->buf = msg->buf;
++ mchp_corei2c_core_enable(idev);
++
++ /*
++ * The isr controls the flow of a transfer, this info needs to be saved
++ * to a location that it can access the queue information from.
++ */
++ idev->restart_needed = false;
++ idev->msg_queue = msgs;
++ idev->total_num = num;
++ idev->current_num = 0;
++
++ /*
++ * But the first entry to the isr is triggered by the start in this
++ * function, so the first message needs to be "dequeued".
++ */
++ idev->addr = i2c_8bit_addr_from_msg(this_msg);
++ idev->msg_len = this_msg->len;
++ idev->buf = this_msg->buf;
+ idev->msg_err = 0;
+
+- reinit_completion(&idev->msg_complete);
++ if (idev->total_num > 1) {
++ struct i2c_msg *next_msg = msgs + 1;
+
+- mchp_corei2c_core_enable(idev);
++ idev->restart_needed = next_msg->flags & I2C_M_RD;
++ }
+
++ idev->current_num++;
++ idev->msg_queue++;
++
++ reinit_completion(&idev->msg_complete);
++
++ /*
++ * Send the first start to pass control to the isr
++ */
+ ctrl = readb(idev->base + CORE_I2C_CTRL);
+ ctrl |= CTRL_STA;
+ writeb(ctrl, idev->base + CORE_I2C_CTRL);
+@@ -335,20 +415,8 @@ static int mchp_corei2c_xfer_msg(struct
+ if (!time_left)
+ return -ETIMEDOUT;
+
+- return idev->msg_err;
+-}
+-
+-static int mchp_corei2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+- int num)
+-{
+- struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap);
+- int i, ret;
+-
+- for (i = 0; i < num; i++) {
+- ret = mchp_corei2c_xfer_msg(idev, msgs++);
+- if (ret)
+- return ret;
+- }
++ if (idev->msg_err)
++ return idev->msg_err;
+
+ return num;
+ }
--- /dev/null
+From 49e1f0fd0d4cb03a16b8526c4e683e1958f71490 Mon Sep 17 00:00:00 2001
+From: Conor Dooley <conor.dooley@microchip.com>
+Date: Wed, 18 Dec 2024 12:07:42 +0000
+Subject: i2c: microchip-core: fix "ghost" detections
+
+From: Conor Dooley <conor.dooley@microchip.com>
+
+commit 49e1f0fd0d4cb03a16b8526c4e683e1958f71490 upstream.
+
+Running i2c-detect currently produces an output akin to:
+ 0 1 2 3 4 5 6 7 8 9 a b c d e f
+00: 08 -- 0a -- 0c -- 0e --
+10: 10 -- 12 -- 14 -- 16 -- UU 19 -- 1b -- 1d -- 1f
+20: -- 21 -- 23 -- 25 -- 27 -- 29 -- 2b -- 2d -- 2f
+30: -- -- -- -- -- -- -- -- 38 -- 3a -- 3c -- 3e --
+40: 40 -- 42 -- 44 -- 46 -- 48 -- 4a -- 4c -- 4e --
+50: -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
+60: 60 -- 62 -- 64 -- 66 -- 68 -- 6a -- 6c -- 6e --
+70: 70 -- 72 -- 74 -- 76 --
+
+This happens because for an i2c_msg with a len of 0 the driver will
+mark the transmission of the message as a success once the START has
+been sent, without waiting for the devices on the bus to respond with an
+ACK/NAK. Since i2cdetect seems to run in a tight loop over all addresses
+the NAK is treated as part of the next test for the next address.
+
+Delete the fast path that marks a message as complete when idev->msg_len
+is zero after sending a START/RESTART since this isn't a valid scenario.
+
+CC: stable@vger.kernel.org
+Fixes: 64a6f1c4987e ("i2c: add support for microchip fpga i2c controllers")
+Signed-off-by: Conor Dooley <conor.dooley@microchip.com>
+Reviewed-by: Andi Shyti <andi.shyti@kernel.org>
+Link: https://lore.kernel.org/r/20241218-outbid-encounter-b2e78b1cc707@spud
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-microchip-corei2c.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-microchip-corei2c.c
++++ b/drivers/i2c/busses/i2c-microchip-corei2c.c
+@@ -287,8 +287,6 @@ static irqreturn_t mchp_corei2c_handle_i
+ ctrl &= ~CTRL_STA;
+ writeb(idev->addr, idev->base + CORE_I2C_DATA);
+ writeb(ctrl, idev->base + CORE_I2C_CTRL);
+- if (idev->msg_len == 0)
+- finished = true;
+ break;
+ case STATUS_M_ARB_LOST:
+ idev->msg_err = -EAGAIN;
--- /dev/null
+From e33ac68e5e21ec1292490dfe061e75c0dbdd3bd4 Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Thu, 26 Dec 2024 16:49:23 +0000
+Subject: io_uring/sqpoll: fix sqpoll error handling races
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+commit e33ac68e5e21ec1292490dfe061e75c0dbdd3bd4 upstream.
+
+BUG: KASAN: slab-use-after-free in __lock_acquire+0x370b/0x4a10 kernel/locking/lockdep.c:5089
+Call Trace:
+<TASK>
+...
+_raw_spin_lock_irqsave+0x3d/0x60 kernel/locking/spinlock.c:162
+class_raw_spinlock_irqsave_constructor include/linux/spinlock.h:551 [inline]
+try_to_wake_up+0xb5/0x23c0 kernel/sched/core.c:4205
+io_sq_thread_park+0xac/0xe0 io_uring/sqpoll.c:55
+io_sq_thread_finish+0x6b/0x310 io_uring/sqpoll.c:96
+io_sq_offload_create+0x162/0x11d0 io_uring/sqpoll.c:497
+io_uring_create io_uring/io_uring.c:3724 [inline]
+io_uring_setup+0x1728/0x3230 io_uring/io_uring.c:3806
+...
+
+Kun Hu reports that the SQPOLL creating error path has UAF, which
+happens if io_uring_alloc_task_context() fails and then io_sq_thread()
+manages to run and complete before the rest of error handling code,
+which means io_sq_thread_finish() is looking at already killed task.
+
+Note that this is mostly theoretical, requiring fault injection on
+the allocation side to trigger in practice.
+
+Cc: stable@vger.kernel.org
+Reported-by: Kun Hu <huk23@m.fudan.edu.cn>
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Link: https://lore.kernel.org/r/0f2f1aa5729332612bd01fe0f2f385fd1f06ce7c.1735231717.git.asml.silence@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/sqpoll.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/io_uring/sqpoll.c
++++ b/io_uring/sqpoll.c
+@@ -412,6 +412,7 @@ void io_sqpoll_wait_sq(struct io_ring_ct
+ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
+ struct io_uring_params *p)
+ {
++ struct task_struct *task_to_put = NULL;
+ int ret;
+
+ /* Retain compatibility with failing for an invalid attach attempt */
+@@ -492,6 +493,7 @@ __cold int io_sq_offload_create(struct i
+ }
+
+ sqd->thread = tsk;
++ task_to_put = get_task_struct(tsk);
+ ret = io_uring_alloc_task_context(tsk, ctx);
+ wake_up_new_task(tsk);
+ if (ret)
+@@ -502,11 +504,15 @@ __cold int io_sq_offload_create(struct i
+ goto err;
+ }
+
++ if (task_to_put)
++ put_task_struct(task_to_put);
+ return 0;
+ err_sqpoll:
+ complete(&ctx->sq_data->exited);
+ err:
+ io_sq_thread_finish(ctx);
++ if (task_to_put)
++ put_task_struct(task_to_put);
+ return ret;
+ }
+
--- /dev/null
+From a60b990798eb17433d0283788280422b1bd94b18 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 14 Dec 2024 12:50:18 +0100
+Subject: PCI/MSI: Handle lack of irqdomain gracefully
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit a60b990798eb17433d0283788280422b1bd94b18 upstream.
+
+Alexandre observed a warning emitted from pci_msi_setup_msi_irqs() on a
+RISCV platform which does not provide PCI/MSI support:
+
+ WARNING: CPU: 1 PID: 1 at drivers/pci/msi/msi.h:121 pci_msi_setup_msi_irqs+0x2c/0x32
+ __pci_enable_msix_range+0x30c/0x596
+ pci_msi_setup_msi_irqs+0x2c/0x32
+ pci_alloc_irq_vectors_affinity+0xb8/0xe2
+
+RISCV uses hierarchical interrupt domains and correctly does not implement
+the legacy fallback. The warning triggers from the legacy fallback stub.
+
+That warning is bogus as the PCI/MSI layer knows whether a PCI/MSI parent
+domain is associated with the device or not. There is a check for MSI-X,
+which has a legacy assumption. But that legacy fallback assumption is only
+valid when legacy support is enabled, but otherwise the check should simply
+return -ENOTSUPP.
+
+Loongarch tripped over the same problem and blindly enabled legacy support
+without implementing the legacy fallbacks. There are weak implementations
+which return an error, so the problem was papered over.
+
+Correct pci_msi_domain_supports() to evaluate the legacy mode and add
+the missing supported check into the MSI enable path to complete it.
+
+Fixes: d2a463b29741 ("PCI/MSI: Reject multi-MSI early")
+Reported-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/87ed2a8ow5.ffs@tglx
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/msi/irqdomain.c | 7 +++++--
+ drivers/pci/msi/msi.c | 4 ++++
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/pci/msi/irqdomain.c
++++ b/drivers/pci/msi/irqdomain.c
+@@ -350,8 +350,11 @@ bool pci_msi_domain_supports(struct pci_
+
+ domain = dev_get_msi_domain(&pdev->dev);
+
+- if (!domain || !irq_domain_is_hierarchy(domain))
+- return mode == ALLOW_LEGACY;
++ if (!domain || !irq_domain_is_hierarchy(domain)) {
++ if (IS_ENABLED(CONFIG_PCI_MSI_ARCH_FALLBACKS))
++ return mode == ALLOW_LEGACY;
++ return false;
++ }
+
+ if (!irq_domain_is_msi_parent(domain)) {
+ /*
+--- a/drivers/pci/msi/msi.c
++++ b/drivers/pci/msi/msi.c
+@@ -433,6 +433,10 @@ int __pci_enable_msi_range(struct pci_de
+ if (WARN_ON_ONCE(dev->msi_enabled))
+ return -EINVAL;
+
++ /* Test for the availability of MSI support */
++ if (!pci_msi_domain_supports(dev, 0, ALLOW_LEGACY))
++ return -ENOTSUPP;
++
+ nvec = pci_msi_vec_count(dev);
+ if (nvec < 0)
+ return nvec;
--- /dev/null
+From b8c3a2502a205321fe66c356f4b70cabd8e1a5fc Mon Sep 17 00:00:00 2001
+From: Kan Liang <kan.liang@linux.intel.com>
+Date: Mon, 16 Dec 2024 12:45:02 -0800
+Subject: perf/x86/intel/ds: Add PEBS format 6
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+commit b8c3a2502a205321fe66c356f4b70cabd8e1a5fc upstream.
+
+The only difference between 5 and 6 is the new counters snapshotting
+group, without the following counters snapshotting enabling patches,
+it's impossible to utilize the feature in a PEBS record. It's safe to
+share the same code path with format 5.
+
+Add format 6, so the end user can at least utilize the legacy PEBS
+features.
+
+Fixes: a932aa0e868f ("perf/x86: Add Lunar Lake and Arrow Lake support")
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20241216204505.748363-1-kan.liang@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/intel/ds.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -2496,6 +2496,7 @@ void __init intel_ds_init(void)
+ x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
+ break;
+
++ case 6:
+ case 5:
+ x86_pmu.pebs_ept = 1;
+ fallthrough;
--- /dev/null
+From aa5d2ca7c179c40669edb5e96d931bf9828dea3d Mon Sep 17 00:00:00 2001
+From: Kan Liang <kan.liang@linux.intel.com>
+Date: Mon, 16 Dec 2024 08:02:52 -0800
+Subject: perf/x86/intel: Fix bitmask of OCR and FRONTEND events for LNC
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+commit aa5d2ca7c179c40669edb5e96d931bf9828dea3d upstream.
+
+The released OCR and FRONTEND events utilized more bits on Lunar Lake
+p-core. The corresponding mask in the extra_regs has to be extended to
+unblock the extra bits.
+
+Add a dedicated intel_lnc_extra_regs.
+
+Fixes: a932aa0e868f ("perf/x86: Add Lunar Lake and Arrow Lake support")
+Reported-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20241216160252.430858-1-kan.liang@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/intel/core.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -429,6 +429,16 @@ static struct event_constraint intel_lnc
+ EVENT_CONSTRAINT_END
+ };
+
++static struct extra_reg intel_lnc_extra_regs[] __read_mostly = {
++ INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0xfffffffffffull, RSP_0),
++ INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0xfffffffffffull, RSP_1),
++ INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
++ INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
++ INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
++ INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0xf, FE),
++ INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
++ EVENT_EXTRA_END
++};
+
+ EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
+ EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
+@@ -6344,7 +6354,7 @@ static __always_inline void intel_pmu_in
+ intel_pmu_init_glc(pmu);
+ hybrid(pmu, event_constraints) = intel_lnc_event_constraints;
+ hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints;
+- hybrid(pmu, extra_regs) = intel_rwc_extra_regs;
++ hybrid(pmu, extra_regs) = intel_lnc_extra_regs;
+ }
+
+ static __always_inline void intel_pmu_init_skt(struct pmu *pmu)
--- /dev/null
+From b6ccddd6fe1fd49c7a82b6fbed01cccad21a29c7 Mon Sep 17 00:00:00 2001
+From: Kan Liang <kan.liang@linux.intel.com>
+Date: Wed, 11 Dec 2024 08:11:46 -0800
+Subject: perf/x86/intel/uncore: Add Clearwater Forest support
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+commit b6ccddd6fe1fd49c7a82b6fbed01cccad21a29c7 upstream.
+
+From the perspective of the uncore PMU, the Clearwater Forest is the
+same as the previous Sierra Forest. The only difference is the event
+list, which will be supported in the perf tool later.
+
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20241211161146.235253-1-kan.liang@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/intel/uncore.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/events/intel/uncore.c
++++ b/arch/x86/events/intel/uncore.c
+@@ -1910,6 +1910,7 @@ static const struct x86_cpu_id intel_unc
+ X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &adl_uncore_init),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &gnr_uncore_init),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &gnr_uncore_init),
++ X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, &gnr_uncore_init),
+ {},
+ };
+ MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
--- /dev/null
+From e5f84d1cf562f7b45e28d6e5f6490626f870f81c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Wei=C3=9Fschuh?= <linux@weissschuh.net>
+Date: Sun, 8 Dec 2024 15:59:26 +0100
+Subject: power: supply: cros_charge-control: add mutex for driver data
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Weißschuh <linux@weissschuh.net>
+
+commit e5f84d1cf562f7b45e28d6e5f6490626f870f81c upstream.
+
+Concurrent accesses through sysfs may lead to inconsistent state in the
+priv data. Introduce a mutex to avoid this.
+
+Fixes: c6ed48ef5259 ("power: supply: add ChromeOS EC based charge control driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Thomas Weißschuh <linux@weissschuh.net>
+Link: https://lore.kernel.org/r/20241208-cros_charge-control-v2-v1-1-8d168d0f08a3@weissschuh.net
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/power/supply/cros_charge-control.c | 18 +++++++++++++++++-
+ 1 file changed, 17 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/power/supply/cros_charge-control.c b/drivers/power/supply/cros_charge-control.c
+index 17c53591ce19..58ca6d9ed613 100644
+--- a/drivers/power/supply/cros_charge-control.c
++++ b/drivers/power/supply/cros_charge-control.c
+@@ -7,8 +7,10 @@
+ #include <acpi/battery.h>
+ #include <linux/container_of.h>
+ #include <linux/dmi.h>
++#include <linux/lockdep.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/platform_data/cros_ec_commands.h>
+ #include <linux/platform_data/cros_ec_proto.h>
+ #include <linux/platform_device.h>
+@@ -49,6 +51,7 @@ struct cros_chctl_priv {
+ struct attribute *attributes[_CROS_CHCTL_ATTR_COUNT];
+ struct attribute_group group;
+
++ struct mutex lock; /* protects fields below and cros_ec */
+ enum power_supply_charge_behaviour current_behaviour;
+ u8 current_start_threshold, current_end_threshold;
+ };
+@@ -85,6 +88,8 @@ static int cros_chctl_configure_ec(struct cros_chctl_priv *priv)
+ {
+ struct ec_params_charge_control req = {};
+
++ lockdep_assert_held(&priv->lock);
++
+ req.cmd = EC_CHARGE_CONTROL_CMD_SET;
+
+ switch (priv->current_behaviour) {
+@@ -159,6 +164,7 @@ static ssize_t charge_control_start_threshold_show(struct device *dev,
+ struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
+ CROS_CHCTL_ATTR_START_THRESHOLD);
+
++ guard(mutex)(&priv->lock);
+ return sysfs_emit(buf, "%u\n", (unsigned int)priv->current_start_threshold);
+ }
+
+@@ -169,6 +175,7 @@ static ssize_t charge_control_start_threshold_store(struct device *dev,
+ struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
+ CROS_CHCTL_ATTR_START_THRESHOLD);
+
++ guard(mutex)(&priv->lock);
+ return cros_chctl_store_threshold(dev, priv, 0, buf, count);
+ }
+
+@@ -178,6 +185,7 @@ static ssize_t charge_control_end_threshold_show(struct device *dev, struct devi
+ struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
+ CROS_CHCTL_ATTR_END_THRESHOLD);
+
++ guard(mutex)(&priv->lock);
+ return sysfs_emit(buf, "%u\n", (unsigned int)priv->current_end_threshold);
+ }
+
+@@ -187,6 +195,7 @@ static ssize_t charge_control_end_threshold_store(struct device *dev, struct dev
+ struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
+ CROS_CHCTL_ATTR_END_THRESHOLD);
+
++ guard(mutex)(&priv->lock);
+ return cros_chctl_store_threshold(dev, priv, 1, buf, count);
+ }
+
+@@ -195,6 +204,7 @@ static ssize_t charge_behaviour_show(struct device *dev, struct device_attribute
+ struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
+ CROS_CHCTL_ATTR_CHARGE_BEHAVIOUR);
+
++ guard(mutex)(&priv->lock);
+ return power_supply_charge_behaviour_show(dev, EC_CHARGE_CONTROL_BEHAVIOURS,
+ priv->current_behaviour, buf);
+ }
+@@ -210,6 +220,7 @@ static ssize_t charge_behaviour_store(struct device *dev, struct device_attribut
+ if (ret < 0)
+ return ret;
+
++ guard(mutex)(&priv->lock);
+ priv->current_behaviour = ret;
+
+ ret = cros_chctl_configure_ec(priv);
+@@ -290,6 +301,10 @@ static int cros_chctl_probe(struct platform_device *pdev)
+ if (!priv)
+ return -ENOMEM;
+
++ ret = devm_mutex_init(dev, &priv->lock);
++ if (ret)
++ return ret;
++
+ ret = cros_ec_get_cmd_versions(cros_ec, EC_CMD_CHARGE_CONTROL);
+ if (ret < 0)
+ return ret;
+@@ -327,7 +342,8 @@ static int cros_chctl_probe(struct platform_device *pdev)
+ priv->current_end_threshold = 100;
+
+ /* Bring EC into well-known state */
+- ret = cros_chctl_configure_ec(priv);
++ scoped_guard(mutex, &priv->lock)
++ ret = cros_chctl_configure_ec(priv);
+ if (ret < 0)
+ return ret;
+
+--
+2.47.1
+
--- /dev/null
+From e65a1b7fad0e112573eea7d64d4ab4fc513b8695 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Wei=C3=9Fschuh?= <linux@weissschuh.net>
+Date: Sun, 8 Dec 2024 15:59:27 +0100
+Subject: power: supply: cros_charge-control: allow start_threshold == end_threshold
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Weißschuh <linux@weissschuh.net>
+
+commit e65a1b7fad0e112573eea7d64d4ab4fc513b8695 upstream.
+
+Allow setting the start and stop thresholds to the same value.
+There is no reason to disallow it.
+
+Suggested-by: Thomas Koch <linrunner@gmx.net>
+Fixes: c6ed48ef5259 ("power: supply: add ChromeOS EC based charge control driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Thomas Weißschuh <linux@weissschuh.net>
+Link: https://lore.kernel.org/r/20241208-cros_charge-control-v2-v1-2-8d168d0f08a3@weissschuh.net
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/power/supply/cros_charge-control.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/power/supply/cros_charge-control.c b/drivers/power/supply/cros_charge-control.c
+index 58ca6d9ed613..108b121db442 100644
+--- a/drivers/power/supply/cros_charge-control.c
++++ b/drivers/power/supply/cros_charge-control.c
+@@ -139,11 +139,11 @@ static ssize_t cros_chctl_store_threshold(struct device *dev, struct cros_chctl_
+ return -EINVAL;
+
+ if (is_end_threshold) {
+- if (val <= priv->current_start_threshold)
++ if (val < priv->current_start_threshold)
+ return -EINVAL;
+ priv->current_end_threshold = val;
+ } else {
+- if (val >= priv->current_end_threshold)
++ if (val > priv->current_end_threshold)
+ return -EINVAL;
+ priv->current_start_threshold = val;
+ }
+--
+2.47.1
+
--- /dev/null
+From c28dc9fc24f5fa802d44ef7620a511035bdd803e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Wei=C3=9Fschuh?= <linux@weissschuh.net>
+Date: Sun, 8 Dec 2024 15:59:28 +0100
+Subject: power: supply: cros_charge-control: hide start threshold on v2 cmd
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Weißschuh <linux@weissschuh.net>
+
+commit c28dc9fc24f5fa802d44ef7620a511035bdd803e upstream.
+
+ECs implementing the v2 command will not stop charging when the end
+threshold is reached. Instead they will begin discharging until the
+start threshold is reached, leading to permanent charge and discharge
+cycles. This defeats the point of the charge control mechanism.
+
+Avoid the issue by hiding the start threshold on v2 systems.
+Instead on those systems program the EC with start == end which forces
+the EC to reach and stay at that level.
+
+v1 does not support thresholds and v3 works correctly,
+at least judging from the code.
+
+Reported-by: Thomas Koch <linrunner@gmx.net>
+Fixes: c6ed48ef5259 ("power: supply: add ChromeOS EC based charge control driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Thomas Weißschuh <linux@weissschuh.net>
+Link: https://lore.kernel.org/r/20241208-cros_charge-control-v2-v1-3-8d168d0f08a3@weissschuh.net
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/power/supply/cros_charge-control.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/power/supply/cros_charge-control.c b/drivers/power/supply/cros_charge-control.c
+index 108b121db442..9b0a7500296b 100644
+--- a/drivers/power/supply/cros_charge-control.c
++++ b/drivers/power/supply/cros_charge-control.c
+@@ -139,6 +139,10 @@ static ssize_t cros_chctl_store_threshold(struct device *dev, struct cros_chctl_
+ return -EINVAL;
+
+ if (is_end_threshold) {
++ /* Start threshold is not exposed, use fixed value */
++ if (priv->cmd_version == 2)
++ priv->current_start_threshold = val == 100 ? 0 : val;
++
+ if (val < priv->current_start_threshold)
+ return -EINVAL;
+ priv->current_end_threshold = val;
+@@ -234,12 +238,10 @@ static umode_t cros_chtl_attr_is_visible(struct kobject *kobj, struct attribute
+ {
+ struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(attr, n);
+
+- if (priv->cmd_version < 2) {
+- if (n == CROS_CHCTL_ATTR_START_THRESHOLD)
+- return 0;
+- if (n == CROS_CHCTL_ATTR_END_THRESHOLD)
+- return 0;
+- }
++ if (n == CROS_CHCTL_ATTR_START_THRESHOLD && priv->cmd_version < 3)
++ return 0;
++ else if (n == CROS_CHCTL_ATTR_END_THRESHOLD && priv->cmd_version < 2)
++ return 0;
+
+ return attr->mode;
+ }
+--
+2.47.1
+
--- /dev/null
+From afc6e39e824ad0e44b2af50a97885caec8d213d1 Mon Sep 17 00:00:00 2001
+From: Dimitri Fedrau <dimitri.fedrau@liebherr.com>
+Date: Mon, 9 Dec 2024 11:46:15 +0100
+Subject: power: supply: gpio-charger: Fix set charge current limits
+
+From: Dimitri Fedrau <dimitri.fedrau@liebherr.com>
+
+commit afc6e39e824ad0e44b2af50a97885caec8d213d1 upstream.
+
+Fix set charge current limits for devices which allow to set the lowest
+charge current limit to be greater than zero. If requested charge current limit
+is below lowest limit, the index equals current_limit_map_size which leads
+to accessing memory beyond allocated memory.
+
+Fixes: be2919d8355e ("power: supply: gpio-charger: add charge-current-limit feature")
+Cc: stable@vger.kernel.org
+Signed-off-by: Dimitri Fedrau <dimitri.fedrau@liebherr.com>
+Link: https://lore.kernel.org/r/20241209-fix-charge-current-limit-v1-1-760d9b8f2af3@liebherr.com
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/power/supply/gpio-charger.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/power/supply/gpio-charger.c
++++ b/drivers/power/supply/gpio-charger.c
+@@ -67,6 +67,14 @@ static int set_charge_current_limit(stru
+ if (gpio_charger->current_limit_map[i].limit_ua <= val)
+ break;
+ }
++
++ /*
++ * If a valid charge current limit isn't found, default to smallest
++ * current limitation for safety reasons.
++ */
++ if (i >= gpio_charger->current_limit_map_size)
++ i = gpio_charger->current_limit_map_size - 1;
++
+ mapping = gpio_charger->current_limit_map[i];
+
+ for (i = 0; i < ndescs; i++) {
--- /dev/null
+From 6cc45f8c1f898570916044f606be9890d295e129 Mon Sep 17 00:00:00 2001
+From: Tomas Glozar <tglozar@redhat.com>
+Date: Wed, 27 Nov 2024 14:41:30 +0100
+Subject: rtla/timerlat: Fix histogram ALL for zero samples
+
+From: Tomas Glozar <tglozar@redhat.com>
+
+commit 6cc45f8c1f898570916044f606be9890d295e129 upstream.
+
+rtla timerlat hist currently computes the minimum, maximum and average
+latency even in cases when there are zero samples. This leads to
+nonsensical values being calculated for maximum and minimum, and to
+divide by zero for average.
+
+A similar bug is fixed by 01b05fc0e5f3 ("rtla/timerlat: Fix histogram
+report when a cpu count is 0") but the bug still remains for printing
+the sum over all CPUs in timerlat_print_stats_all.
+
+The issue can be reproduced with this command:
+
+$ rtla timerlat hist -U -d 1s
+Index
+over:
+count:
+min:
+avg:
+max:
+Floating point exception (core dumped)
+
+(There are always no samples with -U unless the user workload is
+created.)
+
+Fix the bug by omitting max/min/avg when sample count is zero,
+displaying a dash instead, just like we already do for the individual
+CPUs. The logic is moved into a new function called
+format_summary_value, which is used for both the individual CPUs
+and for the overall summary.
+
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/20241127134130.51171-1-tglozar@redhat.com
+Fixes: 1462501c7a8 ("rtla/timerlat: Add a summary for hist mode")
+Signed-off-by: Tomas Glozar <tglozar@redhat.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/tracing/rtla/src/timerlat_hist.c | 189 +++++++++++++++++----------------
+ 1 file changed, 102 insertions(+), 87 deletions(-)
+
+--- a/tools/tracing/rtla/src/timerlat_hist.c
++++ b/tools/tracing/rtla/src/timerlat_hist.c
+@@ -281,6 +281,21 @@ static void timerlat_hist_header(struct
+ }
+
+ /*
++ * format_summary_value - format a line of summary value (min, max or avg)
++ * of hist data
++ */
++static void format_summary_value(struct trace_seq *seq,
++ int count,
++ unsigned long long val,
++ bool avg)
++{
++ if (count)
++ trace_seq_printf(seq, "%9llu ", avg ? val / count : val);
++ else
++ trace_seq_printf(seq, "%9c ", '-');
++}
++
++/*
+ * timerlat_print_summary - print the summary of the hist data to the output
+ */
+ static void
+@@ -327,29 +342,23 @@ timerlat_print_summary(struct timerlat_h
+ if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
+ continue;
+
+- if (!params->no_irq) {
+- if (data->hist[cpu].irq_count)
+- trace_seq_printf(trace->seq, "%9llu ",
+- data->hist[cpu].min_irq);
+- else
+- trace_seq_printf(trace->seq, " - ");
+- }
+-
+- if (!params->no_thread) {
+- if (data->hist[cpu].thread_count)
+- trace_seq_printf(trace->seq, "%9llu ",
+- data->hist[cpu].min_thread);
+- else
+- trace_seq_printf(trace->seq, " - ");
+- }
+-
+- if (params->user_hist) {
+- if (data->hist[cpu].user_count)
+- trace_seq_printf(trace->seq, "%9llu ",
+- data->hist[cpu].min_user);
+- else
+- trace_seq_printf(trace->seq, " - ");
+- }
++ if (!params->no_irq)
++ format_summary_value(trace->seq,
++ data->hist[cpu].irq_count,
++ data->hist[cpu].min_irq,
++ false);
++
++ if (!params->no_thread)
++ format_summary_value(trace->seq,
++ data->hist[cpu].thread_count,
++ data->hist[cpu].min_thread,
++ false);
++
++ if (params->user_hist)
++ format_summary_value(trace->seq,
++ data->hist[cpu].user_count,
++ data->hist[cpu].min_user,
++ false);
+ }
+ trace_seq_printf(trace->seq, "\n");
+
+@@ -363,29 +372,23 @@ timerlat_print_summary(struct timerlat_h
+ if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
+ continue;
+
+- if (!params->no_irq) {
+- if (data->hist[cpu].irq_count)
+- trace_seq_printf(trace->seq, "%9llu ",
+- data->hist[cpu].sum_irq / data->hist[cpu].irq_count);
+- else
+- trace_seq_printf(trace->seq, " - ");
+- }
+-
+- if (!params->no_thread) {
+- if (data->hist[cpu].thread_count)
+- trace_seq_printf(trace->seq, "%9llu ",
+- data->hist[cpu].sum_thread / data->hist[cpu].thread_count);
+- else
+- trace_seq_printf(trace->seq, " - ");
+- }
+-
+- if (params->user_hist) {
+- if (data->hist[cpu].user_count)
+- trace_seq_printf(trace->seq, "%9llu ",
+- data->hist[cpu].sum_user / data->hist[cpu].user_count);
+- else
+- trace_seq_printf(trace->seq, " - ");
+- }
++ if (!params->no_irq)
++ format_summary_value(trace->seq,
++ data->hist[cpu].irq_count,
++ data->hist[cpu].sum_irq,
++ true);
++
++ if (!params->no_thread)
++ format_summary_value(trace->seq,
++ data->hist[cpu].thread_count,
++ data->hist[cpu].sum_thread,
++ true);
++
++ if (params->user_hist)
++ format_summary_value(trace->seq,
++ data->hist[cpu].user_count,
++ data->hist[cpu].sum_user,
++ true);
+ }
+ trace_seq_printf(trace->seq, "\n");
+
+@@ -399,29 +402,23 @@ timerlat_print_summary(struct timerlat_h
+ if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
+ continue;
+
+- if (!params->no_irq) {
+- if (data->hist[cpu].irq_count)
+- trace_seq_printf(trace->seq, "%9llu ",
+- data->hist[cpu].max_irq);
+- else
+- trace_seq_printf(trace->seq, " - ");
+- }
+-
+- if (!params->no_thread) {
+- if (data->hist[cpu].thread_count)
+- trace_seq_printf(trace->seq, "%9llu ",
+- data->hist[cpu].max_thread);
+- else
+- trace_seq_printf(trace->seq, " - ");
+- }
+-
+- if (params->user_hist) {
+- if (data->hist[cpu].user_count)
+- trace_seq_printf(trace->seq, "%9llu ",
+- data->hist[cpu].max_user);
+- else
+- trace_seq_printf(trace->seq, " - ");
+- }
++ if (!params->no_irq)
++ format_summary_value(trace->seq,
++ data->hist[cpu].irq_count,
++ data->hist[cpu].max_irq,
++ false);
++
++ if (!params->no_thread)
++ format_summary_value(trace->seq,
++ data->hist[cpu].thread_count,
++ data->hist[cpu].max_thread,
++ false);
++
++ if (params->user_hist)
++ format_summary_value(trace->seq,
++ data->hist[cpu].user_count,
++ data->hist[cpu].max_user,
++ false);
+ }
+ trace_seq_printf(trace->seq, "\n");
+ trace_seq_do_printf(trace->seq);
+@@ -505,16 +502,22 @@ timerlat_print_stats_all(struct timerlat
+ trace_seq_printf(trace->seq, "min: ");
+
+ if (!params->no_irq)
+- trace_seq_printf(trace->seq, "%9llu ",
+- sum.min_irq);
++ format_summary_value(trace->seq,
++ sum.irq_count,
++ sum.min_irq,
++ false);
+
+ if (!params->no_thread)
+- trace_seq_printf(trace->seq, "%9llu ",
+- sum.min_thread);
++ format_summary_value(trace->seq,
++ sum.thread_count,
++ sum.min_thread,
++ false);
+
+ if (params->user_hist)
+- trace_seq_printf(trace->seq, "%9llu ",
+- sum.min_user);
++ format_summary_value(trace->seq,
++ sum.user_count,
++ sum.min_user,
++ false);
+
+ trace_seq_printf(trace->seq, "\n");
+
+@@ -522,16 +525,22 @@ timerlat_print_stats_all(struct timerlat
+ trace_seq_printf(trace->seq, "avg: ");
+
+ if (!params->no_irq)
+- trace_seq_printf(trace->seq, "%9llu ",
+- sum.sum_irq / sum.irq_count);
++ format_summary_value(trace->seq,
++ sum.irq_count,
++ sum.sum_irq,
++ true);
+
+ if (!params->no_thread)
+- trace_seq_printf(trace->seq, "%9llu ",
+- sum.sum_thread / sum.thread_count);
++ format_summary_value(trace->seq,
++ sum.thread_count,
++ sum.sum_thread,
++ true);
+
+ if (params->user_hist)
+- trace_seq_printf(trace->seq, "%9llu ",
+- sum.sum_user / sum.user_count);
++ format_summary_value(trace->seq,
++ sum.user_count,
++ sum.sum_user,
++ true);
+
+ trace_seq_printf(trace->seq, "\n");
+
+@@ -539,16 +548,22 @@ timerlat_print_stats_all(struct timerlat
+ trace_seq_printf(trace->seq, "max: ");
+
+ if (!params->no_irq)
+- trace_seq_printf(trace->seq, "%9llu ",
+- sum.max_irq);
++ format_summary_value(trace->seq,
++ sum.irq_count,
++ sum.max_irq,
++ false);
+
+ if (!params->no_thread)
+- trace_seq_printf(trace->seq, "%9llu ",
+- sum.max_thread);
++ format_summary_value(trace->seq,
++ sum.thread_count,
++ sum.max_thread,
++ false);
+
+ if (params->user_hist)
+- trace_seq_printf(trace->seq, "%9llu ",
+- sum.max_user);
++ format_summary_value(trace->seq,
++ sum.user_count,
++ sum.max_user,
++ false);
+
+ trace_seq_printf(trace->seq, "\n");
+ trace_seq_do_printf(trace->seq);
objtool-add-bch2_trans_unlocked_error-to-bcachefs-no.patch
freezer-sched-report-frozen-tasks-as-d-instead-of-r.patch
dmaengine-loongson2-apb-change-genmask-to-genmask_ul.patch
+perf-x86-intel-uncore-add-clearwater-forest-support.patch
+tracing-constify-string-literal-data-member-in-struct-trace_event_call.patch
+tracing-prevent-bad-count-for-tracing_cpumask_write.patch
+rtla-timerlat-fix-histogram-all-for-zero-samples.patch
+io_uring-sqpoll-fix-sqpoll-error-handling-races.patch
+i2c-microchip-core-actually-use-repeated-sends.patch
+x86-fred-clear-wfe-in-missing-endbranch-cps.patch
+virt-tdx-guest-just-leak-decrypted-memory-on-unrecoverable-errors.patch
+pci-msi-handle-lack-of-irqdomain-gracefully.patch
+perf-x86-intel-fix-bitmask-of-ocr-and-frontend-events-for-lnc.patch
+i2c-imx-add-imx7d-compatible-string-for-applying-erratum-err007805.patch
+i2c-microchip-core-fix-ghost-detections.patch
+perf-x86-intel-ds-add-pebs-format-6.patch
+power-supply-cros_charge-control-add-mutex-for-driver-data.patch
+power-supply-cros_charge-control-allow-start_threshold-end_threshold.patch
+power-supply-cros_charge-control-hide-start-threshold-on-v2-cmd.patch
+power-supply-gpio-charger-fix-set-charge-current-limits.patch
+btrfs-fix-race-with-memory-mapped-writes-when-activating-swap-file.patch
--- /dev/null
+From 452f4b31e3f70a52b97890888eeb9eaa9a87139a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20G=C3=B6ttsche?= <cgzones@googlemail.com>
+Date: Mon, 25 Nov 2024 11:50:25 +0100
+Subject: tracing: Constify string literal data member in struct trace_event_call
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christian Göttsche <cgzones@googlemail.com>
+
+commit 452f4b31e3f70a52b97890888eeb9eaa9a87139a upstream.
+
+The name member of the struct trace_event_call is assigned with
+generated string literals; declare them pointer to read-only.
+
+Reported by clang:
+
+ security/landlock/syscalls.c:179:1: warning: initializing 'char *' with an expression of type 'const char[34]' discards qualifiers [-Wincompatible-pointer-types-discards-qualifiers]
+ 179 | SYSCALL_DEFINE3(landlock_create_ruleset,
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 180 | const struct landlock_ruleset_attr __user *const, attr,
+ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 181 | const size_t, size, const __u32, flags)
+ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ ./include/linux/syscalls.h:226:36: note: expanded from macro 'SYSCALL_DEFINE3'
+ 226 | #define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__)
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ ./include/linux/syscalls.h:234:2: note: expanded from macro 'SYSCALL_DEFINEx'
+ 234 | SYSCALL_METADATA(sname, x, __VA_ARGS__) \
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ ./include/linux/syscalls.h:184:2: note: expanded from macro 'SYSCALL_METADATA'
+ 184 | SYSCALL_TRACE_ENTER_EVENT(sname); \
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ ./include/linux/syscalls.h:151:30: note: expanded from macro 'SYSCALL_TRACE_ENTER_EVENT'
+ 151 | .name = "sys_enter"#sname, \
+ | ^~~~~~~~~~~~~~~~~
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Mickaël Salaün <mic@digikod.net>
+Cc: Günther Noack <gnoack@google.com>
+Cc: Nathan Chancellor <nathan@kernel.org>
+Cc: Nick Desaulniers <ndesaulniers@google.com>
+Cc: Bill Wendling <morbo@google.com>
+Cc: Justin Stitt <justinstitt@google.com>
+Link: https://lore.kernel.org/20241125105028.42807-1-cgoettsche@seltendoof.de
+Fixes: b77e38aa240c3 ("tracing: add event trace infrastructure")
+Signed-off-by: Christian Göttsche <cgzones@googlemail.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/trace_events.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -379,7 +379,7 @@ struct trace_event_call {
+ struct list_head list;
+ struct trace_event_class *class;
+ union {
+- char *name;
++ const char *name;
+ /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
+ struct tracepoint *tp;
+ };
--- /dev/null
+From 98feccbf32cfdde8c722bc4587aaa60ee5ac33f0 Mon Sep 17 00:00:00 2001
+From: Lizhi Xu <lizhi.xu@windriver.com>
+Date: Mon, 16 Dec 2024 15:32:38 +0800
+Subject: tracing: Prevent bad count for tracing_cpumask_write
+
+From: Lizhi Xu <lizhi.xu@windriver.com>
+
+commit 98feccbf32cfdde8c722bc4587aaa60ee5ac33f0 upstream.
+
+If a large count is provided, it will trigger a warning in bitmap_parse_user.
+Also check zero for it.
+
+Cc: stable@vger.kernel.org
+Fixes: 9e01c1b74c953 ("cpumask: convert kernel trace functions")
+Link: https://lore.kernel.org/20241216073238.2573704-1-lizhi.xu@windriver.com
+Reported-by: syzbot+0aecfd34fb878546f3fd@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=0aecfd34fb878546f3fd
+Tested-by: syzbot+0aecfd34fb878546f3fd@syzkaller.appspotmail.com
+Signed-off-by: Lizhi Xu <lizhi.xu@windriver.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5111,6 +5111,9 @@ tracing_cpumask_write(struct file *filp,
+ cpumask_var_t tracing_cpumask_new;
+ int err;
+
++ if (count == 0 || count > KMALLOC_MAX_SIZE)
++ return -EINVAL;
++
+ if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+ return -ENOMEM;
+
--- /dev/null
+From 27834971f616c5e154423c578fa95e0444444ce1 Mon Sep 17 00:00:00 2001
+From: Li RongQing <lirongqing@baidu.com>
+Date: Wed, 19 Jun 2024 19:18:01 +0800
+Subject: virt: tdx-guest: Just leak decrypted memory on unrecoverable errors
+
+From: Li RongQing <lirongqing@baidu.com>
+
+commit 27834971f616c5e154423c578fa95e0444444ce1 upstream.
+
+In CoCo VMs it is possible for the untrusted host to cause
+set_memory_decrypted() to fail such that an error is returned
+and the resulting memory is shared. Callers need to take care
+to handle these errors to avoid returning decrypted (shared)
+memory to the page allocator, which could lead to functional
+or security issues.
+
+Leak the decrypted memory when set_memory_decrypted() fails,
+and don't need to print an error since set_memory_decrypted()
+will call WARN_ONCE().
+
+Fixes: f4738f56d1dc ("virt: tdx-guest: Add Quote generation support using TSM_REPORTS")
+Signed-off-by: Li RongQing <lirongqing@baidu.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
+Reviewed-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/20240619111801.25630-1-lirongqing%40baidu.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/virt/coco/tdx-guest/tdx-guest.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/virt/coco/tdx-guest/tdx-guest.c
++++ b/drivers/virt/coco/tdx-guest/tdx-guest.c
+@@ -124,10 +124,8 @@ static void *alloc_quote_buf(void)
+ if (!addr)
+ return NULL;
+
+- if (set_memory_decrypted((unsigned long)addr, count)) {
+- free_pages_exact(addr, len);
++ if (set_memory_decrypted((unsigned long)addr, count))
+ return NULL;
+- }
+
+ return addr;
+ }
--- /dev/null
+From dc81e556f2a017d681251ace21bf06c126d5a192 Mon Sep 17 00:00:00 2001
+From: "Xin Li (Intel)" <xin@zytor.com>
+Date: Wed, 13 Nov 2024 09:59:34 -0800
+Subject: x86/fred: Clear WFE in missing-ENDBRANCH #CPs
+
+From: Xin Li (Intel) <xin@zytor.com>
+
+commit dc81e556f2a017d681251ace21bf06c126d5a192 upstream.
+
+An indirect branch instruction sets the CPU indirect branch tracker
+(IBT) into WAIT_FOR_ENDBRANCH (WFE) state and WFE stays asserted
+across the instruction boundary. When the decoder finds an
+inappropriate instruction while WFE is set ENDBR, the CPU raises a #CP
+fault.
+
+For the "kernel IBT no ENDBR" selftest where #CPs are deliberately
+triggered, the WFE state of the interrupted context needs to be
+cleared to let execution continue. Otherwise when the CPU resumes
+from the instruction that just caused the previous #CP, another
+missing-ENDBRANCH #CP is raised and the CPU enters a dead loop.
+
+This is not a problem with IDT because it doesn't preserve WFE and
+IRET doesn't set WFE. But FRED provides space on the entry stack
+(in an expanded CS area) to save and restore the WFE state, thus the
+WFE state is no longer clobbered, so software must clear it.
+
+Clear WFE to avoid dead looping in ibt_clear_fred_wfe() and the
+!ibt_fatal code path when execution is allowed to continue.
+
+Clobbering WFE in any other circumstance is a security-relevant bug.
+
+[ dhansen: changelog rewording ]
+
+Fixes: a5f6c2ace997 ("x86/shstk: Add user control-protection fault handler")
+Signed-off-by: Xin Li (Intel) <xin@zytor.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/20241113175934.3897541-1-xin%40zytor.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cet.c | 30 ++++++++++++++++++++++++++++++
+ 1 file changed, 30 insertions(+)
+
+--- a/arch/x86/kernel/cet.c
++++ b/arch/x86/kernel/cet.c
+@@ -81,6 +81,34 @@ static void do_user_cp_fault(struct pt_r
+
+ static __ro_after_init bool ibt_fatal = true;
+
++/*
++ * By definition, all missing-ENDBRANCH #CPs are a result of WFE && !ENDBR.
++ *
++ * For the kernel IBT no ENDBR selftest where #CPs are deliberately triggered,
++ * the WFE state of the interrupted context needs to be cleared to let execution
++ * continue. Otherwise when the CPU resumes from the instruction that just
++ * caused the previous #CP, another missing-ENDBRANCH #CP is raised and the CPU
++ * enters a dead loop.
++ *
++ * This is not a problem with IDT because it doesn't preserve WFE and IRET doesn't
++ * set WFE. But FRED provides space on the entry stack (in an expanded CS area)
++ * to save and restore the WFE state, thus the WFE state is no longer clobbered,
++ * so software must clear it.
++ */
++static void ibt_clear_fred_wfe(struct pt_regs *regs)
++{
++ /*
++ * No need to do any FRED checks.
++ *
++ * For IDT event delivery, the high-order 48 bits of CS are pushed
++ * as 0s into the stack, and later IRET ignores these bits.
++ *
++ * For FRED, a test to check if fred_cs.wfe is set would be dropped
++ * by compilers.
++ */
++ regs->fred_cs.wfe = 0;
++}
++
+ static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
+ {
+ if ((error_code & CP_EC) != CP_ENDBR) {
+@@ -90,6 +118,7 @@ static void do_kernel_cp_fault(struct pt
+
+ if (unlikely(regs->ip == (unsigned long)&ibt_selftest_noendbr)) {
+ regs->ax = 0;
++ ibt_clear_fred_wfe(regs);
+ return;
+ }
+
+@@ -97,6 +126,7 @@ static void do_kernel_cp_fault(struct pt
+ if (!ibt_fatal) {
+ printk(KERN_DEFAULT CUT_HERE);
+ __warn(__FILE__, __LINE__, (void *)regs->ip, TAINT_WARN, regs, NULL);
++ ibt_clear_fred_wfe(regs);
+ return;
+ }
+ BUG();