--- /dev/null
+From e02bfea4d7ef587bb285ad5825da4e1973ac8263 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Barnab=C3=A1s=20Cz=C3=A9m=C3=A1n?=
+ <barnabas.czeman@mainlining.org>
+Date: Sun, 6 Oct 2024 22:51:58 +0200
+Subject: clk: qcom: clk-alpha-pll: Fix pll post div mask when width is not set
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Barnabás Czémán <barnabas.czeman@mainlining.org>
+
+commit e02bfea4d7ef587bb285ad5825da4e1973ac8263 upstream.
+
+Many qcom clock drivers do not have .width set. In that case value of
+(p)->width - 1 will be negative which breaks clock tree. Fix this
+by checking if width is zero, and pass 3 to GENMASK if that's the case.
+
+Fixes: 1c3541145cbf ("clk: qcom: support for 2 bit PLL post divider")
+Signed-off-by: Barnabás Czémán <barnabas.czeman@mainlining.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Christopher Obbard <christopher.obbard@linaro.org>
+Tested-by: Christopher Obbard <christopher.obbard@linaro.org>
+Link: https://lore.kernel.org/r/20241006-fix-postdiv-mask-v3-1-160354980433@mainlining.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/clk/qcom/clk-alpha-pll.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/clk/qcom/clk-alpha-pll.c
++++ b/drivers/clk/qcom/clk-alpha-pll.c
+@@ -40,7 +40,7 @@
+
+ #define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
+ # define PLL_POST_DIV_SHIFT 8
+-# define PLL_POST_DIV_MASK(p) GENMASK((p)->width - 1, 0)
++# define PLL_POST_DIV_MASK(p) GENMASK((p)->width ? (p)->width - 1 : 3, 0)
+ # define PLL_ALPHA_MSB BIT(15)
+ # define PLL_ALPHA_EN BIT(24)
+ # define PLL_ALPHA_MODE BIT(25)
--- /dev/null
+From e7f37a7d16310d3c9474825de26a67f00983ebea Mon Sep 17 00:00:00 2001
+From: Abel Vesa <abel.vesa@linaro.org>
+Date: Mon, 21 Oct 2024 15:46:25 +0300
+Subject: clk: qcom: gcc-x1e80100: Fix USB MP SS1 PHY GDSC pwrsts flags
+
+From: Abel Vesa <abel.vesa@linaro.org>
+
+commit e7f37a7d16310d3c9474825de26a67f00983ebea upstream.
+
+Allowing these GDSCs to collapse makes the QMP combo PHYs lose their
+configuration on machine suspend. Currently, the QMP combo PHY driver
+doesn't reinitialise the HW on resume. Under such conditions, the USB
+SuperSpeed support is broken. To avoid this, mark the pwrsts flags with
+RET_ON. This has been already done for USB 0 and 1 SS PHY GDSCs.
+Do this also for the USB MP SS1 PHY GDSC config. The USB MP SS0 PHY GDSC
+already has it.
+
+Fixes: 161b7c401f4b ("clk: qcom: Add Global Clock controller (GCC) driver for X1E80100")
+Reviewed-by: Johan Hovold <johan+linaro@kernel.org>
+Signed-off-by: Abel Vesa <abel.vesa@linaro.org>
+Link: https://lore.kernel.org/r/20241021-x1e80100-clk-gcc-fix-usb-mp-phy-gdsc-pwrsts-flags-v2-1-0bfd64556238@linaro.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/clk/qcom/gcc-x1e80100.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
+index 81ba5ceab342..8ea25aa25dff 100644
+--- a/drivers/clk/qcom/gcc-x1e80100.c
++++ b/drivers/clk/qcom/gcc-x1e80100.c
+@@ -6155,7 +6155,7 @@ static struct gdsc gcc_usb3_mp_ss1_phy_gdsc = {
+ .pd = {
+ .name = "gcc_usb3_mp_ss1_phy_gdsc",
+ },
+- .pwrsts = PWRSTS_OFF_ON,
++ .pwrsts = PWRSTS_RET_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+ };
+
+--
+2.47.0
+
--- /dev/null
+From ace149e0830c380ddfce7e466fe860ca502fe4ee Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Fri, 13 Sep 2024 13:57:04 -0400
+Subject: filemap: Fix bounds checking in filemap_read()
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit ace149e0830c380ddfce7e466fe860ca502fe4ee upstream.
+
+If the caller supplies an iocb->ki_pos value that is close to the
+filesystem upper limit, and an iterator with a count that causes us to
+overflow that limit, then filemap_read() enters an infinite loop.
+
+This behaviour was discovered when testing xfstests generic/525 with the
+"localio" optimisation for loopback NFS mounts.
+
+Reported-by: Mike Snitzer <snitzer@kernel.org>
+Fixes: c2a9737f45e2 ("vfs,mm: fix a dead loop in truncate_inode_pages_range()")
+Tested-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/filemap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2609,7 +2609,7 @@ ssize_t filemap_read(struct kiocb *iocb,
+ if (unlikely(!iov_iter_count(iter)))
+ return 0;
+
+- iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
++ iov_iter_truncate(iter, inode->i_sb->s_maxbytes - iocb->ki_pos);
+ folio_batch_init(&fbatch);
+
+ do {
--- /dev/null
+From b8ee299855f08539e04d6c1a6acb3dc9e5423c00 Mon Sep 17 00:00:00 2001
+From: Qi Xi <xiqi2@huawei.com>
+Date: Fri, 1 Nov 2024 11:48:03 +0800
+Subject: fs/proc: fix compile warning about variable 'vmcore_mmap_ops'
+
+From: Qi Xi <xiqi2@huawei.com>
+
+commit b8ee299855f08539e04d6c1a6acb3dc9e5423c00 upstream.
+
+When build with !CONFIG_MMU, the variable 'vmcore_mmap_ops'
+is defined but not used:
+
+>> fs/proc/vmcore.c:458:42: warning: unused variable 'vmcore_mmap_ops'
+ 458 | static const struct vm_operations_struct vmcore_mmap_ops = {
+
+Fix this by only defining it when CONFIG_MMU is enabled.
+
+Link: https://lkml.kernel.org/r/20241101034803.9298-1-xiqi2@huawei.com
+Fixes: 9cb218131de1 ("vmcore: introduce remap_oldmem_pfn_range()")
+Signed-off-by: Qi Xi <xiqi2@huawei.com>
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/lkml/202410301936.GcE8yUos-lkp@intel.com/
+Cc: Baoquan He <bhe@redhat.com>
+Cc: Dave Young <dyoung@redhat.com>
+Cc: Michael Holzheu <holzheu@linux.vnet.ibm.com>
+Cc: Vivek Goyal <vgoyal@redhat.com>
+Cc: Wang ShaoBo <bobo.shaobowang@huawei.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/proc/vmcore.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/fs/proc/vmcore.c
++++ b/fs/proc/vmcore.c
+@@ -457,10 +457,6 @@ static vm_fault_t mmap_vmcore_fault(stru
+ #endif
+ }
+
+-static const struct vm_operations_struct vmcore_mmap_ops = {
+- .fault = mmap_vmcore_fault,
+-};
+-
+ /**
+ * vmcore_alloc_buf - allocate buffer in vmalloc memory
+ * @size: size of buffer
+@@ -488,6 +484,11 @@ static inline char *vmcore_alloc_buf(siz
+ * virtually contiguous user-space in ELF layout.
+ */
+ #ifdef CONFIG_MMU
++
++static const struct vm_operations_struct vmcore_mmap_ops = {
++ .fault = mmap_vmcore_fault,
++};
++
+ /*
+ * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
+ * reported as not being ram with the zero page.
--- /dev/null
+From 8de3e97f3d3d62cd9f3067f073e8ac93261597db Mon Sep 17 00:00:00 2001
+From: Liu Peibao <loven.liu@jaguarmicro.com>
+Date: Fri, 1 Nov 2024 16:12:43 +0800
+Subject: i2c: designware: do not hold SCL low when I2C_DYNAMIC_TAR_UPDATE is not set
+
+From: Liu Peibao <loven.liu@jaguarmicro.com>
+
+commit 8de3e97f3d3d62cd9f3067f073e8ac93261597db upstream.
+
+When the Tx FIFO is empty and the last command has no STOP bit
+set, the master holds SCL low. If I2C_DYNAMIC_TAR_UPDATE is not
+set, BIT(13) MST_ON_HOLD of IC_RAW_INTR_STAT is not enabled,
+causing the __i2c_dw_disable() timeout. This is quite similar to
+commit 2409205acd3c ("i2c: designware: fix __i2c_dw_disable() in
+case master is holding SCL low"). Also check BIT(7)
+MST_HOLD_TX_FIFO_EMPTY in IC_STATUS, which is available when
+IC_STAT_FOR_CLK_STRETCH is set.
+
+Fixes: 2409205acd3c ("i2c: designware: fix __i2c_dw_disable() in case master is holding SCL low")
+Co-developed-by: Xiaowu Ding <xiaowu.ding@jaguarmicro.com>
+Signed-off-by: Xiaowu Ding <xiaowu.ding@jaguarmicro.com>
+Co-developed-by: Angus Chen <angus.chen@jaguarmicro.com>
+Signed-off-by: Angus Chen <angus.chen@jaguarmicro.com>
+Signed-off-by: Liu Peibao <loven.liu@jaguarmicro.com>
+Acked-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-designware-common.c | 6 ++++--
+ drivers/i2c/busses/i2c-designware-core.h | 1 +
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-designware-common.c
++++ b/drivers/i2c/busses/i2c-designware-common.c
+@@ -442,7 +442,7 @@ err_release_lock:
+ void __i2c_dw_disable(struct dw_i2c_dev *dev)
+ {
+ struct i2c_timings *t = &dev->timings;
+- unsigned int raw_intr_stats;
++ unsigned int raw_intr_stats, ic_stats;
+ unsigned int enable;
+ int timeout = 100;
+ bool abort_needed;
+@@ -450,9 +450,11 @@ void __i2c_dw_disable(struct dw_i2c_dev
+ int ret;
+
+ regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &raw_intr_stats);
++ regmap_read(dev->map, DW_IC_STATUS, &ic_stats);
+ regmap_read(dev->map, DW_IC_ENABLE, &enable);
+
+- abort_needed = raw_intr_stats & DW_IC_INTR_MST_ON_HOLD;
++ abort_needed = (raw_intr_stats & DW_IC_INTR_MST_ON_HOLD) ||
++ (ic_stats & DW_IC_STATUS_MASTER_HOLD_TX_FIFO_EMPTY);
+ if (abort_needed) {
+ if (!(enable & DW_IC_ENABLE_ENABLE)) {
+ regmap_write(dev->map, DW_IC_ENABLE, DW_IC_ENABLE_ENABLE);
+--- a/drivers/i2c/busses/i2c-designware-core.h
++++ b/drivers/i2c/busses/i2c-designware-core.h
+@@ -117,6 +117,7 @@
+ #define DW_IC_STATUS_RFNE BIT(3)
+ #define DW_IC_STATUS_MASTER_ACTIVITY BIT(5)
+ #define DW_IC_STATUS_SLAVE_ACTIVITY BIT(6)
++#define DW_IC_STATUS_MASTER_HOLD_TX_FIFO_EMPTY BIT(7)
+
+ #define DW_IC_SDA_HOLD_RX_SHIFT 16
+ #define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, 16)
--- /dev/null
+From 4401e9d10ab0281a520b9f8c220f30f60b5c248f Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Thu, 31 Oct 2024 09:12:03 -0700
+Subject: mm/damon/core: avoid overflow in damon_feed_loop_next_input()
+
+From: SeongJae Park <sj@kernel.org>
+
+commit 4401e9d10ab0281a520b9f8c220f30f60b5c248f upstream.
+
+damon_feed_loop_next_input() is inefficient and fragile to overflows.
+Specifically, 'score_goal_diff_bp' calculation can overflow when 'score'
+is high. The calculation is actually unnecessary at all because 'goal' is
+a constant of value 10,000. Calculation of 'compensation' is again
+fragile to overflow. Final calculation of return value for under-achiving
+case is again fragile to overflow when the current score is
+under-achieving the target.
+
+Add two corner cases handling at the beginning of the function to make the
+body easier to read, and rewrite the body of the function to avoid
+overflows and the unnecessary bp value calculation.
+
+Link: https://lkml.kernel.org/r/20241031161203.47751-1-sj@kernel.org
+Fixes: 9294a037c015 ("mm/damon/core: implement goal-oriented feedback-driven quota auto-tuning")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Closes: https://lore.kernel.org/944f3d5b-9177-48e7-8ec9-7f1331a3fea3@roeck-us.net
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Cc: <stable@vger.kernel.org> [6.8.x]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/core.c | 28 +++++++++++++++++++++-------
+ 1 file changed, 21 insertions(+), 7 deletions(-)
+
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -1450,17 +1450,31 @@ static unsigned long damon_feed_loop_nex
+ unsigned long score)
+ {
+ const unsigned long goal = 10000;
+- unsigned long score_goal_diff = max(goal, score) - min(goal, score);
+- unsigned long score_goal_diff_bp = score_goal_diff * 10000 / goal;
+- unsigned long compensation = last_input * score_goal_diff_bp / 10000;
+ /* Set minimum input as 10000 to avoid compensation be zero */
+ const unsigned long min_input = 10000;
++ unsigned long score_goal_diff, compensation;
++ bool over_achieving = score > goal;
+
+- if (goal > score)
++ if (score == goal)
++ return last_input;
++ if (score >= goal * 2)
++ return min_input;
++
++ if (over_achieving)
++ score_goal_diff = score - goal;
++ else
++ score_goal_diff = goal - score;
++
++ if (last_input < ULONG_MAX / score_goal_diff)
++ compensation = last_input * score_goal_diff / goal;
++ else
++ compensation = last_input / goal * score_goal_diff;
++
++ if (over_achieving)
++ return max(last_input - compensation, min_input);
++ if (last_input < ULONG_MAX - compensation)
+ return last_input + compensation;
+- if (last_input > compensation + min_input)
+- return last_input - compensation;
+- return min_input;
++ return ULONG_MAX;
+ }
+
+ #ifdef CONFIG_PSI
--- /dev/null
+From 3488af0970445ff5532c7e8dc5e6456b877aee5e Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Thu, 31 Oct 2024 11:37:56 -0700
+Subject: mm/damon/core: handle zero {aggregation,ops_update} intervals
+
+From: SeongJae Park <sj@kernel.org>
+
+commit 3488af0970445ff5532c7e8dc5e6456b877aee5e upstream.
+
+Patch series "mm/damon/core: fix handling of zero non-sampling intervals".
+
+DAMON's internal intervals accounting logic is not correctly handling
+non-sampling intervals of zero values for a wrong assumption. This could
+cause unexpected monitoring behavior, and even result in infinite hang of
+DAMON sysfs interface user threads in case of zero aggregation interval.
+Fix those by updating the intervals accounting logic. For details of the
+root cause and solutions, please refer to commit messages of fixes.
+
+
+This patch (of 2):
+
+DAMON's logics to determine if this is the time to do aggregation and ops
+update assumes next_{aggregation,ops_update}_sis are always set larger
+than current passed_sample_intervals. And therefore it further assumes
+continuously incrementing passed_sample_intervals every sampling interval
+will make it reaches to the next_{aggregation,ops_update}_sis in future.
+The logic therefore make the action and update
+next_{aggregation,ops_update}_sis only if passed_sample_intervals is same
+to the counts, respectively.
+
+If Aggregation interval or Ops update interval are zero, however,
+next_aggregation_sis or next_ops_update_sis are set same to current
+passed_sample_intervals, respectively. And passed_sample_intervals is
+incremented before doing the next_{aggregation,ops_update}_sis check.
+Hence, passed_sample_intervals becomes larger than
+next_{aggregation,ops_update}_sis, and the logic says it is not the time
+to do the action and update next_{aggregation,ops_update}_sis forever,
+until an overflow happens. In other words, DAMON stops doing aggregations
+or ops updates effectively forever, and users cannot get monitoring
+results.
+
+Based on the documents and the common sense, a reasonable behavior for
+such inputs is doing an aggregation and an ops update for every sampling
+interval. Handle the case by removing the assumption.
+
+Note that this could incur particular real issue for DAMON sysfs interface
+users, in case of zero Aggregation interval. When user starts DAMON with
+zero Aggregation interval and asks online DAMON parameter tuning via DAMON
+sysfs interface, the request is handled by the aggregation callback.
+Until the callback finishes the work, the user who requested the online
+tuning just waits. Hence, the user will be stuck until the
+passed_sample_intervals overflows.
+
+Link: https://lkml.kernel.org/r/20241031183757.49610-1-sj@kernel.org
+Link: https://lkml.kernel.org/r/20241031183757.49610-2-sj@kernel.org
+Fixes: 4472edf63d66 ("mm/damon/core: use number of passed access sampling as a timer")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org> [6.7.x]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/core.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -2001,7 +2001,7 @@ static int kdamond_fn(void *data)
+ if (ctx->ops.check_accesses)
+ max_nr_accesses = ctx->ops.check_accesses(ctx);
+
+- if (ctx->passed_sample_intervals == next_aggregation_sis) {
++ if (ctx->passed_sample_intervals >= next_aggregation_sis) {
+ kdamond_merge_regions(ctx,
+ max_nr_accesses / 10,
+ sz_limit);
+@@ -2019,7 +2019,7 @@ static int kdamond_fn(void *data)
+
+ sample_interval = ctx->attrs.sample_interval ?
+ ctx->attrs.sample_interval : 1;
+- if (ctx->passed_sample_intervals == next_aggregation_sis) {
++ if (ctx->passed_sample_intervals >= next_aggregation_sis) {
+ ctx->next_aggregation_sis = next_aggregation_sis +
+ ctx->attrs.aggr_interval / sample_interval;
+
+@@ -2029,7 +2029,7 @@ static int kdamond_fn(void *data)
+ ctx->ops.reset_aggregated(ctx);
+ }
+
+- if (ctx->passed_sample_intervals == next_ops_update_sis) {
++ if (ctx->passed_sample_intervals >= next_ops_update_sis) {
+ ctx->next_ops_update_sis = next_ops_update_sis +
+ ctx->attrs.ops_update_interval /
+ sample_interval;
--- /dev/null
+From 8e7bde615f634a82a44b1f3d293c049fd3ef9ca9 Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Thu, 31 Oct 2024 11:37:57 -0700
+Subject: mm/damon/core: handle zero schemes apply interval
+
+From: SeongJae Park <sj@kernel.org>
+
+commit 8e7bde615f634a82a44b1f3d293c049fd3ef9ca9 upstream.
+
+DAMON's logics to determine if this is the time to apply damos schemes
+assumes next_apply_sis is always set larger than current
+passed_sample_intervals. And therefore assume continuously incrementing
+passed_sample_intervals will make it reaches to the next_apply_sis in
+future. The logic hence does apply the scheme and update next_apply_sis
+only if passed_sample_intervals is same to next_apply_sis.
+
+If Schemes apply interval is set as zero, however, next_apply_sis is set
+same to current passed_sample_intervals, respectively. And
+passed_sample_intervals is incremented before doing the next_apply_sis
+check. Hence, passed_sample_intervals becomes larger than next_apply_sis, and the
+logic says it is not the time to apply schemes and update next_apply_sis.
+In other words, DAMON stops applying schemes until passed_sample_intervals
+overflows.
+
+Based on the documents and the common sense, a reasonable behavior for
+such inputs would be applying the schemes for every sampling interval.
+Handle the case by removing the assumption.
+
+Link: https://lkml.kernel.org/r/20241031183757.49610-3-sj@kernel.org
+Fixes: 42f994b71404 ("mm/damon/core: implement scheme-specific apply interval")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org> [6.7.x]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/core.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -1406,7 +1406,7 @@ static void damon_do_apply_schemes(struc
+ damon_for_each_scheme(s, c) {
+ struct damos_quota *quota = &s->quota;
+
+- if (c->passed_sample_intervals != s->next_apply_sis)
++ if (c->passed_sample_intervals < s->next_apply_sis)
+ continue;
+
+ if (!s->wmarks.activated)
+@@ -1627,7 +1627,7 @@ static void kdamond_apply_schemes(struct
+ bool has_schemes_to_apply = false;
+
+ damon_for_each_scheme(s, c) {
+- if (c->passed_sample_intervals != s->next_apply_sis)
++ if (c->passed_sample_intervals < s->next_apply_sis)
+ continue;
+
+ if (!s->wmarks.activated)
+@@ -1647,9 +1647,9 @@ static void kdamond_apply_schemes(struct
+ }
+
+ damon_for_each_scheme(s, c) {
+- if (c->passed_sample_intervals != s->next_apply_sis)
++ if (c->passed_sample_intervals < s->next_apply_sis)
+ continue;
+- s->next_apply_sis +=
++ s->next_apply_sis = c->passed_sample_intervals +
+ (s->apply_interval_us ? s->apply_interval_us :
+ c->attrs.aggr_interval) / sample_interval;
+ }
--- /dev/null
+From faa242b1d2a97143150bdc50d5b61fd70fcd17cd Mon Sep 17 00:00:00 2001
+From: Wei Yang <richard.weiyang@gmail.com>
+Date: Sun, 27 Oct 2024 12:33:21 +0000
+Subject: mm/mlock: set the correct prev on failure
+
+From: Wei Yang <richard.weiyang@gmail.com>
+
+commit faa242b1d2a97143150bdc50d5b61fd70fcd17cd upstream.
+
+After commit 94d7d9233951 ("mm: abstract the vma_merge()/split_vma()
+pattern for mprotect() et al."), if vma_modify_flags() return error, the
+vma is set to an error code. This will lead to an invalid prev be
+returned.
+
+Generally this shouldn't matter as the caller should treat an error as
+indicating state is now invalidated, however unfortunately
+apply_mlockall_flags() does not check for errors and assumes that
+mlock_fixup() correctly maintains prev even if an error were to occur.
+
+This patch fixes that assumption.
+
+[lorenzo.stoakes@oracle.com: provide a better fix and rephrase the log]
+Link: https://lkml.kernel.org/r/20241027123321.19511-1-richard.weiyang@gmail.com
+Fixes: 94d7d9233951 ("mm: abstract the vma_merge()/split_vma() pattern for mprotect() et al.")
+Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Reviewed-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Jann Horn <jannh@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mlock.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/mm/mlock.c b/mm/mlock.c
+index e3e3dc2b2956..cde076fa7d5e 100644
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -725,14 +725,17 @@ static int apply_mlockall_flags(int flags)
+ }
+
+ for_each_vma(vmi, vma) {
++ int error;
+ vm_flags_t newflags;
+
+ newflags = vma->vm_flags & ~VM_LOCKED_MASK;
+ newflags |= to_add;
+
+- /* Ignore errors */
+- mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end,
+- newflags);
++ error = mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end,
++ newflags);
++ /* Ignore errors, but prev needs fixing up. */
++ if (error)
++ prev = vma;
+ cond_resched();
+ }
+ out:
+--
+2.47.0
+
--- /dev/null
+From f8f931bba0f92052cf842b7e30917b1afcc77d5a Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Sun, 27 Oct 2024 13:02:13 -0700
+Subject: mm/thp: fix deferred split unqueue naming and locking
+
+From: Hugh Dickins <hughd@google.com>
+
+commit f8f931bba0f92052cf842b7e30917b1afcc77d5a upstream.
+
+Recent changes are putting more pressure on THP deferred split queues:
+under load revealing long-standing races, causing list_del corruptions,
+"Bad page state"s and worse (I keep BUGs in both of those, so usually
+don't get to see how badly they end up without). The relevant recent
+changes being 6.8's mTHP, 6.10's mTHP swapout, and 6.12's mTHP swapin,
+improved swap allocation, and underused THP splitting.
+
+Before fixing locking: rename misleading folio_undo_large_rmappable(),
+which does not undo large_rmappable, to folio_unqueue_deferred_split(),
+which is what it does. But that and its out-of-line __callee are mm
+internals of very limited usability: add comment and WARN_ON_ONCEs to
+check usage; and return a bool to say if a deferred split was unqueued,
+which can then be used in WARN_ON_ONCEs around safety checks (sparing
+callers the arcane conditionals in __folio_unqueue_deferred_split()).
+
+Just omit the folio_unqueue_deferred_split() from free_unref_folios(), all
+of whose callers now call it beforehand (and if any forget then bad_page()
+will tell) - except for its caller put_pages_list(), which itself no
+longer has any callers (and will be deleted separately).
+
+Swapout: mem_cgroup_swapout() has been resetting folio->memcg_data 0
+without checking and unqueueing a THP folio from deferred split list;
+which is unfortunate, since the split_queue_lock depends on the memcg
+(when memcg is enabled); so swapout has been unqueueing such THPs later,
+when freeing the folio, using the pgdat's lock instead: potentially
+corrupting the memcg's list. __remove_mapping() has frozen refcount to 0
+here, so no problem with calling folio_unqueue_deferred_split() before
+resetting memcg_data.
+
+That goes back to 5.4 commit 87eaceb3faa5 ("mm: thp: make deferred split
+shrinker memcg aware"): which included a check on swapcache before adding
+to deferred queue, but no check on deferred queue before adding THP to
+swapcache. That worked fine with the usual sequence of events in reclaim
+(though there were a couple of rare ways in which a THP on deferred queue
+could have been swapped out), but 6.12 commit dafff3f4c850 ("mm: split
+underused THPs") avoids splitting underused THPs in reclaim, which makes
+swapcache THPs on deferred queue commonplace.
+
+Keep the check on swapcache before adding to deferred queue? Yes: it is
+no longer essential, but preserves the existing behaviour, and is likely
+to be a worthwhile optimization (vmstat showed much more traffic on the
+queue under swapping load if the check was removed); update its comment.
+
+Memcg-v1 move (deprecated): mem_cgroup_move_account() has been changing
+folio->memcg_data without checking and unqueueing a THP folio from the
+deferred list, sometimes corrupting "from" memcg's list, like swapout.
+Refcount is non-zero here, so folio_unqueue_deferred_split() can only be
+used in a WARN_ON_ONCE to validate the fix, which must be done earlier:
+mem_cgroup_move_charge_pte_range() first try to split the THP (splitting
+of course unqueues), or skip it if that fails. Not ideal, but moving
+charge has been requested, and khugepaged should repair the THP later:
+nobody wants new custom unqueueing code just for this deprecated case.
+
+The 87eaceb3faa5 commit did have the code to move from one deferred list
+to another (but was not conscious of its unsafety while refcount non-0);
+but that was removed by 5.6 commit fac0516b5534 ("mm: thp: don't need care
+deferred split queue in memcg charge move path"), which argued that the
+existence of a PMD mapping guarantees that the THP cannot be on a deferred
+list. As above, false in rare cases, and now commonly false.
+
+Backport to 6.11 should be straightforward. Earlier backports must take
+care that other _deferred_list fixes and dependencies are included. There
+is not a strong case for backports, but they can fix cornercases.
+
+Link: https://lkml.kernel.org/r/8dc111ae-f6db-2da7-b25c-7a20b1effe3b@google.com
+Fixes: 87eaceb3faa5 ("mm: thp: make deferred split shrinker memcg aware")
+Fixes: dafff3f4c850 ("mm: split underused THPs")
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Yang Shi <shy828301@gmail.com>
+Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
+Cc: Barry Song <baohua@kernel.org>
+Cc: Chris Li <chrisl@kernel.org>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Nhat Pham <nphamcs@gmail.com>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: Shakeel Butt <shakeel.butt@linux.dev>
+Cc: Usama Arif <usamaarif642@gmail.com>
+Cc: Wei Yang <richard.weiyang@gmail.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/huge_memory.c | 35 ++++++++++++++++++++++++++---------
+ mm/internal.h | 10 +++++-----
+ mm/memcontrol-v1.c | 25 +++++++++++++++++++++++++
+ mm/memcontrol.c | 8 +++++---
+ mm/migrate.c | 4 ++--
+ mm/page_alloc.c | 1 -
+ mm/swap.c | 4 ++--
+ mm/vmscan.c | 4 ++--
+ 8 files changed, 67 insertions(+), 24 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -3268,18 +3268,38 @@ out:
+ return ret;
+ }
+
+-void __folio_undo_large_rmappable(struct folio *folio)
++/*
++ * __folio_unqueue_deferred_split() is not to be called directly:
++ * the folio_unqueue_deferred_split() inline wrapper in mm/internal.h
++ * limits its calls to those folios which may have a _deferred_list for
++ * queueing THP splits, and that list is (racily observed to be) non-empty.
++ *
++ * It is unsafe to call folio_unqueue_deferred_split() until folio refcount is
++ * zero: because even when split_queue_lock is held, a non-empty _deferred_list
++ * might be in use on deferred_split_scan()'s unlocked on-stack list.
++ *
++ * If memory cgroups are enabled, split_queue_lock is in the mem_cgroup: it is
++ * therefore important to unqueue deferred split before changing folio memcg.
++ */
++bool __folio_unqueue_deferred_split(struct folio *folio)
+ {
+ struct deferred_split *ds_queue;
+ unsigned long flags;
++ bool unqueued = false;
++
++ WARN_ON_ONCE(folio_ref_count(folio));
++ WARN_ON_ONCE(!mem_cgroup_disabled() && !folio_memcg(folio));
+
+ ds_queue = get_deferred_split_queue(folio);
+ spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+ if (!list_empty(&folio->_deferred_list)) {
+ ds_queue->split_queue_len--;
+ list_del_init(&folio->_deferred_list);
++ unqueued = true;
+ }
+ spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
++
++ return unqueued; /* useful for debug warnings */
+ }
+
+ void deferred_split_folio(struct folio *folio)
+@@ -3298,14 +3318,11 @@ void deferred_split_folio(struct folio *
+ return;
+
+ /*
+- * The try_to_unmap() in page reclaim path might reach here too,
+- * this may cause a race condition to corrupt deferred split queue.
+- * And, if page reclaim is already handling the same folio, it is
+- * unnecessary to handle it again in shrinker.
+- *
+- * Check the swapcache flag to determine if the folio is being
+- * handled by page reclaim since THP swap would add the folio into
+- * swap cache before calling try_to_unmap().
++ * Exclude swapcache: originally to avoid a corrupt deferred split
++ * queue. Nowadays that is fully prevented by mem_cgroup_swapout();
++ * but if page reclaim is already handling the same folio, it is
++ * unnecessary to handle it again in the shrinker, so excluding
++ * swapcache here may still be a useful optimization.
+ */
+ if (folio_test_swapcache(folio))
+ return;
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -631,11 +631,11 @@ static inline void folio_set_order(struc
+ #endif
+ }
+
+-void __folio_undo_large_rmappable(struct folio *folio);
+-static inline void folio_undo_large_rmappable(struct folio *folio)
++bool __folio_unqueue_deferred_split(struct folio *folio);
++static inline bool folio_unqueue_deferred_split(struct folio *folio)
+ {
+ if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
+- return;
++ return false;
+
+ /*
+ * At this point, there is no one trying to add the folio to
+@@ -643,9 +643,9 @@ static inline void folio_undo_large_rmap
+ * to check without acquiring the split_queue_lock.
+ */
+ if (data_race(list_empty(&folio->_deferred_list)))
+- return;
++ return false;
+
+- __folio_undo_large_rmappable(folio);
++ return __folio_unqueue_deferred_split(folio);
+ }
+
+ static inline struct folio *page_rmappable_folio(struct page *page)
+--- a/mm/memcontrol-v1.c
++++ b/mm/memcontrol-v1.c
+@@ -845,6 +845,8 @@ static int mem_cgroup_move_account(struc
+ css_get(&to->css);
+ css_put(&from->css);
+
++ /* Warning should never happen, so don't worry about refcount non-0 */
++ WARN_ON_ONCE(folio_unqueue_deferred_split(folio));
+ folio->memcg_data = (unsigned long)to;
+
+ __folio_memcg_unlock(from);
+@@ -1214,7 +1216,9 @@ static int mem_cgroup_move_charge_pte_ra
+ enum mc_target_type target_type;
+ union mc_target target;
+ struct folio *folio;
++ bool tried_split_before = false;
+
++retry_pmd:
+ ptl = pmd_trans_huge_lock(pmd, vma);
+ if (ptl) {
+ if (mc.precharge < HPAGE_PMD_NR) {
+@@ -1224,6 +1228,27 @@ static int mem_cgroup_move_charge_pte_ra
+ target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
+ if (target_type == MC_TARGET_PAGE) {
+ folio = target.folio;
++ /*
++ * Deferred split queue locking depends on memcg,
++ * and unqueue is unsafe unless folio refcount is 0:
++ * split or skip if on the queue? first try to split.
++ */
++ if (!list_empty(&folio->_deferred_list)) {
++ spin_unlock(ptl);
++ if (!tried_split_before)
++ split_folio(folio);
++ folio_unlock(folio);
++ folio_put(folio);
++ if (tried_split_before)
++ return 0;
++ tried_split_before = true;
++ goto retry_pmd;
++ }
++ /*
++ * So long as that pmd lock is held, the folio cannot
++ * be racily added to the _deferred_list, because
++ * __folio_remove_rmap() will find !partially_mapped.
++ */
+ if (folio_isolate_lru(folio)) {
+ if (!mem_cgroup_move_account(folio, true,
+ mc.from, mc.to)) {
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -4604,9 +4604,6 @@ static void uncharge_folio(struct folio
+ struct obj_cgroup *objcg;
+
+ VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
+- VM_BUG_ON_FOLIO(folio_order(folio) > 1 &&
+- !folio_test_hugetlb(folio) &&
+- !list_empty(&folio->_deferred_list), folio);
+
+ /*
+ * Nobody should be changing or seriously looking at
+@@ -4653,6 +4650,7 @@ static void uncharge_folio(struct folio
+ ug->nr_memory += nr_pages;
+ ug->pgpgout++;
+
++ WARN_ON_ONCE(folio_unqueue_deferred_split(folio));
+ folio->memcg_data = 0;
+ }
+
+@@ -4769,6 +4767,9 @@ void mem_cgroup_migrate(struct folio *ol
+
+ /* Transfer the charge and the css ref */
+ commit_charge(new, memcg);
++
++ /* Warning should never happen, so don't worry about refcount non-0 */
++ WARN_ON_ONCE(folio_unqueue_deferred_split(old));
+ old->memcg_data = 0;
+ }
+
+@@ -4955,6 +4956,7 @@ void mem_cgroup_swapout(struct folio *fo
+ VM_BUG_ON_FOLIO(oldid, folio);
+ mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
+
++ folio_unqueue_deferred_split(folio);
+ folio->memcg_data = 0;
+
+ if (!mem_cgroup_is_root(memcg))
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -415,7 +415,7 @@ static int __folio_migrate_mapping(struc
+ folio_test_large_rmappable(folio)) {
+ if (!folio_ref_freeze(folio, expected_count))
+ return -EAGAIN;
+- folio_undo_large_rmappable(folio);
++ folio_unqueue_deferred_split(folio);
+ folio_ref_unfreeze(folio, expected_count);
+ }
+
+@@ -438,7 +438,7 @@ static int __folio_migrate_mapping(struc
+ }
+
+ /* Take off deferred split queue while frozen and memcg set */
+- folio_undo_large_rmappable(folio);
++ folio_unqueue_deferred_split(folio);
+
+ /*
+ * Now we know that no one else is looking at the folio:
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -2663,7 +2663,6 @@ void free_unref_folios(struct folio_batc
+ unsigned long pfn = folio_pfn(folio);
+ unsigned int order = folio_order(folio);
+
+- folio_undo_large_rmappable(folio);
+ if (!free_pages_prepare(&folio->page, order))
+ continue;
+ /*
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -123,7 +123,7 @@ void __folio_put(struct folio *folio)
+ }
+
+ page_cache_release(folio);
+- folio_undo_large_rmappable(folio);
++ folio_unqueue_deferred_split(folio);
+ mem_cgroup_uncharge(folio);
+ free_unref_page(&folio->page, folio_order(folio));
+ }
+@@ -1020,7 +1020,7 @@ void folios_put_refs(struct folio_batch
+ free_huge_folio(folio);
+ continue;
+ }
+- folio_undo_large_rmappable(folio);
++ folio_unqueue_deferred_split(folio);
+ __page_cache_release(folio, &lruvec, &flags);
+
+ if (j != i)
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1462,7 +1462,7 @@ free_it:
+ */
+ nr_reclaimed += nr_pages;
+
+- folio_undo_large_rmappable(folio);
++ folio_unqueue_deferred_split(folio);
+ if (folio_batch_add(&free_folios, folio) == 0) {
+ mem_cgroup_uncharge_folios(&free_folios);
+ try_to_unmap_flush();
+@@ -1849,7 +1849,7 @@ static unsigned int move_folios_to_lru(s
+ if (unlikely(folio_put_testzero(folio))) {
+ __folio_clear_lru_flags(folio);
+
+- folio_undo_large_rmappable(folio);
++ folio_unqueue_deferred_split(folio);
+ if (folio_batch_add(&free_folios, folio) == 0) {
+ spin_unlock_irq(&lruvec->lru_lock);
+ mem_cgroup_uncharge_folios(&free_folios);
--- /dev/null
+From cb6fcef8b4b6c655b6a25cc3a415cd9eb81b3da8 Mon Sep 17 00:00:00 2001
+From: "Masami Hiramatsu (Google)" <mhiramat@kernel.org>
+Date: Mon, 28 Oct 2024 12:26:27 +0900
+Subject: objpool: fix to make percpu slot allocation more robust
+
+From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+
+commit cb6fcef8b4b6c655b6a25cc3a415cd9eb81b3da8 upstream.
+
+Since gfp & GFP_ATOMIC == GFP_ATOMIC is true for GFP_KERNEL | GFP_HIGH, it
+will use kmalloc if user specifies that combination. Here the reason why
+combining the __vmalloc_node() and kmalloc_node() is that the vmalloc does
+not support all GFP flag, especially GFP_ATOMIC. So we should check if
+gfp & (GFP_ATOMIC | GFP_KERNEL) != GFP_ATOMIC for vmalloc first. This
+ensures caller can sleep. And for the robustness, even if vmalloc fails,
+it should retry with kmalloc to allocate it.
+
+Link: https://lkml.kernel.org/r/173008598713.1262174.2959179484209897252.stgit@mhiramat.roam.corp.google.com
+Fixes: aff1871bfc81 ("objpool: fix choosing allocation for percpu slots")
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
+Closes: https://lore.kernel.org/all/CAHk-=whO+vSH+XVRio8byJU8idAWES0SPGVZ7KAVdc4qrV0VUA@mail.gmail.com/
+Cc: Leo Yan <leo.yan@arm.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Matt Wu <wuqiang.matt@bytedance.com>
+Cc: Mikel Rychliski <mikel@mikelr.com>
+Cc: Steven Rostedt (Google) <rostedt@goodmis.org>
+Cc: Viktor Malik <vmalik@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/objpool.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/lib/objpool.c b/lib/objpool.c
+index fd108fe0d095..b998b720c732 100644
+--- a/lib/objpool.c
++++ b/lib/objpool.c
+@@ -74,15 +74,21 @@ objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
+ * warm caches and TLB hits. in default vmalloc is used to
+ * reduce the pressure of kernel slab system. as we know,
+ * mimimal size of vmalloc is one page since vmalloc would
+- * always align the requested size to page size
++ * always align the requested size to page size.
++ * but if vmalloc fails or it is not available (e.g. GFP_ATOMIC)
++ * allocate percpu slot with kmalloc.
+ */
+- if ((pool->gfp & GFP_ATOMIC) == GFP_ATOMIC)
+- slot = kmalloc_node(size, pool->gfp, cpu_to_node(i));
+- else
++ slot = NULL;
++
++ if ((pool->gfp & (GFP_ATOMIC | GFP_KERNEL)) != GFP_ATOMIC)
+ slot = __vmalloc_node(size, sizeof(void *), pool->gfp,
+ cpu_to_node(i), __builtin_return_address(0));
+- if (!slot)
+- return -ENOMEM;
++
++ if (!slot) {
++ slot = kmalloc_node(size, pool->gfp, cpu_to_node(i));
++ if (!slot)
++ return -ENOMEM;
++ }
+ memset(slot, 0, size);
+ pool->cpu_slots[i] = slot;
+
+--
+2.47.0
+
platform-x86-amd-pmf-update-smu-metrics-table-for-1ah-family-series.patch
platform-x86-amd-pmf-add-smu-metrics-table-support-for-1ah-family-60h-model.patch
media-uvcvideo-skip-parsing-frames-of-type-uvc_vs_undefined-in-uvc_parse_format.patch
+filemap-fix-bounds-checking-in-filemap_read.patch
+i2c-designware-do-not-hold-scl-low-when-i2c_dynamic_tar_update-is-not-set.patch
+clk-qcom-gcc-x1e80100-fix-usb-mp-ss1-phy-gdsc-pwrsts-flags.patch
+clk-qcom-clk-alpha-pll-fix-pll-post-div-mask-when-width-is-not-set.patch
+fs-proc-fix-compile-warning-about-variable-vmcore_mmap_ops.patch
+objpool-fix-to-make-percpu-slot-allocation-more-robust.patch
+signal-restore-the-override_rlimit-logic.patch
+mm-damon-core-avoid-overflow-in-damon_feed_loop_next_input.patch
+mm-damon-core-handle-zero-aggregation-ops_update-intervals.patch
+mm-damon-core-handle-zero-schemes-apply-interval.patch
+mm-mlock-set-the-correct-prev-on-failure.patch
+mm-thp-fix-deferred-split-unqueue-naming-and-locking.patch
+thunderbolt-add-only-on-board-retimers-when-config_usb4_debugfs_margining.patch
+usb-musb-sunxi-fix-accessing-an-released-usb-phy.patch
+usb-dwc3-fix-fault-at-system-suspend-if-device-was-already-runtime-suspended.patch
+usb-typec-qcom-pmic-init-value-of-hdr_len-txbuf_len-earlier.patch
+usb-typec-fix-potential-out-of-bounds-in-ucsi_ccg_update_set_new_cam_cmd.patch
+usb-serial-io_edgeport-fix-use-after-free-in-debug-printk.patch
+usb-serial-qcserial-add-support-for-sierra-wireless-em86xx.patch
+usb-serial-option-add-fibocom-fg132-0x0112-composition.patch
+usb-serial-option-add-quectel-rg650v.patch
--- /dev/null
+From 9e05e5c7ee8758141d2db7e8fea2cab34500c6ed Mon Sep 17 00:00:00 2001
+From: Roman Gushchin <roman.gushchin@linux.dev>
+Date: Mon, 4 Nov 2024 19:54:19 +0000
+Subject: signal: restore the override_rlimit logic
+
+From: Roman Gushchin <roman.gushchin@linux.dev>
+
+commit 9e05e5c7ee8758141d2db7e8fea2cab34500c6ed upstream.
+
+Prior to commit d64696905554 ("Reimplement RLIMIT_SIGPENDING on top of
+ucounts") UCOUNT_RLIMIT_SIGPENDING rlimit was not enforced for a class of
+signals. However now it's enforced unconditionally, even if
+override_rlimit is set. This behavior change caused production issues.
+
+For example, if the limit is reached and a process receives a SIGSEGV
+signal, sigqueue_alloc fails to allocate the necessary resources for the
+signal delivery, preventing the signal from being delivered with siginfo.
+This prevents the process from correctly identifying the fault address and
+handling the error. From the user-space perspective, applications are
+unaware that the limit has been reached and that the siginfo is
+effectively 'corrupted'. This can lead to unpredictable behavior and
+crashes, as we observed with java applications.
+
+Fix this by passing override_rlimit into inc_rlimit_get_ucounts() and skip
+the comparison to max there if override_rlimit is set. This effectively
+restores the old behavior.
+
+Link: https://lkml.kernel.org/r/20241104195419.3962584-1-roman.gushchin@linux.dev
+Fixes: d64696905554 ("Reimplement RLIMIT_SIGPENDING on top of ucounts")
+Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
+Co-developed-by: Andrei Vagin <avagin@google.com>
+Signed-off-by: Andrei Vagin <avagin@google.com>
+Acked-by: Oleg Nesterov <oleg@redhat.com>
+Acked-by: Alexey Gladkov <legion@kernel.org>
+Cc: Kees Cook <kees@kernel.org>
+Cc: "Eric W. Biederman" <ebiederm@xmission.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/user_namespace.h | 3 ++-
+ kernel/signal.c | 3 ++-
+ kernel/ucount.c | 6 ++++--
+ 3 files changed, 8 insertions(+), 4 deletions(-)
+
+--- a/include/linux/user_namespace.h
++++ b/include/linux/user_namespace.h
+@@ -139,7 +139,8 @@ static inline long get_rlimit_value(stru
+
+ long inc_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v);
+ bool dec_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v);
+-long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type);
++long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type,
++ bool override_rlimit);
+ void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum rlimit_type type);
+ bool is_rlimit_overlimit(struct ucounts *ucounts, enum rlimit_type type, unsigned long max);
+
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -419,7 +419,8 @@ __sigqueue_alloc(int sig, struct task_st
+ */
+ rcu_read_lock();
+ ucounts = task_ucounts(t);
+- sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
++ sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING,
++ override_rlimit);
+ rcu_read_unlock();
+ if (!sigpending)
+ return NULL;
+--- a/kernel/ucount.c
++++ b/kernel/ucount.c
+@@ -307,7 +307,8 @@ void dec_rlimit_put_ucounts(struct ucoun
+ do_dec_rlimit_put_ucounts(ucounts, NULL, type);
+ }
+
+-long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type)
++long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type,
++ bool override_rlimit)
+ {
+ /* Caller must hold a reference to ucounts */
+ struct ucounts *iter;
+@@ -320,7 +321,8 @@ long inc_rlimit_get_ucounts(struct ucoun
+ goto unwind;
+ if (iter == ucounts)
+ ret = new;
+- max = get_userns_rlimit_max(iter->ns, type);
++ if (!override_rlimit)
++ max = get_userns_rlimit_max(iter->ns, type);
+ /*
+ * Grab an extra ucount reference for the caller when
+ * the rlimit count was previously 0.
--- /dev/null
+From bf791751162ac875a9439426d13f8d4d18151549 Mon Sep 17 00:00:00 2001
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+Date: Thu, 24 Oct 2024 12:26:53 +0300
+Subject: thunderbolt: Add only on-board retimers when !CONFIG_USB4_DEBUGFS_MARGINING
+
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+
+commit bf791751162ac875a9439426d13f8d4d18151549 upstream.
+
+Normally there is no need to enumerate retimers on the other side of the
+cable. This is only needed in special cases where user wants to run
+receiver lane margining against the downstream facing port of a retimer.
+Furthermore this might confuse the userspace tools such as fwupd because
+it cannot read the information it expects from these retimers.
+
+Fix this by changing the retimer enumeration code to add only on-board
+retimers when CONFIG_USB4_DEBUGFS_MARGINING is not enabled.
+
+Reported-by: AceLan Kao <acelan.kao@canonical.com>
+Tested-by: AceLan Kao <acelan.kao@canonical.com>
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=219420
+Cc: stable@vger.kernel.org
+Fixes: ff6ab055e070 ("thunderbolt: Add receiver lane margining support for retimers")
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thunderbolt/retimer.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/thunderbolt/retimer.c
++++ b/drivers/thunderbolt/retimer.c
+@@ -532,6 +532,8 @@ int tb_retimer_scan(struct tb_port *port
+ }
+
+ ret = 0;
++ if (!IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING))
++ max = min(last_idx, max);
+
+ /* Add retimers if they do not exist already */
+ for (i = 1; i <= max; i++) {
--- /dev/null
+From 9cfb31e4c89d200d8ab7cb1e0bb9e6e8d621ca0b Mon Sep 17 00:00:00 2001
+From: Roger Quadros <rogerq@kernel.org>
+Date: Mon, 4 Nov 2024 16:00:11 +0200
+Subject: usb: dwc3: fix fault at system suspend if device was already runtime suspended
+
+From: Roger Quadros <rogerq@kernel.org>
+
+commit 9cfb31e4c89d200d8ab7cb1e0bb9e6e8d621ca0b upstream.
+
+If the device was already runtime suspended then during system suspend
+we cannot access the device registers else it will crash.
+
+Also we cannot access any registers after dwc3_core_exit() on some
+platforms so move the dwc3_enable_susphy() call to the top.
+
+Cc: stable@vger.kernel.org # v5.15+
+Reported-by: William McVicker <willmcvicker@google.com>
+Closes: https://lore.kernel.org/all/ZyVfcUuPq56R2m1Y@google.com
+Fixes: 705e3ce37bcc ("usb: dwc3: core: Fix system suspend on TI AM62 platforms")
+Signed-off-by: Roger Quadros <rogerq@kernel.org>
+Acked-by: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Tested-by: Will McVicker <willmcvicker@google.com>
+Link: https://lore.kernel.org/r/20241104-am62-lpm-usb-fix-v1-1-e93df73a4f0d@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/dwc3/core.c | 25 ++++++++++++-------------
+ 1 file changed, 12 insertions(+), 13 deletions(-)
+
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -2342,10 +2342,18 @@ static int dwc3_suspend_common(struct dw
+ u32 reg;
+ int i;
+
+- dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) &
+- DWC3_GUSB2PHYCFG_SUSPHY) ||
+- (dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)) &
+- DWC3_GUSB3PIPECTL_SUSPHY);
++ if (!pm_runtime_suspended(dwc->dev) && !PMSG_IS_AUTO(msg)) {
++ dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) &
++ DWC3_GUSB2PHYCFG_SUSPHY) ||
++ (dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)) &
++ DWC3_GUSB3PIPECTL_SUSPHY);
++ /*
++ * TI AM62 platform requires SUSPHY to be
++ * enabled for system suspend to work.
++ */
++ if (!dwc->susphy_state)
++ dwc3_enable_susphy(dwc, true);
++ }
+
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_DEVICE:
+@@ -2398,15 +2406,6 @@ static int dwc3_suspend_common(struct dw
+ break;
+ }
+
+- if (!PMSG_IS_AUTO(msg)) {
+- /*
+- * TI AM62 platform requires SUSPHY to be
+- * enabled for system suspend to work.
+- */
+- if (!dwc->susphy_state)
+- dwc3_enable_susphy(dwc, true);
+- }
+-
+ return 0;
+ }
+
--- /dev/null
+From 498dbd9aea205db9da674994b74c7bf8e18448bd Mon Sep 17 00:00:00 2001
+From: Zijun Hu <quic_zijuhu@quicinc.com>
+Date: Tue, 29 Oct 2024 23:13:38 +0800
+Subject: usb: musb: sunxi: Fix accessing an released usb phy
+
+From: Zijun Hu <quic_zijuhu@quicinc.com>
+
+commit 498dbd9aea205db9da674994b74c7bf8e18448bd upstream.
+
+Commit 6ed05c68cbca ("usb: musb: sunxi: Explicitly release USB PHY on
+exit") will cause that usb phy @glue->xceiv is accessed after released.
+
+1) register platform driver @sunxi_musb_driver
+// get the usb phy @glue->xceiv
+sunxi_musb_probe() -> devm_usb_get_phy().
+
+2) register and unregister platform driver @musb_driver
+musb_probe() -> sunxi_musb_init()
+use the phy here
+//the phy is released here
+musb_remove() -> sunxi_musb_exit() -> devm_usb_put_phy()
+
+3) register @musb_driver again
+musb_probe() -> sunxi_musb_init()
+use the phy here but the phy has been released at 2).
+...
+
+Fixed by reverting the commit, namely, removing devm_usb_put_phy()
+from sunxi_musb_exit().
+
+Fixes: 6ed05c68cbca ("usb: musb: sunxi: Explicitly release USB PHY on exit")
+Cc: stable@vger.kernel.org
+Signed-off-by: Zijun Hu <quic_zijuhu@quicinc.com>
+Link: https://lore.kernel.org/r/20241029-sunxi_fix-v1-1-9431ed2ab826@quicinc.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/musb/sunxi.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/usb/musb/sunxi.c
++++ b/drivers/usb/musb/sunxi.c
+@@ -293,8 +293,6 @@ static int sunxi_musb_exit(struct musb *
+ if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags))
+ sunxi_sram_release(musb->controller->parent);
+
+- devm_usb_put_phy(glue->dev, glue->xceiv);
+-
+ return 0;
+ }
+
--- /dev/null
+From 37bb5628379295c1254c113a407cab03a0f4d0b4 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@linaro.org>
+Date: Thu, 31 Oct 2024 12:48:30 +0300
+Subject: USB: serial: io_edgeport: fix use after free in debug printk
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+commit 37bb5628379295c1254c113a407cab03a0f4d0b4 upstream.
+
+The "dev_dbg(&urb->dev->dev, ..." which happens after usb_free_urb(urb)
+is a use after free of the "urb" pointer. Store the "dev" pointer at the
+start of the function to avoid this issue.
+
+Fixes: 984f68683298 ("USB: serial: io_edgeport.c: remove dbg() usage")
+Cc: stable@vger.kernel.org
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/serial/io_edgeport.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/usb/serial/io_edgeport.c
++++ b/drivers/usb/serial/io_edgeport.c
+@@ -770,11 +770,12 @@ static void edge_bulk_out_data_callback(
+ static void edge_bulk_out_cmd_callback(struct urb *urb)
+ {
+ struct edgeport_port *edge_port = urb->context;
++ struct device *dev = &urb->dev->dev;
+ int status = urb->status;
+
+ atomic_dec(&CmdUrbs);
+- dev_dbg(&urb->dev->dev, "%s - FREE URB %p (outstanding %d)\n",
+- __func__, urb, atomic_read(&CmdUrbs));
++ dev_dbg(dev, "%s - FREE URB %p (outstanding %d)\n", __func__, urb,
++ atomic_read(&CmdUrbs));
+
+
+ /* clean up the transfer buffer */
+@@ -784,8 +785,7 @@ static void edge_bulk_out_cmd_callback(s
+ usb_free_urb(urb);
+
+ if (status) {
+- dev_dbg(&urb->dev->dev,
+- "%s - nonzero write bulk status received: %d\n",
++ dev_dbg(dev, "%s - nonzero write bulk status received: %d\n",
+ __func__, status);
+ return;
+ }
--- /dev/null
+From 393c74ccbd847bacf18865a01b422586fc7341cf Mon Sep 17 00:00:00 2001
+From: Reinhard Speyerer <rspmn@arcor.de>
+Date: Fri, 18 Oct 2024 23:07:06 +0200
+Subject: USB: serial: option: add Fibocom FG132 0x0112 composition
+
+From: Reinhard Speyerer <rspmn@arcor.de>
+
+commit 393c74ccbd847bacf18865a01b422586fc7341cf upstream.
+
+Add Fibocom FG132 0x0112 composition:
+
+T: Bus=03 Lev=02 Prnt=06 Port=01 Cnt=02 Dev#= 10 Spd=12 MxCh= 0
+D: Ver= 2.01 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs= 1
+P: Vendor=2cb7 ProdID=0112 Rev= 5.15
+S: Manufacturer=Fibocom Wireless Inc.
+S: Product=Fibocom Module
+S: SerialNumber=xxxxxxxx
+C:* #Ifs= 4 Cfg#= 1 Atr=a0 MxPwr=500mA
+I:* If#= 0 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=50 Driver=qmi_wwan
+E: Ad=82(I) Atr=03(Int.) MxPS= 8 Ivl=32ms
+E: Ad=81(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms
+E: Ad=01(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms
+I:* If#= 1 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=30 Driver=option
+E: Ad=02(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms
+E: Ad=83(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms
+I:* If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=40 Driver=option
+E: Ad=85(I) Atr=03(Int.) MxPS= 10 Ivl=32ms
+E: Ad=84(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms
+E: Ad=03(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms
+I:* If#= 3 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E: Ad=86(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms
+E: Ad=04(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms
+
+Signed-off-by: Reinhard Speyerer <rspmn@arcor.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/serial/option.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2320,6 +2320,9 @@ static const struct usb_device_id option
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0112, 0xff, 0xff, 0x30) }, /* Fibocom FG132 Diag */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0112, 0xff, 0xff, 0x40) }, /* Fibocom FG132 AT */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0112, 0xff, 0, 0) }, /* Fibocom FG132 NMEA */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0115, 0xff), /* Fibocom FM135 (laptop MBIM) */
+ .driver_info = RSVD(5) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
--- /dev/null
+From 3b05949ba39f305b585452d0e177470607842165 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Beno=C3=AEt=20Monin?= <benoit.monin@gmx.fr>
+Date: Thu, 24 Oct 2024 17:09:19 +0200
+Subject: USB: serial: option: add Quectel RG650V
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Benoît Monin <benoit.monin@gmx.fr>
+
+commit 3b05949ba39f305b585452d0e177470607842165 upstream.
+
+Add support for Quectel RG650V which is based on Qualcomm SDX65 chip.
+The composition is DIAG / NMEA / AT / AT / QMI.
+
+T: Bus=02 Lev=01 Prnt=01 Port=03 Cnt=01 Dev#= 4 Spd=5000 MxCh= 0
+D: Ver= 3.20 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 9 #Cfgs= 1
+P: Vendor=2c7c ProdID=0122 Rev=05.15
+S: Manufacturer=Quectel
+S: Product=RG650V-EU
+S: SerialNumber=xxxxxxx
+C: #Ifs= 5 Cfg#= 1 Atr=a0 MxPwr=896mA
+I: If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=30 Driver=option
+E: Ad=01(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E: Ad=81(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+I: If#= 1 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E: Ad=02(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E: Ad=82(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+I: If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E: Ad=03(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E: Ad=83(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E: Ad=84(I) Atr=03(Int.) MxPS= 10 Ivl=9ms
+I: If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E: Ad=04(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E: Ad=85(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E: Ad=86(I) Atr=03(Int.) MxPS= 10 Ivl=9ms
+I: If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan
+E: Ad=05(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E: Ad=87(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms
+E: Ad=88(I) Atr=03(Int.) MxPS= 8 Ivl=9ms
+
+Signed-off-by: Benoît Monin <benoit.monin@gmx.fr>
+Cc: stable@vger.kernel.org
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/serial/option.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -251,6 +251,7 @@ static void option_instat_callback(struc
+ #define QUECTEL_VENDOR_ID 0x2c7c
+ /* These Quectel products use Quectel's vendor ID */
+ #define QUECTEL_PRODUCT_EC21 0x0121
++#define QUECTEL_PRODUCT_RG650V 0x0122
+ #define QUECTEL_PRODUCT_EM061K_LTA 0x0123
+ #define QUECTEL_PRODUCT_EM061K_LMS 0x0124
+ #define QUECTEL_PRODUCT_EC25 0x0125
+@@ -1273,6 +1274,8 @@ static const struct usb_device_id option
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG912Y, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG916Q, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0xff, 0x30) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0, 0) },
+
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
--- /dev/null
+From 25eb47eed52979c2f5eee3f37e6c67714e02c49c Mon Sep 17 00:00:00 2001
+From: Jack Wu <wojackbb@gmail.com>
+Date: Wed, 6 Nov 2024 18:50:29 +0800
+Subject: USB: serial: qcserial: add support for Sierra Wireless EM86xx
+
+From: Jack Wu <wojackbb@gmail.com>
+
+commit 25eb47eed52979c2f5eee3f37e6c67714e02c49c upstream.
+
+Add support for Sierra Wireless EM86xx with USB-id 0x1199:0x90e5 and
+0x1199:0x90e4.
+
+0x1199:0x90e5
+T: Bus=03 Lev=01 Prnt=01 Port=05 Cnt=01 Dev#= 14 Spd=480 MxCh= 0
+D: Ver= 2.00 Cls=ef(misc ) Sub=02 Prot=01 MxPS=64 #Cfgs= 1
+P: Vendor=1199 ProdID=90e5 Rev= 5.15
+S: Manufacturer=Sierra Wireless, Incorporated
+S: Product=Semtech EM8695 Mobile Broadband Adapter
+S: SerialNumber=004403161882339
+C:* #Ifs= 6 Cfg#= 1 Atr=a0 MxPwr=500mA
+A: FirstIf#=12 IfCount= 2 Cls=02(comm.) Sub=0e Prot=00
+I:* If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=30 Driver=qcserial
+E: Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 1 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=42 Prot=01 Driver=usbfs
+E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=40 Driver=qcserial
+E: Ad=84(I) Atr=03(Int.) MxPS= 10 Ivl=32ms
+E: Ad=83(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 4 Alt= 0 #EPs= 1 Cls=ff(vend.) Sub=ff Prot=ff Driver=(none)
+E: Ad=85(I) Atr=03(Int.) MxPS= 64 Ivl=32ms
+I:* If#=12 Alt= 0 #EPs= 1 Cls=02(comm.) Sub=0e Prot=00 Driver=cdc_mbim
+E: Ad=87(I) Atr=03(Int.) MxPS= 64 Ivl=32ms
+I: If#=13 Alt= 0 #EPs= 0 Cls=0a(data ) Sub=00 Prot=02 Driver=cdc_mbim
+I:* If#=13 Alt= 1 #EPs= 2 Cls=0a(data ) Sub=00 Prot=02 Driver=cdc_mbim
+E: Ad=86(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+
+0x1199:0x90e4
+T: Bus=03 Lev=01 Prnt=01 Port=05 Cnt=01 Dev#= 16 Spd=480 MxCh= 0
+D: Ver= 2.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs= 1
+P: Vendor=1199 ProdID=90e4 Rev= 0.00
+S: Manufacturer=Sierra Wireless, Incorporated
+S: SerialNumber=004403161882339
+C:* #Ifs= 1 Cfg#= 1 Atr=a0 MxPwr= 2mA
+I:* If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=10 Driver=qcserial
+E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+
+Signed-off-by: Jack Wu <wojackbb@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/serial/qcserial.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -166,6 +166,8 @@ static const struct usb_device_id id_tab
+ {DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */
+ {DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */
+ {DEVICE_SWI(0x1199, 0x90d2)}, /* Sierra Wireless EM9191 QDL */
++ {DEVICE_SWI(0x1199, 0x90e4)}, /* Sierra Wireless EM86xx QDL*/
++ {DEVICE_SWI(0x1199, 0x90e5)}, /* Sierra Wireless EM86xx */
+ {DEVICE_SWI(0x1199, 0xc080)}, /* Sierra Wireless EM7590 QDL */
+ {DEVICE_SWI(0x1199, 0xc081)}, /* Sierra Wireless EM7590 */
+ {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
--- /dev/null
+From 7dd08a0b4193087976db6b3ee7807de7e8316f96 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@linaro.org>
+Date: Mon, 4 Nov 2024 20:16:42 +0300
+Subject: usb: typec: fix potential out of bounds in ucsi_ccg_update_set_new_cam_cmd()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+commit 7dd08a0b4193087976db6b3ee7807de7e8316f96 upstream.
+
+The "*cmd" variable can be controlled by the user via debugfs. That means
+"new_cam" can be as high as 255 while the size of the uc->updated[] array
+is UCSI_MAX_ALTMODES (30).
+
+The call tree is:
+ucsi_cmd() // val comes from simple_attr_write_xsigned()
+-> ucsi_send_command()
+ -> ucsi_send_command_common()
+ -> ucsi_run_command() // calls ucsi->ops->sync_control()
+ -> ucsi_ccg_sync_control()
+
+Fixes: 170a6726d0e2 ("usb: typec: ucsi: add support for separate DP altmode devices")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Link: https://lore.kernel.org/r/325102b3-eaa8-4918-a947-22aca1146586@stanley.mountain
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/typec/ucsi/ucsi_ccg.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
++++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
+@@ -482,6 +482,8 @@ static void ucsi_ccg_update_set_new_cam_
+
+ port = uc->orig;
+ new_cam = UCSI_SET_NEW_CAM_GET_AM(*cmd);
++ if (new_cam >= ARRAY_SIZE(uc->updated))
++ return;
+ new_port = &uc->updated[new_cam];
+ cam = new_port->linked_idx;
+ enter_new_mode = UCSI_SET_NEW_CAM_ENTER(*cmd);
--- /dev/null
+From 029778a4fd2c90c2e76a902b797c2348a722f1b8 Mon Sep 17 00:00:00 2001
+From: Rex Nie <rex.nie@jaguarmicro.com>
+Date: Wed, 30 Oct 2024 21:36:32 +0800
+Subject: usb: typec: qcom-pmic: init value of hdr_len/txbuf_len earlier
+
+From: Rex Nie <rex.nie@jaguarmicro.com>
+
+commit 029778a4fd2c90c2e76a902b797c2348a722f1b8 upstream.
+
+If the read of USB_PDPHY_RX_ACKNOWLEDGE_REG failed, then hdr_len and
+txbuf_len are uninitialized. This commit stops to print uninitialized
+value and misleading/false data.
+
+Cc: stable@vger.kernel.org
+Fixes: a4422ff22142 (" usb: typec: qcom: Add Qualcomm PMIC Type-C driver")
+Signed-off-by: Rex Nie <rex.nie@jaguarmicro.com>
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Reviewed-by: Bjorn Andersson <andersson@kernel.org>
+Acked-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+Link: https://lore.kernel.org/r/20241030133632.2116-1-rex.nie@jaguarmicro.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
++++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
+@@ -227,6 +227,10 @@ qcom_pmic_typec_pdphy_pd_transmit_payloa
+
+ spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
+
++ hdr_len = sizeof(msg->header);
++ txbuf_len = pd_header_cnt_le(msg->header) * 4;
++ txsize_len = hdr_len + txbuf_len - 1;
++
+ ret = regmap_read(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG,
+ &val);
+@@ -244,10 +248,6 @@ qcom_pmic_typec_pdphy_pd_transmit_payloa
+ if (ret)
+ goto done;
+
+- hdr_len = sizeof(msg->header);
+- txbuf_len = pd_header_cnt_le(msg->header) * 4;
+- txsize_len = hdr_len + txbuf_len - 1;
+-
+ /* Write message header sizeof(u16) to USB_PDPHY_TX_BUFFER_HDR_REG */
+ ret = regmap_bulk_write(pmic_typec_pdphy->regmap,
+ pmic_typec_pdphy->base + USB_PDPHY_TX_BUFFER_HDR_REG,