git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 6.6
author Sasha Levin <sashal@kernel.org>
Mon, 18 Nov 2024 14:35:46 +0000 (09:35 -0500)
committer Sasha Levin <sashal@kernel.org>
Mon, 18 Nov 2024 18:07:51 +0000 (13:07 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-6.6/mm-damon-core-handle-zero-aggregation-ops_update-int.patch [new file with mode: 0644]
queue-6.6/mm-damon-core-implement-scheme-specific-apply-interv.patch [new file with mode: 0644]
queue-6.6/series
queue-6.6/staging-vchiq_arm-get-the-rid-off-struct-vchiq_2835_.patch [new file with mode: 0644]
queue-6.6/staging-vchiq_arm-use-devm_kzalloc-for-vchiq_arm_sta.patch [new file with mode: 0644]

diff --git a/queue-6.6/mm-damon-core-handle-zero-aggregation-ops_update-int.patch b/queue-6.6/mm-damon-core-handle-zero-aggregation-ops_update-int.patch
new file mode 100644 (file)
index 0000000..b45eef9
--- /dev/null
@@ -0,0 +1,97 @@
+From 5bd7fe8c7e9559fbf24792d78e6f8d3b40b62ed5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Oct 2024 11:37:56 -0700
+Subject: mm/damon/core: handle zero {aggregation,ops_update} intervals
+
+From: SeongJae Park <sj@kernel.org>
+
+[ Upstream commit 3488af0970445ff5532c7e8dc5e6456b877aee5e ]
+
+Patch series "mm/damon/core: fix handling of zero non-sampling intervals".
+
+DAMON's internal intervals accounting logic does not correctly handle
+non-sampling intervals of zero value, due to a wrong assumption.  This
+can cause unexpected monitoring behavior, and can even result in an
+indefinite hang of DAMON sysfs interface user threads in the case of a
+zero aggregation interval.  Fix those by updating the intervals
+accounting logic.  For details of the root cause and the solutions,
+please refer to the commit messages of the fixes.
+
+This patch (of 2):
+
+DAMON's logic for determining whether it is time to do an aggregation or
+an ops update assumes next_{aggregation,ops_update}_sis are always set
+larger than the current passed_sample_intervals.  It therefore further
+assumes that continuously incrementing passed_sample_intervals on every
+sampling interval will eventually make it reach
+next_{aggregation,ops_update}_sis.  The logic hence takes the action and
+updates next_{aggregation,ops_update}_sis only when
+passed_sample_intervals is exactly equal to the respective count.
+
+If the aggregation interval or the ops update interval is zero, however,
+next_aggregation_sis or next_ops_update_sis is set to the same value as
+the current passed_sample_intervals, respectively.  And
+passed_sample_intervals is incremented before the
+next_{aggregation,ops_update}_sis check is done.  Hence,
+passed_sample_intervals becomes larger than
+next_{aggregation,ops_update}_sis, and the logic keeps concluding that it
+is not yet time to take the action or to update
+next_{aggregation,ops_update}_sis, until an overflow happens.  In other
+words, DAMON effectively stops doing aggregations or ops updates forever,
+and users cannot get monitoring results.
+
+Based on the documentation and common sense, a reasonable behavior for
+such inputs is to do an aggregation and an ops update on every sampling
+interval.  Handle the case by removing the assumption.
+
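+To make the failure mode concrete, the following minimal user-space model
+(a sketch only; the variable names merely mirror the kernel's) shows why
+the == check never fires once the interval is zero, while the fixed >=
+check fires on every sampling event:
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+            unsigned long passed = 0, aggr_us = 0, sample_us = 1;
+            /* zero interval: the next target equals the current count */
+            unsigned long next_sis = passed + aggr_us / sample_us;
+            int i;
+
+            for (i = 0; i < 3; i++) {
+                    passed++;       /* incremented before the check */
+                    if (passed == next_sis)
+                            printf("== fires at %lu\n", passed); /* never */
+                    if (passed >= next_sis) {       /* the fixed check */
+                            printf(">= fires at %lu\n", passed);
+                            next_sis = passed + aggr_us / sample_us;
+                    }
+            }
+            return 0;
+    }
+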
+Note that this can cause a real issue for DAMON sysfs interface users, in
+the case of a zero aggregation interval.  When a user starts DAMON with a
+zero aggregation interval and requests online DAMON parameter tuning via
+the DAMON sysfs interface, the request is handled by the aggregation
+callback.  Until the callback finishes the work, the user who requested
+the online tuning just waits.  Hence, the user will be stuck until
+passed_sample_intervals overflows.
+
+Link: https://lkml.kernel.org/r/20241031183757.49610-1-sj@kernel.org
+Link: https://lkml.kernel.org/r/20241031183757.49610-2-sj@kernel.org
+Fixes: 4472edf63d66 ("mm/damon/core: use number of passed access sampling as a timer")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org>   [6.7.x]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/damon/core.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/mm/damon/core.c b/mm/damon/core.c
+index a29390fd55935..d0441e24a8ed5 100644
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -1454,7 +1454,7 @@ static int kdamond_fn(void *data)
+               if (ctx->ops.check_accesses)
+                       max_nr_accesses = ctx->ops.check_accesses(ctx);
+-              if (ctx->passed_sample_intervals == next_aggregation_sis) {
++              if (ctx->passed_sample_intervals >= next_aggregation_sis) {
+                       kdamond_merge_regions(ctx,
+                                       max_nr_accesses / 10,
+                                       sz_limit);
+@@ -1472,7 +1472,7 @@ static int kdamond_fn(void *data)
+               sample_interval = ctx->attrs.sample_interval ?
+                       ctx->attrs.sample_interval : 1;
+-              if (ctx->passed_sample_intervals == next_aggregation_sis) {
++              if (ctx->passed_sample_intervals >= next_aggregation_sis) {
+                       ctx->next_aggregation_sis = next_aggregation_sis +
+                               ctx->attrs.aggr_interval / sample_interval;
+@@ -1482,7 +1482,7 @@ static int kdamond_fn(void *data)
+                               ctx->ops.reset_aggregated(ctx);
+               }
+-              if (ctx->passed_sample_intervals == next_ops_update_sis) {
++              if (ctx->passed_sample_intervals >= next_ops_update_sis) {
+                       ctx->next_ops_update_sis = next_ops_update_sis +
+                               ctx->attrs.ops_update_interval /
+                               sample_interval;
+-- 
+2.43.0
+
diff --git a/queue-6.6/mm-damon-core-implement-scheme-specific-apply-interv.patch b/queue-6.6/mm-damon-core-implement-scheme-specific-apply-interv.patch
new file mode 100644 (file)
index 0000000..154de77
--- /dev/null
@@ -0,0 +1,313 @@
+From 094d70aca44cc2e0a42be91a5a242c31146bdd2f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 16 Sep 2023 02:09:40 +0000
+Subject: mm/damon/core: implement scheme-specific apply interval
+
+From: SeongJae Park <sj@kernel.org>
+
+[ Upstream commit 42f994b71404b17abcd6b170de7a6aa95ffe5d4a ]
+
+DAMON-based operation schemes are applied on every aggregation interval.
+That was mainly because schemes were using nr_accesses, which becomes
+complete and usable only at each aggregation interval.  However, the
+schemes now use nr_accesses_bp, which is updated on each sampling interval
+in a way that makes it reasonable to use at any time.  Therefore, there is
+no reason to apply schemes only on each aggregation interval.
+
+The unnecessary alignment with the aggregation interval was also making
+some DAMOS use cases tricky.  Setting quotas under a long aggregation
+interval is one such example.  Suppose the aggregation interval is ten
+seconds, and there is a scheme with a CPU quota of 100ms per 1s.  The
+scheme will actually use 100ms per ten seconds, since it cannot be applied
+before the next aggregation interval.  The feature is working as intended,
+but the result may not be that intuitive for some users.  This could be
+fixed by updating the quota to 1s per 10s.  But in that case, the CPU
+usage of DAMOS could look like spikes, and would actually have a bad
+effect on other CPU-sensitive workloads.
+
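+Worked numbers for that example (illustrative only): a 100ms-per-1s quota
+that can be charged only once per 10s aggregation window yields
+100ms / 10s = 1% CPU rather than the intended 100ms / 1s = 10%; the
+1s-per-10s workaround restores the 10% average but concentrates it in one
+burst per window.
+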
+Implement a dedicated timing interval for each DAMON-based operation
+scheme, namely apply_interval.  The interval is sampling-interval aligned,
+and each scheme is applied on its own apply_interval.  The interval is set
+to 0 by default, meaning the scheme should use the aggregation interval
+instead.  This keeps old users from seeing any behavioral difference.
+
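+As a rough model of that alignment (a sketch with a hypothetical helper
+name; the in-tree equivalent is damos_set_next_apply_sis() in the hunk
+below), the next apply time in units of sampling events would be:
+
+    unsigned long next_apply_sis(unsigned long passed_sis,
+                                 unsigned long apply_us,
+                                 unsigned long sample_us,
+                                 unsigned long aggr_us)
+    {
+            if (!sample_us)
+                    sample_us = 1;
+            if (!apply_us)
+                    apply_us = aggr_us; /* 0 means "use aggr interval" */
+            return passed_sis + apply_us / sample_us;
+    }
+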
+Link: https://lkml.kernel.org/r/20230916020945.47296-5-sj@kernel.org
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 3488af097044 ("mm/damon/core: handle zero {aggregation,ops_update} intervals")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/damon.h    | 17 ++++++++--
+ mm/damon/core.c          | 72 ++++++++++++++++++++++++++++++++++++----
+ mm/damon/dbgfs.c         |  3 +-
+ mm/damon/lru_sort.c      |  2 ++
+ mm/damon/reclaim.c       |  2 ++
+ mm/damon/sysfs-schemes.c |  2 +-
+ 6 files changed, 87 insertions(+), 11 deletions(-)
+
+diff --git a/include/linux/damon.h b/include/linux/damon.h
+index a953d7083cd59..343132a146cf0 100644
+--- a/include/linux/damon.h
++++ b/include/linux/damon.h
+@@ -298,16 +298,19 @@ struct damos_access_pattern {
+  * struct damos - Represents a Data Access Monitoring-based Operation Scheme.
+  * @pattern:          Access pattern of target regions.
+  * @action:           &damo_action to be applied to the target regions.
++ * @apply_interval_us:        The time between applying the @action.
+  * @quota:            Control the aggressiveness of this scheme.
+  * @wmarks:           Watermarks for automated (in)activation of this scheme.
+  * @filters:          Additional set of &struct damos_filter for &action.
+  * @stat:             Statistics of this scheme.
+  * @list:             List head for siblings.
+  *
+- * For each aggregation interval, DAMON finds regions which fit in the
++ * For each @apply_interval_us, DAMON finds regions which fit in the
+  * &pattern and applies &action to those. To avoid consuming too much
+  * CPU time or IO resources for the &action, &quota is used.
+  *
++ * If @apply_interval_us is zero, &damon_attrs->aggr_interval is used instead.
++ *
+  * To do the work only when needed, schemes can be activated for specific
+  * system situations using &wmarks.  If all schemes that registered to the
+  * monitoring context are inactive, DAMON stops monitoring either, and just
+@@ -327,6 +330,14 @@ struct damos_access_pattern {
+ struct damos {
+       struct damos_access_pattern pattern;
+       enum damos_action action;
++      unsigned long apply_interval_us;
++/* private: internal use only */
++      /*
++       * number of sample intervals that should be passed before applying
++       * @action
++       */
++      unsigned long next_apply_sis;
++/* public: */
+       struct damos_quota quota;
+       struct damos_watermarks wmarks;
+       struct list_head filters;
+@@ -627,7 +638,9 @@ void damos_add_filter(struct damos *s, struct damos_filter *f);
+ void damos_destroy_filter(struct damos_filter *f);
+ struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
+-                      enum damos_action action, struct damos_quota *quota,
++                      enum damos_action action,
++                      unsigned long apply_interval_us,
++                      struct damos_quota *quota,
+                       struct damos_watermarks *wmarks);
+ void damon_add_scheme(struct damon_ctx *ctx, struct damos *s);
+ void damon_destroy_scheme(struct damos *s);
+diff --git a/mm/damon/core.c b/mm/damon/core.c
+index ae55f20835b06..a29390fd55935 100644
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -312,7 +312,9 @@ static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
+ }
+ struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
+-                      enum damos_action action, struct damos_quota *quota,
++                      enum damos_action action,
++                      unsigned long apply_interval_us,
++                      struct damos_quota *quota,
+                       struct damos_watermarks *wmarks)
+ {
+       struct damos *scheme;
+@@ -322,6 +324,13 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
+               return NULL;
+       scheme->pattern = *pattern;
+       scheme->action = action;
++      scheme->apply_interval_us = apply_interval_us;
++      /*
++       * next_apply_sis will be set when kdamond starts.  While kdamond is
++       * running, it will also updated when it is added to the DAMON context,
++       * or damon_attrs are updated.
++       */
++      scheme->next_apply_sis = 0;
+       INIT_LIST_HEAD(&scheme->filters);
+       scheme->stat = (struct damos_stat){};
+       INIT_LIST_HEAD(&scheme->list);
+@@ -334,9 +343,21 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
+       return scheme;
+ }
++static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
++{
++      unsigned long sample_interval = ctx->attrs.sample_interval ?
++              ctx->attrs.sample_interval : 1;
++      unsigned long apply_interval = s->apply_interval_us ?
++              s->apply_interval_us : ctx->attrs.aggr_interval;
++
++      s->next_apply_sis = ctx->passed_sample_intervals +
++              apply_interval / sample_interval;
++}
++
+ void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
+ {
+       list_add_tail(&s->list, &ctx->schemes);
++      damos_set_next_apply_sis(s, ctx);
+ }
+ static void damon_del_scheme(struct damos *s)
+@@ -548,6 +569,7 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
+ {
+       unsigned long sample_interval = attrs->sample_interval ?
+               attrs->sample_interval : 1;
++      struct damos *s;
+       if (attrs->min_nr_regions < 3)
+               return -EINVAL;
+@@ -563,6 +585,10 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
+       damon_update_monitoring_results(ctx, attrs);
+       ctx->attrs = *attrs;
++
++      damon_for_each_scheme(s, ctx)
++              damos_set_next_apply_sis(s, ctx);
++
+       return 0;
+ }
+@@ -1055,14 +1081,29 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
+       struct damon_target *t;
+       struct damon_region *r, *next_r;
+       struct damos *s;
++      unsigned long sample_interval = c->attrs.sample_interval ?
++              c->attrs.sample_interval : 1;
++      bool has_schemes_to_apply = false;
+       damon_for_each_scheme(s, c) {
++              if (c->passed_sample_intervals != s->next_apply_sis)
++                      continue;
++
++              s->next_apply_sis +=
++                      (s->apply_interval_us ? s->apply_interval_us :
++                       c->attrs.aggr_interval) / sample_interval;
++
+               if (!s->wmarks.activated)
+                       continue;
++              has_schemes_to_apply = true;
++
+               damos_adjust_quota(c, s);
+       }
++      if (!has_schemes_to_apply)
++              return;
++
+       damon_for_each_target(t, c) {
+               damon_for_each_region_safe(r, next_r, t)
+                       damon_do_apply_schemes(c, t, r);
+@@ -1348,11 +1389,19 @@ static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
+ {
+       unsigned long sample_interval = ctx->attrs.sample_interval ?
+               ctx->attrs.sample_interval : 1;
++      unsigned long apply_interval;
++      struct damos *scheme;
+       ctx->passed_sample_intervals = 0;
+       ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
+       ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
+               sample_interval;
++
++      damon_for_each_scheme(scheme, ctx) {
++              apply_interval = scheme->apply_interval_us ?
++                      scheme->apply_interval_us : ctx->attrs.aggr_interval;
++              scheme->next_apply_sis = apply_interval / sample_interval;
++      }
+ }
+ /*
+@@ -1405,19 +1454,28 @@ static int kdamond_fn(void *data)
+               if (ctx->ops.check_accesses)
+                       max_nr_accesses = ctx->ops.check_accesses(ctx);
+-              sample_interval = ctx->attrs.sample_interval ?
+-                      ctx->attrs.sample_interval : 1;
+               if (ctx->passed_sample_intervals == next_aggregation_sis) {
+-                      ctx->next_aggregation_sis = next_aggregation_sis +
+-                              ctx->attrs.aggr_interval / sample_interval;
+                       kdamond_merge_regions(ctx,
+                                       max_nr_accesses / 10,
+                                       sz_limit);
+                       if (ctx->callback.after_aggregation &&
+                                       ctx->callback.after_aggregation(ctx))
+                               break;
+-                      if (!list_empty(&ctx->schemes))
+-                              kdamond_apply_schemes(ctx);
++              }
++
++              /*
++               * do kdamond_apply_schemes() after kdamond_merge_regions() if
++               * possible, to reduce overhead
++               */
++              if (!list_empty(&ctx->schemes))
++                      kdamond_apply_schemes(ctx);
++
++              sample_interval = ctx->attrs.sample_interval ?
++                      ctx->attrs.sample_interval : 1;
++              if (ctx->passed_sample_intervals == next_aggregation_sis) {
++                      ctx->next_aggregation_sis = next_aggregation_sis +
++                              ctx->attrs.aggr_interval / sample_interval;
++
+                       kdamond_reset_aggregated(ctx);
+                       kdamond_split_regions(ctx);
+                       if (ctx->ops.reset_aggregated)
+diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
+index 124f0f8c97b75..dc0ea1fc30ca5 100644
+--- a/mm/damon/dbgfs.c
++++ b/mm/damon/dbgfs.c
+@@ -278,7 +278,8 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
+                       goto fail;
+               pos += parsed;
+-              scheme = damon_new_scheme(&pattern, action, &quota, &wmarks);
++              scheme = damon_new_scheme(&pattern, action, 0, &quota,
++                              &wmarks);
+               if (!scheme)
+                       goto fail;
+diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
+index e84495ab92cf3..3de2916a65c38 100644
+--- a/mm/damon/lru_sort.c
++++ b/mm/damon/lru_sort.c
+@@ -158,6 +158,8 @@ static struct damos *damon_lru_sort_new_scheme(
+                       pattern,
+                       /* (de)prioritize on LRU-lists */
+                       action,
++                      /* for each aggregation interval */
++                      0,
+                       /* under the quota. */
+                       &quota,
+                       /* (De)activate this according to the watermarks. */
+diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
+index eca9d000ecc53..66e190f0374ac 100644
+--- a/mm/damon/reclaim.c
++++ b/mm/damon/reclaim.c
+@@ -142,6 +142,8 @@ static struct damos *damon_reclaim_new_scheme(void)
+                       &pattern,
+                       /* page out those, as soon as found */
+                       DAMOS_PAGEOUT,
++                      /* for each aggregation interval */
++                      0,
+                       /* under the quota. */
+                       &damon_reclaim_quota,
+                       /* (De)activate this according to the watermarks. */
+diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
+index 36dcd881a19c0..26c948f87489e 100644
+--- a/mm/damon/sysfs-schemes.c
++++ b/mm/damon/sysfs-schemes.c
+@@ -1613,7 +1613,7 @@ static struct damos *damon_sysfs_mk_scheme(
+               .low = sysfs_wmarks->low,
+       };
+-      scheme = damon_new_scheme(&pattern, sysfs_scheme->action, &quota,
++      scheme = damon_new_scheme(&pattern, sysfs_scheme->action, 0, &quota,
+                       &wmarks);
+       if (!scheme)
+               return NULL;
+-- 
+2.43.0
+
diff --git a/queue-6.6/series b/queue-6.6/series
index fa7940c9ba83e350312871cbdf42ed662cc761b3..9368f5a1eca7a8c2f3f8675f98f2779eaa08caf2 100644 (file)
@@ -52,3 +52,7 @@ pmdomain-imx93-blk-ctrl-correct-remove-path.patch
 nouveau-fw-sync-dma-after-setup-is-called.patch
 drm-amd-fix-initialization-mistake-for-nbio-7.7.0.patch
 drm-amd-display-adjust-vsdb-parser-for-replay-feature.patch
+mm-damon-core-implement-scheme-specific-apply-interv.patch
+mm-damon-core-handle-zero-aggregation-ops_update-int.patch
+staging-vchiq_arm-get-the-rid-off-struct-vchiq_2835_.patch
+staging-vchiq_arm-use-devm_kzalloc-for-vchiq_arm_sta.patch
diff --git a/queue-6.6/staging-vchiq_arm-get-the-rid-off-struct-vchiq_2835_.patch b/queue-6.6/staging-vchiq_arm-get-the-rid-off-struct-vchiq_2835_.patch
new file mode 100644 (file)
index 0000000..5583c1e
--- /dev/null
@@ -0,0 +1,79 @@
+From 6dfbe73bafa5ceacc0e88e81d48e33d3337903d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Jun 2024 15:19:53 +0200
+Subject: staging: vchiq_arm: Get the rid off struct vchiq_2835_state
+
+From: Stefan Wahren <wahrenst@gmx.net>
+
+[ Upstream commit 4e2766102da632f26341d5539519b0abf73df887 ]
+
+The whole benefit of this encapsulating struct is questionable.  It just
+stores a flag to signal the init state of vchiq_arm_state.  Besides the
+fact that this flag is set too soon, access to uninitialized members
+should be avoided.  So initialize vchiq_arm_state properly before
+assigning it directly to vchiq_state.
+
+Signed-off-by: Stefan Wahren <wahrenst@gmx.net>
+Link: https://lore.kernel.org/r/20240621131958.98208-6-wahrenst@gmx.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 404b739e8955 ("staging: vchiq_arm: Use devm_kzalloc() for vchiq_arm_state allocation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../interface/vchiq_arm/vchiq_arm.c           | 25 +++++--------------
+ 1 file changed, 6 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+index aa2313f3bcab8..0a97fb237f5e7 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+@@ -115,11 +115,6 @@ struct vchiq_arm_state {
+       int first_connect;
+ };
+-struct vchiq_2835_state {
+-      int inited;
+-      struct vchiq_arm_state arm_state;
+-};
+-
+ struct vchiq_pagelist_info {
+       struct pagelist *pagelist;
+       size_t pagelist_buffer_size;
+@@ -580,29 +575,21 @@ vchiq_arm_init_state(struct vchiq_state *state,
+ int
+ vchiq_platform_init_state(struct vchiq_state *state)
+ {
+-      struct vchiq_2835_state *platform_state;
++      struct vchiq_arm_state *platform_state;
+-      state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
+-      if (!state->platform_state)
++      platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
++      if (!platform_state)
+               return -ENOMEM;
+-      platform_state = (struct vchiq_2835_state *)state->platform_state;
+-
+-      platform_state->inited = 1;
+-      vchiq_arm_init_state(state, &platform_state->arm_state);
++      vchiq_arm_init_state(state, platform_state);
++      state->platform_state = (struct opaque_platform_state *)platform_state;
+       return 0;
+ }
+ static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
+ {
+-      struct vchiq_2835_state *platform_state;
+-
+-      platform_state   = (struct vchiq_2835_state *)state->platform_state;
+-
+-      WARN_ON_ONCE(!platform_state->inited);
+-
+-      return &platform_state->arm_state;
++      return (struct vchiq_arm_state *)state->platform_state;
+ }
+ void
+-- 
+2.43.0
+
diff --git a/queue-6.6/staging-vchiq_arm-use-devm_kzalloc-for-vchiq_arm_sta.patch b/queue-6.6/staging-vchiq_arm-use-devm_kzalloc-for-vchiq_arm_sta.patch
new file mode 100644 (file)
index 0000000..1a83cd8
--- /dev/null
@@ -0,0 +1,44 @@
+From 5c6efe7a5e8b28f6ff2fc760f17e1dfcc5006eea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 16 Oct 2024 18:32:24 +0530
+Subject: staging: vchiq_arm: Use devm_kzalloc() for vchiq_arm_state allocation
+
+From: Umang Jain <umang.jain@ideasonboard.com>
+
+[ Upstream commit 404b739e895522838f1abdc340c554654d671dde ]
+
+The struct vchiq_arm_state 'platform_state' is currently allocated
+dynamically using kzalloc().  Unfortunately, it is never freed, and is
+subject to memory leaks in the error handling paths of the probe()
+function.
+
+To address the issue, use the device resource management helper
+devm_kzalloc() to ensure cleanup after its allocation.
+
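+A sketch of the resulting pattern (illustrative, not the full driver
+change): memory obtained from devm_kzalloc() is bound to the device and
+released automatically on probe() failure or driver unbind, so no
+explicit kfree() is needed on any error path:
+
+    struct vchiq_arm_state *platform_state;
+
+    platform_state = devm_kzalloc(state->dev, sizeof(*platform_state),
+                                  GFP_KERNEL);
+    if (!platform_state)
+            return -ENOMEM;
+    /* later failures just return; the allocation dies with the device */
+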
+Fixes: 71bad7f08641 ("staging: add bcm2708 vchiq driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Umang Jain <umang.jain@ideasonboard.com>
+Reviewed-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://lore.kernel.org/r/20241016130225.61024-2-umang.jain@ideasonboard.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+index 0a97fb237f5e7..92aa98bbdc662 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+@@ -577,7 +577,7 @@ vchiq_platform_init_state(struct vchiq_state *state)
+ {
+       struct vchiq_arm_state *platform_state;
+-      platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
++      platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
+       if (!platform_state)
+               return -ENOMEM;
+-- 
+2.43.0
+