4.19-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 20 Jul 2020 09:54:56 +0000 (11:54 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 20 Jul 2020 09:54:56 +0000 (11:54 +0200)
added patches:
apparmor-ensure-that-dfa-state-tables-have-entries.patch
perf-stat-zero-all-the-ena-and-run-array-slot-stats-for-interval-mode.patch
soc-qcom-rpmh-invalidate-sleep-and-wake-tcses-before-flushing-new-data.patch
soc-qcom-rpmh-rsc-allow-using-free-wake-tcs-for-active-request.patch
soc-qcom-rpmh-rsc-clear-active-mode-configuration-for-wake-tcs.patch
soc-qcom-rpmh-update-dirty-flag-only-when-data-changes.patch

queue-4.19/apparmor-ensure-that-dfa-state-tables-have-entries.patch [new file with mode: 0644]
queue-4.19/perf-stat-zero-all-the-ena-and-run-array-slot-stats-for-interval-mode.patch [new file with mode: 0644]
queue-4.19/series
queue-4.19/soc-qcom-rpmh-invalidate-sleep-and-wake-tcses-before-flushing-new-data.patch [new file with mode: 0644]
queue-4.19/soc-qcom-rpmh-rsc-allow-using-free-wake-tcs-for-active-request.patch [new file with mode: 0644]
queue-4.19/soc-qcom-rpmh-rsc-clear-active-mode-configuration-for-wake-tcs.patch [new file with mode: 0644]
queue-4.19/soc-qcom-rpmh-update-dirty-flag-only-when-data-changes.patch [new file with mode: 0644]

diff --git a/queue-4.19/apparmor-ensure-that-dfa-state-tables-have-entries.patch b/queue-4.19/apparmor-ensure-that-dfa-state-tables-have-entries.patch
new file mode 100644
index 0000000..1bb80f0
--- /dev/null
+++ b/queue-4.19/apparmor-ensure-that-dfa-state-tables-have-entries.patch
@@ -0,0 +1,44 @@
+From c27c6bd2c4d6b6bb779f9b722d5607993e1d5e5c Mon Sep 17 00:00:00 2001
+From: John Johansen <john.johansen@canonical.com>
+Date: Mon, 30 Mar 2020 23:37:54 -0700
+Subject: apparmor: ensure that dfa state tables have entries
+
+From: John Johansen <john.johansen@canonical.com>
+
+commit c27c6bd2c4d6b6bb779f9b722d5607993e1d5e5c upstream.
+
+Currently it is possible to specify a state machine table with 0 length.
+This is not valid, as optional tables are specified by not defining
+the table as present. Further, a zero-length table allows bypassing the
+base table's range check against the next/check tables.
+
+Fixes: d901d6a298dc ("apparmor: dfa split verification of table headers")
+Reported-by: Mike Salvatore <mike.salvatore@canonical.com>
+Signed-off-by: John Johansen <john.johansen@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/apparmor/match.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/security/apparmor/match.c
++++ b/security/apparmor/match.c
+@@ -101,6 +101,9 @@ static struct table_header *unpack_table
+             th.td_flags == YYTD_DATA8))
+               goto out;
++      /* if we have a table it must have some entries */
++      if (th.td_lolen == 0)
++              goto out;
+       tsize = table_size(th.td_lolen, th.td_flags);
+       if (bsize < tsize)
+               goto out;
+@@ -202,6 +205,8 @@ static int verify_dfa(struct aa_dfa *dfa
+       state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
+       trans_count = dfa->tables[YYTD_ID_NXT]->td_lolen;
++      if (state_count == 0)
++              goto out;
+       for (i = 0; i < state_count; i++) {
+               if (!(BASE_TABLE(dfa)[i] & MATCH_FLAG_DIFF_ENCODE) &&
+                   (DEFAULT_TABLE(dfa)[i] >= state_count))
diff --git a/queue-4.19/perf-stat-zero-all-the-ena-and-run-array-slot-stats-for-interval-mode.patch b/queue-4.19/perf-stat-zero-all-the-ena-and-run-array-slot-stats-for-interval-mode.patch
new file mode 100644
index 0000000..ef469be
--- /dev/null
+++ b/queue-4.19/perf-stat-zero-all-the-ena-and-run-array-slot-stats-for-interval-mode.patch
@@ -0,0 +1,49 @@
+From 0e0bf1ea1147fcf74eab19c2d3c853cc3740a72f Mon Sep 17 00:00:00 2001
+From: Jin Yao <yao.jin@linux.intel.com>
+Date: Thu, 9 Apr 2020 15:07:55 +0800
+Subject: perf stat: Zero all the 'ena' and 'run' array slot stats for interval mode
+
+From: Jin Yao <yao.jin@linux.intel.com>
+
+commit 0e0bf1ea1147fcf74eab19c2d3c853cc3740a72f upstream.
+
+As the code comments in perf_stat_process_counter() say, we calculate the
+counter's data every interval, and the display code shows the avg value
+from ps->res_stats. We need to zero the stats for interval mode.
+
+But the current code only zeros res_stats[0]; it doesn't zero
+res_stats[1] and res_stats[2], which hold the counter's 'ena' and 'run'
+values.
+
+This patch zeros the whole res_stats[] array for interval mode.
+
+Fixes: 51fd2df1e882 ("perf stat: Fix interval output values")
+Signed-off-by: Jin Yao <yao.jin@linux.intel.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Jin Yao <yao.jin@intel.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Kan Liang <kan.liang@linux.intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lore.kernel.org/lkml/20200409070755.17261-1-yao.jin@linux.intel.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/util/stat.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/tools/perf/util/stat.c
++++ b/tools/perf/util/stat.c
+@@ -358,8 +358,10 @@ int perf_stat_process_counter(struct per
+        * interval mode, otherwise overall avg running
+        * averages will be shown for each interval.
+        */
+-      if (config->interval)
+-              init_stats(ps->res_stats);
++      if (config->interval) {
++              for (i = 0; i < 3; i++)
++                      init_stats(&ps->res_stats[i]);
++      }
+       if (counter->per_pkg)
+               zero_per_pkg(counter);
diff --git a/queue-4.19/series b/queue-4.19/series
index 74b767a47a56f86036c67b9d3210db104acc0b89..1d729ff262c654f0efc0524d06fdcd22b08db679 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -67,3 +67,9 @@ revert-thermal-mediatek-fix-register-index-error.patch
 arm-dts-socfpga-align-l2-cache-controller-nodename-w.patch
 regmap-debugfs-don-t-sleep-while-atomic-for-fast_io-.patch
 copy_xstate_to_kernel-fix-typo-which-caused-gdb-regression.patch
+apparmor-ensure-that-dfa-state-tables-have-entries.patch
+perf-stat-zero-all-the-ena-and-run-array-slot-stats-for-interval-mode.patch
+soc-qcom-rpmh-update-dirty-flag-only-when-data-changes.patch
+soc-qcom-rpmh-invalidate-sleep-and-wake-tcses-before-flushing-new-data.patch
+soc-qcom-rpmh-rsc-clear-active-mode-configuration-for-wake-tcs.patch
+soc-qcom-rpmh-rsc-allow-using-free-wake-tcs-for-active-request.patch
diff --git a/queue-4.19/soc-qcom-rpmh-invalidate-sleep-and-wake-tcses-before-flushing-new-data.patch b/queue-4.19/soc-qcom-rpmh-invalidate-sleep-and-wake-tcses-before-flushing-new-data.patch
new file mode 100644
index 0000000..203e482
--- /dev/null
+++ b/queue-4.19/soc-qcom-rpmh-invalidate-sleep-and-wake-tcses-before-flushing-new-data.patch
@@ -0,0 +1,103 @@
+From f5ac95f9ca2f439179a5baf48e1c0f22f83d936e Mon Sep 17 00:00:00 2001
+From: Maulik Shah <mkshah@codeaurora.org>
+Date: Sun, 12 Apr 2020 20:20:01 +0530
+Subject: soc: qcom: rpmh: Invalidate SLEEP and WAKE TCSes before flushing new data
+
+From: Maulik Shah <mkshah@codeaurora.org>
+
+commit f5ac95f9ca2f439179a5baf48e1c0f22f83d936e upstream.
+
+TCSes may still hold previously programmed data when rpmh_flush() is
+called. This can cause the old data to trigger along with the newly
+flushed data.
+
+Fix this by cleaning the SLEEP and WAKE TCSes before new data is flushed.
+
+With this there is no need to invoke rpmh_rsc_invalidate() from
+rpmh_invalidate().
+
+Simplify rpmh_invalidate() by moving the body of invalidate_batch() into it.
+
+Fixes: 600513dfeef3 ("drivers: qcom: rpmh: cache sleep/wake state requests")
+Signed-off-by: Maulik Shah <mkshah@codeaurora.org>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Reviewed-by: Stephen Boyd <swboyd@chromium.org>
+Link: https://lore.kernel.org/r/1586703004-13674-4-git-send-email-mkshah@codeaurora.org
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/soc/qcom/rpmh.c |   41 ++++++++++++++++++-----------------------
+ 1 file changed, 18 insertions(+), 23 deletions(-)
+
+--- a/drivers/soc/qcom/rpmh.c
++++ b/drivers/soc/qcom/rpmh.c
+@@ -318,19 +318,6 @@ static int flush_batch(struct rpmh_ctrlr
+       return ret;
+ }
+-static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
+-{
+-      struct batch_cache_req *req, *tmp;
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&ctrlr->cache_lock, flags);
+-      list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
+-              kfree(req);
+-      INIT_LIST_HEAD(&ctrlr->batch_cache);
+-      ctrlr->dirty = true;
+-      spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+-}
+-
+ /**
+  * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
+  * batch to finish.
+@@ -470,6 +457,13 @@ int rpmh_flush(const struct device *dev)
+               return 0;
+       }
++      /* Invalidate the TCSes first to avoid stale data */
++      do {
++              ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
++      } while (ret == -EAGAIN);
++      if (ret)
++              return ret;
++
+       /* First flush the cached batch requests */
+       ret = flush_batch(ctrlr);
+       if (ret)
+@@ -501,24 +495,25 @@ int rpmh_flush(const struct device *dev)
+ EXPORT_SYMBOL(rpmh_flush);
+ /**
+- * rpmh_invalidate: Invalidate all sleep and active sets
+- * sets.
++ * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
+  *
+  * @dev: The device making the request
+  *
+- * Invalidate the sleep and active values in the TCS blocks.
++ * Invalidate the sleep and wake values in batch_cache.
+  */
+ int rpmh_invalidate(const struct device *dev)
+ {
+       struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+-      int ret;
+-
+-      invalidate_batch(ctrlr);
++      struct batch_cache_req *req, *tmp;
++      unsigned long flags;
+-      do {
+-              ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
+-      } while (ret == -EAGAIN);
++      spin_lock_irqsave(&ctrlr->cache_lock, flags);
++      list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
++              kfree(req);
++      INIT_LIST_HEAD(&ctrlr->batch_cache);
++      ctrlr->dirty = true;
++      spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+-      return ret;
++      return 0;
+ }
+ EXPORT_SYMBOL(rpmh_invalidate);
diff --git a/queue-4.19/soc-qcom-rpmh-rsc-allow-using-free-wake-tcs-for-active-request.patch b/queue-4.19/soc-qcom-rpmh-rsc-allow-using-free-wake-tcs-for-active-request.patch
new file mode 100644
index 0000000..3decd67
--- /dev/null
+++ b/queue-4.19/soc-qcom-rpmh-rsc-allow-using-free-wake-tcs-for-active-request.patch
@@ -0,0 +1,80 @@
+From 38427e5a47bf83299da930bd474c6cb2632ad810 Mon Sep 17 00:00:00 2001
+From: Maulik Shah <mkshah@codeaurora.org>
+Date: Sun, 12 Apr 2020 20:20:04 +0530
+Subject: soc: qcom: rpmh-rsc: Allow using free WAKE TCS for active request
+
+From: Maulik Shah <mkshah@codeaurora.org>
+
+commit 38427e5a47bf83299da930bd474c6cb2632ad810 upstream.
+
+When more than one WAKE TCS is available and there is no dedicated
+ACTIVE TCS, invalidating all WAKE TCSes and waiting for the current
+transfer to complete in the first WAKE TCS blocks the use of another
+free WAKE TCS to complete the current request.
+
+Stop calling rpmh_rsc_invalidate() from tcs_write() when a WAKE TCS is
+re-purposed for active mode. Instead, clear only the register
+configuration of the WAKE TCS currently in use.
+
+Fixes: 2de4b8d33eab ("drivers: qcom: rpmh-rsc: allow active requests from wake TCS")
+Signed-off-by: Maulik Shah <mkshah@codeaurora.org>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Reviewed-by: Stephen Boyd <swboyd@chromium.org>
+Link: https://lore.kernel.org/r/1586703004-13674-7-git-send-email-mkshah@codeaurora.org
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/soc/qcom/rpmh-rsc.c |   23 +++++++++++------------
+ 1 file changed, 11 insertions(+), 12 deletions(-)
+
+--- a/drivers/soc/qcom/rpmh-rsc.c
++++ b/drivers/soc/qcom/rpmh-rsc.c
+@@ -148,7 +148,7 @@ int rpmh_rsc_invalidate(struct rsc_drv *
+ static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
+                                        const struct tcs_request *msg)
+ {
+-      int type, ret;
++      int type;
+       struct tcs_group *tcs;
+       switch (msg->state) {
+@@ -169,19 +169,10 @@ static struct tcs_group *get_tcs_for_msg
+        * If we are making an active request on a RSC that does not have a
+        * dedicated TCS for active state use, then re-purpose a wake TCS to
+        * send active votes.
+-       * NOTE: The driver must be aware that this RSC does not have a
+-       * dedicated AMC, and therefore would invalidate the sleep and wake
+-       * TCSes before making an active state request.
+        */
+       tcs = get_tcs_of_type(drv, type);
+-      if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs) {
++      if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
+               tcs = get_tcs_of_type(drv, WAKE_TCS);
+-              if (tcs->num_tcs) {
+-                      ret = rpmh_rsc_invalidate(drv);
+-                      if (ret)
+-                              return ERR_PTR(ret);
+-              }
+-      }
+       return tcs;
+ }
+@@ -406,8 +397,16 @@ static int tcs_write(struct rsc_drv *drv
+       tcs->req[tcs_id - tcs->offset] = msg;
+       set_bit(tcs_id, drv->tcs_in_use);
+-      if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS)
++      if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
++              /*
++               * Clear previously programmed WAKE commands in selected
++               * repurposed TCS to avoid triggering them. tcs->slots will be
++               * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
++               */
++              write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
++              write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
+               enable_tcs_irq(drv, tcs_id, true);
++      }
+       spin_unlock(&drv->lock);
+       __tcs_buffer_write(drv, tcs_id, 0, msg);
diff --git a/queue-4.19/soc-qcom-rpmh-rsc-clear-active-mode-configuration-for-wake-tcs.patch b/queue-4.19/soc-qcom-rpmh-rsc-clear-active-mode-configuration-for-wake-tcs.patch
new file mode 100644
index 0000000..3c34ecb
--- /dev/null
+++ b/queue-4.19/soc-qcom-rpmh-rsc-clear-active-mode-configuration-for-wake-tcs.patch
@@ -0,0 +1,148 @@
+From 15b3bf61b8d48f8e0ccd9d7f1bcb468b543da396 Mon Sep 17 00:00:00 2001
+From: "Raju P.L.S.S.S.N" <rplsssn@codeaurora.org>
+Date: Sun, 12 Apr 2020 20:20:03 +0530
+Subject: soc: qcom: rpmh-rsc: Clear active mode configuration for wake TCS
+
+From: Raju P.L.S.S.S.N <rplsssn@codeaurora.org>
+
+commit 15b3bf61b8d48f8e0ccd9d7f1bcb468b543da396 upstream.
+
+For RSCs that have sleep & wake TCSes but no dedicated active TCS, a wake
+TCS can be re-purposed to send active requests. Once the active requests
+are sent and the response is received, the active mode configuration needs
+to be cleared so that the controller can use the wake TCS for sending wake
+requests.
+
+Introduce enable_tcs_irq() to enable the completion IRQ for re-purposed TCSes.
+
+Fixes: 2de4b8d33eab ("drivers: qcom: rpmh-rsc: allow active requests from wake TCS")
+Signed-off-by: Raju P.L.S.S.S.N <rplsssn@codeaurora.org>
+[mkshah: call enable_tcs_irq() within drv->lock, update commit message]
+Signed-off-by: Maulik Shah <mkshah@codeaurora.org>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Reviewed-by: Stephen Boyd <swboyd@chromium.org>
+Link: https://lore.kernel.org/r/1586703004-13674-6-git-send-email-mkshah@codeaurora.org
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/soc/qcom/rpmh-rsc.c |   77 ++++++++++++++++++++++++++++++--------------
+ 1 file changed, 54 insertions(+), 23 deletions(-)
+
+--- a/drivers/soc/qcom/rpmh-rsc.c
++++ b/drivers/soc/qcom/rpmh-rsc.c
+@@ -201,6 +201,42 @@ static const struct tcs_request *get_req
+       return NULL;
+ }
++static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
++{
++      u32 enable;
++
++      /*
++       * HW req: Clear the DRV_CONTROL and enable TCS again
++       * While clearing ensure that the AMC mode trigger is cleared
++       * and then the mode enable is cleared.
++       */
++      enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0);
++      enable &= ~TCS_AMC_MODE_TRIGGER;
++      write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
++      enable &= ~TCS_AMC_MODE_ENABLE;
++      write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
++
++      if (trigger) {
++              /* Enable the AMC mode on the TCS and then trigger the TCS */
++              enable = TCS_AMC_MODE_ENABLE;
++              write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
++              enable |= TCS_AMC_MODE_TRIGGER;
++              write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
++      }
++}
++
++static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
++{
++      u32 data;
++
++      data = read_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, 0);
++      if (enable)
++              data |= BIT(tcs_id);
++      else
++              data &= ~BIT(tcs_id);
++      write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, data);
++}
++
+ /**
+  * tcs_tx_done: TX Done interrupt handler
+  */
+@@ -237,6 +273,14 @@ static irqreturn_t tcs_tx_done(int irq,
+               }
+               trace_rpmh_tx_done(drv, i, req, err);
++
++              /*
++               * If wake tcs was re-purposed for sending active
++               * votes, clear AMC trigger & enable modes and
++               * disable interrupt for this TCS
++               */
++              if (!drv->tcs[ACTIVE_TCS].num_tcs)
++                      __tcs_set_trigger(drv, i, false);
+ skip:
+               /* Reclaim the TCS */
+               write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
+@@ -244,6 +288,13 @@ skip:
+               write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i));
+               spin_lock(&drv->lock);
+               clear_bit(i, drv->tcs_in_use);
++              /*
++               * Disable interrupt for WAKE TCS to avoid being
++               * spammed with interrupts coming when the solver
++               * sends its wake votes.
++               */
++              if (!drv->tcs[ACTIVE_TCS].num_tcs)
++                      enable_tcs_irq(drv, i, false);
+               spin_unlock(&drv->lock);
+               if (req)
+                       rpmh_tx_done(req, err);
+@@ -285,28 +336,6 @@ static void __tcs_buffer_write(struct rs
+       write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
+ }
+-static void __tcs_trigger(struct rsc_drv *drv, int tcs_id)
+-{
+-      u32 enable;
+-
+-      /*
+-       * HW req: Clear the DRV_CONTROL and enable TCS again
+-       * While clearing ensure that the AMC mode trigger is cleared
+-       * and then the mode enable is cleared.
+-       */
+-      enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0);
+-      enable &= ~TCS_AMC_MODE_TRIGGER;
+-      write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+-      enable &= ~TCS_AMC_MODE_ENABLE;
+-      write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+-
+-      /* Enable the AMC mode on the TCS and then trigger the TCS */
+-      enable = TCS_AMC_MODE_ENABLE;
+-      write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+-      enable |= TCS_AMC_MODE_TRIGGER;
+-      write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+-}
+-
+ static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
+                                 const struct tcs_request *msg)
+ {
+@@ -377,10 +406,12 @@ static int tcs_write(struct rsc_drv *drv
+       tcs->req[tcs_id - tcs->offset] = msg;
+       set_bit(tcs_id, drv->tcs_in_use);
++      if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS)
++              enable_tcs_irq(drv, tcs_id, true);
+       spin_unlock(&drv->lock);
+       __tcs_buffer_write(drv, tcs_id, 0, msg);
+-      __tcs_trigger(drv, tcs_id);
++      __tcs_set_trigger(drv, tcs_id, true);
+ done_write:
+       spin_unlock_irqrestore(&tcs->lock, flags);
diff --git a/queue-4.19/soc-qcom-rpmh-update-dirty-flag-only-when-data-changes.patch b/queue-4.19/soc-qcom-rpmh-update-dirty-flag-only-when-data-changes.patch
new file mode 100644
index 0000000..3e3a275
--- /dev/null
+++ b/queue-4.19/soc-qcom-rpmh-update-dirty-flag-only-when-data-changes.patch
@@ -0,0 +1,101 @@
+From bb7000677a1b287206c8d4327c62442fa3050a8f Mon Sep 17 00:00:00 2001
+From: Maulik Shah <mkshah@codeaurora.org>
+Date: Sun, 12 Apr 2020 20:20:00 +0530
+Subject: soc: qcom: rpmh: Update dirty flag only when data changes
+
+From: Maulik Shah <mkshah@codeaurora.org>
+
+commit bb7000677a1b287206c8d4327c62442fa3050a8f upstream.
+
+Currently the rpmh ctrlr dirty flag is set in all cases, regardless of
+whether the data has actually changed or not. Update the dirty flag only
+when the data changes to new values. Update the dirty flag every time the
+data in the batch cache is updated, since rpmh_flush() may get invoked
+from any CPU instead of only the last CPU going to low power mode.
+
+Also move the dirty flag updates to happen within cache_lock, and remove
+the unnecessary INIT_LIST_HEAD() call and the default case from the switch.
+
+Fixes: 600513dfeef3 ("drivers: qcom: rpmh: cache sleep/wake state requests")
+Signed-off-by: Maulik Shah <mkshah@codeaurora.org>
+Reviewed-by: Srinivas Rao L <lsrao@codeaurora.org>
+Reviewed-by: Evan Green <evgreen@chromium.org>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Reviewed-by: Stephen Boyd <swboyd@chromium.org>
+Link: https://lore.kernel.org/r/1586703004-13674-3-git-send-email-mkshah@codeaurora.org
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/soc/qcom/rpmh.c |   19 +++++++++++--------
+ 1 file changed, 11 insertions(+), 8 deletions(-)
+
+--- a/drivers/soc/qcom/rpmh.c
++++ b/drivers/soc/qcom/rpmh.c
+@@ -119,6 +119,7 @@ static struct cache_req *cache_rpm_reque
+ {
+       struct cache_req *req;
+       unsigned long flags;
++      u32 old_sleep_val, old_wake_val;
+       spin_lock_irqsave(&ctrlr->cache_lock, flags);
+       req = __find_req(ctrlr, cmd->addr);
+@@ -133,26 +134,27 @@ static struct cache_req *cache_rpm_reque
+       req->addr = cmd->addr;
+       req->sleep_val = req->wake_val = UINT_MAX;
+-      INIT_LIST_HEAD(&req->list);
+       list_add_tail(&req->list, &ctrlr->cache);
+ existing:
++      old_sleep_val = req->sleep_val;
++      old_wake_val = req->wake_val;
++
+       switch (state) {
+       case RPMH_ACTIVE_ONLY_STATE:
+-              if (req->sleep_val != UINT_MAX)
+-                      req->wake_val = cmd->data;
+-              break;
+       case RPMH_WAKE_ONLY_STATE:
+               req->wake_val = cmd->data;
+               break;
+       case RPMH_SLEEP_STATE:
+               req->sleep_val = cmd->data;
+               break;
+-      default:
+-              break;
+       }
+-      ctrlr->dirty = true;
++      ctrlr->dirty = (req->sleep_val != old_sleep_val ||
++                      req->wake_val != old_wake_val) &&
++                      req->sleep_val != UINT_MAX &&
++                      req->wake_val != UINT_MAX;
++
+ unlock:
+       spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+@@ -288,6 +290,7 @@ static void cache_batch(struct rpmh_ctrl
+       spin_lock_irqsave(&ctrlr->cache_lock, flags);
+       list_add_tail(&req->list, &ctrlr->batch_cache);
++      ctrlr->dirty = true;
+       spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+ }
+@@ -324,6 +327,7 @@ static void invalidate_batch(struct rpmh
+       list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
+               kfree(req);
+       INIT_LIST_HEAD(&ctrlr->batch_cache);
++      ctrlr->dirty = true;
+       spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+ }
+@@ -510,7 +514,6 @@ int rpmh_invalidate(const struct device
+       int ret;
+       invalidate_batch(ctrlr);
+-      ctrlr->dirty = true;
+       do {
+               ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));