]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.10-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 31 Jul 2015 00:08:58 +0000 (17:08 -0700)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 31 Jul 2015 00:08:58 +0000 (17:08 -0700)
added patches:
dmaengine-mv_xor-bug-fix-for-racing-condition-in-descriptors-cleanup.patch
hwmon-mcp3021-fix-broken-output-scaling.patch
tracing-filter-do-not-allow-infix-to-exceed-end-of-string.patch
tracing-filter-do-not-warn-on-operand-count-going-below-zero.patch
tracing-have-branch-tracer-use-recursive-field-of-task-struct.patch

queue-3.10/dmaengine-mv_xor-bug-fix-for-racing-condition-in-descriptors-cleanup.patch [new file with mode: 0644]
queue-3.10/hwmon-mcp3021-fix-broken-output-scaling.patch [new file with mode: 0644]
queue-3.10/series
queue-3.10/tracing-filter-do-not-allow-infix-to-exceed-end-of-string.patch [new file with mode: 0644]
queue-3.10/tracing-filter-do-not-warn-on-operand-count-going-below-zero.patch [new file with mode: 0644]
queue-3.10/tracing-have-branch-tracer-use-recursive-field-of-task-struct.patch [new file with mode: 0644]

diff --git a/queue-3.10/dmaengine-mv_xor-bug-fix-for-racing-condition-in-descriptors-cleanup.patch b/queue-3.10/dmaengine-mv_xor-bug-fix-for-racing-condition-in-descriptors-cleanup.patch
new file mode 100644 (file)
index 0000000..e86c624
--- /dev/null
@@ -0,0 +1,134 @@
+From 9136291f1dbc1d4d1cacd2840fb35f4f3ce16c46 Mon Sep 17 00:00:00 2001
+From: Lior Amsalem <alior@marvell.com>
+Date: Tue, 26 May 2015 15:07:32 +0200
+Subject: dmaengine: mv_xor: bug fix for racing condition in descriptors cleanup
+
+From: Lior Amsalem <alior@marvell.com>
+
+commit 9136291f1dbc1d4d1cacd2840fb35f4f3ce16c46 upstream.
+
+This patch fixes a bug in the XOR driver where the cleanup function can be
+called and free descriptors that have never been processed by the engine
+(which results in data errors).
+
+The cleanup function will free descriptors based on the ownership bit in
+the descriptors.
+
+Fixes: ff7b04796d98 ("dmaengine: DMA engine driver for Marvell XOR engine")
+Signed-off-by: Lior Amsalem <alior@marvell.com>
+Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com>
+Reviewed-by: Ofer Heifetz <oferh@marvell.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/mv_xor.c |   74 ++++++++++++++++++++++++++++++++-------------------
+ drivers/dma/mv_xor.h |    1 
+ 2 files changed, 48 insertions(+), 27 deletions(-)
+
+--- a/drivers/dma/mv_xor.c
++++ b/drivers/dma/mv_xor.c
+@@ -393,7 +393,8 @@ static void __mv_xor_slot_cleanup(struct
+       dma_cookie_t cookie = 0;
+       int busy = mv_chan_is_busy(mv_chan);
+       u32 current_desc = mv_chan_get_current_desc(mv_chan);
+-      int seen_current = 0;
++      int current_cleaned = 0;
++      struct mv_xor_desc *hw_desc;
+       dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
+       dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
+@@ -405,38 +406,57 @@ static void __mv_xor_slot_cleanup(struct
+       list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
+                                       chain_node) {
+-              prefetch(_iter);
+-              prefetch(&_iter->async_tx);
+-              /* do not advance past the current descriptor loaded into the
+-               * hardware channel, subsequent descriptors are either in
+-               * process or have not been submitted
+-               */
+-              if (seen_current)
+-                      break;
+-
+-              /* stop the search if we reach the current descriptor and the
+-               * channel is busy
+-               */
+-              if (iter->async_tx.phys == current_desc) {
+-                      seen_current = 1;
+-                      if (busy)
++              /* clean finished descriptors */
++              hw_desc = iter->hw_desc;
++              if (hw_desc->status & XOR_DESC_SUCCESS) {
++                      cookie = mv_xor_run_tx_complete_actions(iter, mv_chan,
++                                                              cookie);
++
++                      /* done processing desc, clean slot */
++                      mv_xor_clean_slot(iter, mv_chan);
++
++                      /* break if we did cleaned the current */
++                      if (iter->async_tx.phys == current_desc) {
++                              current_cleaned = 1;
+                               break;
++                      }
++              } else {
++                      if (iter->async_tx.phys == current_desc) {
++                              current_cleaned = 0;
++                              break;
++                      }
+               }
+-
+-              cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
+-
+-              if (mv_xor_clean_slot(iter, mv_chan))
+-                      break;
+       }
+       if ((busy == 0) && !list_empty(&mv_chan->chain)) {
+-              struct mv_xor_desc_slot *chain_head;
+-              chain_head = list_entry(mv_chan->chain.next,
+-                                      struct mv_xor_desc_slot,
+-                                      chain_node);
+-
+-              mv_xor_start_new_chain(mv_chan, chain_head);
++              if (current_cleaned) {
++                      /*
++                       * current descriptor cleaned and removed, run
++                       * from list head
++                       */
++                      iter = list_entry(mv_chan->chain.next,
++                                        struct mv_xor_desc_slot,
++                                        chain_node);
++                      mv_xor_start_new_chain(mv_chan, iter);
++              } else {
++                      if (!list_is_last(&iter->chain_node, &mv_chan->chain)) {
++                              /*
++                               * descriptors are still waiting after
++                               * current, trigger them
++                               */
++                              iter = list_entry(iter->chain_node.next,
++                                                struct mv_xor_desc_slot,
++                                                chain_node);
++                              mv_xor_start_new_chain(mv_chan, iter);
++                      } else {
++                              /*
++                               * some descriptors are still waiting
++                               * to be cleaned
++                               */
++                              tasklet_schedule(&mv_chan->irq_tasklet);
++                      }
++              }
+       }
+       if (cookie > 0)
+--- a/drivers/dma/mv_xor.h
++++ b/drivers/dma/mv_xor.h
+@@ -32,6 +32,7 @@
+ #define XOR_OPERATION_MODE_XOR                0
+ #define XOR_OPERATION_MODE_MEMCPY     2
+ #define XOR_OPERATION_MODE_MEMSET     4
++#define XOR_DESC_SUCCESS              0x40000000
+ #define XOR_CURR_DESC(chan)   (chan->mmr_base + 0x210 + (chan->idx * 4))
+ #define XOR_NEXT_DESC(chan)   (chan->mmr_base + 0x200 + (chan->idx * 4))
diff --git a/queue-3.10/hwmon-mcp3021-fix-broken-output-scaling.patch b/queue-3.10/hwmon-mcp3021-fix-broken-output-scaling.patch
new file mode 100644 (file)
index 0000000..0f212fb
--- /dev/null
@@ -0,0 +1,83 @@
+From 347d7e45bd09ce09cbc30d5cea9de377eb22f55c Mon Sep 17 00:00:00 2001
+From: "Stevens, Nick" <Nick.Stevens@digi.com>
+Date: Wed, 1 Jul 2015 16:07:41 +0000
+Subject: hwmon: (mcp3021) Fix broken output scaling
+
+From: "Stevens, Nick" <Nick.Stevens@digi.com>
+
+commit 347d7e45bd09ce09cbc30d5cea9de377eb22f55c upstream.
+
+The mcp3021 scaling code is dividing the VDD (full-scale) value in
+millivolts by the A2D resolution to obtain the scaling factor. When VDD
+is 3300mV (the standard value) and the resolution is 12-bit (4096
+divisions), the result is a scale factor of 3300/4096, which is always
+one.  Effectively, the raw A2D reading is always being returned because
+no scaling is applied.
+
+This patch fixes the issue and simplifies the register-to-volts
+calculation, removing the unneeded "output_scale" struct member.
+
+Signed-off-by: Nick Stevens <Nick.Stevens@digi.com>
+[Guenter Roeck: Dropped unnecessary value check]
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hwmon/mcp3021.c |   14 +-------------
+ 1 file changed, 1 insertion(+), 13 deletions(-)
+
+--- a/drivers/hwmon/mcp3021.c
++++ b/drivers/hwmon/mcp3021.c
+@@ -31,14 +31,11 @@
+ /* output format */
+ #define MCP3021_SAR_SHIFT     2
+ #define MCP3021_SAR_MASK      0x3ff
+-
+ #define MCP3021_OUTPUT_RES    10      /* 10-bit resolution */
+-#define MCP3021_OUTPUT_SCALE  4
+ #define MCP3221_SAR_SHIFT     0
+ #define MCP3221_SAR_MASK      0xfff
+ #define MCP3221_OUTPUT_RES    12      /* 12-bit resolution */
+-#define MCP3221_OUTPUT_SCALE  1
+ enum chips {
+       mcp3021,
+@@ -54,7 +51,6 @@ struct mcp3021_data {
+       u16 sar_shift;
+       u16 sar_mask;
+       u8 output_res;
+-      u8 output_scale;
+ };
+ static int mcp3021_read16(struct i2c_client *client)
+@@ -84,13 +80,7 @@ static int mcp3021_read16(struct i2c_cli
+ static inline u16 volts_from_reg(struct mcp3021_data *data, u16 val)
+ {
+-      if (val == 0)
+-              return 0;
+-
+-      val = val * data->output_scale - data->output_scale / 2;
+-
+-      return val * DIV_ROUND_CLOSEST(data->vdd,
+-                      (1 << data->output_res) * data->output_scale);
++      return DIV_ROUND_CLOSEST(data->vdd * val, 1 << data->output_res);
+ }
+ static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
+@@ -132,14 +122,12 @@ static int mcp3021_probe(struct i2c_clie
+               data->sar_shift = MCP3021_SAR_SHIFT;
+               data->sar_mask = MCP3021_SAR_MASK;
+               data->output_res = MCP3021_OUTPUT_RES;
+-              data->output_scale = MCP3021_OUTPUT_SCALE;
+               break;
+       case mcp3221:
+               data->sar_shift = MCP3221_SAR_SHIFT;
+               data->sar_mask = MCP3221_SAR_MASK;
+               data->output_res = MCP3221_OUTPUT_RES;
+-              data->output_scale = MCP3221_OUTPUT_SCALE;
+               break;
+       }
index ad65ecd4638cdf5c03a280671ef9fc99b8e69011..17a3ef3f601a937d2c27b55209bed7de04f47955 100644 (file)
@@ -57,3 +57,8 @@ xfs-fix-remote-symlinks-on-v5-crc-filesystems.patch
 vtpm-set-virtual-device-before-passing-to-ibmvtpm_reset_crq.patch
 libata-add-ata_horkage_notrim.patch
 libata-force-disable-trim-for-supersspeed-s238.patch
+tracing-filter-do-not-warn-on-operand-count-going-below-zero.patch
+tracing-filter-do-not-allow-infix-to-exceed-end-of-string.patch
+tracing-have-branch-tracer-use-recursive-field-of-task-struct.patch
+dmaengine-mv_xor-bug-fix-for-racing-condition-in-descriptors-cleanup.patch
+hwmon-mcp3021-fix-broken-output-scaling.patch
diff --git a/queue-3.10/tracing-filter-do-not-allow-infix-to-exceed-end-of-string.patch b/queue-3.10/tracing-filter-do-not-allow-infix-to-exceed-end-of-string.patch
new file mode 100644 (file)
index 0000000..951ce4c
--- /dev/null
@@ -0,0 +1,61 @@
+From 6b88f44e161b9ee2a803e5b2b1fbcf4e20e8b980 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Thu, 25 Jun 2015 18:10:09 -0400
+Subject: tracing/filter: Do not allow infix to exceed end of string
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit 6b88f44e161b9ee2a803e5b2b1fbcf4e20e8b980 upstream.
+
+While debugging a WARN_ON() for filtering, I found that it is possible
+for the filter string to be referenced after its end. With the filter:
+
+ # echo '>' > /sys/kernel/debug/events/ext4/ext4_truncate_exit/filter
+
+The filter_parse() function can call infix_get_op() which calls
+infix_advance() that updates the infix filter pointers for the cnt
+and tail without checking if the filter is already at the end, which
+will put the cnt to zero and the tail beyond the end. The loop then calls
+infix_next() that has
+
+       ps->infix.cnt--;
+       return ps->infix.string[ps->infix.tail++];
+
+The cnt will now be below zero, and the tail that is returned is
+already past the end of the filter string. So far the allocation
+of the filter string usually has some buffer that is zeroed out, but
+if the filter string is of the exact size of the allocated buffer
+there's no guarantee that the character after the nul terminating
+character will be zero.
+
+Luckily, only root can write to the filter.
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_events_filter.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -1015,6 +1015,9 @@ static void parse_init(struct filter_par
+ static char infix_next(struct filter_parse_state *ps)
+ {
++      if (!ps->infix.cnt)
++              return 0;
++
+       ps->infix.cnt--;
+       return ps->infix.string[ps->infix.tail++];
+@@ -1030,6 +1033,9 @@ static char infix_peek(struct filter_par
+ static void infix_advance(struct filter_parse_state *ps)
+ {
++      if (!ps->infix.cnt)
++              return;
++
+       ps->infix.cnt--;
+       ps->infix.tail++;
+ }
diff --git a/queue-3.10/tracing-filter-do-not-warn-on-operand-count-going-below-zero.patch b/queue-3.10/tracing-filter-do-not-warn-on-operand-count-going-below-zero.patch
new file mode 100644 (file)
index 0000000..4c9faf5
--- /dev/null
@@ -0,0 +1,46 @@
+From b4875bbe7e68f139bd3383828ae8e994a0df6d28 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Thu, 25 Jun 2015 18:02:29 -0400
+Subject: tracing/filter: Do not WARN on operand count going below zero
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit b4875bbe7e68f139bd3383828ae8e994a0df6d28 upstream.
+
+When testing the fix for the trace filter, I could not come up with
+a scenario where the operand count goes below zero, so I added a
+WARN_ON_ONCE(cnt < 0) to the logic. But there is legitimate case
+that it can happen (although the filter would be wrong).
+
+ # echo '>' > /sys/kernel/debug/events/ext4/ext4_truncate_exit/filter
+
+That is, a single operation without any operands will hit the path
+where the WARN_ON_ONCE() can trigger. Although this is harmless,
+and the filter is reported as an error. But instead of spitting out
+a warning to the kernel dmesg, just fail nicely and report it via
+the proper channels.
+
+Link: http://lkml.kernel.org/r/558C6082.90608@oracle.com
+
+Reported-by: Vince Weaver <vincent.weaver@maine.edu>
+Reported-by: Sasha Levin <sasha.levin@oracle.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_events_filter.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -1342,7 +1342,9 @@ static int check_preds(struct filter_par
+                       continue;
+               }
+               n_normal_preds++;
+-              WARN_ON_ONCE(cnt < 0);
++              /* all ops should have operands */
++              if (cnt < 0)
++                      break;
+       }
+       if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
diff --git a/queue-3.10/tracing-have-branch-tracer-use-recursive-field-of-task-struct.patch b/queue-3.10/tracing-have-branch-tracer-use-recursive-field-of-task-struct.patch
new file mode 100644 (file)
index 0000000..3de0619
--- /dev/null
@@ -0,0 +1,94 @@
+From 6224beb12e190ff11f3c7d4bf50cb2922878f600 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Tue, 7 Jul 2015 15:05:03 -0400
+Subject: tracing: Have branch tracer use recursive field of task struct
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit 6224beb12e190ff11f3c7d4bf50cb2922878f600 upstream.
+
+Fengguang Wu's tests triggered a bug in the branch tracer's start up
+test when CONFIG_DEBUG_PREEMPT set. This was because that config
+adds some debug logic in the per cpu field, which calls back into
+the branch tracer.
+
+The branch tracer has its own recursive checks, but uses a per cpu
+variable to implement it. If retrieving the per cpu variable calls
+back into the branch tracer, you can see how things will break.
+
+Instead of using a per cpu variable, use the trace_recursion field
+of the current task struct. Simply set a bit when entering the
+branch tracing and clear it when leaving. If the bit is set on
+entry, just don't do the tracing.
+
+There's also the case with lockdep, as the local_irq_save() called
+before the recursion can also trigger code that can call back into
+the function. Changing that to a raw_local_irq_save() will protect
+that as well.
+
+This prevents the recursion and the inevitable crash that follows.
+
+Link: http://lkml.kernel.org/r/20150630141803.GA28071@wfg-t540p.sh.intel.com
+
+Reported-by: Fengguang Wu <fengguang.wu@intel.com>
+Tested-by: Fengguang Wu <fengguang.wu@intel.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.h        |    1 +
+ kernel/trace/trace_branch.c |   17 ++++++++++-------
+ 2 files changed, 11 insertions(+), 7 deletions(-)
+
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -428,6 +428,7 @@ enum {
+       TRACE_CONTROL_BIT,
++      TRACE_BRANCH_BIT,
+ /*
+  * Abuse of the trace_recursion.
+  * As we need a way to maintain state if we are tracing the function
+--- a/kernel/trace/trace_branch.c
++++ b/kernel/trace/trace_branch.c
+@@ -37,9 +37,12 @@ probe_likely_condition(struct ftrace_bra
+       struct trace_branch *entry;
+       struct ring_buffer *buffer;
+       unsigned long flags;
+-      int cpu, pc;
++      int pc;
+       const char *p;
++      if (current->trace_recursion & TRACE_BRANCH_BIT)
++              return;
++
+       /*
+        * I would love to save just the ftrace_likely_data pointer, but
+        * this code can also be used by modules. Ugly things can happen
+@@ -50,10 +53,10 @@ probe_likely_condition(struct ftrace_bra
+       if (unlikely(!tr))
+               return;
+-      local_irq_save(flags);
+-      cpu = raw_smp_processor_id();
+-      data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+-      if (atomic_inc_return(&data->disabled) != 1)
++      raw_local_irq_save(flags);
++      current->trace_recursion |= TRACE_BRANCH_BIT;
++      data = this_cpu_ptr(tr->trace_buffer.data);
++      if (atomic_read(&data->disabled))
+               goto out;
+       pc = preempt_count();
+@@ -82,8 +85,8 @@ probe_likely_condition(struct ftrace_bra
+               __buffer_unlock_commit(buffer, event);
+  out:
+-      atomic_dec(&data->disabled);
+-      local_irq_restore(flags);
++      current->trace_recursion &= ~TRACE_BRANCH_BIT;
++      raw_local_irq_restore(flags);
+ }
+ static inline