3.15-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 14 Jul 2014 19:29:45 +0000 (12:29 -0700)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 14 Jul 2014 19:29:45 +0000 (12:29 -0700)
added patches:
dma-cma-fix-possible-memory-leak.patch
ring-buffer-check-if-buffer-exists-before-polling.patch

queue-3.15/dma-cma-fix-possible-memory-leak.patch [new file with mode: 0644]
queue-3.15/ring-buffer-check-if-buffer-exists-before-polling.patch [new file with mode: 0644]
queue-3.15/series

diff --git a/queue-3.15/dma-cma-fix-possible-memory-leak.patch b/queue-3.15/dma-cma-fix-possible-memory-leak.patch
new file mode 100644 (file)
index 0000000..fac56c2
--- /dev/null
@@ -0,0 +1,64 @@
+From fe8eea4f4a3f299ef83ed090d5354698ebe4fda8 Mon Sep 17 00:00:00 2001
+From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Date: Mon, 23 Jun 2014 13:22:07 -0700
+Subject: DMA, CMA: fix possible memory leak
+
+From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+
+commit fe8eea4f4a3f299ef83ed090d5354698ebe4fda8 upstream.
+
+We should free the memory allocated for the bitmap when we find a zone
+mismatch; otherwise this memory will leak.
+
+Additionally, copy the code comment from PPC KVM's CMA code to explain
+why we need to check for a zone mismatch.
+
+Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Acked-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
+Reviewed-by: Michal Nazarewicz <mina86@mina86.com>
+Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Acked-by: Minchan Kim <minchan@kernel.org>
+Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+Cc: Marek Szyprowski <m.szyprowski@samsung.com>
+Cc: Michal Nazarewicz <mina86@mina86.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Gleb Natapov <gleb@kernel.org>
+Cc: Alexander Graf <agraf@suse.de>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/base/dma-contiguous.c |   12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/drivers/base/dma-contiguous.c
++++ b/drivers/base/dma-contiguous.c
+@@ -155,13 +155,23 @@ static int __init cma_activate_area(stru
+               base_pfn = pfn;
+               for (j = pageblock_nr_pages; j; --j, pfn++) {
+                       WARN_ON_ONCE(!pfn_valid(pfn));
++                      /*
++                       * alloc_contig_range requires the pfn range
++                       * specified to be in the same zone. Make this
++                       * simple by forcing the entire CMA resv range
++                       * to be in the same zone.
++                       */
+                       if (page_zone(pfn_to_page(pfn)) != zone)
+-                              return -EINVAL;
++                              goto err;
+               }
+               init_cma_reserved_pageblock(pfn_to_page(base_pfn));
+       } while (--i);
+       return 0;
++
++err:
++      kfree(cma->bitmap);
++      return -EINVAL;
+ }
+ static struct cma cma_areas[MAX_CMA_AREAS];
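
For illustration only, a minimal userspace sketch of the cleanup pattern this
patch introduces: once the bitmap has been allocated, every early-exit path
must free it rather than returning directly. The names below (struct area,
activate_area, page_zone_of) are hypothetical stand-ins, not the kernel's
actual CMA code.

/* Hypothetical userspace analog of the fix: any bail-out taken after the
 * bitmap allocation must release it, otherwise the allocation leaks. */
#include <stdio.h>
#include <stdlib.h>

struct area {
        unsigned long *bitmap;  /* allocated before the validation loop */
        int count;
        int zone;               /* expected zone id for every page */
};

/* stand-in for a per-page zone lookup (the kernel uses page_zone()) */
static int page_zone_of(int pfn) { return pfn < 90 ? 0 : 1; }

static int activate_area(struct area *a)
{
        a->bitmap = calloc((size_t)a->count, sizeof(unsigned long));
        if (!a->bitmap)
                return -1;

        for (int pfn = 0; pfn < a->count; pfn++) {
                if (page_zone_of(pfn) != a->zone)
                        goto err;       /* was: return -EINVAL, which leaked the bitmap */
        }
        return 0;

err:
        free(a->bitmap);                /* the fix: free the bitmap on the error path */
        a->bitmap = NULL;
        return -1;
}

int main(void)
{
        struct area a = { .bitmap = NULL, .count = 100, .zone = 0 };
        /* fails the zone check, but no longer leaks the bitmap */
        printf("activate_area: %d\n", activate_area(&a));
        return 0;
}
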
diff --git a/queue-3.15/ring-buffer-check-if-buffer-exists-before-polling.patch b/queue-3.15/ring-buffer-check-if-buffer-exists-before-polling.patch
new file mode 100644 (file)
index 0000000..26f1335
--- /dev/null
@@ -0,0 +1,176 @@
+From 8b8b36834d0fff67fc8668093f4312dd04dcf21d Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Tue, 10 Jun 2014 09:46:00 -0400
+Subject: ring-buffer: Check if buffer exists before polling
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit 8b8b36834d0fff67fc8668093f4312dd04dcf21d upstream.
+
+The per_cpu buffers are created one per possible CPU. But that does
+not mean that those CPUs are online, or even that they exist.
+
+With the addition of ring buffer polling, the code assumes that the
+caller polls on an existing buffer. That is not the case if the user
+reads trace_pipe for a CPU that does not exist, and this causes the
+kernel to crash.
+
+The simple fix is to check the cpu against the buffer bitmask to see
+whether the buffer was allocated, and return -ENODEV if it was not.
+
+More updates were done to pass the -ENODEV back up to userspace.
+
+Link: http://lkml.kernel.org/r/5393DB61.6060707@oracle.com
+
+Reported-by: Sasha Levin <sasha.levin@oracle.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/ring_buffer.h |    2 +-
+ kernel/trace/ring_buffer.c  |    5 ++++-
+ kernel/trace/trace.c        |   25 ++++++++++++++++++-------
+ kernel/trace/trace.h        |    4 ++--
+ 4 files changed, 25 insertions(+), 11 deletions(-)
+
+--- a/include/linux/ring_buffer.h
++++ b/include/linux/ring_buffer.h
+@@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size,
+       __ring_buffer_alloc((size), (flags), &__key);   \
+ })
+-void ring_buffer_wait(struct ring_buffer *buffer, int cpu);
++int ring_buffer_wait(struct ring_buffer *buffer, int cpu);
+ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+                         struct file *filp, poll_table *poll_table);
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -543,7 +543,7 @@ static void rb_wake_up_waiters(struct ir
+  * as data is added to any of the @buffer's cpu buffers. Otherwise
+  * it will wait for data to be added to a specific cpu buffer.
+  */
+-void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
++int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+ {
+       struct ring_buffer_per_cpu *cpu_buffer;
+       DEFINE_WAIT(wait);
+@@ -557,6 +557,8 @@ void ring_buffer_wait(struct ring_buffer
+       if (cpu == RING_BUFFER_ALL_CPUS)
+               work = &buffer->irq_work;
+       else {
++              if (!cpumask_test_cpu(cpu, buffer->cpumask))
++                      return -ENODEV;
+               cpu_buffer = buffer->buffers[cpu];
+               work = &cpu_buffer->irq_work;
+       }
+@@ -591,6 +593,7 @@ void ring_buffer_wait(struct ring_buffer
+               schedule();
+       finish_wait(&work->waiters, &wait);
++      return 0;
+ }
+ /**
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1103,13 +1103,13 @@ update_max_tr_single(struct trace_array
+ }
+ #endif /* CONFIG_TRACER_MAX_TRACE */
+-static void default_wait_pipe(struct trace_iterator *iter)
++static int default_wait_pipe(struct trace_iterator *iter)
+ {
+       /* Iterators are static, they should be filled or empty */
+       if (trace_buffer_iter(iter, iter->cpu_file))
+-              return;
++              return 0;
+-      ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
++      return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
+ }
+ #ifdef CONFIG_FTRACE_STARTUP_TEST
+@@ -4236,17 +4236,19 @@ tracing_poll_pipe(struct file *filp, pol
+  *
+  *     Anyway, this is really very primitive wakeup.
+  */
+-void poll_wait_pipe(struct trace_iterator *iter)
++int poll_wait_pipe(struct trace_iterator *iter)
+ {
+       set_current_state(TASK_INTERRUPTIBLE);
+       /* sleep for 100 msecs, and try again. */
+       schedule_timeout(HZ / 10);
++      return 0;
+ }
+ /* Must be called with trace_types_lock mutex held. */
+ static int tracing_wait_pipe(struct file *filp)
+ {
+       struct trace_iterator *iter = filp->private_data;
++      int ret;
+       while (trace_empty(iter)) {
+@@ -4256,10 +4258,13 @@ static int tracing_wait_pipe(struct file
+               mutex_unlock(&iter->mutex);
+-              iter->trace->wait_pipe(iter);
++              ret = iter->trace->wait_pipe(iter);
+               mutex_lock(&iter->mutex);
++              if (ret)
++                      return ret;
++
+               if (signal_pending(current))
+                       return -EINTR;
+@@ -5196,8 +5201,12 @@ tracing_buffers_read(struct file *filp,
+                               goto out_unlock;
+                       }
+                       mutex_unlock(&trace_types_lock);
+-                      iter->trace->wait_pipe(iter);
++                      ret = iter->trace->wait_pipe(iter);
+                       mutex_lock(&trace_types_lock);
++                      if (ret) {
++                              size = ret;
++                              goto out_unlock;
++                      }
+                       if (signal_pending(current)) {
+                               size = -EINTR;
+                               goto out_unlock;
+@@ -5407,8 +5416,10 @@ tracing_buffers_splice_read(struct file
+                       goto out;
+               }
+               mutex_unlock(&trace_types_lock);
+-              iter->trace->wait_pipe(iter);
++              ret = iter->trace->wait_pipe(iter);
+               mutex_lock(&trace_types_lock);
++              if (ret)
++                      goto out;
+               if (signal_pending(current)) {
+                       ret = -EINTR;
+                       goto out;
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -342,7 +342,7 @@ struct tracer {
+       void                    (*stop)(struct trace_array *tr);
+       void                    (*open)(struct trace_iterator *iter);
+       void                    (*pipe_open)(struct trace_iterator *iter);
+-      void                    (*wait_pipe)(struct trace_iterator *iter);
++      int                     (*wait_pipe)(struct trace_iterator *iter);
+       void                    (*close)(struct trace_iterator *iter);
+       void                    (*pipe_close)(struct trace_iterator *iter);
+       ssize_t                 (*read)(struct trace_iterator *iter,
+@@ -560,7 +560,7 @@ void trace_init_global_iter(struct trace
+ void tracing_iter_reset(struct trace_iterator *iter, int cpu);
+-void poll_wait_pipe(struct trace_iterator *iter);
++int poll_wait_pipe(struct trace_iterator *iter);
+ void tracing_sched_switch_trace(struct trace_array *tr,
+                               struct task_struct *prev,
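
As a rough illustration of the shape of this change, the sketch below is a
hypothetical userspace analog, not the kernel's ring-buffer API: the wait
helper returns -ENODEV when the requested CPU has no buffer, and the caller
propagates that error upward instead of ignoring a void return.

/* Hypothetical analog: validate the CPU before touching its buffer and
 * let every caller see the resulting error code. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct ring_buffer_sim {
        bool cpu_present[NR_CPUS];      /* stands in for buffer->cpumask */
};

/* before the fix this returned void and dereferenced a missing buffer */
static int buffer_wait(struct ring_buffer_sim *b, int cpu)
{
        if (cpu < 0 || cpu >= NR_CPUS || !b->cpu_present[cpu])
                return -ENODEV;         /* no buffer was allocated for this CPU */
        /* ... would block here until data is added to that CPU's buffer ... */
        return 0;
}

static int wait_pipe(struct ring_buffer_sim *b, int cpu)
{
        int ret = buffer_wait(b, cpu);
        if (ret)
                return ret;             /* propagate -ENODEV up to the reader */
        return 0;
}

int main(void)
{
        struct ring_buffer_sim b = { .cpu_present = { true, true, false, false } };
        printf("cpu 1: %d\n", wait_pipe(&b, 1));   /* 0 */
        printf("cpu 3: %d\n", wait_pipe(&b, 3));   /* -ENODEV */
        return 0;
}
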
diff --git a/queue-3.15/series b/queue-3.15/series
index 01e6ded304138a2ca65804bf7ebdc51539320f7c..5c08e9304139b1960dd68a46b1a1958ca928b4b5 100644 (file)
@@ -62,3 +62,5 @@ drm-i915-acer-c720-and-c720p-have-controllable-backlights.patch
 drm-i915-toshiba-cb35-has-a-controllable-backlight.patch
 drm-i915-don-t-clobber-the-gtt-when-it-s-within-stolen-memory.patch
 x86-64-espfix-don-t-leak-bits-31-16-of-esp-returning-to-16-bit-stack.patch
+dma-cma-fix-possible-memory-leak.patch
+ring-buffer-check-if-buffer-exists-before-polling.patch