From: Greg Kroah-Hartman
Date: Fri, 31 Aug 2012 17:36:54 +0000 (-0700)
Subject: 3.5-stable patches
X-Git-Tag: v3.5.4~26
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=c7e0dc9c9783172976a1313cb452878f514413f6;p=thirdparty%2Fkernel%2Fstable-queue.git

3.5-stable patches

added patches:
    alsa-usb-audio-fix-scheduling-while-atomic-bug-in-pcm-capture-stream.patch
    sched-cgroup-fix-up-task_groups-list.patch
    sched-fix-divide-by-zero-at-thread_group-task-_times.patch
    uvcvideo-reset-the-bytesused-field-when-recycling-an-erroneous-buffer.patch
---

diff --git a/queue-3.5/alsa-usb-audio-fix-scheduling-while-atomic-bug-in-pcm-capture-stream.patch b/queue-3.5/alsa-usb-audio-fix-scheduling-while-atomic-bug-in-pcm-capture-stream.patch
new file mode 100644
index 00000000000..32c824f59d4
--- /dev/null
+++ b/queue-3.5/alsa-usb-audio-fix-scheduling-while-atomic-bug-in-pcm-capture-stream.patch
@@ -0,0 +1,53 @@
+From e9ba389c5ffc4dd29dfe17e00e48877302111135 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai
+Date: Wed, 15 Aug 2012 12:32:00 +0200
+Subject: ALSA: usb-audio: Fix scheduling-while-atomic bug in PCM capture stream
+
+From: Takashi Iwai
+
+commit e9ba389c5ffc4dd29dfe17e00e48877302111135 upstream.
+
+A PCM capture stream on usb-audio causes a scheduling-while-atomic
+BUG, as reported in the bugzilla entry below.  It's because
+snd_usb_endpoint_start() is called at first at trigger START for a
+capture stream, and this function contains the left-over EP
+deactivation codes.  The problem doesn't happen for a playback stream
+because the function is called at PCM prepare time, which can sleep.
+
+This patch fixes the BUG by moving the EP deactivation code into the
+PCM prepare callback.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=46011
+Signed-off-by: Takashi Iwai
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ sound/usb/endpoint.c |    4 ----
+ sound/usb/pcm.c      |    3 +++
+ 2 files changed, 3 insertions(+), 4 deletions(-)
+
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -821,10 +821,6 @@ int snd_usb_endpoint_start(struct snd_us
+         if (++ep->use_count != 1)
+                 return 0;
+
+-        /* just to be sure */
+-        deactivate_urbs(ep, 0, 1);
+-        wait_clear_urbs(ep);
+-
+         ep->active_mask = 0;
+         ep->unlink_mask = 0;
+         ep->phase = 0;
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -544,6 +544,9 @@ static int snd_usb_pcm_prepare(struct sn
+         subs->last_frame_number = 0;
+         runtime->delay = 0;
+
++        /* clear the pending deactivation on the target EPs */
++        deactivate_endpoints(subs);
++
+         /* for playback, submit the URBs now; otherwise, the first hwptr_done
+          * updates for all URBs would happen at the same time when starting */
+         if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK)
diff --git a/queue-3.5/sched-cgroup-fix-up-task_groups-list.patch b/queue-3.5/sched-cgroup-fix-up-task_groups-list.patch
new file mode 100644
index 00000000000..971ba033351
--- /dev/null
+++ b/queue-3.5/sched-cgroup-fix-up-task_groups-list.patch
@@ -0,0 +1,47 @@
+From 35cf4e50b16331def6cfcbee11e49270b6db07f5 Mon Sep 17 00:00:00 2001
+From: Mike Galbraith
+Date: Tue, 7 Aug 2012 05:00:13 +0200
+Subject: sched,cgroup: Fix up task_groups list
+
+From: Mike Galbraith
+
+commit 35cf4e50b16331def6cfcbee11e49270b6db07f5 upstream.
+
+With multiple instances of task_groups, for_each_rt_rq() is a noop,
+no task groups having been added to the rt.c list instance.  This
+renders __enable/disable_runtime() and print_rt_stats() noop, the
+user (non) visible effect being that rt task groups are missing in
+/proc/sched_debug.
+
+Signed-off-by: Mike Galbraith
+Signed-off-by: Peter Zijlstra
+Link: http://lkml.kernel.org/r/1344308413.6846.7.camel@marge.simpson.net
+Signed-off-by: Thomas Gleixner
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ kernel/sched/core.c  |    1 +
+ kernel/sched/sched.h |    2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7179,6 +7179,7 @@ int in_sched_functions(unsigned long add
+
+ #ifdef CONFIG_CGROUP_SCHED
+ struct task_group root_task_group;
++LIST_HEAD(task_groups);
+ #endif
+
+ DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -80,7 +80,7 @@ extern struct mutex sched_domains_mutex;
+ struct cfs_rq;
+ struct rt_rq;
+
+-static LIST_HEAD(task_groups);
++extern struct list_head task_groups;
+
+ struct cfs_bandwidth {
+ #ifdef CONFIG_CFS_BANDWIDTH
diff --git a/queue-3.5/sched-fix-divide-by-zero-at-thread_group-task-_times.patch b/queue-3.5/sched-fix-divide-by-zero-at-thread_group-task-_times.patch
new file mode 100644
index 00000000000..6510760222b
--- /dev/null
+++ b/queue-3.5/sched-fix-divide-by-zero-at-thread_group-task-_times.patch
@@ -0,0 +1,111 @@
+From bea6832cc8c4a0a9a65dd17da6aaa657fe27bc3e Mon Sep 17 00:00:00 2001
+From: Stanislaw Gruszka
+Date: Wed, 8 Aug 2012 11:27:15 +0200
+Subject: sched: fix divide by zero at {thread_group,task}_times
+
+From: Stanislaw Gruszka
+
+commit bea6832cc8c4a0a9a65dd17da6aaa657fe27bc3e upstream.
+
+On architectures where cputime_t is 64 bit type, is possible to trigger
+divide by zero on do_div(temp, (__force u32) total) line, if total is a
+non zero number but has lower 32 bit's zeroed. Removing casting is not
+a good solution since some do_div() implementations do cast to u32
+internally.
+
+This problem can be triggered in practice on very long lived processes:
+
+  PID: 2331   TASK: ffff880472814b00  CPU: 2   COMMAND: "oraagent.bin"
+   #0 [ffff880472a51b70] machine_kexec at ffffffff8103214b
+   #1 [ffff880472a51bd0] crash_kexec at ffffffff810b91c2
+   #2 [ffff880472a51ca0] oops_end at ffffffff814f0b00
+   #3 [ffff880472a51cd0] die at ffffffff8100f26b
+   #4 [ffff880472a51d00] do_trap at ffffffff814f03f4
+   #5 [ffff880472a51d60] do_divide_error at ffffffff8100cfff
+   #6 [ffff880472a51e00] divide_error at ffffffff8100be7b
+      [exception RIP: thread_group_times+0x56]
+      RIP: ffffffff81056a16  RSP: ffff880472a51eb8  RFLAGS: 00010046
+      RAX: bc3572c9fe12d194  RBX: ffff880874150800  RCX: 0000000110266fad
+      RDX: 0000000000000000  RSI: ffff880472a51eb8  RDI: 001038ae7d9633dc
+      RBP: ffff880472a51ef8   R8: 00000000b10a3a64   R9: ffff880874150800
+      R10: 00007fcba27ab680  R11: 0000000000000202  R12: ffff880472a51f08
+      R13: ffff880472a51f10  R14: 0000000000000000  R15: 0000000000000007
+      ORIG_RAX: ffffffffffffffff  CS: 0010  SS: 0018
+   #7 [ffff880472a51f00] do_sys_times at ffffffff8108845d
+   #8 [ffff880472a51f40] sys_times at ffffffff81088524
+   #9 [ffff880472a51f80] system_call_fastpath at ffffffff8100b0f2
+      RIP: 0000003808caac3a  RSP: 00007fcba27ab6d8  RFLAGS: 00000202
+      RAX: 0000000000000064  RBX: ffffffff8100b0f2  RCX: 0000000000000000
+      RDX: 00007fcba27ab6e0  RSI: 000000000076d58e  RDI: 00007fcba27ab6e0
+      RBP: 00007fcba27ab700   R8: 0000000000000020   R9: 000000000000091b
+      R10: 00007fcba27ab680  R11: 0000000000000202  R12: 00007fff9ca41940
+      R13: 0000000000000000  R14: 00007fcba27ac9c0  R15: 00007fff9ca41940
+      ORIG_RAX: 0000000000000064  CS: 0033  SS: 002b
+
+Signed-off-by: Stanislaw Gruszka
+Signed-off-by: Peter Zijlstra
+Link: http://lkml.kernel.org/r/20120808092714.GA3580@redhat.com
+Signed-off-by: Thomas Gleixner
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ kernel/sched/core.c |   34 ++++++++++++++++++++--------------
+ 1 file changed, 20 insertions(+), 14 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3142,6 +3142,20 @@ void thread_group_times(struct task_stru
+ # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
+ #endif
+
++static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
++{
++        u64 temp = (__force u64) rtime;
++
++        temp *= (__force u64) utime;
++
++        if (sizeof(cputime_t) == 4)
++                temp = div_u64(temp, (__force u32) total);
++        else
++                temp = div64_u64(temp, (__force u64) total);
++
++        return (__force cputime_t) temp;
++}
++
+ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+ {
+         cputime_t rtime, utime = p->utime, total = utime + p->stime;
+@@ -3151,13 +3165,9 @@ void task_times(struct task_struct *p, c
+          */
+         rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
+
+-        if (total) {
+-                u64 temp = (__force u64) rtime;
+-
+-                temp *= (__force u64) utime;
+-                do_div(temp, (__force u32) total);
+-                utime = (__force cputime_t) temp;
+-        } else
++        if (total)
++                utime = scale_utime(utime, rtime, total);
++        else
+                 utime = rtime;
+
+         /*
+@@ -3184,13 +3194,9 @@ void thread_group_times(struct task_stru
+         total = cputime.utime + cputime.stime;
+         rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
+
+-        if (total) {
+-                u64 temp = (__force u64) rtime;
+-
+-                temp *= (__force u64) cputime.utime;
+-                do_div(temp, (__force u32) total);
+-                utime = (__force cputime_t) temp;
+-        } else
++        if (total)
++                utime = scale_utime(cputime.utime, rtime, total);
++        else
+                 utime = rtime;
+
+         sig->prev_utime = max(sig->prev_utime, utime);
diff --git a/queue-3.5/series b/queue-3.5/series
index ff0f422f174..fdbcedba735 100644
--- a/queue-3.5/series
+++ b/queue-3.5/series
@@ -56,3 +56,7 @@ pci-ehci-fix-crash-during-hibernation-on-asus-computers.patch
 gma500-consider-crtc-initially-active.patch
 block-replace-__getblk_slow-misfix-by-grow_dev_page-fix.patch
 jbd-don-t-write-superblock-when-unmounting-an-ro-filesystem.patch
+alsa-usb-audio-fix-scheduling-while-atomic-bug-in-pcm-capture-stream.patch
+sched-cgroup-fix-up-task_groups-list.patch
+sched-fix-divide-by-zero-at-thread_group-task-_times.patch
+uvcvideo-reset-the-bytesused-field-when-recycling-an-erroneous-buffer.patch
diff --git a/queue-3.5/uvcvideo-reset-the-bytesused-field-when-recycling-an-erroneous-buffer.patch b/queue-3.5/uvcvideo-reset-the-bytesused-field-when-recycling-an-erroneous-buffer.patch
new file mode 100644
index 00000000000..9e8e364dfbc
--- /dev/null
+++ b/queue-3.5/uvcvideo-reset-the-bytesused-field-when-recycling-an-erroneous-buffer.patch
@@ -0,0 +1,37 @@
+From 8a3f0ede2b3f5477122060af1a816c6bbf09fcd2 Mon Sep 17 00:00:00 2001
+From: Jayakrishnan Memana
+Date: Sun, 15 Jul 2012 10:54:03 -0300
+Subject: [media] uvcvideo: Reset the bytesused field when recycling an erroneous buffer
+
+From: Jayakrishnan Memana
+
+commit 8a3f0ede2b3f5477122060af1a816c6bbf09fcd2 upstream.
+
+Buffers marked as erroneous are recycled immediately by the driver if
+the nodrop module parameter isn't set. The buffer payload size is reset
+to 0, but the buffer bytesused field isn't. This results in the buffer
+being immediately considered as complete, leading to an infinite loop in
+interrupt context.
+
+Fix the problem by resetting the bytesused field when recycling the
+buffer.
+
+Signed-off-by: Jayakrishnan Memana
+Signed-off-by: Laurent Pinchart
+Signed-off-by: Mauro Carvalho Chehab
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/media/video/uvc/uvc_queue.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/media/video/uvc/uvc_queue.c
++++ b/drivers/media/video/uvc/uvc_queue.c
+@@ -338,6 +338,7 @@ struct uvc_buffer *uvc_queue_next_buffer
+         if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
+                 buf->error = 0;
+                 buf->state = UVC_BUF_STATE_QUEUED;
++                buf->bytesused = 0;
+                 vb2_set_plane_payload(&buf->buf, 0, 0);
+                 return buf;
+         }