--- /dev/null
+From 03a1cec1f17ac1a6041996b3e40f96b5a2f90e1b Mon Sep 17 00:00:00 2001
+From: Lino Sanfilippo <LinoSanfilippo@gmx.de>
+Date: Fri, 23 Mar 2012 02:42:23 +0100
+Subject: fanotify: dont merge permission events
+
+From: Lino Sanfilippo <LinoSanfilippo@gmx.de>
+
+commit 03a1cec1f17ac1a6041996b3e40f96b5a2f90e1b upstream.
+
+Boyd Yang reported a problem for the case in which multiple threads of the
+same thread group are waiting for a response to a permission event.
+In this case it is possible that some of the threads are never woken up, even
+if the response for the event has been received
+(see http://marc.info/?l=linux-kernel&m=131822913806350&w=2).
+
+The reason is that we currently merge permission events if they belong to the
+same thread group, but we are not prepared to wake up more than one waiter for
+each event. We do
+
+wait_event(group->fanotify_data.access_waitq, event->response ||
+ atomic_read(&group->fanotify_data.bypass_perm));
+and after that
+ event->response = 0;
+
+which is the reason that, even if we wake up all waiters for the same event,
+some of them may see event->response already set back to 0, go back to sleep
+and block forever.
+
+With this patch we avoid having more than one thread waiting for a response by
+no longer merging permission events for the same thread group.
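+
+Purely as an illustration (this is not part of the kernel change; the
+pthread-based waiter() below is a made-up userspace stand-in for the fanotify
+permission waiters and event->response), the lost wakeup can be sketched like
+this:
+
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+
+static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+static int response;                    /* models event->response */
+
+static void *waiter(void *arg)
+{
+	pthread_mutex_lock(&lock);
+	while (!response)               /* models the wait_event() above */
+		pthread_cond_wait(&cond, &lock);
+	response = 0;                   /* models "event->response = 0;" */
+	pthread_mutex_unlock(&lock);
+	printf("waiter %s saw the response\n", (const char *)arg);
+	return NULL;
+}
+
+int main(void)
+{
+	pthread_t t1, t2;
+
+	pthread_create(&t1, NULL, waiter, "A");
+	pthread_create(&t2, NULL, waiter, "B");
+	sleep(1);                       /* let both waiters block */
+
+	pthread_mutex_lock(&lock);
+	response = 1;                   /* one response arrives ...         */
+	pthread_cond_broadcast(&cond);  /* ... and all waiters are woken up */
+	pthread_mutex_unlock(&lock);
+
+	sleep(1);
+	/*
+	 * Only one waiter prints: the other one reacquires the lock, finds
+	 * response == 0 again, goes back to sleep and never returns -- the
+	 * hang described above.
+	 */
+	return 0;
+}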
+
+Reported-by: Boyd Yang <boyd.yang@gmail.com>
+Signed-off-by: Lino Sanfilippo <LinoSanfilipp@gmx.de>
+Signed-off-by: Eric Paris <eparis@redhat.com>
+Cc: Mihai Donțu <mihai.dontu@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/notify/fanotify/fanotify.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/fs/notify/fanotify/fanotify.c
++++ b/fs/notify/fanotify/fanotify.c
+@@ -18,6 +18,12 @@ static bool should_merge(struct fsnotify
+ old->tgid == new->tgid) {
+ switch (old->data_type) {
+ case (FSNOTIFY_EVENT_PATH):
++#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
++ /* dont merge two permission events */
++ if ((old->mask & FAN_ALL_PERM_EVENTS) &&
++ (new->mask & FAN_ALL_PERM_EVENTS))
++ return false;
++#endif
+ if ((old->path.mnt == new->path.mnt) &&
+ (old->path.dentry == new->path.dentry))
+ return true;
--- /dev/null
+From 3f1f33206c16c7b3839d71372bc2ac3f305aa802 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Tue, 2 Oct 2012 15:38:52 +0200
+Subject: perf: Clarify perf_cpu_context::active_pmu usage by renaming
+ it to ::unique_pmu
+
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+
+commit 3f1f33206c16c7b3839d71372bc2ac3f305aa802 upstream.
+
+Stephane thought the perf_cpu_context::active_pmu name confusing and
+suggested using 'unique_pmu' instead.
+
+This pointer points to one arbitrary ('random') pmu sharing the cpuctx
+instance. By limiting a for_each_pmu loop to those iterations where
+cpuctx->unique_pmu matches the current pmu, we get a loop over unique
+cpuctx instances.
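+
+Purely as an illustration of that pattern (the struct names below are made up
+for this sketch, not the real perf types), comparing the recorded sharer
+against the current pmu turns a per-pmu loop into a per-context loop:
+
+#include <stdio.h>
+
+struct pmu;
+
+struct ctx {
+	struct pmu *unique_pmu;         /* one arbitrary pmu sharing this ctx */
+	const char *name;
+};
+
+struct pmu {
+	struct ctx *ctx;
+	const char *name;
+};
+
+static void visit_each_ctx_once(struct pmu **pmus, int n)
+{
+	int i;
+
+	for (i = 0; i < n; i++) {
+		struct ctx *c = pmus[i]->ctx;
+
+		if (c->unique_pmu != pmus[i])
+			continue;       /* another pmu already "owns" this ctx */
+		printf("processing %s via %s\n", c->name, pmus[i]->name);
+	}
+}
+
+int main(void)
+{
+	struct ctx shared = { NULL, "shared-ctx" };
+	struct pmu a = { &shared, "pmu-a" };
+	struct pmu b = { &shared, "pmu-b" };
+	struct pmu *pmus[] = { &a, &b };
+
+	shared.unique_pmu = &a;         /* record one arbitrary sharer */
+	visit_each_ctx_once(pmus, 2);   /* the shared ctx is visited only once */
+	return 0;
+}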
+
+Suggested-by: Stephane Eranian <eranian@google.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/n/tip-kxyjqpfj2fn9gt7kwu5ag9ks@git.kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Li Zefan <lizefan@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/perf_event.h | 2 +-
+ kernel/events/core.c | 12 ++++++------
+ 2 files changed, 7 insertions(+), 7 deletions(-)
+
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -939,7 +939,7 @@ struct perf_cpu_context {
+ int exclusive;
+ struct list_head rotation_list;
+ int jiffies_interval;
+- struct pmu *active_pmu;
++ struct pmu *unique_pmu;
+ struct perf_cgroup *cgrp;
+ };
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4579,7 +4579,7 @@ static void perf_event_task_event(struct
+ rcu_read_lock();
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+- if (cpuctx->active_pmu != pmu)
++ if (cpuctx->unique_pmu != pmu)
+ goto next;
+ perf_event_task_ctx(&cpuctx->ctx, task_event);
+
+@@ -4725,7 +4725,7 @@ static void perf_event_comm_event(struct
+ rcu_read_lock();
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+- if (cpuctx->active_pmu != pmu)
++ if (cpuctx->unique_pmu != pmu)
+ goto next;
+ perf_event_comm_ctx(&cpuctx->ctx, comm_event);
+
+@@ -4921,7 +4921,7 @@ got_name:
+ rcu_read_lock();
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+- if (cpuctx->active_pmu != pmu)
++ if (cpuctx->unique_pmu != pmu)
+ goto next;
+ perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
+ vma->vm_flags & VM_EXEC);
+@@ -5947,8 +5947,8 @@ static void update_pmu_context(struct pm
+
+ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+
+- if (cpuctx->active_pmu == old_pmu)
+- cpuctx->active_pmu = pmu;
++ if (cpuctx->unique_pmu == old_pmu)
++ cpuctx->unique_pmu = pmu;
+ }
+ }
+
+@@ -6080,7 +6080,7 @@ skip_type:
+ cpuctx->ctx.pmu = pmu;
+ cpuctx->jiffies_interval = 1;
+ INIT_LIST_HEAD(&cpuctx->rotation_list);
+- cpuctx->active_pmu = pmu;
++ cpuctx->unique_pmu = pmu;
+ }
+
+ got_cpu_context:
--- /dev/null
+From 95cf59ea72331d0093010543b8951bb43f262cac Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Tue, 2 Oct 2012 15:41:23 +0200
+Subject: perf: Fix perf_cgroup_switch for sw-events
+
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+
+commit 95cf59ea72331d0093010543b8951bb43f262cac upstream.
+
+Jiri reported that he could trigger the WARN_ON_ONCE() in
+perf_cgroup_switch() using sw-events. This is because sw-events share
+a cpuctx with multiple PMUs.
+
+Use the ->unique_pmu pointer to limit the pmu iteration to unique
+cpuctx instances.
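+
+As a hypothetical sketch only (simplified, made-up types rather than the real
+perf code), visiting a shared context twice is what trips the assertion, and
+the uniqueness check avoids it:
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdio.h>
+
+struct pmu;
+
+struct cpuctx {
+	struct pmu *unique_pmu;
+	const char *cgrp;
+};
+
+struct pmu {
+	struct cpuctx *cpuctx;
+};
+
+static void cgroup_switch_in(struct pmu **pmus, int n, const char *cgrp)
+{
+	int i;
+
+	for (i = 0; i < n; i++) {
+		struct cpuctx *c = pmus[i]->cpuctx;
+
+		if (c->unique_pmu != pmus[i])
+			continue;       /* the fix: handle each cpuctx once */
+
+		assert(c->cgrp == NULL);        /* stands in for WARN_ON_ONCE() */
+		c->cgrp = cgrp;
+	}
+}
+
+int main(void)
+{
+	struct cpuctx shared = { NULL, NULL };
+	struct pmu sw_a = { &shared };  /* sw-events: several pmus ... */
+	struct pmu sw_b = { &shared };  /* ... share a single cpuctx   */
+	struct pmu *pmus[] = { &sw_a, &sw_b };
+
+	shared.unique_pmu = &sw_a;
+	cgroup_switch_in(pmus, 2, "some-cgroup");
+	printf("cgrp set once, assertion not triggered\n");
+	return 0;
+}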
+
+Reported-and-Tested-by: Jiri Olsa <jolsa@redhat.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/n/tip-so7wi2zf3jjzrwcutm2mkz0j@git.kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Li Zefan <lizefan@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/events/core.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -342,6 +342,8 @@ void perf_cgroup_switch(struct task_stru
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+
+ cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
++ if (cpuctx->unique_pmu != pmu)
++ continue; /* ensure we process each cpuctx once */
+
+ perf_pmu_disable(cpuctx->ctx.pmu);
+
+@@ -365,9 +367,10 @@ void perf_cgroup_switch(struct task_stru
+
+ if (mode & PERF_CGROUP_SWIN) {
+ WARN_ON_ONCE(cpuctx->cgrp);
+- /* set cgrp before ctxsw in to
+- * allow event_filter_match() to not
+- * have to pass task around
++ /*
++ * set cgrp before ctxsw in to allow
++ * event_filter_match() to not have to pass
++ * task around
+ */
+ cpuctx->cgrp = perf_cgroup_from_task(task);
+ cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
drm-radeon-fix-handling-of-variable-sized-arrays-for-router-objects.patch
scsi-iscsi-don-t-hang-in-endless-loop-if-no-targets-present.patch
cgroup-fail-if-monitored-file-and-event_control-are-in-different-cgroup.patch
+perf-clarify-perf_cpu_context-active_pmu-usage-by-renaming.patch
+perf-fix-perf_cgroup_switch-for-sw-events.patch
+fanotify-dont-merge-permission-events.patch