--- /dev/null
+From 24261fc23db950951760d00c188ba63cc756b932 Mon Sep 17 00:00:00 2001
+From: Mateusz Guzik <mguzik@redhat.com>
+Date: Fri, 8 Mar 2013 16:30:03 +0100
+Subject: cifs: delay super block destruction until all cifsFileInfo objects are gone
+
+From: Mateusz Guzik <mguzik@redhat.com>
+
+commit 24261fc23db950951760d00c188ba63cc756b932 upstream.
+
+cifsFileInfo objects hold references to dentries and it is possible that
+these will still be around in workqueues when VFS decides to kill super
+block during unmount.
+
+This results in panics like this one:
+BUG: Dentry ffff88001f5e76c0{i=66b4a,n=1M-2} still in use (1) [unmount of cifs cifs]
+------------[ cut here ]------------
+kernel BUG at fs/dcache.c:943!
+[..]
+Process umount (pid: 1781, threadinfo ffff88003d6e8000, task ffff880035eeaec0)
+[..]
+Call Trace:
+ [<ffffffff811b44f3>] shrink_dcache_for_umount+0x33/0x60
+ [<ffffffff8119f7fc>] generic_shutdown_super+0x2c/0xe0
+ [<ffffffff8119f946>] kill_anon_super+0x16/0x30
+ [<ffffffffa036623a>] cifs_kill_sb+0x1a/0x30 [cifs]
+ [<ffffffff8119fcc7>] deactivate_locked_super+0x57/0x80
+ [<ffffffff811a085e>] deactivate_super+0x4e/0x70
+ [<ffffffff811bb417>] mntput_no_expire+0xd7/0x130
+ [<ffffffff811bc30c>] sys_umount+0x9c/0x3c0
+ [<ffffffff81657c19>] system_call_fastpath+0x16/0x1b
+
+Fix this by making each cifsFileInfo object hold a reference to cifs
+super block, which implicitly keeps VFS super block around as well.
+
+Signed-off-by: Mateusz Guzik <mguzik@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@redhat.com>
+Reported-and-Tested-by: Ben Greear <greearb@candelatech.com>
+Signed-off-by: Steve French <sfrench@us.ibm.com>
+[bwh: Backported to 3.2: adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+[xr: Backported to 3.4: adjust context]
+Signed-off-by: Rui Xiang <rui.xiang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/cifs/cifsfs.c | 24 ++++++++++++++++++++++++
+ fs/cifs/cifsfs.h | 4 ++++
+ fs/cifs/file.c | 6 +++++-
+ 3 files changed, 33 insertions(+), 1 deletion(-)
+
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -87,6 +87,30 @@ extern mempool_t *cifs_mid_poolp;
+
+ struct workqueue_struct *cifsiod_wq;
+
++/*
++ * Bumps refcount for cifs super block.
++ * Note that it should be only called if a referece to VFS super block is
++ * already held, e.g. in open-type syscalls context. Otherwise it can race with
++ * atomic_dec_and_test in deactivate_locked_super.
++ */
++void
++cifs_sb_active(struct super_block *sb)
++{
++ struct cifs_sb_info *server = CIFS_SB(sb);
++
++ if (atomic_inc_return(&server->active) == 1)
++ atomic_inc(&sb->s_active);
++}
++
++void
++cifs_sb_deactive(struct super_block *sb)
++{
++ struct cifs_sb_info *server = CIFS_SB(sb);
++
++ if (atomic_dec_and_test(&server->active))
++ deactivate_super(sb);
++}
++
+ static int
+ cifs_read_super(struct super_block *sb)
+ {
+--- a/fs/cifs/cifsfs.h
++++ b/fs/cifs/cifsfs.h
+@@ -41,6 +41,10 @@ extern struct file_system_type cifs_fs_t
+ extern const struct address_space_operations cifs_addr_ops;
+ extern const struct address_space_operations cifs_addr_ops_smallbuf;
+
++/* Functions related to super block operations */
++extern void cifs_sb_active(struct super_block *sb);
++extern void cifs_sb_deactive(struct super_block *sb);
++
+ /* Functions related to inodes */
+ extern const struct inode_operations cifs_dir_inode_ops;
+ extern struct inode *cifs_root_iget(struct super_block *);
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -265,6 +265,8 @@ cifs_new_fileinfo(__u16 fileHandle, stru
+ mutex_init(&pCifsFile->fh_mutex);
+ INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
+
++ cifs_sb_active(inode->i_sb);
++
+ spin_lock(&cifs_file_list_lock);
+ list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
+ /* if readable file instance put first in list*/
+@@ -293,7 +295,8 @@ void cifsFileInfo_put(struct cifsFileInf
+ struct inode *inode = cifs_file->dentry->d_inode;
+ struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++ struct super_block *sb = inode->i_sb;
++ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ struct cifsLockInfo *li, *tmp;
+
+ spin_lock(&cifs_file_list_lock);
+@@ -345,6 +348,7 @@ void cifsFileInfo_put(struct cifsFileInf
+
+ cifs_put_tlink(cifs_file->tlink);
+ dput(cifs_file->dentry);
++ cifs_sb_deactive(sb);
+ kfree(cifs_file);
+ }
+
--- /dev/null
+From 06335856d22eeef4862c539920137e348a9e620d Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <pshilovsky@etersoft.ru>
+Date: Wed, 29 Aug 2012 21:13:38 +0400
+Subject: CIFS: Fix error handling in cifs_push_mandatory_locks
+
+From: Pavel Shilovsky <pshilovsky@etersoft.ru>
+
+commit e2f2886a824ff0a56da1eaa13019fde86aa89fa6 upstream.
+
+Signed-off-by: Pavel Shilovsky <pshilovsky@etersoft.ru>
+Signed-off-by: Steve French <smfrench@gmail.com>
+[bwh: Backported to 3.2: adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Cc: Rui Xiang <rui.xiang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/file.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -882,7 +882,7 @@ cifs_push_mandatory_locks(struct cifsFil
+ if (!buf) {
+ mutex_unlock(&cinode->lock_mutex);
+ FreeXid(xid);
+- return rc;
++ return -ENOMEM;
+ }
+
+ for (i = 0; i < 2; i++) {
--- /dev/null
+From 640c4ad6d759b60a64049ff46c9acff5954f18d6 Mon Sep 17 00:00:00 2001
+From: "Geyslan G. Bem" <geyslan@gmail.com>
+Date: Fri, 11 Oct 2013 16:49:16 -0300
+Subject: ecryptfs: Fix memory leakage in keystore.c
+
+From: "Geyslan G. Bem" <geyslan@gmail.com>
+
+commit 3edc8376c06133e3386265a824869cad03a4efd4 upstream.
+
+In 'decrypt_pki_encrypted_session_key' function:
+
+Initializes 'payload' pointer and releases it on exit.
+
+Signed-off-by: Geyslan G. Bem <geyslan@gmail.com>
+Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
+[bwh: Backported to 3.2: adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Cc: Rui Xiang <rui.xiang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ecryptfs/keystore.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/ecryptfs/keystore.c
++++ b/fs/ecryptfs/keystore.c
+@@ -1149,7 +1149,7 @@ decrypt_pki_encrypted_session_key(struct
+ struct ecryptfs_msg_ctx *msg_ctx;
+ struct ecryptfs_message *msg = NULL;
+ char *auth_tok_sig;
+- char *payload;
++ char *payload = NULL;
+ size_t payload_len;
+ int rc;
+
+@@ -1204,6 +1204,7 @@ decrypt_pki_encrypted_session_key(struct
+ out:
+ if (msg)
+ kfree(msg);
++ kfree(payload);
+ return rc;
+ }
+
--- /dev/null
+From 55a058d4a89c4ef33294f9758559f0704d347738 Mon Sep 17 00:00:00 2001
+From: Justin Lecher <jlec@gentoo.org>
+Date: Mon, 30 Jul 2012 14:42:53 -0700
+Subject: fs: cachefiles: add support for large files in filesystem caching
+
+From: Justin Lecher <jlec@gentoo.org>
+
+commit 98c350cda2c14a343d34ea01a3d9c24fea5ec66d upstream.
+
+Support the caching of large files.
+
+Addresses https://bugzilla.kernel.org/show_bug.cgi?id=31182
+
+Signed-off-by: Justin Lecher <jlec@gentoo.org>
+Signed-off-by: Suresh Jayaraman <sjayaraman@suse.com>
+Tested-by: Suresh Jayaraman <sjayaraman@suse.com>
+Acked-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 3.2:
+ - Adjust context
+ - dentry_open() takes dentry and vfsmount pointers, not a path pointer]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Cc: Rui Xiang <rui.xiang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cachefiles/rdwr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/cachefiles/rdwr.c
++++ b/fs/cachefiles/rdwr.c
+@@ -918,7 +918,7 @@ int cachefiles_write_page(struct fscache
+ * own time */
+ dget(object->backer);
+ mntget(cache->mnt);
+- file = dentry_open(object->backer, cache->mnt, O_RDWR,
++ file = dentry_open(object->backer, cache->mnt, O_RDWR | O_LARGEFILE,
+ cache->cache_cred);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
--- /dev/null
+From 8c4f3c3fa9681dc549cd35419b259496082fef8b Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Tue, 30 Jul 2013 00:04:32 -0400
+Subject: ftrace: Check module functions being traced on reload
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit 8c4f3c3fa9681dc549cd35419b259496082fef8b upstream.
+
+There's been a nasty bug that would show up and not give much info.
+The bug displayed the following warning:
+
+ WARNING: at kernel/trace/ftrace.c:1529 __ftrace_hash_rec_update+0x1e3/0x230()
+ Pid: 20903, comm: bash Tainted: G O 3.6.11+ #38405.trunk
+ Call Trace:
+ [<ffffffff8103e5ff>] warn_slowpath_common+0x7f/0xc0
+ [<ffffffff8103e65a>] warn_slowpath_null+0x1a/0x20
+ [<ffffffff810c2ee3>] __ftrace_hash_rec_update+0x1e3/0x230
+ [<ffffffff810c4f28>] ftrace_hash_move+0x28/0x1d0
+ [<ffffffff811401cc>] ? kfree+0x2c/0x110
+ [<ffffffff810c68ee>] ftrace_regex_release+0x8e/0x150
+ [<ffffffff81149f1e>] __fput+0xae/0x220
+ [<ffffffff8114a09e>] ____fput+0xe/0x10
+ [<ffffffff8105fa22>] task_work_run+0x72/0x90
+ [<ffffffff810028ec>] do_notify_resume+0x6c/0xc0
+ [<ffffffff8126596e>] ? trace_hardirqs_on_thunk+0x3a/0x3c
+ [<ffffffff815c0f88>] int_signal+0x12/0x17
+ ---[ end trace 793179526ee09b2c ]---
+
+It was finally narrowed down to unloading a module that was being traced.
+
+It was actually more than that. When functions are being traced, there's
+a table of all functions that have a ref count of the number of active
+tracers attached to that function. When a function trace callback is
+registered to a function, the function's record ref count is incremented.
+When it is unregistered, the function's record ref count is decremented.
+If an inconsistency is detected (ref count goes below zero) the above
+warning is shown and the function tracing is permanently disabled until
+reboot.
+
+The ftrace callback ops holds a hash of functions that it filters on
+(and/or filters off). If the hash is empty, the default means to filter
+all functions (for the filter_hash) or to disable no functions (for the
+notrace_hash).
+
+When a module is unloaded, it frees the function records that represent
+the module functions. These records exist on their own pages, that is
+function records for one module will not exist on the same page as
+function records for other modules or even the core kernel.
+
+Now when a module unloads, the records that represents its functions are
+freed. When the module is loaded again, the records are recreated with
+a default ref count of zero (unless there's a callback that traces all
+functions, then they will also be traced, and the ref count will be
+incremented).
+
+The problem is that if an ftrace callback hash includes functions of the
+module being unloaded, those hash entries will not be removed. If the
+module is reloaded in the same location, the hash entries still point
+to the functions of the module but the module's ref counts do not reflect
+that.
+
+With the help of Steve and Joern, we found a reproducer:
+
+ Using uinput module and uinput_release function.
+
+ cd /sys/kernel/debug/tracing
+ modprobe uinput
+ echo uinput_release > set_ftrace_filter
+ echo function > current_tracer
+ rmmod uinput
+ modprobe uinput
+ # check /proc/modules to see if loaded in same addr, otherwise try again
+ echo nop > current_tracer
+
+ [BOOM]
+
+The above loads the uinput module, which creates a table of functions that
+can be traced within the module.
+
+We add uinput_release to the filter_hash to trace just that function.
+
+Enable function tracing, which increments the ref count of the record
+associated to uinput_release.
+
+Remove uinput, which frees the records including the one that represents
+uinput_release.
+
+Load the uinput module again (and make sure it's at the same address).
+This recreates the function records all with a ref count of zero,
+including uinput_release.
+
+Disable function tracing, which will decrement the ref count for uinput_release
+which is now zero because of the module removal and reload, and we have
+a mismatch (below zero ref count).
+
+The solution is to check all currently tracing ftrace callbacks to see if any
+are tracing any of the module's functions when a module is loaded (it already does
+that with callbacks that trace all functions). If a callback happens to have
+a module function being traced, it increments that records ref count and starts
+tracing that function.
+
+There may be a strange side effect with this, where tracing module functions
+on unload and then reloading a new module may have that new module's functions
+being traced. This may be something that confuses the user, but it's not
+a big deal. Another approach is to disable all callback hashes on module unload,
+but this leaves some ftrace callbacks that may not be registered, but can
+still have hashes tracing the module's function where ftrace doesn't know about
+it. That situation can cause the same bug. This solution solves that case too.
+Another benefit of this solution, is it is possible to trace a module's
+function on unload and load.
+
+Link: http://lkml.kernel.org/r/20130705142629.GA325@redhat.com
+
+Reported-by: Jörn Engel <joern@logfs.org>
+Reported-by: Dave Jones <davej@redhat.com>
+Reported-by: Steve Hodgson <steve@purestorage.com>
+Tested-by: Steve Hodgson <steve@purestorage.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Cc: Rui Xiang <rui.xiang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/ftrace.c | 71 +++++++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 62 insertions(+), 9 deletions(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -2080,12 +2080,57 @@ static cycle_t ftrace_update_time;
+ static unsigned long ftrace_update_cnt;
+ unsigned long ftrace_update_tot_cnt;
+
+-static int ops_traces_mod(struct ftrace_ops *ops)
++static inline int ops_traces_mod(struct ftrace_ops *ops)
+ {
+- struct ftrace_hash *hash;
++ /*
++ * Filter_hash being empty will default to trace module.
++ * But notrace hash requires a test of individual module functions.
++ */
++ return ftrace_hash_empty(ops->filter_hash) &&
++ ftrace_hash_empty(ops->notrace_hash);
++}
++
++/*
++ * Check if the current ops references the record.
++ *
++ * If the ops traces all functions, then it was already accounted for.
++ * If the ops does not trace the current record function, skip it.
++ * If the ops ignores the function via notrace filter, skip it.
++ */
++static inline bool
++ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
++{
++ /* If ops isn't enabled, ignore it */
++ if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
++ return 0;
++
++ /* If ops traces all mods, we already accounted for it */
++ if (ops_traces_mod(ops))
++ return 0;
++
++ /* The function must be in the filter */
++ if (!ftrace_hash_empty(ops->filter_hash) &&
++ !ftrace_lookup_ip(ops->filter_hash, rec->ip))
++ return 0;
++
++ /* If in notrace hash, we ignore it too */
++ if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
++ return 0;
++
++ return 1;
++}
++
++static int referenced_filters(struct dyn_ftrace *rec)
++{
++ struct ftrace_ops *ops;
++ int cnt = 0;
+
+- hash = ops->filter_hash;
+- return ftrace_hash_empty(hash);
++ for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
++ if (ops_references_rec(ops, rec))
++ cnt++;
++ }
++
++ return cnt;
+ }
+
+ static int ftrace_update_code(struct module *mod)
+@@ -2094,6 +2139,7 @@ static int ftrace_update_code(struct mod
+ struct dyn_ftrace *p;
+ cycle_t start, stop;
+ unsigned long ref = 0;
++ bool test = false;
+ int i;
+
+ /*
+@@ -2107,9 +2153,12 @@ static int ftrace_update_code(struct mod
+
+ for (ops = ftrace_ops_list;
+ ops != &ftrace_list_end; ops = ops->next) {
+- if (ops->flags & FTRACE_OPS_FL_ENABLED &&
+- ops_traces_mod(ops))
+- ref++;
++ if (ops->flags & FTRACE_OPS_FL_ENABLED) {
++ if (ops_traces_mod(ops))
++ ref++;
++ else
++ test = true;
++ }
+ }
+ }
+
+@@ -2119,12 +2168,16 @@ static int ftrace_update_code(struct mod
+ for (pg = ftrace_new_pgs; pg; pg = pg->next) {
+
+ for (i = 0; i < pg->index; i++) {
++ int cnt = ref;
++
+ /* If something went wrong, bail without enabling anything */
+ if (unlikely(ftrace_disabled))
+ return -1;
+
+ p = &pg->records[i];
+- p->flags = ref;
++ if (test)
++ cnt += referenced_filters(p);
++ p->flags = cnt;
+
+ /*
+ * Do the initial record conversion from mcount jump
+@@ -2144,7 +2197,7 @@ static int ftrace_update_code(struct mod
+ * conversion puts the module to the correct state, thus
+ * passing the ftrace_make_call check.
+ */
+- if (ftrace_start_up && ref) {
++ if (ftrace_start_up && cnt) {
+ int failed = __ftrace_replace_code(p, 1);
+ if (failed)
+ ftrace_bug(failed, p->ip);
--- /dev/null
+From c481420248c6730246d2a1b1773d5d7007ae0835 Mon Sep 17 00:00:00 2001
+From: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
+Date: Fri, 12 Apr 2013 11:05:54 +0800
+Subject: perf: Fix error return code
+
+From: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
+
+commit c481420248c6730246d2a1b1773d5d7007ae0835 upstream.
+
+Fix to return -ENOMEM in the allocation error case instead of 0
+(if pmu_bus_running == 1), as done elsewhere in this function.
+
+Signed-off-by: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
+Cc: a.p.zijlstra@chello.nl
+Cc: paulus@samba.org
+Cc: acme@ghostprotocols.net
+Link: http://lkml.kernel.org/r/CAPgLHd8j_fWcgqe%3DKLWjpBj%2B%3Do0Pw6Z-SEq%3DNTPU08c2w1tngQ@mail.gmail.com
+[ Tweaked the error code setting placement and the changelog. ]
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Rui Xiang <rui.xiang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/events/core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5871,6 +5871,7 @@ skip_type:
+ if (pmu->pmu_cpu_context)
+ goto got_cpu_context;
+
++ ret = -ENOMEM;
+ pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
+ if (!pmu->pmu_cpu_context)
+ goto free_dev;
--- /dev/null
+From d08b0a5594dde8b0fbda5d38cb01a81954a9829e Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 28 Oct 2013 13:55:29 +0100
+Subject: perf: Fix perf ring buffer memory ordering
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit bf378d341e4873ed928dc3c636252e6895a21f50 upstream.
+
+The PPC64 people noticed a missing memory barrier and crufty old
+comments in the perf ring buffer code. So update all the comments and
+add the missing barrier.
+
+When the architecture implements local_t using atomic_long_t there
+will be double barriers issued; but short of introducing more
+conditional barrier primitives this is the best we can do.
+
+Reported-by: Victor Kaplansky <victork@il.ibm.com>
+Tested-by: Victor Kaplansky <victork@il.ibm.com>
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+Cc: michael@ellerman.id.au
+Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Michael Neuling <mikey@neuling.org>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: anton@samba.org
+Cc: benh@kernel.crashing.org
+Link: http://lkml.kernel.org/r/20131025173749.GG19466@laptop.lan
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+[bwh: Backported to 3.2: adjust filename]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Cc: Rui Xiang <rui.xiang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/perf_event.h | 12 +++++++-----
+ kernel/events/ring_buffer.c | 31 +++++++++++++++++++++++++++----
+ 2 files changed, 34 insertions(+), 9 deletions(-)
+
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -391,13 +391,15 @@ struct perf_event_mmap_page {
+ /*
+ * Control data for the mmap() data buffer.
+ *
+- * User-space reading the @data_head value should issue an rmb(), on
+- * SMP capable platforms, after reading this value -- see
+- * perf_event_wakeup().
++ * User-space reading the @data_head value should issue an smp_rmb(),
++ * after reading this value.
+ *
+ * When the mapping is PROT_WRITE the @data_tail value should be
+- * written by userspace to reflect the last read data. In this case
+- * the kernel will not over-write unread data.
++ * written by userspace to reflect the last read data, after issueing
++ * an smp_mb() to separate the data read from the ->data_tail store.
++ * In this case the kernel will not over-write unread data.
++ *
++ * See perf_output_put_handle() for the data ordering.
+ */
+ __u64 data_head; /* head in the data section */
+ __u64 data_tail; /* user-space written tail */
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -75,10 +75,31 @@ again:
+ goto out;
+
+ /*
+- * Publish the known good head. Rely on the full barrier implied
+- * by atomic_dec_and_test() order the rb->head read and this
+- * write.
++ * Since the mmap() consumer (userspace) can run on a different CPU:
++ *
++ * kernel user
++ *
++ * READ ->data_tail READ ->data_head
++ * smp_mb() (A) smp_rmb() (C)
++ * WRITE $data READ $data
++ * smp_wmb() (B) smp_mb() (D)
++ * STORE ->data_head WRITE ->data_tail
++ *
++ * Where A pairs with D, and B pairs with C.
++ *
++ * I don't think A needs to be a full barrier because we won't in fact
++ * write data until we see the store from userspace. So we simply don't
++ * issue the data WRITE until we observe it. Be conservative for now.
++ *
++ * OTOH, D needs to be a full barrier since it separates the data READ
++ * from the tail WRITE.
++ *
++ * For B a WMB is sufficient since it separates two WRITEs, and for C
++ * an RMB is sufficient since it separates two READs.
++ *
++ * See perf_output_begin().
+ */
++ smp_wmb();
+ rb->user_page->data_head = head;
+
+ /*
+@@ -142,9 +163,11 @@ int perf_output_begin(struct perf_output
+ * Userspace could choose to issue a mb() before updating the
+ * tail pointer. So that all reads will be completed before the
+ * write is issued.
++ *
++ * See perf_output_put_handle().
+ */
+ tail = ACCESS_ONCE(rb->user_page->data_tail);
+- smp_rmb();
++ smp_mb();
+ offset = head = local_read(&rb->head);
+ head += size;
+ if (unlikely(!perf_output_space(rb, tail, offset, head)))
--- /dev/null
+From fd9b86d37a600488dbd80fe60cca46b822bff1cd Mon Sep 17 00:00:00 2001
+From: libin <huawei.libin@huawei.com>
+Date: Mon, 8 Apr 2013 14:39:12 +0800
+Subject: sched/debug: Fix sd->*_idx limit range avoiding overflow
+
+From: libin <huawei.libin@huawei.com>
+
+commit fd9b86d37a600488dbd80fe60cca46b822bff1cd upstream.
+
+Commit 201c373e8e ("sched/debug: Limit sd->*_idx range on
+sysctl") was an incomplete bug fix.
+
+This patch fixes sd->*_idx limit range to [0 ~ CPU_LOAD_IDX_MAX-1]
+avoiding array overflow caused by setting sd->*_idx to CPU_LOAD_IDX_MAX
+on sysctl.
+
+Signed-off-by: Libin <huawei.libin@huawei.com>
+Cc: <jiang.liu@huawei.com>
+Cc: <guohanjun@huawei.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lkml.kernel.org/r/51626610.2040607@huawei.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Rui Xiang <rui.xiang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5434,7 +5434,7 @@ static void sd_free_ctl_entry(struct ctl
+ }
+
+ static int min_load_idx = 0;
+-static int max_load_idx = CPU_LOAD_IDX_MAX;
++static int max_load_idx = CPU_LOAD_IDX_MAX-1;
+
+ static void
+ set_table_entry(struct ctl_table *entry,
--- /dev/null
+From 201c373e8e4823700d3160d5c28e1ab18fd1193e Mon Sep 17 00:00:00 2001
+From: Namhyung Kim <namhyung.kim@lge.com>
+Date: Thu, 16 Aug 2012 17:03:24 +0900
+Subject: sched/debug: Limit sd->*_idx range on sysctl
+
+From: Namhyung Kim <namhyung.kim@lge.com>
+
+commit 201c373e8e4823700d3160d5c28e1ab18fd1193e upstream.
+
+Various sd->*_idx's are used for referring to the rq's load average table
+when selecting a cpu to run. However they can be set to any number
+with sysctl knobs so that it can crash the kernel if something bad is
+given. Fix it by limiting them into the actual range.
+
+Signed-off-by: Namhyung Kim <namhyung@kernel.org>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/r/1345104204-8317-1-git-send-email-namhyung@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Rui Xiang <rui.xiang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/core.c | 35 ++++++++++++++++++++++-------------
+ 1 file changed, 22 insertions(+), 13 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5433,16 +5433,25 @@ static void sd_free_ctl_entry(struct ctl
+ *tablep = NULL;
+ }
+
++static int min_load_idx = 0;
++static int max_load_idx = CPU_LOAD_IDX_MAX;
++
+ static void
+ set_table_entry(struct ctl_table *entry,
+ const char *procname, void *data, int maxlen,
+- umode_t mode, proc_handler *proc_handler)
++ umode_t mode, proc_handler *proc_handler,
++ bool load_idx)
+ {
+ entry->procname = procname;
+ entry->data = data;
+ entry->maxlen = maxlen;
+ entry->mode = mode;
+ entry->proc_handler = proc_handler;
++
++ if (load_idx) {
++ entry->extra1 = &min_load_idx;
++ entry->extra2 = &max_load_idx;
++ }
+ }
+
+ static struct ctl_table *
+@@ -5454,30 +5463,30 @@ sd_alloc_ctl_domain_table(struct sched_d
+ return NULL;
+
+ set_table_entry(&table[0], "min_interval", &sd->min_interval,
+- sizeof(long), 0644, proc_doulongvec_minmax);
++ sizeof(long), 0644, proc_doulongvec_minmax, false);
+ set_table_entry(&table[1], "max_interval", &sd->max_interval,
+- sizeof(long), 0644, proc_doulongvec_minmax);
++ sizeof(long), 0644, proc_doulongvec_minmax, false);
+ set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, true);
+ set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, true);
+ set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, true);
+ set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, true);
+ set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, true);
+ set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, false);
+ set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, false);
+ set_table_entry(&table[9], "cache_nice_tries",
+ &sd->cache_nice_tries,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, false);
+ set_table_entry(&table[10], "flags", &sd->flags,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, false);
+ set_table_entry(&table[11], "name", sd->name,
+- CORENAME_MAX_SIZE, 0444, proc_dostring);
++ CORENAME_MAX_SIZE, 0444, proc_dostring, false);
+ /* &table[12] is terminator */
+
+ return table;
acpi-blacklist-add-dmi_enable_osi_linux-quirk-for-asus-eee-pc-1015px.patch
i2c-designware-mask-all-interrupts-during-i2c-controller-enable.patch
crypto-caam-add-allocation-failure-handling-in-sprintfcat-macro.patch
+setfacl-removes-part-of-acl-when-setting-posix-acls-to-samba.patch
+cifs-fix-error-handling-in-cifs_push_mandatory_locks.patch
+ecryptfs-fix-memory-leakage-in-keystore.c.patch
+fs-cachefiles-add-support-for-large-files-in-filesystem-caching.patch
+perf-fix-perf-ring-buffer-memory-ordering.patch
+ftrace-check-module-functions-being-traced-on-reload.patch
+sched-debug-limit-sd-_idx-range-on-sysctl.patch
+sched-debug-fix-sd-_idx-limit-range-avoiding-overflow.patch
+perf-fix-error-return-code.patch
+tracing-keep-overwrite-in-sync-between-regular-and-snapshot-buffers.patch
+vfs-make-vfs_fstat-use-f_light.patch
+cifs-delay-super-block-destruction-until-all-cifsfileinfo-objects-are-gone.patch
--- /dev/null
+From baf48b7f96f5cd7f8b510dba67bcbdf96df296ff Mon Sep 17 00:00:00 2001
+From: Steve French <smfrench@gmail.com>
+Date: Fri, 15 Nov 2013 20:41:32 -0600
+Subject: setfacl removes part of ACL when setting POSIX ACLs to Samba
+
+From: Steve French <smfrench@gmail.com>
+
+commit b1d93356427be6f050dc55c86eb019d173700af6 upstream.
+
+setfacl over cifs mounts can remove the default ACL when setting the
+(non-default part of) the ACL and vice versa (we were leaving at 0
+rather than setting to -1 the count field for the unaffected
+half of the ACL. For example notice the setfacl removed
+the default ACL in this sequence:
+
+steven@steven-GA-970A-DS3:~/cifs-2.6$ getfacl /mnt/test-dir ; setfacl
+-m default:user:test:rwx,user:test:rwx /mnt/test-dir
+getfacl: Removing leading '/' from absolute path names
+user::rwx
+group::r-x
+other::r-x
+default:user::rwx
+default:user:test:rwx
+default:group::r-x
+default:mask::rwx
+default:other::r-x
+
+steven@steven-GA-970A-DS3:~/cifs-2.6$ getfacl /mnt/test-dir
+getfacl: Removing leading '/' from absolute path names
+user::rwx
+user:test:rwx
+group::r-x
+mask::rwx
+other::r-x
+
+Signed-off-by: Steve French <smfrench@gmail.com>
+Acked-by: Jeremy Allison <jra@samba.org>
+[bwh: Backported to 3.2: adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Cc: Rui Xiang <rui.xiang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/cifssmb.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -3454,11 +3454,13 @@ static __u16 ACL_to_cifs_posix(char *par
+ return 0;
+ }
+ cifs_acl->version = cpu_to_le16(1);
+- if (acl_type == ACL_TYPE_ACCESS)
++ if (acl_type == ACL_TYPE_ACCESS) {
+ cifs_acl->access_entry_count = cpu_to_le16(count);
+- else if (acl_type == ACL_TYPE_DEFAULT)
++ cifs_acl->default_entry_count = __constant_cpu_to_le16(0xFFFF);
++ } else if (acl_type == ACL_TYPE_DEFAULT) {
+ cifs_acl->default_entry_count = cpu_to_le16(count);
+- else {
++ cifs_acl->access_entry_count = __constant_cpu_to_le16(0xFFFF);
++ } else {
+ cFYI(1, "unknown ACL type %d", acl_type);
+ return 0;
+ }
--- /dev/null
+From 80902822658aab18330569587cdb69ac1dfdcea8 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Thu, 14 Mar 2013 14:20:54 -0400
+Subject: tracing: Keep overwrite in sync between regular and snapshot buffers
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit 80902822658aab18330569587cdb69ac1dfdcea8 upstream.
+
+Changing the overwrite mode for the ring buffer via the trace
+option only sets the normal buffer. But the snapshot buffer could
+swap with it, and then the snapshot would be in non overwrite mode
+and the normal buffer would be in overwrite mode, even though the
+option flag states otherwise.
+
+Keep the two buffers overwrite modes in sync.
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Cc: Rui Xiang <rui.xiang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2782,8 +2782,12 @@ int set_tracer_flag(unsigned int mask, i
+ if (mask == TRACE_ITER_RECORD_CMD)
+ trace_event_enable_cmd_record(enabled);
+
+- if (mask == TRACE_ITER_OVERWRITE)
++ if (mask == TRACE_ITER_OVERWRITE) {
+ ring_buffer_change_overwrite(global_trace.buffer, enabled);
++#ifdef CONFIG_TRACER_MAX_TRACE
++ ring_buffer_change_overwrite(max_tr.buffer, enabled);
++#endif
++ }
+
+ return 0;
+ }
--- /dev/null
+From e994defb7b6813ba6fa7a2a36e86d2455ad1dc35 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Sat, 28 Apr 2012 14:55:17 -0700
+Subject: VFS: make vfs_fstat() use f[get|put]_light()
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit e994defb7b6813ba6fa7a2a36e86d2455ad1dc35 upstream.
+
+Use the *_light() versions that properly avoid doing the file user count
+updates when they are unnecessary.
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[xr: Backported to 3.4: adjust function name]
+Signed-off-by: Rui Xiang <rui.xiang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/stat.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -57,12 +57,13 @@ EXPORT_SYMBOL(vfs_getattr);
+
+ int vfs_fstat(unsigned int fd, struct kstat *stat)
+ {
+- struct file *f = fget_raw(fd);
++ int fput_needed;
++ struct file *f = fget_light(fd, &fput_needed);
+ int error = -EBADF;
+
+ if (f) {
+ error = vfs_getattr(f->f_path.mnt, f->f_path.dentry, stat);
+- fput(f);
++ fput_light(f, fput_needed);
+ }
+ return error;
+ }