--- /dev/null
+From d5751469f210d2149cc2159ffff66cbeef6da3f2 Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <piastry@etersoft.ru>
+Date: Mon, 5 Mar 2012 09:39:20 +0300
+Subject: CIFS: Do not kmalloc under the flocks spinlock
+
+From: Pavel Shilovsky <piastry@etersoft.ru>
+
+commit d5751469f210d2149cc2159ffff66cbeef6da3f2 upstream.
+
+Reorganize the code so that the memory is already allocated before
+the spinlock'ed loop.
+
+Reviewed-by: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Pavel Shilovsky <piastry@etersoft.ru>
+Signed-off-by: Steve French <sfrench@us.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/file.c | 69 ++++++++++++++++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 56 insertions(+), 13 deletions(-)
+
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -920,16 +920,26 @@ cifs_push_mandatory_locks(struct cifsFil
+ for (lockp = &inode->i_flock; *lockp != NULL; \
+ lockp = &(*lockp)->fl_next)
+
++struct lock_to_push {
++ struct list_head llist;
++ __u64 offset;
++ __u64 length;
++ __u32 pid;
++ __u16 netfid;
++ __u8 type;
++};
++
+ static int
+ cifs_push_posix_locks(struct cifsFileInfo *cfile)
+ {
+ struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+ struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+ struct file_lock *flock, **before;
+- struct cifsLockInfo *lck, *tmp;
++ unsigned int count = 0, i = 0;
+ int rc = 0, xid, type;
++ struct list_head locks_to_send, *el;
++ struct lock_to_push *lck, *tmp;
+ __u64 length;
+- struct list_head locks_to_send;
+
+ xid = GetXid();
+
+@@ -940,29 +950,55 @@ cifs_push_posix_locks(struct cifsFileInf
+ return rc;
+ }
+
++ lock_flocks();
++ cifs_for_each_lock(cfile->dentry->d_inode, before) {
++ if ((*before)->fl_flags & FL_POSIX)
++ count++;
++ }
++ unlock_flocks();
++
+ INIT_LIST_HEAD(&locks_to_send);
+
++ /*
++ * Allocating count locks is enough because no locks can be added to
++ * the list while we are holding cinode->lock_mutex that protects
++ * locking operations of this inode.
++ */
++ for (; i < count; i++) {
++ lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
++ if (!lck) {
++ rc = -ENOMEM;
++ goto err_out;
++ }
++ list_add_tail(&lck->llist, &locks_to_send);
++ }
++
++ i = 0;
++ el = locks_to_send.next;
+ lock_flocks();
+ cifs_for_each_lock(cfile->dentry->d_inode, before) {
++ if (el == &locks_to_send) {
++ /* something is really wrong */
++ cERROR(1, "Can't push all brlocks!");
++ break;
++ }
+ flock = *before;
++ if ((flock->fl_flags & FL_POSIX) == 0)
++ continue;
+ length = 1 + flock->fl_end - flock->fl_start;
+ if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
+ type = CIFS_RDLCK;
+ else
+ type = CIFS_WRLCK;
+-
+- lck = cifs_lock_init(flock->fl_start, length, type,
+- cfile->netfid);
+- if (!lck) {
+- rc = -ENOMEM;
+- goto send_locks;
+- }
++ lck = list_entry(el, struct lock_to_push, llist);
+ lck->pid = flock->fl_pid;
+-
+- list_add_tail(&lck->llist, &locks_to_send);
++ lck->netfid = cfile->netfid;
++ lck->length = length;
++ lck->type = type;
++ lck->offset = flock->fl_start;
++ i++;
++ el = el->next;
+ }
+-
+-send_locks:
+ unlock_flocks();
+
+ list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
+@@ -979,11 +1015,18 @@ send_locks:
+ kfree(lck);
+ }
+
++out:
+ cinode->can_cache_brlcks = false;
+ mutex_unlock(&cinode->lock_mutex);
+
+ FreeXid(xid);
+ return rc;
++err_out:
++ list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
++ list_del(&lck->llist);
++ kfree(lck);
++ }
++ goto out;
+ }
+
+ static int
--- /dev/null
+From 87e24f4b67e68d9fd8df16e0bf9c66d1ad2a2533 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Mon, 5 Mar 2012 23:59:25 +0100
+Subject: perf/x86: Fix local vs remote memory events for NHM/WSM
+
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+
+commit 87e24f4b67e68d9fd8df16e0bf9c66d1ad2a2533 upstream.
+
+Verified using the below proglet.. before:
+
+[root@westmere ~]# perf stat -e node-stores -e node-store-misses ./numa 0
+remote write
+
+ Performance counter stats for './numa 0':
+
+ 2,101,554 node-stores
+ 2,096,931 node-store-misses
+
+ 5.021546079 seconds time elapsed
+
+[root@westmere ~]# perf stat -e node-stores -e node-store-misses ./numa 1
+local write
+
+ Performance counter stats for './numa 1':
+
+ 501,137 node-stores
+ 199 node-store-misses
+
+ 5.124451068 seconds time elapsed
+
+After:
+
+[root@westmere ~]# perf stat -e node-stores -e node-store-misses ./numa 0
+remote write
+
+ Performance counter stats for './numa 0':
+
+ 2,107,516 node-stores
+ 2,097,187 node-store-misses
+
+ 5.012755149 seconds time elapsed
+
+[root@westmere ~]# perf stat -e node-stores -e node-store-misses ./numa 1
+local write
+
+ Performance counter stats for './numa 1':
+
+ 2,063,355 node-stores
+ 165 node-store-misses
+
+ 5.082091494 seconds time elapsed
+
+#define _GNU_SOURCE
+
+#include <sched.h>
+#include <stdio.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <signal.h>
+#include <unistd.h>
+#include <numaif.h>
+#include <stdlib.h>
+
+#define SIZE (32*1024*1024)
+
+volatile int done;
+
+void sig_done(int sig)
+{
+ done = 1;
+}
+
+int main(int argc, char **argv)
+{
+ cpu_set_t *mask, *mask2;
+ size_t size;
+ int i, err, t;
+ int nrcpus = 1024;
+ char *mem;
+ unsigned long nodemask = 0x01; /* node 0 */
+ DIR *node;
+ struct dirent *de;
+ int read = 0;
+ int local = 0;
+
+ if (argc < 2) {
+ printf("usage: %s [0-3]\n", argv[0]);
+ printf(" bit0 - local/remote\n");
+ printf(" bit1 - read/write\n");
+ exit(0);
+ }
+
+ switch (atoi(argv[1])) {
+ case 0:
+ printf("remote write\n");
+ break;
+ case 1:
+ printf("local write\n");
+ local = 1;
+ break;
+ case 2:
+ printf("remote read\n");
+ read = 1;
+ break;
+ case 3:
+ printf("local read\n");
+ local = 1;
+ read = 1;
+ break;
+ }
+
+ mask = CPU_ALLOC(nrcpus);
+ size = CPU_ALLOC_SIZE(nrcpus);
+ CPU_ZERO_S(size, mask);
+
+ node = opendir("/sys/devices/system/node/node0/");
+ if (!node)
+ perror("opendir");
+ while ((de = readdir(node))) {
+ int cpu;
+
+ if (sscanf(de->d_name, "cpu%d", &cpu) == 1)
+ CPU_SET_S(cpu, size, mask);
+ }
+ closedir(node);
+
+ mask2 = CPU_ALLOC(nrcpus);
+ CPU_ZERO_S(size, mask2);
+ for (i = 0; i < size; i++)
+ CPU_SET_S(i, size, mask2);
+ CPU_XOR_S(size, mask2, mask2, mask); // invert
+
+ if (!local)
+ mask = mask2;
+
+ err = sched_setaffinity(0, size, mask);
+ if (err)
+ perror("sched_setaffinity");
+
+ mem = mmap(0, SIZE, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ err = mbind(mem, SIZE, MPOL_BIND, &nodemask, 8*sizeof(nodemask), MPOL_MF_MOVE);
+ if (err)
+ perror("mbind");
+
+ signal(SIGALRM, sig_done);
+ alarm(5);
+
+ if (!read) {
+ while (!done) {
+ for (i = 0; i < SIZE; i++)
+ mem[i] = 0x01;
+ }
+ } else {
+ while (!done) {
+ for (i = 0; i < SIZE; i++)
+ t += *(volatile char *)(mem + i);
+ }
+ }
+
+ return 0;
+}
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Stephane Eranian <eranian@google.com>
+Link: http://lkml.kernel.org/n/tip-tq73sxus35xmqpojf7ootxgs@git.kernel.org
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/perf_event_intel.c | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -389,14 +389,15 @@ static __initconst const u64 westmere_hw
+ #define NHM_LOCAL_DRAM (1 << 14)
+ #define NHM_NON_DRAM (1 << 15)
+
+-#define NHM_ALL_DRAM (NHM_REMOTE_DRAM|NHM_LOCAL_DRAM)
++#define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
++#define NHM_REMOTE (NHM_REMOTE_DRAM)
+
+ #define NHM_DMND_READ (NHM_DMND_DATA_RD)
+ #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
+ #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
+
+ #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
+-#define NHM_L3_MISS (NHM_NON_DRAM|NHM_ALL_DRAM|NHM_REMOTE_CACHE_FWD)
++#define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
+ #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
+
+ static __initconst const u64 nehalem_hw_cache_extra_regs
+@@ -420,16 +421,16 @@ static __initconst const u64 nehalem_hw_
+ },
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+- [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_ALL_DRAM,
+- [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE_DRAM,
++ [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
++ [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
+ },
+ [ C(OP_WRITE) ] = {
+- [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_ALL_DRAM,
+- [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE_DRAM,
++ [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
++ [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
+ },
+ [ C(OP_PREFETCH) ] = {
+- [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_ALL_DRAM,
+- [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE_DRAM,
++ [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
++ [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
+ },
+ },
+ };
--- /dev/null
+From 3780d038fdf4b5ef26ead10b0604ab1f46dd9510 Mon Sep 17 00:00:00 2001
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+Date: Fri, 9 Mar 2012 12:39:54 +0100
+Subject: rt2x00: fix random stalls
+
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+
+commit 3780d038fdf4b5ef26ead10b0604ab1f46dd9510 upstream.
+
+It is possible that we stop a queue and then do not wake it up again,
+especially when packets are transmitted fast. That can be easily
+reproduced by modifying the tx queue entry_num to some small value e.g. 16.
+
+If mac80211 already holds local->queue_stop_reason_lock, then we can wait
+on that lock in both rt2x00queue_pause_queue() and
+rt2x00queue_unpause_queue(). After dropping ->queue_stop_reason_lock
+it is possible that __ieee80211_wake_queue() will be performed before
+__ieee80211_stop_queue(), hence we stop the queue and never wake it up
+again.
+
+Another race condition is possible when, between the rt2x00queue_threshold()
+check and rt2x00queue_pause_queue(), we process all pending tx
+buffers on a different cpu. This might happen if, for example, an interrupt
+is triggered on the cpu performing rt2x00mac_tx().
+
+To prevent race conditions serialize pause/unpause by queue->tx_lock.
+
+Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
+Acked-by: Gertjan van Wingerde <gwingerde@gmail.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/rt2x00/rt2x00dev.c | 6 +++++-
+ drivers/net/wireless/rt2x00/rt2x00mac.c | 9 +++++++++
+ drivers/net/wireless/rt2x00/rt2x00queue.c | 3 +++
+ 3 files changed, 17 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
++++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
+@@ -426,10 +426,14 @@ void rt2x00lib_txdone(struct queue_entry
+ /*
+ * If the data queue was below the threshold before the txdone
+ * handler we must make sure the packet queue in the mac80211 stack
+- * is reenabled when the txdone handler has finished.
++ * is reenabled when the txdone handler has finished. This has to be
++ * serialized with rt2x00mac_tx(), otherwise we can wake up queue
++ * before it was stopped.
+ */
++ spin_lock_bh(&entry->queue->tx_lock);
+ if (!rt2x00queue_threshold(entry->queue))
+ rt2x00queue_unpause_queue(entry->queue);
++ spin_unlock_bh(&entry->queue->tx_lock);
+ }
+ EXPORT_SYMBOL_GPL(rt2x00lib_txdone);
+
+--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
++++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
+@@ -152,13 +152,22 @@ void rt2x00mac_tx(struct ieee80211_hw *h
+ if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false)))
+ goto exit_fail;
+
++ /*
++ * Pausing queue has to be serialized with rt2x00lib_txdone(). Note
++ * we should not use spin_lock_bh variant as bottom halve was already
++ * disabled before ieee80211_xmit() call.
++ */
++ spin_lock(&queue->tx_lock);
+ if (rt2x00queue_threshold(queue))
+ rt2x00queue_pause_queue(queue);
++ spin_unlock(&queue->tx_lock);
+
+ return;
+
+ exit_fail:
++ spin_lock(&queue->tx_lock);
+ rt2x00queue_pause_queue(queue);
++ spin_unlock(&queue->tx_lock);
+ exit_free_skb:
+ dev_kfree_skb_any(skb);
+ }
+--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
++++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
+@@ -619,6 +619,9 @@ int rt2x00queue_write_tx_frame(struct da
+ else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
+ rt2x00queue_align_frame(skb);
+
++ /*
++ * That function must be called with bh disabled.
++ */
+ spin_lock(&queue->tx_lock);
+
+ if (unlikely(rt2x00queue_full(queue))) {
x86-derandom-delay_tsc-for-64-bit.patch
pci-ignore-pre-1.1-aspm-quirking-when-aspm-is-disabled.patch
omap3isp-ccdc-fix-crash-in-hs-vs-interrupt-handler.patch
+rt2x00-fix-random-stalls.patch
+perf-x86-fix-local-vs-remote-memory-events-for-nhm-wsm.patch
+cifs-do-not-kmalloc-under-the-flocks-spinlock.patch
+vfs-fix-return-value-from-do_last.patch
+vfs-fix-double-put-after-complete_walk.patch
--- /dev/null
+From 097b180ca09b581ef0dc24fbcfc1b227de3875df Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@suse.cz>
+Date: Tue, 6 Mar 2012 13:56:33 +0100
+Subject: vfs: fix double put after complete_walk()
+
+From: Miklos Szeredi <mszeredi@suse.cz>
+
+commit 097b180ca09b581ef0dc24fbcfc1b227de3875df upstream.
+
+complete_walk() already puts nd->path, no need to do it again at cleanup time.
+
+This would result in Oopses if triggered, apparently the codepath is not too
+well exercised.
+
+Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/namei.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2238,7 +2238,7 @@ static struct file *do_last(struct namei
+ /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
+ error = complete_walk(nd);
+ if (error)
+- goto exit;
++ return ERR_PTR(error);
+ error = -EISDIR;
+ if (S_ISDIR(nd->inode->i_mode))
+ goto exit;
--- /dev/null
+From 7f6c7e62fcc123e6bd9206da99a2163fe3facc31 Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@suse.cz>
+Date: Tue, 6 Mar 2012 13:56:34 +0100
+Subject: vfs: fix return value from do_last()
+
+From: Miklos Szeredi <mszeredi@suse.cz>
+
+commit 7f6c7e62fcc123e6bd9206da99a2163fe3facc31 upstream.
+
+complete_walk() returns either ECHILD or ESTALE. do_last() turns this into
+ECHILD unconditionally. If not in RCU mode, this error will reach userspace
+which is complete nonsense.
+
+Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/namei.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2139,7 +2139,7 @@ static struct file *do_last(struct namei
+ /* sayonara */
+ error = complete_walk(nd);
+ if (error)
+- return ERR_PTR(-ECHILD);
++ return ERR_PTR(error);
+
+ error = -ENOTDIR;
+ if (nd->flags & LOOKUP_DIRECTORY) {