--- /dev/null
+From b49926629fb5c324bb1ed3960fb0d7905a4a8562 Mon Sep 17 00:00:00 2001
+From: Matti Gottlieb <matti.gottlieb@intel.com>
+Date: Sun, 22 Sep 2013 08:23:23 +0300
+Subject: iwlwifi: pcie: add new SKUs for 7000 & 3160 NIC series
+
+From: Matti Gottlieb <matti.gottlieb@intel.com>
+
+commit b49926629fb5c324bb1ed3960fb0d7905a4a8562 upstream.
+
+Add some new PCI IDs to the table for the 7000 & 3160 series.
+
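+For context, each entry ties an Intel PCI device/subdevice ID pair to the
+iwl_cfg describing that SKU (2ac / 2n / n).  As an illustrative sketch only
+(the table name below is made up, and the macro is paraphrased rather than
+quoted from drv.c), an entry for one of the new SKUs looks roughly like:
+
+	/* assumes <linux/module.h>, <linux/pci.h> and the driver's
+	 * iwl7260_2ac_cfg are in scope */
+	#define IWL_PCI_DEVICE(dev, subdev, cfg) \
+		.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
+		.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
+		.driver_data = (kernel_ulong_t)&(cfg)
+
+	static DEFINE_PCI_DEVICE_TABLE(example_card_ids) = {
+		{IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
+		{0},	/* sentinel terminating the table */
+	};
+	MODULE_DEVICE_TABLE(pci, example_card_ids);
+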
+Cc: stable@vger.kernel.org
+Signed-off-by: Matti Gottlieb <matti.gottlieb@intel.com>
+Reviewed-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/iwlwifi/pcie/drv.c | 35 +++++++++++++++++++++++++++++---
+ 1 file changed, 32 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
+@@ -270,54 +270,83 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_ca
+ #if IS_ENABLED(CONFIG_IWLMVM)
+ /* 7000 Series */
+ {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)},
++ {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
+- {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg)},
+- {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg)},
+- {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)},
++ {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
++ {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)},
++ {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)},
++ {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)},
+
+ /* 3160 Series */
+ {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
+ #endif /* CONFIG_IWLMVM */
+
+ {0}
--- /dev/null
+From bf378d341e4873ed928dc3c636252e6895a21f50 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 28 Oct 2013 13:55:29 +0100
+Subject: perf: Fix perf ring buffer memory ordering
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit bf378d341e4873ed928dc3c636252e6895a21f50 upstream.
+
+The PPC64 people noticed a missing memory barrier and crufty old
+comments in the perf ring buffer code. So update all the comments and
+add the missing barrier.
+
+When the architecture implements local_t using atomic_long_t there
+will be double barriers issued; but short of introducing more
+conditional barrier primitives this is the best we can do.
+
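+For reference, the userspace side of this pairing (barriers C and D in the
+comment added below) can be sketched as follows.  This is only an
+illustrative sketch, not part of the patch; the helper names are made up,
+and GCC's __sync_synchronize() full barrier stands in for smp_rmb()/smp_mb()
+(stronger than strictly required at C):
+
+	#include <linux/perf_event.h>	/* struct perf_event_mmap_page */
+	#include <stdint.h>
+
+	/* (C): order the data_head read before any reads of the data area */
+	static inline uint64_t read_head(volatile struct perf_event_mmap_page *pc)
+	{
+		uint64_t head = pc->data_head;
+		__sync_synchronize();
+		return head;
+	}
+
+	/* (D): complete all data reads before telling the kernel the space
+	 * is free for reuse by storing the new data_tail */
+	static inline void write_tail(volatile struct perf_event_mmap_page *pc,
+				      uint64_t tail)
+	{
+		__sync_synchronize();
+		pc->data_tail = tail;
+	}
+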
+Reported-by: Victor Kaplansky <victork@il.ibm.com>
+Tested-by: Victor Kaplansky <victork@il.ibm.com>
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+Cc: michael@ellerman.id.au
+Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Michael Neuling <mikey@neuling.org>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: anton@samba.org
+Cc: benh@kernel.crashing.org
+Link: http://lkml.kernel.org/r/20131025173749.GG19466@laptop.lan
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/uapi/linux/perf_event.h | 12 +++++++-----
+ kernel/events/ring_buffer.c | 31 +++++++++++++++++++++++++++----
+ 2 files changed, 34 insertions(+), 9 deletions(-)
+
+--- a/include/uapi/linux/perf_event.h
++++ b/include/uapi/linux/perf_event.h
+@@ -428,13 +428,15 @@ struct perf_event_mmap_page {
+ /*
+ * Control data for the mmap() data buffer.
+ *
+- * User-space reading the @data_head value should issue an rmb(), on
+- * SMP capable platforms, after reading this value -- see
+- * perf_event_wakeup().
++ * User-space reading the @data_head value should issue an smp_rmb(),
++ * after reading this value.
+ *
+ * When the mapping is PROT_WRITE the @data_tail value should be
+- * written by userspace to reflect the last read data. In this case
+- * the kernel will not over-write unread data.
++ * written by userspace to reflect the last read data, after issuing
++ * an smp_mb() to separate the data read from the ->data_tail store.
++ * In this case the kernel will not over-write unread data.
++ *
++ * See perf_output_put_handle() for the data ordering.
+ */
+ __u64 data_head; /* head in the data section */
+ __u64 data_tail; /* user-space written tail */
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -87,10 +87,31 @@ again:
+ goto out;
+
+ /*
+- * Publish the known good head. Rely on the full barrier implied
+- * by atomic_dec_and_test() order the rb->head read and this
+- * write.
++ * Since the mmap() consumer (userspace) can run on a different CPU:
++ *
++ * kernel user
++ *
++ * READ ->data_tail READ ->data_head
++ * smp_mb() (A) smp_rmb() (C)
++ * WRITE $data READ $data
++ * smp_wmb() (B) smp_mb() (D)
++ * STORE ->data_head WRITE ->data_tail
++ *
++ * Where A pairs with D, and B pairs with C.
++ *
++ * I don't think A needs to be a full barrier because we won't in fact
++ * write data until we see the store from userspace. So we simply don't
++ * issue the data WRITE until we observe it. Be conservative for now.
++ *
++ * OTOH, D needs to be a full barrier since it separates the data READ
++ * from the tail WRITE.
++ *
++ * For B a WMB is sufficient since it separates two WRITEs, and for C
++ * an RMB is sufficient since it separates two READs.
++ *
++ * See perf_output_begin().
+ */
++ smp_wmb();
+ rb->user_page->data_head = head;
+
+ /*
+@@ -154,9 +175,11 @@ int perf_output_begin(struct perf_output
+ * Userspace could choose to issue a mb() before updating the
+ * tail pointer. So that all reads will be completed before the
+ * write is issued.
++ *
++ * See perf_output_put_handle().
+ */
+ tail = ACCESS_ONCE(rb->user_page->data_tail);
+- smp_rmb();
++ smp_mb();
+ offset = head = local_read(&rb->head);
+ head += size;
+ if (unlikely(!perf_output_space(rb, tail, offset, head)))