git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.4-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 1 Aug 2012 22:27:36 +0000 (15:27 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 1 Aug 2012 22:27:36 +0000 (15:27 -0700)
added patches:
iommu-amd-add-missing-spin_lock-initialization.patch
iommu-amd-fix-hotplug-with-iommu-pt.patch
mwifiex-correction-in-mcs-index-check.patch
rtlwifi-rtl8192cu-change-buffer-allocation-for-synchronous-reads.patch
rtlwifi-rtl8192de-fix-phy-based-version-calculation.patch
s390-idle-fix-sequence-handling-vs-cpu-hotplug.patch
s390-mm-downgrade-page-table-after-fork-of-a-31-bit-process.patch
s390-mm-fix-fault-handling-for-page-table-walk-case.patch
stable-update-references-to-older-2.6-versions-for-3.x.patch
staging-zsmalloc-finish-conversion-to-a-separate-module.patch
udf-improve-table-length-check-to-avoid-possible-overflow.patch
workqueue-perform-cpu-down-operations-from-low-priority-cpu_notifier.patch

13 files changed:
queue-3.4/iommu-amd-add-missing-spin_lock-initialization.patch [new file with mode: 0644]
queue-3.4/iommu-amd-fix-hotplug-with-iommu-pt.patch [new file with mode: 0644]
queue-3.4/mwifiex-correction-in-mcs-index-check.patch [new file with mode: 0644]
queue-3.4/rtlwifi-rtl8192cu-change-buffer-allocation-for-synchronous-reads.patch [new file with mode: 0644]
queue-3.4/rtlwifi-rtl8192de-fix-phy-based-version-calculation.patch [new file with mode: 0644]
queue-3.4/s390-idle-fix-sequence-handling-vs-cpu-hotplug.patch [new file with mode: 0644]
queue-3.4/s390-mm-downgrade-page-table-after-fork-of-a-31-bit-process.patch [new file with mode: 0644]
queue-3.4/s390-mm-fix-fault-handling-for-page-table-walk-case.patch [new file with mode: 0644]
queue-3.4/series
queue-3.4/stable-update-references-to-older-2.6-versions-for-3.x.patch [new file with mode: 0644]
queue-3.4/staging-zsmalloc-finish-conversion-to-a-separate-module.patch [new file with mode: 0644]
queue-3.4/udf-improve-table-length-check-to-avoid-possible-overflow.patch [new file with mode: 0644]
queue-3.4/workqueue-perform-cpu-down-operations-from-low-priority-cpu_notifier.patch [new file with mode: 0644]

diff --git a/queue-3.4/iommu-amd-add-missing-spin_lock-initialization.patch b/queue-3.4/iommu-amd-add-missing-spin_lock-initialization.patch
new file mode 100644
index 0000000..bf54ec9
--- /dev/null
@@ -0,0 +1,31 @@
+From 2c13d47a1a7ee8808796016c617aef25fd1d1925 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <joerg.roedel@amd.com>
+Date: Thu, 19 Jul 2012 10:56:10 +0200
+Subject: iommu/amd: Add missing spin_lock initialization
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+commit 2c13d47a1a7ee8808796016c617aef25fd1d1925 upstream.
+
+Add missing spin_lock initialization in
+amd_iommu_bind_pasid() function and make lockdep happy
+again.
+
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/amd_iommu_v2.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/iommu/amd_iommu_v2.c
++++ b/drivers/iommu/amd_iommu_v2.c
+@@ -681,6 +681,8 @@ int amd_iommu_bind_pasid(struct pci_dev
+       atomic_set(&pasid_state->count, 1);
+       init_waitqueue_head(&pasid_state->wq);
++      spin_lock_init(&pasid_state->lock);
++
+       pasid_state->task         = task;
+       pasid_state->mm           = get_task_mm(task);
+       pasid_state->device_state = dev_state;
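
The fix above follows the general kernel rule that a spinlock embedded in a
dynamically allocated object must be initialized with spin_lock_init() before
its first use; an uninitialized lock is what makes lockdep complain. A minimal
sketch of the pattern, with invented names that are not part of the AMD IOMMU
code:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_state {                       /* hypothetical structure */
	spinlock_t lock;
	int counter;
};

static struct my_state *my_state_alloc(void)
{
	struct my_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return NULL;
	spin_lock_init(&s->lock);       /* must run before any spin_lock(&s->lock) */
	return s;
}
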
diff --git a/queue-3.4/iommu-amd-fix-hotplug-with-iommu-pt.patch b/queue-3.4/iommu-amd-fix-hotplug-with-iommu-pt.patch
new file mode 100644
index 0000000..2c48cb6
--- /dev/null
@@ -0,0 +1,52 @@
+From 2c9195e990297068d0f1f1bd8e2f1d09538009da Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <joerg.roedel@amd.com>
+Date: Thu, 19 Jul 2012 13:42:54 +0200
+Subject: iommu/amd: Fix hotplug with iommu=pt
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+commit 2c9195e990297068d0f1f1bd8e2f1d09538009da upstream.
+
+This did not work because devices are not put into the
+pt_domain. Fix this.
+
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/amd_iommu.c |   17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -2254,6 +2254,18 @@ static int device_change_notifier(struct
+               iommu_init_device(dev);
++              /*
++               * dev_data is still NULL and
++               * got initialized in iommu_init_device
++               */
++              dev_data = get_dev_data(dev);
++
++              if (iommu_pass_through || dev_data->iommu_v2) {
++                      dev_data->passthrough = true;
++                      attach_device(dev, pt_domain);
++                      break;
++              }
++
+               domain = domain_for_device(dev);
+               /* allocate a protection domain if a device is added */
+@@ -2271,10 +2283,7 @@ static int device_change_notifier(struct
+               dev_data = get_dev_data(dev);
+-              if (!dev_data->passthrough)
+-                      dev->archdata.dma_ops = &amd_iommu_dma_ops;
+-              else
+-                      dev->archdata.dma_ops = &nommu_dma_ops;
++              dev->archdata.dma_ops = &amd_iommu_dma_ops;
+               break;
+       case BUS_NOTIFY_DEL_DEVICE:
diff --git a/queue-3.4/mwifiex-correction-in-mcs-index-check.patch b/queue-3.4/mwifiex-correction-in-mcs-index-check.patch
new file mode 100644
index 0000000..a936510
--- /dev/null
@@ -0,0 +1,39 @@
+From fe020120cb863ba918c6d603345342a880272c4d Mon Sep 17 00:00:00 2001
+From: Amitkumar Karwar <akarwar@marvell.com>
+Date: Wed, 11 Jul 2012 18:12:57 -0700
+Subject: mwifiex: correction in mcs index check
+
+From: Amitkumar Karwar <akarwar@marvell.com>
+
+commit fe020120cb863ba918c6d603345342a880272c4d upstream.
+
+mwifiex driver supports 2x2 chips as well. Hence valid mcs values
+are 0 to 15. The check for mcs index is corrected in this patch.
+
+For example: if 40MHz is enabled and mcs index is 11, "iw link"
+command would show "tx bitrate: 108.0 MBit/s" without this patch.
+Now it shows "tx bitrate: 108.0 MBit/s MCS 11 40Mhz" with the patch.
+
+Signed-off-by: Amitkumar Karwar <akarwar@marvell.com>
+Signed-off-by: Bing Zhao <bzhao@marvell.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/mwifiex/cfg80211.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireless/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/mwifiex/cfg80211.c
+@@ -544,9 +544,9 @@ mwifiex_dump_station_info(struct mwifiex
+       /*
+        * Bit 0 in tx_htinfo indicates that current Tx rate is 11n rate. Valid
+-       * MCS index values for us are 0 to 7.
++       * MCS index values for us are 0 to 15.
+        */
+-      if ((priv->tx_htinfo & BIT(0)) && (priv->tx_rate < 8)) {
++      if ((priv->tx_htinfo & BIT(0)) && (priv->tx_rate < 16)) {
+               sinfo->txrate.mcs = priv->tx_rate;
+               sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
+               /* 40MHz rate */
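
For context, an HT MCS index encodes both the modulation/coding scheme and the
number of spatial streams: for the equal-modulation indices, 0-7 mean one
stream and 8-15 mean two, so a 2x2 chip can legitimately report values up to
15, which is what the relaxed check allows. A small illustrative helper (the
name is made up, it is not part of mwifiex):

/* Spatial streams implied by an equal-modulation HT MCS index:
 * 0-7 -> 1 stream, 8-15 -> 2 streams, 16-23 -> 3, 24-31 -> 4.
 */
static inline unsigned int ht_mcs_to_nss(unsigned int mcs)
{
	return mcs / 8 + 1;
}
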
diff --git a/queue-3.4/rtlwifi-rtl8192cu-change-buffer-allocation-for-synchronous-reads.patch b/queue-3.4/rtlwifi-rtl8192cu-change-buffer-allocation-for-synchronous-reads.patch
new file mode 100644
index 0000000..4887e6a
--- /dev/null
@@ -0,0 +1,72 @@
+From 3ce4d85b76010525adedcc2555fa164bf706a2f3 Mon Sep 17 00:00:00 2001
+From: Larry Finger <Larry.Finger@lwfinger.net>
+Date: Wed, 11 Jul 2012 14:37:28 -0500
+Subject: rtlwifi: rtl8192cu: Change buffer allocation for synchronous reads
+
+From: Larry Finger <Larry.Finger@lwfinger.net>
+
+commit 3ce4d85b76010525adedcc2555fa164bf706a2f3 upstream.
+
+In commit a7959c1, the USB part of rtlwifi was switched to convert
+_usb_read_sync() to using a preallocated buffer rather than one
+that has been acquired using kmalloc. Although this routine is named
+as though it were synchronous, there seem to be simultaneous users,
+and the selection of the index to the data buffer is not multi-user
+safe. This situation is addressed by adding a new spinlock. The routine
+cannot sleep, thus a mutex is not allowed.
+
+Signed-off-by: Larry Finger <Larry.Finger@lwfinger.net>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/rtlwifi/usb.c  |   14 +++++++++++---
+ drivers/net/wireless/rtlwifi/wifi.h |    1 +
+ 2 files changed, 12 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -131,15 +131,19 @@ static u32 _usb_read_sync(struct rtl_pri
+       u8 request;
+       u16 wvalue;
+       u16 index;
+-      __le32 *data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
++      __le32 *data;
++      unsigned long flags;
++      spin_lock_irqsave(&rtlpriv->locks.usb_lock, flags);
++      if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
++              rtlpriv->usb_data_index = 0;
++      data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
++      spin_unlock_irqrestore(&rtlpriv->locks.usb_lock, flags);
+       request = REALTEK_USB_VENQT_CMD_REQ;
+       index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
+       wvalue = (u16)addr;
+       _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
+-      if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
+-              rtlpriv->usb_data_index = 0;
+       return le32_to_cpu(*data);
+ }
+@@ -951,6 +955,10 @@ int __devinit rtl_usb_probe(struct usb_i
+                                   GFP_KERNEL);
+       if (!rtlpriv->usb_data)
+               return -ENOMEM;
++
++      /* this spin lock must be initialized early */
++      spin_lock_init(&rtlpriv->locks.usb_lock);
++
+       rtlpriv->usb_data_index = 0;
+       init_completion(&rtlpriv->firmware_loading_complete);
+       SET_IEEE80211_DEV(hw, &intf->dev);
+--- a/drivers/net/wireless/rtlwifi/wifi.h
++++ b/drivers/net/wireless/rtlwifi/wifi.h
+@@ -1555,6 +1555,7 @@ struct rtl_locks {
+       spinlock_t rf_ps_lock;
+       spinlock_t rf_lock;
+       spinlock_t waitq_lock;
++      spinlock_t usb_lock;
+       /*Dual mac*/
+       spinlock_t cck_and_rw_pagea_lock;
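
The change above is an instance of a common pattern: when slots from a small
preallocated buffer array are handed out in a context that cannot sleep, only
the shared index needs protection, so it is advanced under spin_lock_irqsave()
(a mutex is ruled out because the caller may not sleep) and the chosen slot is
then used outside the lock. A rough sketch with assumed names, not the rtlwifi
code itself:

#include <linux/spinlock.h>
#include <linux/types.h>

#define RING_SLOTS 32                   /* assumed ring size */

struct buf_ring {                       /* hypothetical ring of preallocated buffers */
	spinlock_t lock;
	unsigned int index;
	__le32 data[RING_SLOTS];
};

static __le32 *ring_claim_slot(struct buf_ring *r)
{
	unsigned long flags;
	__le32 *slot;

	spin_lock_irqsave(&r->lock, flags);
	if (++r->index >= RING_SLOTS)
		r->index = 0;
	slot = &r->data[r->index];
	spin_unlock_irqrestore(&r->lock, flags);

	return slot;                    /* each concurrent caller gets its own slot */
}
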
diff --git a/queue-3.4/rtlwifi-rtl8192de-fix-phy-based-version-calculation.patch b/queue-3.4/rtlwifi-rtl8192de-fix-phy-based-version-calculation.patch
new file mode 100644
index 0000000..a3427c3
--- /dev/null
@@ -0,0 +1,59 @@
+From f1b00f4dab29b57bdf1bc03ef12020b280fd2a72 Mon Sep 17 00:00:00 2001
+From: Forest Bond <forest.bond@rapidrollout.com>
+Date: Fri, 13 Jul 2012 12:26:06 -0400
+Subject: rtlwifi: rtl8192de: Fix phy-based version calculation
+
+From: Forest Bond <forest.bond@rapidrollout.com>
+
+commit f1b00f4dab29b57bdf1bc03ef12020b280fd2a72 upstream.
+
+Commit d83579e2a50ac68389e6b4c58b845c702cf37516 incorporated some
+changes from the vendor driver that made it newly important that the
+calculated hardware version correctly include the CHIP_92D bit, as all
+of the IS_92D_* macros were changed to depend on it.  However, this bit
+was being unset for dual-mac, dual-phy devices.  The vendor driver
+behavior was modified to not do this, but unfortunately this change was
+not picked up along with the others.  This caused scanning in the 2.4GHz
+band to be broken, and possibly other bugs as well.
+
+This patch brings the version calculation logic in parity with the
+vendor driver in this regard, and in doing so fixes the regression.
+However, the version calculation code in general continues to be largely
+incoherent and messy, and needs to be cleaned up.
+
+Signed-off-by: Forest Bond <forest.bond@rapidrollout.com>
+Signed-off-by: Larry Finger <Larry.Finger@lwfinger.net>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/rtlwifi/rtl8192de/phy.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+@@ -3344,21 +3344,21 @@ void rtl92d_phy_config_macphymode_info(s
+       switch (rtlhal->macphymode) {
+       case DUALMAC_SINGLEPHY:
+               rtlphy->rf_type = RF_2T2R;
+-              rtlhal->version |= CHIP_92D_SINGLEPHY;
++              rtlhal->version |= RF_TYPE_2T2R;
+               rtlhal->bandset = BAND_ON_BOTH;
+               rtlhal->current_bandtype = BAND_ON_2_4G;
+               break;
+       case SINGLEMAC_SINGLEPHY:
+               rtlphy->rf_type = RF_2T2R;
+-              rtlhal->version |= CHIP_92D_SINGLEPHY;
++              rtlhal->version |= RF_TYPE_2T2R;
+               rtlhal->bandset = BAND_ON_BOTH;
+               rtlhal->current_bandtype = BAND_ON_2_4G;
+               break;
+       case DUALMAC_DUALPHY:
+               rtlphy->rf_type = RF_1T1R;
+-              rtlhal->version &= (~CHIP_92D_SINGLEPHY);
++              rtlhal->version &= RF_TYPE_1T1R;
+               /* Now we let MAC0 run on 5G band. */
+               if (rtlhal->interfaceindex == 0) {
+                       rtlhal->bandset = BAND_ON_5G;
diff --git a/queue-3.4/s390-idle-fix-sequence-handling-vs-cpu-hotplug.patch b/queue-3.4/s390-idle-fix-sequence-handling-vs-cpu-hotplug.patch
new file mode 100644
index 0000000..9861395
--- /dev/null
@@ -0,0 +1,75 @@
+From 0008204ffe85d23382d6fd0f971f3f0fbe70bae2 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Fri, 13 Jul 2012 15:45:33 +0200
+Subject: s390/idle: fix sequence handling vs cpu hotplug
+
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+
+commit 0008204ffe85d23382d6fd0f971f3f0fbe70bae2 upstream.
+
+The s390 idle accounting code uses a sequence counter which gets used
+when the per cpu idle statistics get updated and read.
+
+One assumption on read access is that only when the sequence counter is
+even and did not change while reading all values the result is valid.
+On cpu hotplug however the per cpu data structure gets initialized via
+a cpu hotplug notifier on CPU_ONLINE.
+CPU_ONLINE however is too late, since the onlined cpu is already running
+and might access the per cpu data. Worst case is that the data structure
+gets initialized while an idle thread is updating its idle statistics.
+This will result in an uneven sequence counter after an update.
+
+As a result user space tools like top, which access /proc/stat in order
+to get idle stats, will busy loop waiting for the sequence counter to
+become even again, which will never happen until the queried cpu will
+update its idle statistics again. And even then the sequence counter
+will only have an even value for a couple of cpu cycles.
+
+Fix this by moving the initialization of the per cpu idle statistics
+to cpu_init(). I prefer that solution in favor of changing the
+notifier to CPU_UP_PREPARE, which would be a different solution to
+the problem.
+
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/processor.c |    2 ++
+ arch/s390/kernel/smp.c       |    3 ---
+ 2 files changed, 2 insertions(+), 3 deletions(-)
+
+--- a/arch/s390/kernel/processor.c
++++ b/arch/s390/kernel/processor.c
+@@ -26,12 +26,14 @@ static DEFINE_PER_CPU(struct cpuid, cpu_
+ void __cpuinit cpu_init(void)
+ {
+       struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
++      struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
+       get_cpu_id(id);
+       atomic_inc(&init_mm.mm_count);
+       current->active_mm = &init_mm;
+       BUG_ON(current->mm);
+       enter_lazy_tlb(&init_mm, current);
++      memset(idle, 0, sizeof(*idle));
+ }
+ /*
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -1034,14 +1034,11 @@ static int __cpuinit smp_cpu_notify(stru
+       unsigned int cpu = (unsigned int)(long)hcpu;
+       struct cpu *c = &pcpu_devices[cpu].cpu;
+       struct device *s = &c->dev;
+-      struct s390_idle_data *idle;
+       int err = 0;
+       switch (action) {
+       case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
+-              idle = &per_cpu(s390_idle, cpu);
+-              memset(idle, 0, sizeof(struct s390_idle_data));
+               err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
+               break;
+       case CPU_DEAD:
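
The "even and did not change" rule described in the message is the usual
seqcount read protocol: a writer makes the counter odd before updating and
even again afterwards, and a reader retries until it observes the same even
value before and after its reads. Zeroing the structure while a writer is in
the middle of an update leaves the counter permanently odd, which causes the
busy loop described above. A generic read-side sketch (field names are
invented, this is not the s390 idle code):

#include <linux/seqlock.h>
#include <linux/types.h>

struct idle_stats {                     /* hypothetical per-cpu statistics */
	seqcount_t seqcount;
	u64 idle_time;
};

static u64 read_idle_time(struct idle_stats *st)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqcount_begin(&st->seqcount);  /* waits for an even value */
		val = st->idle_time;
	} while (read_seqcount_retry(&st->seqcount, seq));

	return val;
}
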
diff --git a/queue-3.4/s390-mm-downgrade-page-table-after-fork-of-a-31-bit-process.patch b/queue-3.4/s390-mm-downgrade-page-table-after-fork-of-a-31-bit-process.patch
new file mode 100644
index 0000000..3942cb1
--- /dev/null
@@ -0,0 +1,130 @@
+From 0f6f281b731d20bfe75c13f85d33f3f05b440222 Mon Sep 17 00:00:00 2001
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Thu, 26 Jul 2012 08:53:06 +0200
+Subject: s390/mm: downgrade page table after fork of a 31 bit process
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+commit 0f6f281b731d20bfe75c13f85d33f3f05b440222 upstream.
+
+The downgrade of the 4 level page table created by init_new_context is
+currently done only in start_thread31. If a 31 bit process forks the
+new mm uses a 4 level page table, including the task size of 2<<42
+that goes along with it. This is incorrect as now a 31 bit process
+can map memory beyond 2GB. Define arch_dup_mmap to do the downgrade
+after fork.
+
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/include/asm/mmu_context.h |   14 +++++++++++++-
+ arch/s390/include/asm/processor.h   |    2 ++
+ arch/s390/mm/mmap.c                 |   12 ++++++++++--
+ arch/s390/mm/pgtable.c              |    5 -----
+ 4 files changed, 25 insertions(+), 8 deletions(-)
+
+--- a/arch/s390/include/asm/mmu_context.h
++++ b/arch/s390/include/asm/mmu_context.h
+@@ -13,7 +13,6 @@
+ #include <asm/uaccess.h>
+ #include <asm/tlbflush.h>
+ #include <asm/ctl_reg.h>
+-#include <asm-generic/mm_hooks.h>
+ static inline int init_new_context(struct task_struct *tsk,
+                                  struct mm_struct *mm)
+@@ -93,4 +92,17 @@ static inline void activate_mm(struct mm
+         switch_mm(prev, next, current);
+ }
++static inline void arch_dup_mmap(struct mm_struct *oldmm,
++                               struct mm_struct *mm)
++{
++#ifdef CONFIG_64BIT
++      if (oldmm->context.asce_limit < mm->context.asce_limit)
++              crst_table_downgrade(mm, oldmm->context.asce_limit);
++#endif
++}
++
++static inline void arch_exit_mmap(struct mm_struct *mm)
++{
++}
++
+ #endif /* __S390_MMU_CONTEXT_H */
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -129,7 +129,9 @@ struct stack_frame {
+       regs->psw.mask  = psw_user_bits | PSW_MASK_BA;                  \
+       regs->psw.addr  = new_psw | PSW_ADDR_AMODE;                     \
+       regs->gprs[15]  = new_stackp;                                   \
++      __tlb_flush_mm(current->mm);                                    \
+       crst_table_downgrade(current->mm, 1UL << 31);                   \
++      update_mm(current->mm, current);                                \
+ } while (0)
+ /* Forward declaration, a strange C thing */
+--- a/arch/s390/mm/mmap.c
++++ b/arch/s390/mm/mmap.c
+@@ -105,9 +105,15 @@ void arch_pick_mmap_layout(struct mm_str
+ int s390_mmap_check(unsigned long addr, unsigned long len)
+ {
++      int rc;
++
+       if (!is_compat_task() &&
+-          len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
+-              return crst_table_upgrade(current->mm, 1UL << 53);
++          len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) {
++              rc = crst_table_upgrade(current->mm, 1UL << 53);
++              if (rc)
++                      return rc;
++              update_mm(current->mm, current);
++      }
+       return 0;
+ }
+@@ -127,6 +133,7 @@ s390_get_unmapped_area(struct file *filp
+               rc = crst_table_upgrade(mm, 1UL << 53);
+               if (rc)
+                       return (unsigned long) rc;
++              update_mm(mm, current);
+               area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
+       }
+       return area;
+@@ -149,6 +156,7 @@ s390_get_unmapped_area_topdown(struct fi
+               rc = crst_table_upgrade(mm, 1UL << 53);
+               if (rc)
+                       return (unsigned long) rc;
++              update_mm(mm, current);
+               area = arch_get_unmapped_area_topdown(filp, addr, len,
+                                                     pgoff, flags);
+       }
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -85,7 +85,6 @@ repeat:
+               crst_table_free(mm, table);
+       if (mm->context.asce_limit < limit)
+               goto repeat;
+-      update_mm(mm, current);
+       return 0;
+ }
+@@ -93,9 +92,6 @@ void crst_table_downgrade(struct mm_stru
+ {
+       pgd_t *pgd;
+-      if (mm->context.asce_limit <= limit)
+-              return;
+-      __tlb_flush_mm(mm);
+       while (mm->context.asce_limit > limit) {
+               pgd = mm->pgd;
+               switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
+@@ -118,7 +114,6 @@ void crst_table_downgrade(struct mm_stru
+               mm->task_size = mm->context.asce_limit;
+               crst_table_free(mm, (unsigned long *) pgd);
+       }
+-      update_mm(mm, current);
+ }
+ #endif
diff --git a/queue-3.4/s390-mm-fix-fault-handling-for-page-table-walk-case.patch b/queue-3.4/s390-mm-fix-fault-handling-for-page-table-walk-case.patch
new file mode 100644
index 0000000..4e4a5e5
--- /dev/null
@@ -0,0 +1,69 @@
+From 008c2e8f247f0a8db1e8e26139da12f3a3abcda0 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Fri, 27 Jul 2012 09:45:39 +0200
+Subject: s390/mm: fix fault handling for page table walk case
+
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+
+commit 008c2e8f247f0a8db1e8e26139da12f3a3abcda0 upstream.
+
+Make sure the kernel does not incorrectly create a SIGBUS signal during
+user space accesses:
+
+For user space accesses in the switched addressing mode case the kernel
+may walk page tables and access user address space via the kernel
+mapping. If a page table entry is invalid the function __handle_fault()
+gets called in order to emulate a page fault and trigger all the usual
+actions like paging in a missing page etc. by calling handle_mm_fault().
+
+If handle_mm_fault() returns with an error fixup handling is necessary.
+For the switched addressing mode case all errors need to be mapped to
+-EFAULT, so that the calling uaccess function can return -EFAULT to
+user space.
+
+Unfortunately the __handle_fault() incorrectly calls do_sigbus() if
+VM_FAULT_SIGBUS is set. This however should only happen if a page fault
+was triggered by a user space instruction. For kernel mode uaccesses
+the correct action is to only return -EFAULT.
+So user space may incorrectly see SIGBUS signals because of this bug.
+
+For current machines this would only be possible for the switched
+addressing mode case in conjunction with futex operations.
+
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/mm/fault.c |   13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -443,6 +443,7 @@ int __handle_fault(unsigned long uaddr,
+       struct pt_regs regs;
+       int access, fault;
++      /* Emulate a uaccess fault from kernel mode. */
+       regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
+       if (!irqs_disabled())
+               regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
+@@ -452,12 +453,12 @@ int __handle_fault(unsigned long uaddr,
+       regs.int_parm_long = (uaddr & PAGE_MASK) | 2;
+       access = write ? VM_WRITE : VM_READ;
+       fault = do_exception(&regs, access);
+-      if (unlikely(fault)) {
+-              if (fault & VM_FAULT_OOM)
+-                      return -EFAULT;
+-              else if (fault & VM_FAULT_SIGBUS)
+-                      do_sigbus(&regs);
+-      }
++      /*
++       * Since the fault happened in kernel mode while performing a uaccess
++       * all we need to do now is emulating a fixup in case "fault" is not
++       * zero.
++       * For the calling uaccess functions this results always in -EFAULT.
++       */
+       return fault ? -EFAULT : 0;
+ }
diff --git a/queue-3.4/series b/queue-3.4/series
index b91785d3b32da222edd6bcade57db149c882f41d..9783cf1e0744a37deeaebe8b74b1b789b77723da 100644
--- a/queue-3.4/series
+++ b/queue-3.4/series
@@ -33,3 +33,15 @@ tpm-chip-disabled-state-erronously-being-reported-as-error.patch
 tun-fix-a-crash-bug-and-a-memory-leak.patch
 mac80211-fail-authentication-when-ap-denied-authentication.patch
 iwlwifi-fix-debug-print-in-iwl_sta_calc_ht_flags.patch
+rtlwifi-rtl8192cu-change-buffer-allocation-for-synchronous-reads.patch
+rtlwifi-rtl8192de-fix-phy-based-version-calculation.patch
+mwifiex-correction-in-mcs-index-check.patch
+s390-idle-fix-sequence-handling-vs-cpu-hotplug.patch
+s390-mm-downgrade-page-table-after-fork-of-a-31-bit-process.patch
+s390-mm-fix-fault-handling-for-page-table-walk-case.patch
+iommu-amd-add-missing-spin_lock-initialization.patch
+iommu-amd-fix-hotplug-with-iommu-pt.patch
+udf-improve-table-length-check-to-avoid-possible-overflow.patch
+stable-update-references-to-older-2.6-versions-for-3.x.patch
+staging-zsmalloc-finish-conversion-to-a-separate-module.patch
+workqueue-perform-cpu-down-operations-from-low-priority-cpu_notifier.patch
diff --git a/queue-3.4/stable-update-references-to-older-2.6-versions-for-3.x.patch b/queue-3.4/stable-update-references-to-older-2.6-versions-for-3.x.patch
new file mode 100644
index 0000000..60f7d08
--- /dev/null
@@ -0,0 +1,58 @@
+From 2584f5212d97b664be250ad5700a2d0fee31a10d Mon Sep 17 00:00:00 2001
+From: Paul Gortmaker <paul.gortmaker@windriver.com>
+Date: Tue, 5 Jun 2012 11:15:50 -0400
+Subject: stable: update references to older 2.6 versions for 3.x
+
+From: Paul Gortmaker <paul.gortmaker@windriver.com>
+
+commit 2584f5212d97b664be250ad5700a2d0fee31a10d upstream.
+
+Also add information on where the respective trees are.
+
+Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
+Acked-by: Rob Landley <rob@landley.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/stable_kernel_rules.txt |   19 ++++++++++++++-----
+ 1 file changed, 14 insertions(+), 5 deletions(-)
+
+--- a/Documentation/stable_kernel_rules.txt
++++ b/Documentation/stable_kernel_rules.txt
+@@ -1,4 +1,4 @@
+-Everything you ever wanted to know about Linux 2.6 -stable releases.
++Everything you ever wanted to know about Linux -stable releases.
+ Rules on what kind of patches are accepted, and which ones are not, into the
+ "-stable" tree:
+@@ -42,10 +42,10 @@ Procedure for submitting patches to the
+    cherry-picked than this can be specified in the following format in
+    the sign-off area:
+-     Cc: <stable@vger.kernel.org> # .32.x: a1f84a3: sched: Check for idle
+-     Cc: <stable@vger.kernel.org> # .32.x: 1b9508f: sched: Rate-limit newidle
+-     Cc: <stable@vger.kernel.org> # .32.x: fd21073: sched: Fix affinity logic
+-     Cc: <stable@vger.kernel.org> # .32.x
++     Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
++     Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
++     Cc: <stable@vger.kernel.org> # 3.3.x: fd21073: sched: Fix affinity logic
++     Cc: <stable@vger.kernel.org> # 3.3.x
+     Signed-off-by: Ingo Molnar <mingo@elte.hu>
+    The tag sequence has the meaning of:
+@@ -79,6 +79,15 @@ Review cycle:
+    security kernel team, and not go through the normal review cycle.
+    Contact the kernel security team for more details on this procedure.
++Trees:
++
++ - The queues of patches, for both completed versions and in progress
++   versions can be found at:
++      http://git.kernel.org/?p=linux/kernel/git/stable/stable-queue.git
++ - The finalized and tagged releases of all stable kernels can be found
++   in separate branches per version at:
++      http://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git
++
+ Review committee:
diff --git a/queue-3.4/staging-zsmalloc-finish-conversion-to-a-separate-module.patch b/queue-3.4/staging-zsmalloc-finish-conversion-to-a-separate-module.patch
new file mode 100644
index 0000000..8f89d22
--- /dev/null
@@ -0,0 +1,91 @@
+From 069f101fa463351f528773d73b74e9b606b3f66a Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <ben@decadent.org.uk>
+Date: Wed, 20 Jun 2012 02:31:11 +0100
+Subject: staging: zsmalloc: Finish conversion to a separate module
+
+From: Ben Hutchings <ben@decadent.org.uk>
+
+commit 069f101fa463351f528773d73b74e9b606b3f66a upstream.
+
+ZSMALLOC is tristate, but the code has no MODULE_LICENSE and since it
+depends on GPL-only symbols it cannot be loaded as a module.  This in
+turn breaks zram which now depends on it.  I assume it's meant to be
+Dual BSD/GPL like the other z-stuff.
+
+There is also no module_exit, which will make it impossible to unload.
+Add the appropriate module_init and module_exit declarations suggested
+by comments.
+
+Reported-by: Christian Ohm <chr.ohm@gmx.net>
+References: http://bugs.debian.org/677273
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Reviewed-by: Jonathan Nieder <jrnieder@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/zsmalloc/zsmalloc-main.c |   33 ++++++-------------------------
+ 1 file changed, 7 insertions(+), 26 deletions(-)
+
+--- a/drivers/staging/zsmalloc/zsmalloc-main.c
++++ b/drivers/staging/zsmalloc/zsmalloc-main.c
+@@ -426,12 +426,6 @@ static struct page *find_get_zspage(stru
+ }
+-/*
+- * If this becomes a separate module, register zs_init() with
+- * module_init(), zs_exit with module_exit(), and remove zs_initialized
+-*/
+-static int zs_initialized;
+-
+ static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
+                               void *pcpu)
+ {
+@@ -490,7 +484,7 @@ fail:
+ struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
+ {
+-      int i, error, ovhd_size;
++      int i, ovhd_size;
+       struct zs_pool *pool;
+       if (!name)
+@@ -517,28 +511,9 @@ struct zs_pool *zs_create_pool(const cha
+       }
+-      /*
+-       * If this becomes a separate module, register zs_init with
+-       * module_init, and remove this block
+-      */
+-      if (!zs_initialized) {
+-              error = zs_init();
+-              if (error)
+-                      goto cleanup;
+-              zs_initialized = 1;
+-      }
+-
+       pool->flags = flags;
+       pool->name = name;
+-      error = 0; /* Success */
+-
+-cleanup:
+-      if (error) {
+-              zs_destroy_pool(pool);
+-              pool = NULL;
+-      }
+-
+       return pool;
+ }
+ EXPORT_SYMBOL_GPL(zs_create_pool);
+@@ -749,3 +724,9 @@ u64 zs_get_total_size_bytes(struct zs_po
+       return npages << PAGE_SHIFT;
+ }
+ EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);
++
++module_init(zs_init);
++module_exit(zs_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
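
The pieces added at the end of the patch are the standard skeleton that every
loadable module needs: a module_init() hook, a module_exit() hook so the
module can be unloaded, and a MODULE_LICENSE() so that GPL-only symbols can be
resolved at load time. A minimal illustrative skeleton with placeholder names,
not the zsmalloc functions:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int __init example_init(void)
{
	pr_info("example: loaded\n");
	return 0;                       /* a non-zero return fails the load */
}

static void __exit example_exit(void)
{
	pr_info("example: unloaded\n");
}

module_init(example_init);
module_exit(example_exit);

MODULE_LICENSE("Dual BSD/GPL");         /* without a license tag, GPL-only symbols are refused */
MODULE_AUTHOR("Example Author");
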
diff --git a/queue-3.4/udf-improve-table-length-check-to-avoid-possible-overflow.patch b/queue-3.4/udf-improve-table-length-check-to-avoid-possible-overflow.patch
new file mode 100644
index 0000000..0fac681
--- /dev/null
@@ -0,0 +1,33 @@
+From 57b9655d01ef057a523e810d29c37ac09b80eead Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Tue, 10 Jul 2012 17:58:04 +0200
+Subject: udf: Improve table length check to avoid possible overflow
+
+From: Jan Kara <jack@suse.cz>
+
+commit 57b9655d01ef057a523e810d29c37ac09b80eead upstream.
+
+When a partition table length is corrupted to be close to 1 << 32, the
+check for its length may overflow on 32-bit systems and we will think
+the length is valid. Later on the kernel can crash trying to read beyond
+end of buffer. Fix the check to avoid possible overflow.
+
+Reported-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/udf/super.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1283,7 +1283,7 @@ static int udf_load_logicalvol(struct su
+       BUG_ON(ident != TAG_IDENT_LVD);
+       lvd = (struct logicalVolDesc *)bh->b_data;
+       table_len = le32_to_cpu(lvd->mapTableLength);
+-      if (sizeof(*lvd) + table_len > sb->s_blocksize) {
++      if (table_len > sb->s_blocksize - sizeof(*lvd)) {
+               udf_err(sb, "error loading logical volume descriptor: "
+                       "Partition table too long (%u > %lu)\n", table_len,
+                       sb->s_blocksize - sizeof(*lvd));
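
The rewritten check matters because sizeof(*lvd) + table_len is evaluated in
32-bit unsigned arithmetic on 32-bit systems, so a corrupted table_len close
to 4G wraps the sum around to a small number and the bogus length passes the
old test. Moving the constant to the other side, table_len > sb->s_blocksize -
sizeof(*lvd), cannot wrap because the descriptor header is always smaller than
the block size. A small userspace demonstration with assumed stand-in values
(the real sizeof(*lvd) and block size may differ):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t table_len = 0xfffffff0u;  /* corrupted on-disk length */
	uint32_t blocksize = 4096;         /* stand-in for sb->s_blocksize */
	uint32_t lvd_size  = 440;          /* stand-in for sizeof(*lvd) */

	/* old check: 440 + 0xfffffff0 wraps to 424, and 424 > 4096 is false,
	 * so the corrupted length is accepted */
	printf("old check rejects: %d\n", lvd_size + table_len > blocksize);

	/* new check: 0xfffffff0 > 4096 - 440 = 3656 is true, so it is rejected */
	printf("new check rejects: %d\n", table_len > blocksize - lvd_size);

	return 0;
}
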
diff --git a/queue-3.4/workqueue-perform-cpu-down-operations-from-low-priority-cpu_notifier.patch b/queue-3.4/workqueue-perform-cpu-down-operations-from-low-priority-cpu_notifier.patch
new file mode 100644 (file)
index 0000000..e22fb7f
--- /dev/null
@@ -0,0 +1,115 @@
+From 6575820221f7a4dd6eadecf7bf83cdd154335eda Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Tue, 17 Jul 2012 12:39:26 -0700
+Subject: workqueue: perform cpu down operations from low priority cpu_notifier()
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 6575820221f7a4dd6eadecf7bf83cdd154335eda upstream.
+
+Currently, all workqueue cpu hotplug operations run off
+CPU_PRI_WORKQUEUE which is higher than normal notifiers.  This is to
+ensure that workqueue is up and running while bringing up a CPU before
+other notifiers try to use workqueue on the CPU.
+
+Per-cpu workqueues are supposed to remain working and bound to the CPU
+for normal CPU_DOWN_PREPARE notifiers.  This holds mostly true even
+with workqueue offlining running with higher priority because
+workqueue CPU_DOWN_PREPARE only creates a bound trustee thread which
+runs the per-cpu workqueue without concurrency management without
+explicitly detaching the existing workers.
+
+However, if the trustee needs to create new workers, it creates
+unbound workers which may wander off to other CPUs while
+CPU_DOWN_PREPARE notifiers are in progress.  Furthermore, if the CPU
+down is cancelled, the per-CPU workqueue may end up with workers which
+aren't bound to the CPU.
+
+While reliably reproducible with a convoluted artificial test-case
+involving scheduling and flushing CPU burning work items from CPU down
+notifiers, this isn't very likely to happen in the wild, and, even
+when it happens, the effects are likely to be hidden by the following
+successful CPU down.
+
+Fix it by using different priorities for up and down notifiers - high
+priority for up operations and low priority for down operations.
+
+Workqueue cpu hotplug operations will soon go through further cleanup.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Acked-by: "Rafael J. Wysocki" <rjw@sisk.pl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/cpu.h |    5 +++--
+ kernel/workqueue.c  |   38 +++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 40 insertions(+), 3 deletions(-)
+
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -75,8 +75,9 @@ enum {
+       /* migration should happen before other stuff but after perf */
+       CPU_PRI_PERF            = 20,
+       CPU_PRI_MIGRATION       = 10,
+-      /* prepare workqueues for other notifiers */
+-      CPU_PRI_WORKQUEUE       = 5,
++      /* bring up workqueues before normal notifiers and down after */
++      CPU_PRI_WORKQUEUE_UP    = 5,
++      CPU_PRI_WORKQUEUE_DOWN  = -5,
+ };
+ #define CPU_ONLINE            0x0002 /* CPU (unsigned)v is up */
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3582,6 +3582,41 @@ static int __devinit workqueue_cpu_callb
+       return notifier_from_errno(0);
+ }
++/*
++ * Workqueues should be brought up before normal priority CPU notifiers.
++ * This will be registered high priority CPU notifier.
++ */
++static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
++                                             unsigned long action,
++                                             void *hcpu)
++{
++      switch (action & ~CPU_TASKS_FROZEN) {
++      case CPU_UP_PREPARE:
++      case CPU_UP_CANCELED:
++      case CPU_DOWN_FAILED:
++      case CPU_ONLINE:
++              return workqueue_cpu_callback(nfb, action, hcpu);
++      }
++      return NOTIFY_OK;
++}
++
++/*
++ * Workqueues should be brought down after normal priority CPU notifiers.
++ * This will be registered as low priority CPU notifier.
++ */
++static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
++                                               unsigned long action,
++                                               void *hcpu)
++{
++      switch (action & ~CPU_TASKS_FROZEN) {
++      case CPU_DOWN_PREPARE:
++      case CPU_DYING:
++      case CPU_POST_DEAD:
++              return workqueue_cpu_callback(nfb, action, hcpu);
++      }
++      return NOTIFY_OK;
++}
++
+ #ifdef CONFIG_SMP
+ struct work_for_cpu {
+@@ -3775,7 +3810,8 @@ static int __init init_workqueues(void)
+       unsigned int cpu;
+       int i;
+-      cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
++      cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
++      cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
+       /* initialize gcwqs */
+       for_each_gcwq_cpu(cpu) {