--- /dev/null
+From 292827cb164ad00cc7689a21283b1261c0b6daed Mon Sep 17 00:00:00 2001
+From: Allen Kay <allen.m.kay@intel.com>
+Date: Fri, 14 Oct 2011 12:31:54 -0700
+Subject: intel-iommu: fix return value of iommu_unmap() API
+
+From: Allen Kay <allen.m.kay@intel.com>
+
+commit 292827cb164ad00cc7689a21283b1261c0b6daed upstream.
+
+iommu_unmap() API expects IOMMU drivers to return the actual page order
+of the address being unmapped. Previous code was just returning page
+order passed in from the caller. This patch fixes this problem.
+
+Signed-off-by: Allen Kay <allen.m.kay@intel.com>
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: Youquan Song <youquan.song@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/pci/intel-iommu.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/drivers/pci/intel-iommu.c
++++ b/drivers/pci/intel-iommu.c
+@@ -817,13 +817,14 @@ static struct dma_pte *dma_pfn_level_pte
+ }
+
+ /* clear last level pte, a tlb flush should be followed */
+-static void dma_pte_clear_range(struct dmar_domain *domain,
++static int dma_pte_clear_range(struct dmar_domain *domain,
+ unsigned long start_pfn,
+ unsigned long last_pfn)
+ {
+ int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+ unsigned int large_page = 1;
+ struct dma_pte *first_pte, *pte;
++ int order;
+
+ BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
+ BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+@@ -847,6 +848,9 @@ static void dma_pte_clear_range(struct d
+ (void *)pte - (void *)first_pte);
+
+ } while (start_pfn && start_pfn <= last_pfn);
++
++ order = (large_page - 1) * 9;
++ return order;
+ }
+
+ /* free page table pages. last level pte should already be cleared */
+@@ -3865,14 +3869,15 @@ static int intel_iommu_unmap(struct iomm
+ {
+ struct dmar_domain *dmar_domain = domain->priv;
+ size_t size = PAGE_SIZE << gfp_order;
++ int order;
+
+- dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
++ order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
+ (iova + size - 1) >> VTD_PAGE_SHIFT);
+
+ if (dmar_domain->max_addr == iova + size)
+ dmar_domain->max_addr = iova;
+
+- return gfp_order;
++ return order;
+ }
+
+ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
--- /dev/null
+From 4399c8bf2b9093696fa8160d79712e7346989c46 Mon Sep 17 00:00:00 2001
+From: Allen Kay <allen.m.kay@intel.com>
+Date: Fri, 14 Oct 2011 12:32:46 -0700
+Subject: intel-iommu: fix superpage support in pfn_to_dma_pte()
+
+From: Allen Kay <allen.m.kay@intel.com>
+
+commit 4399c8bf2b9093696fa8160d79712e7346989c46 upstream.
+
+If target_level == 0, current code breaks out of the while-loop if
+SUPERPAGE bit is set. We should also break out if PTE is not present.
+If we don't do this, KVM calls to iommu_iova_to_phys() will cause
+pfn_to_dma_pte() to create mapping for 4KiB pages.
+
+Signed-off-by: Allen Kay <allen.m.kay@intel.com>
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: Youquan Song <youquan.song@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/pci/intel-iommu.c | 17 ++++++++---------
+ 1 file changed, 8 insertions(+), 9 deletions(-)
+
+--- a/drivers/pci/intel-iommu.c
++++ b/drivers/pci/intel-iommu.c
+@@ -307,6 +307,11 @@ static inline bool dma_pte_present(struc
+ return (pte->val & 3) != 0;
+ }
+
++static inline bool dma_pte_superpage(struct dma_pte *pte)
++{
++ return (pte->val & (1 << 7));
++}
++
+ static inline int first_pte_in_page(struct dma_pte *pte)
+ {
+ return !((unsigned long)pte & ~VTD_PAGE_MASK);
+@@ -732,29 +737,23 @@ out:
+ }
+
+ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
+- unsigned long pfn, int large_level)
++ unsigned long pfn, int target_level)
+ {
+ int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+ struct dma_pte *parent, *pte = NULL;
+ int level = agaw_to_level(domain->agaw);
+- int offset, target_level;
++ int offset;
+
+ BUG_ON(!domain->pgd);
+ BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
+ parent = domain->pgd;
+
+- /* Search pte */
+- if (!large_level)
+- target_level = 1;
+- else
+- target_level = large_level;
+-
+ while (level > 0) {
+ void *tmp_page;
+
+ offset = pfn_level_offset(pfn, level);
+ pte = &parent[offset];
+- if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE))
++ if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
+ break;
+ if (level == target_level)
+ break;
--- /dev/null
+From 8140a95d228efbcd64d84150e794761a32463947 Mon Sep 17 00:00:00 2001
+From: Allen Kay <allen.m.kay@intel.com>
+Date: Fri, 14 Oct 2011 12:32:17 -0700
+Subject: intel-iommu: set iommu_superpage on VM domains to lowest common denominator
+
+From: Allen Kay <allen.m.kay@intel.com>
+
+commit 8140a95d228efbcd64d84150e794761a32463947 upstream.
+
+set dmar->iommu_superpage field to the smallest common denominator
+of super page sizes supported by all active VT-d engines. Initialize
+this field in intel_iommu_domain_init() API so intel_iommu_map() API
+will be able to use iommu_superpage field to determine the appropriate
+super page size to use.
+
+Signed-off-by: Allen Kay <allen.m.kay@intel.com>
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: Youquan Song <youquan.song@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/pci/intel-iommu.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/drivers/pci/intel-iommu.c
++++ b/drivers/pci/intel-iommu.c
+@@ -578,17 +578,18 @@ static void domain_update_iommu_snooping
+
+ static void domain_update_iommu_superpage(struct dmar_domain *domain)
+ {
+- int i, mask = 0xf;
++ struct dmar_drhd_unit *drhd;
++ struct intel_iommu *iommu = NULL;
++ int mask = 0xf;
+
+ if (!intel_iommu_superpage) {
+ domain->iommu_superpage = 0;
+ return;
+ }
+
+- domain->iommu_superpage = 4; /* 1TiB */
+-
+- for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
+- mask |= cap_super_page_val(g_iommus[i]->cap);
++ /* set iommu_superpage to the smallest common denominator */
++ for_each_active_iommu(iommu, drhd) {
++ mask &= cap_super_page_val(iommu->cap);
+ if (!mask) {
+ break;
+ }
+@@ -3744,6 +3745,7 @@ static int intel_iommu_domain_init(struc
+ vm_domain_exit(dmar_domain);
+ return -ENOMEM;
+ }
++ domain_update_iommu_cap(dmar_domain);
+ domain->priv = dmar_domain;
+
+ return 0;
--- /dev/null
+From 34a5b4b6af104cf18eb50748509528b9bdbc4036 Mon Sep 17 00:00:00 2001
+From: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+Date: Fri, 2 Dec 2011 08:19:18 -0800
+Subject: iwlwifi: do not re-configure HT40 after associated
+
+From: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+
+commit 34a5b4b6af104cf18eb50748509528b9bdbc4036 upstream.
+
+The HT40 setting should not change after association unless a channel switch occurs.
+
+This fixes a problem we were seeing which caused a uCode assert, because the
+driver was sending invalid information and confusing the uCode.
+
+Here is the firmware assert message:
+kernel: iwlagn 0000:03:00.0: Microcode SW error detected. Restarting 0x82000000.
+kernel: iwlagn 0000:03:00.0: Loaded firmware version: 17.168.5.3 build 42301
+kernel: iwlagn 0000:03:00.0: Start IWL Error Log Dump:
+kernel: iwlagn 0000:03:00.0: Status: 0x000512E4, count: 6
+kernel: iwlagn 0000:03:00.0: 0x00002078 | ADVANCED_SYSASSERT
+kernel: iwlagn 0000:03:00.0: 0x00009514 | uPc
+kernel: iwlagn 0000:03:00.0: 0x00009496 | branchlink1
+kernel: iwlagn 0000:03:00.0: 0x00009496 | branchlink2
+kernel: iwlagn 0000:03:00.0: 0x0000D1F2 | interruptlink1
+kernel: iwlagn 0000:03:00.0: 0x00000000 | interruptlink2
+kernel: iwlagn 0000:03:00.0: 0x01008035 | data1
+kernel: iwlagn 0000:03:00.0: 0x0000C90F | data2
+kernel: iwlagn 0000:03:00.0: 0x000005A7 | line
+kernel: iwlagn 0000:03:00.0: 0x5080B520 | beacon time
+kernel: iwlagn 0000:03:00.0: 0xCC515AE0 | tsf low
+kernel: iwlagn 0000:03:00.0: 0x00000003 | tsf hi
+kernel: iwlagn 0000:03:00.0: 0x00000000 | time gp1
+kernel: iwlagn 0000:03:00.0: 0x29703BF0 | time gp2
+kernel: iwlagn 0000:03:00.0: 0x00000000 | time gp3
+kernel: iwlagn 0000:03:00.0: 0x000111A8 | uCode version
+kernel: iwlagn 0000:03:00.0: 0x000000B0 | hw version
+kernel: iwlagn 0000:03:00.0: 0x00480303 | board version
+kernel: iwlagn 0000:03:00.0: 0x09E8004E | hcmd
+kernel: iwlagn 0000:03:00.0: CSR values:
+kernel: iwlagn 0000:03:00.0: (2nd byte of CSR_INT_COALESCING is CSR_INT_PERIODIC_REG)
+kernel: iwlagn 0000:03:00.0: CSR_HW_IF_CONFIG_REG: 0X00480303
+kernel: iwlagn 0000:03:00.0: CSR_INT_COALESCING: 0X0000ff40
+kernel: iwlagn 0000:03:00.0: CSR_INT: 0X00000000
+kernel: iwlagn 0000:03:00.0: CSR_INT_MASK: 0X00000000
+kernel: iwlagn 0000:03:00.0: CSR_FH_INT_STATUS: 0X00000000
+kernel: iwlagn 0000:03:00.0: CSR_GPIO_IN: 0X00000030
+kernel: iwlagn 0000:03:00.0: CSR_RESET: 0X00000000
+kernel: iwlagn 0000:03:00.0: CSR_GP_CNTRL: 0X080403c5
+kernel: iwlagn 0000:03:00.0: CSR_HW_REV: 0X000000b0
+kernel: iwlagn 0000:03:00.0: CSR_EEPROM_REG: 0X07d60ffd
+kernel: iwlagn 0000:03:00.0: CSR_EEPROM_GP: 0X90000001
+kernel: iwlagn 0000:03:00.0: CSR_OTP_GP_REG: 0X00030001
+kernel: iwlagn 0000:03:00.0: CSR_GIO_REG: 0X00080044
+kernel: iwlagn 0000:03:00.0: CSR_GP_UCODE_REG: 0X000093bb
+kernel: iwlagn 0000:03:00.0: CSR_GP_DRIVER_REG: 0X00000000
+kernel: iwlagn 0000:03:00.0: CSR_UCODE_DRV_GP1: 0X00000000
+kernel: iwlagn 0000:03:00.0: CSR_UCODE_DRV_GP2: 0X00000000
+kernel: iwlagn 0000:03:00.0: CSR_LED_REG: 0X00000078
+kernel: iwlagn 0000:03:00.0: CSR_DRAM_INT_TBL_REG: 0X88214dd2
+kernel: iwlagn 0000:03:00.0: CSR_GIO_CHICKEN_BITS: 0X27800200
+kernel: iwlagn 0000:03:00.0: CSR_ANA_PLL_CFG: 0X00000000
+kernel: iwlagn 0000:03:00.0: CSR_HW_REV_WA_REG: 0X0001001a
+kernel: iwlagn 0000:03:00.0: CSR_DBG_HPET_MEM_REG: 0Xffff0010
+kernel: iwlagn 0000:03:00.0: FH register values:
+kernel: iwlagn 0000:03:00.0: FH_RSCSR_CHNL0_STTS_WPTR_REG: 0X21316d00
+kernel: iwlagn 0000:03:00.0: FH_RSCSR_CHNL0_RBDCB_BASE_REG: 0X021479c0
+kernel: iwlagn 0000:03:00.0: FH_RSCSR_CHNL0_WPTR: 0X00000060
+kernel: iwlagn 0000:03:00.0: FH_MEM_RCSR_CHNL0_CONFIG_REG: 0X80819104
+kernel: iwlagn 0000:03:00.0: FH_MEM_RSSR_SHARED_CTRL_REG: 0X000000fc
+kernel: iwlagn 0000:03:00.0: FH_MEM_RSSR_RX_STATUS_REG: 0X07030000
+kernel: iwlagn 0000:03:00.0: FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV: 0X00000000
+kernel: iwlagn 0000:03:00.0: FH_TSSR_TX_STATUS_REG: 0X07ff0001
+kernel: iwlagn 0000:03:00.0: FH_TSSR_TX_ERROR_REG: 0X00000000
+kernel: iwlagn 0000:03:00.0: Start IWL Event Log Dump: display last 20 entries
+kernel: ------------[ cut here ]------------
+WARNING: at net/mac80211/util.c:1208 ieee80211_reconfig+0x1f1/0x407()
+kernel: Hardware name: 4290W4H
+kernel: Pid: 1896, comm: kworker/0:0 Not tainted 3.1.0 #2
+kernel: Call Trace:
+kernel: [<ffffffff81036558>] ? warn_slowpath_common+0x73/0x87
+kernel: [<ffffffff813b8966>] ? ieee80211_reconfig+0x1f1/0x407
+kernel: [<ffffffff8139e8dc>] ? ieee80211_recalc_smps_work+0x32/0x32
+kernel: [<ffffffff8139e95a>] ? ieee80211_restart_work+0x7e/0x87
+kernel: [<ffffffff810472fa>] ? process_one_work+0x1c8/0x2e3
+kernel: [<ffffffff810480c9>] ? worker_thread+0x17a/0x23a
+kernel: [<ffffffff81047f4f>] ? manage_workers.clone.18+0x15b/0x15b
+kernel: [<ffffffff81047f4f>] ? manage_workers.clone.18+0x15b/0x15b
+kernel: [<ffffffff8104ba97>] ? kthread+0x7a/0x82
+kernel: [<ffffffff813d21b4>] ? kernel_thread_helper+0x4/0x10
+kernel: [<ffffffff8104ba1d>] ? kthread_flush_work_fn+0x11/0x11
+kernel: [<ffffffff813d21b0>] ? gs_change+0xb/0xb
+
+Reported-by: Udo Steinberg <udo@hypervisor.org>
+Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+
+---
+ drivers/net/wireless/iwlwifi/iwl-agn-rxon.c | 36 +++++++++++++++++-----------
+ drivers/net/wireless/iwlwifi/iwl-agn.c | 18 ++------------
+ drivers/net/wireless/iwlwifi/iwl-agn.h | 2 +
+ 3 files changed, 28 insertions(+), 28 deletions(-)
+
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+@@ -411,6 +411,24 @@ int iwlagn_commit_rxon(struct iwl_priv *
+ return 0;
+ }
+
++void iwlagn_config_ht40(struct ieee80211_conf *conf,
++ struct iwl_rxon_context *ctx)
++{
++ if (conf_is_ht40_minus(conf)) {
++ ctx->ht.extension_chan_offset =
++ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
++ ctx->ht.is_40mhz = true;
++ } else if (conf_is_ht40_plus(conf)) {
++ ctx->ht.extension_chan_offset =
++ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
++ ctx->ht.is_40mhz = true;
++ } else {
++ ctx->ht.extension_chan_offset =
++ IEEE80211_HT_PARAM_CHA_SEC_NONE;
++ ctx->ht.is_40mhz = false;
++ }
++}
++
+ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
+ {
+ struct iwl_priv *priv = hw->priv;
+@@ -470,19 +488,11 @@ int iwlagn_mac_config(struct ieee80211_h
+ ctx->ht.enabled = conf_is_ht(conf);
+
+ if (ctx->ht.enabled) {
+- if (conf_is_ht40_minus(conf)) {
+- ctx->ht.extension_chan_offset =
+- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+- ctx->ht.is_40mhz = true;
+- } else if (conf_is_ht40_plus(conf)) {
+- ctx->ht.extension_chan_offset =
+- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+- ctx->ht.is_40mhz = true;
+- } else {
+- ctx->ht.extension_chan_offset =
+- IEEE80211_HT_PARAM_CHA_SEC_NONE;
+- ctx->ht.is_40mhz = false;
+- }
++ /* if HT40 is used, it should not change
++ * after associated except channel switch */
++ if (iwl_is_associated_ctx(ctx) &&
++ !ctx->ht.is_40mhz)
++ iwlagn_config_ht40(conf, ctx);
+ } else
+ ctx->ht.is_40mhz = false;
+
+--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
+@@ -2872,21 +2872,9 @@ static void iwlagn_mac_channel_switch(st
+
+ /* Configure HT40 channels */
+ ctx->ht.enabled = conf_is_ht(conf);
+- if (ctx->ht.enabled) {
+- if (conf_is_ht40_minus(conf)) {
+- ctx->ht.extension_chan_offset =
+- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+- ctx->ht.is_40mhz = true;
+- } else if (conf_is_ht40_plus(conf)) {
+- ctx->ht.extension_chan_offset =
+- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+- ctx->ht.is_40mhz = true;
+- } else {
+- ctx->ht.extension_chan_offset =
+- IEEE80211_HT_PARAM_CHA_SEC_NONE;
+- ctx->ht.is_40mhz = false;
+- }
+- } else
++ if (ctx->ht.enabled)
++ iwlagn_config_ht40(conf, ctx);
++ else
+ ctx->ht.is_40mhz = false;
+
+ if ((le16_to_cpu(ctx->staging.channel) != ch))
+--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
++++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
+@@ -152,6 +152,8 @@ void iwlagn_bss_info_changed(struct ieee
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes);
++void iwlagn_config_ht40(struct ieee80211_conf *conf,
++ struct iwl_rxon_context *ctx);
+
+ /* uCode */
+ void iwlagn_rx_calib_result(struct iwl_priv *priv,
--- /dev/null
+From a855b84c3d8c73220d4d3cd392a7bee7c83de70e Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Fri, 18 Nov 2011 10:55:35 -0800
+Subject: percpu: fix chunk range calculation
+
+From: Tejun Heo <tj@kernel.org>
+
+commit a855b84c3d8c73220d4d3cd392a7bee7c83de70e upstream.
+
+Percpu allocator recorded the cpus which map to the first and last
+units in pcpu_first/last_unit_cpu respectively and used them to
+determine the address range of a chunk - e.g. it assumed that the
+first unit has the lowest address in a chunk while the last unit has
+the highest address.
+
+This simply isn't true. Groups in a chunk can have arbitrary positive
+or negative offsets from the previous one and there is no guarantee
+that the first unit occupies the lowest offset while the last one the
+highest.
+
+Fix it by actually comparing unit offsets to determine cpus occupying
+the lowest and highest offsets. Also, rename pcu_first/last_unit_cpu
+to pcpu_low/high_unit_cpu to avoid confusion.
+
+The chunk address range is used to flush cache on vmalloc area
+map/unmap and decide whether a given address is in the first chunk by
+per_cpu_ptr_to_phys() and the bug was discovered by invalid
+per_cpu_ptr_to_phys() translation for crash_note.
+
+Kudos to Dave Young for tracking down the problem.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: WANG Cong <xiyou.wangcong@gmail.com>
+Reported-by: Dave Young <dyoung@redhat.com>
+Tested-by: Dave Young <dyoung@redhat.com>
+LKML-Reference: <4EC21F67.10905@redhat.com>
+Signed-off-by: Thomas Renninger <trenn@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/percpu-vm.c | 12 ++++++------
+ mm/percpu.c | 34 ++++++++++++++++++++--------------
+ 2 files changed, 26 insertions(+), 20 deletions(-)
+
+--- a/mm/percpu-vm.c
++++ b/mm/percpu-vm.c
+@@ -143,8 +143,8 @@ static void pcpu_pre_unmap_flush(struct
+ int page_start, int page_end)
+ {
+ flush_cache_vunmap(
+- pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
+- pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
++ pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
++ pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
+ }
+
+ static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
+@@ -206,8 +206,8 @@ static void pcpu_post_unmap_tlb_flush(st
+ int page_start, int page_end)
+ {
+ flush_tlb_kernel_range(
+- pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
+- pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
++ pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
++ pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
+ }
+
+ static int __pcpu_map_pages(unsigned long addr, struct page **pages,
+@@ -284,8 +284,8 @@ static void pcpu_post_map_flush(struct p
+ int page_start, int page_end)
+ {
+ flush_cache_vmap(
+- pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
+- pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
++ pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
++ pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
+ }
+
+ /**
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -116,9 +116,9 @@ static int pcpu_atom_size __read_mostly;
+ static int pcpu_nr_slots __read_mostly;
+ static size_t pcpu_chunk_struct_size __read_mostly;
+
+-/* cpus with the lowest and highest unit numbers */
+-static unsigned int pcpu_first_unit_cpu __read_mostly;
+-static unsigned int pcpu_last_unit_cpu __read_mostly;
++/* cpus with the lowest and highest unit addresses */
++static unsigned int pcpu_low_unit_cpu __read_mostly;
++static unsigned int pcpu_high_unit_cpu __read_mostly;
+
+ /* the address of the first chunk which starts with the kernel static area */
+ void *pcpu_base_addr __read_mostly;
+@@ -984,19 +984,19 @@ phys_addr_t per_cpu_ptr_to_phys(void *ad
+ {
+ void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
+ bool in_first_chunk = false;
+- unsigned long first_start, first_end;
++ unsigned long first_low, first_high;
+ unsigned int cpu;
+
+ /*
+- * The following test on first_start/end isn't strictly
++ * The following test on unit_low/high isn't strictly
+ * necessary but will speed up lookups of addresses which
+ * aren't in the first chunk.
+ */
+- first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
+- first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
+- pcpu_unit_pages);
+- if ((unsigned long)addr >= first_start &&
+- (unsigned long)addr < first_end) {
++ first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
++ first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
++ pcpu_unit_pages);
++ if ((unsigned long)addr >= first_low &&
++ (unsigned long)addr < first_high) {
+ for_each_possible_cpu(cpu) {
+ void *start = per_cpu_ptr(base, cpu);
+
+@@ -1233,7 +1233,9 @@ int __init pcpu_setup_first_chunk(const
+
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ unit_map[cpu] = UINT_MAX;
+- pcpu_first_unit_cpu = NR_CPUS;
++
++ pcpu_low_unit_cpu = NR_CPUS;
++ pcpu_high_unit_cpu = NR_CPUS;
+
+ for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
+ const struct pcpu_group_info *gi = &ai->groups[group];
+@@ -1253,9 +1255,13 @@ int __init pcpu_setup_first_chunk(const
+ unit_map[cpu] = unit + i;
+ unit_off[cpu] = gi->base_offset + i * ai->unit_size;
+
+- if (pcpu_first_unit_cpu == NR_CPUS)
+- pcpu_first_unit_cpu = cpu;
+- pcpu_last_unit_cpu = cpu;
++ /* determine low/high unit_cpu */
++ if (pcpu_low_unit_cpu == NR_CPUS ||
++ unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
++ pcpu_low_unit_cpu = cpu;
++ if (pcpu_high_unit_cpu == NR_CPUS ||
++ unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
++ pcpu_high_unit_cpu = cpu;
+ }
+ }
+ pcpu_nr_units = unit;
mm-vmalloc-check-for-page-allocation-failure-before-vmlist-insertion.patch
fix-apparmor-dereferencing-potentially-freed-dentry-sanitize-__d_path-api.patch
target-handle-0-correctly-in-transport_get_sectors_6.patch
+intel-iommu-fix-return-value-of-iommu_unmap-api.patch
+intel-iommu-set-iommu_superpage-on-vm-domains-to-lowest-common-denominator.patch
+intel-iommu-fix-superpage-support-in-pfn_to_dma_pte.patch
+percpu-fix-chunk-range-calculation.patch
+iwlwifi-do-not-re-configure-ht40-after-associated.patch
+xfrm-fix-key-lengths-for-rfc3686-ctr-aes.patch
--- /dev/null
+From 4203223a1aed862b4445fdcd260d6139603a51d9 Mon Sep 17 00:00:00 2001
+From: Tushar Gohad <tgohad@mvista.com>
+Date: Thu, 28 Jul 2011 10:36:20 +0000
+Subject: xfrm: Fix key lengths for rfc3686(ctr(aes))
+
+From: Tushar Gohad <tgohad@mvista.com>
+
+commit 4203223a1aed862b4445fdcd260d6139603a51d9 upstream.
+
+Fix the min and max bit lengths for AES-CTR (RFC3686) keys.
+The number of bits in key spec is the key length (128/256)
+plus 32 bits of nonce.
+
+This change takes care of the "Invalid key length" errors
+reported by setkey when specifying 288 bit keys for aes-ctr.
+
+Signed-off-by: Tushar Gohad <tgohad@mvista.com>
+Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Calvin Owens <jcalvinowens@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+--- a/net/xfrm/xfrm_algo.c
++++ b/net/xfrm/xfrm_algo.c
+@@ -462,8 +462,8 @@ static struct xfrm_algo_desc ealg_list[] = {
+ .desc = {
+ .sadb_alg_id = SADB_X_EALG_AESCTR,
+ .sadb_alg_ivlen = 8,
+- .sadb_alg_minbits = 128,
+- .sadb_alg_maxbits = 256
++ .sadb_alg_minbits = 160,
++ .sadb_alg_maxbits = 288
+ }
+ },
+ };