git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.12-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 18 Dec 2013 18:44:30 +0000 (10:44 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 18 Dec 2013 18:44:30 +0000 (10:44 -0800)
added patches:
drivers-rtc-rtc-at91rm9200.c-correct-alarm-over-day-month-wrap.patch
drm-i915-fix-pipe-csc-post-offset-calculation.patch
iommu-arm-smmu-use-mutex-instead-of-spinlock-for-locking-page-tables.patch
mm-memcg-do-not-allow-task-about-to-oom-kill-to-bypass-the-limit.patch
mm-memcg-do-not-declare-oom-from-__gfp_nofail-allocations.patch
mm-memcg-fix-race-condition-between-memcg-teardown-and-swapin.patch
partially-revert-mtd-nand-pxa3xx-introduce-marvell-armada370-nand-compatible-string.patch
powerpc-fix-pte-page-address-mismatch-in-pgtable-ctor-dtor.patch
regulator-pfuze100-fix-address-of-fabid.patch

queue-3.12/drivers-rtc-rtc-at91rm9200.c-correct-alarm-over-day-month-wrap.patch [new file with mode: 0644]
queue-3.12/drm-i915-fix-pipe-csc-post-offset-calculation.patch [new file with mode: 0644]
queue-3.12/iommu-arm-smmu-use-mutex-instead-of-spinlock-for-locking-page-tables.patch [new file with mode: 0644]
queue-3.12/mm-memcg-do-not-allow-task-about-to-oom-kill-to-bypass-the-limit.patch [new file with mode: 0644]
queue-3.12/mm-memcg-do-not-declare-oom-from-__gfp_nofail-allocations.patch [new file with mode: 0644]
queue-3.12/mm-memcg-fix-race-condition-between-memcg-teardown-and-swapin.patch [new file with mode: 0644]
queue-3.12/partially-revert-mtd-nand-pxa3xx-introduce-marvell-armada370-nand-compatible-string.patch [new file with mode: 0644]
queue-3.12/powerpc-fix-pte-page-address-mismatch-in-pgtable-ctor-dtor.patch [new file with mode: 0644]
queue-3.12/regulator-pfuze100-fix-address-of-fabid.patch [new file with mode: 0644]
queue-3.12/series

diff --git a/queue-3.12/drivers-rtc-rtc-at91rm9200.c-correct-alarm-over-day-month-wrap.patch b/queue-3.12/drivers-rtc-rtc-at91rm9200.c-correct-alarm-over-day-month-wrap.patch
new file mode 100644 (file)
index 0000000..6bb85cd
--- /dev/null
@@ -0,0 +1,33 @@
+From eb3c227289840eed95ddfb0516046f08d8993940 Mon Sep 17 00:00:00 2001
+From: Linus Pizunski <linus@narrativeteam.com>
+Date: Thu, 12 Dec 2013 17:12:23 -0800
+Subject: drivers/rtc/rtc-at91rm9200.c: correct alarm over day/month wrap
+
+From: Linus Pizunski <linus@narrativeteam.com>
+
+commit eb3c227289840eed95ddfb0516046f08d8993940 upstream.
+
+Update the month and day of month to the alarm's month/day instead of the
+current month/day when setting the RTC alarm mask.
+
+Signed-off-by: Linus Pizunski <linus@narrativeteam.com>
+Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/rtc/rtc-at91rm9200.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/rtc/rtc-at91rm9200.c
++++ b/drivers/rtc/rtc-at91rm9200.c
+@@ -220,6 +220,8 @@ static int at91_rtc_setalarm(struct devi
+       at91_alarm_year = tm.tm_year;
++      tm.tm_mon = alrm->time.tm_mon;
++      tm.tm_mday = alrm->time.tm_mday;
+       tm.tm_hour = alrm->time.tm_hour;
+       tm.tm_min = alrm->time.tm_min;
+       tm.tm_sec = alrm->time.tm_sec;
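
For illustration only, a small userspace C sketch (not part of the patch; struct tm stands in for the driver's time handling) of why copying only hour/minute/second breaks any alarm that crosses a day or month boundary: the programmed alarm keeps today's date.

#include <stdio.h>
#include <time.h>

int main(void)
{
        time_t now = time(NULL);
        struct tm tm = *localtime(&now);        /* driver starts from "now" */
        struct tm alarm = tm;

        alarm.tm_mday += 1;                     /* requested: tomorrow 01:00 */
        alarm.tm_hour = 1;
        alarm.tm_min = alarm.tm_sec = 0;
        mktime(&alarm);                         /* normalise day/month wrap */

        /* Old behaviour: only hour/min/sec were copied into the alarm. */
        tm.tm_hour = alarm.tm_hour;
        tm.tm_min  = alarm.tm_min;
        tm.tm_sec  = alarm.tm_sec;
        /* tm still carries TODAY's tm_mon/tm_mday, so for an evening caller
         * the alarm lands in the past instead of tomorrow morning. */

        /* The two lines the patch adds: */
        tm.tm_mon  = alarm.tm_mon;
        tm.tm_mday = alarm.tm_mday;

        mktime(&tm);
        printf("alarm programmed for: %s", asctime(&tm));
        return 0;
}
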
diff --git a/queue-3.12/drm-i915-fix-pipe-csc-post-offset-calculation.patch b/queue-3.12/drm-i915-fix-pipe-csc-post-offset-calculation.patch
new file mode 100644 (file)
index 0000000..717b6c0
--- /dev/null
@@ -0,0 +1,35 @@
+From 32cf0cb0294814cb1ee5d8727e9aac0e9aa80d2e Mon Sep 17 00:00:00 2001
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Date: Thu, 28 Nov 2013 22:10:38 +0200
+Subject: drm/i915: Fix pipe CSC post offset calculation
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit 32cf0cb0294814cb1ee5d8727e9aac0e9aa80d2e upstream.
+
+We were miscalculating the pipe CSC post offset for the full->limited
+range conversion. The resulting post offset was double what it was
+supposed to be, which caused blacks to come out grey when using
+limited range output on HSW+.
+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=71769
+Tested-by: Lauri Mylläri <lauri.myllari@gmail.com>
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/intel_display.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -5546,7 +5546,7 @@ static void intel_set_pipe_csc(struct dr
+               uint16_t postoff = 0;
+               if (intel_crtc->config.limited_color_range)
+-                      postoff = (16 * (1 << 13) / 255) & 0x1fff;
++                      postoff = (16 * (1 << 12) / 255) & 0x1fff;
+               I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
+               I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
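
A quick sanity check of the arithmetic (plain userspace C; it assumes, as the fixed expression suggests, that the 13-bit post-offset register encodes full scale as 1 << 12): the old expression yields exactly double the intended 16/255 black-level offset.

#include <stdio.h>

int main(void)
{
        unsigned int old_off = (16 * (1 << 13) / 255) & 0x1fff; /* buggy */
        unsigned int new_off = (16 * (1 << 12) / 255) & 0x1fff; /* fixed */

        /* Prints 514 vs 257: the old offset is exactly 2x, lifting black
         * towards roughly 32/255 instead of 16/255 on limited range,
         * which is why blacks came out grey. */
        printf("old=%u new=%u\n", old_off, new_off);
        return 0;
}
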
diff --git a/queue-3.12/iommu-arm-smmu-use-mutex-instead-of-spinlock-for-locking-page-tables.patch b/queue-3.12/iommu-arm-smmu-use-mutex-instead-of-spinlock-for-locking-page-tables.patch
new file mode 100644 (file)
index 0000000..7a72c9a
--- /dev/null
@@ -0,0 +1,154 @@
+From a44a9791e778d9ccda50d5534028ed4057a9a45b Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Thu, 7 Nov 2013 18:47:50 +0000
+Subject: iommu/arm-smmu: use mutex instead of spinlock for locking page tables
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit a44a9791e778d9ccda50d5534028ed4057a9a45b upstream.
+
+When creating IO mappings, we lazily allocate our page tables using the
+standard, non-atomic allocator functions. This presents us with a
+problem, since our page tables are protected with a spinlock.
+
+This patch reworks the smmu_domain lock to use a mutex instead of a
+spinlock. iova_to_phys is then reworked so that it only reads the page
+tables, and can run in a lockless fashion, leaving the mutex to guard
+against concurrent mapping threads.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/arm-smmu.c |   62 +++++++++++++++++++----------------------------
+ 1 file changed, 26 insertions(+), 36 deletions(-)
+
+--- a/drivers/iommu/arm-smmu.c
++++ b/drivers/iommu/arm-smmu.c
+@@ -392,7 +392,7 @@ struct arm_smmu_domain {
+       struct arm_smmu_cfg             root_cfg;
+       phys_addr_t                     output_mask;
+-      spinlock_t                      lock;
++      struct mutex                    lock;
+ };
+ static DEFINE_SPINLOCK(arm_smmu_devices_lock);
+@@ -897,7 +897,7 @@ static int arm_smmu_domain_init(struct i
+               goto out_free_domain;
+       smmu_domain->root_cfg.pgd = pgd;
+-      spin_lock_init(&smmu_domain->lock);
++      mutex_init(&smmu_domain->lock);
+       domain->priv = smmu_domain;
+       return 0;
+@@ -1134,7 +1134,7 @@ static int arm_smmu_attach_dev(struct io
+        * Sanity check the domain. We don't currently support domains
+        * that cross between different SMMU chains.
+        */
+-      spin_lock(&smmu_domain->lock);
++      mutex_lock(&smmu_domain->lock);
+       if (!smmu_domain->leaf_smmu) {
+               /* Now that we have a master, we can finalise the domain */
+               ret = arm_smmu_init_domain_context(domain, dev);
+@@ -1149,7 +1149,7 @@ static int arm_smmu_attach_dev(struct io
+                       dev_name(device_smmu->dev));
+               goto err_unlock;
+       }
+-      spin_unlock(&smmu_domain->lock);
++      mutex_unlock(&smmu_domain->lock);
+       /* Looks ok, so add the device to the domain */
+       master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
+@@ -1159,7 +1159,7 @@ static int arm_smmu_attach_dev(struct io
+       return arm_smmu_domain_add_master(smmu_domain, master);
+ err_unlock:
+-      spin_unlock(&smmu_domain->lock);
++      mutex_unlock(&smmu_domain->lock);
+       return ret;
+ }
+@@ -1388,7 +1388,7 @@ static int arm_smmu_handle_mapping(struc
+       if (paddr & ~output_mask)
+               return -ERANGE;
+-      spin_lock(&smmu_domain->lock);
++      mutex_lock(&smmu_domain->lock);
+       pgd += pgd_index(iova);
+       end = iova + size;
+       do {
+@@ -1404,7 +1404,7 @@ static int arm_smmu_handle_mapping(struc
+       } while (pgd++, iova != end);
+ out_unlock:
+-      spin_unlock(&smmu_domain->lock);
++      mutex_unlock(&smmu_domain->lock);
+       /* Ensure new page tables are visible to the hardware walker */
+       if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+@@ -1443,44 +1443,34 @@ static size_t arm_smmu_unmap(struct iomm
+ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
+                                        dma_addr_t iova)
+ {
+-      pgd_t *pgd;
+-      pud_t *pud;
+-      pmd_t *pmd;
+-      pte_t *pte;
++      pgd_t *pgdp, pgd;
++      pud_t pud;
++      pmd_t pmd;
++      pte_t pte;
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+-      struct arm_smmu_device *smmu = root_cfg->smmu;
+-      spin_lock(&smmu_domain->lock);
+-      pgd = root_cfg->pgd;
+-      if (!pgd)
+-              goto err_unlock;
++      pgdp = root_cfg->pgd;
++      if (!pgdp)
++              return 0;
+-      pgd += pgd_index(iova);
+-      if (pgd_none_or_clear_bad(pgd))
+-              goto err_unlock;
++      pgd = *(pgdp + pgd_index(iova));
++      if (pgd_none(pgd))
++              return 0;
+-      pud = pud_offset(pgd, iova);
+-      if (pud_none_or_clear_bad(pud))
+-              goto err_unlock;
++      pud = *pud_offset(&pgd, iova);
++      if (pud_none(pud))
++              return 0;
+-      pmd = pmd_offset(pud, iova);
+-      if (pmd_none_or_clear_bad(pmd))
+-              goto err_unlock;
++      pmd = *pmd_offset(&pud, iova);
++      if (pmd_none(pmd))
++              return 0;
+-      pte = pmd_page_vaddr(*pmd) + pte_index(iova);
++      pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
+       if (pte_none(pte))
+-              goto err_unlock;
+-
+-      spin_unlock(&smmu_domain->lock);
+-      return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK);
++              return 0;
+-err_unlock:
+-      spin_unlock(&smmu_domain->lock);
+-      dev_warn(smmu->dev,
+-               "invalid (corrupt?) page tables detected for iova 0x%llx\n",
+-               (unsigned long long)iova);
+-      return -EINVAL;
++      return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
+ }
+ static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
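
The two ideas in the patch, sketched as a rough userspace analogue (pthreads, hypothetical stand-in types, not the driver's code): the map path may sleep in the allocator, so it takes a mutex; the lookup path reads each table level by value, so it needs no lock at all. The kernel code additionally relies on ordered publication of the tables, which this sketch glosses over.

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

#define ENTRIES 512

struct domain {
        pthread_mutex_t lock;           /* was a spinlock before the patch */
        uintptr_t *pgd[ENTRIES];        /* lazily allocated second level */
};

/* Map path: may allocate, hence may sleep; legal under a mutex, a bug
 * under a spinlock when the allocator is non-atomic. */
static int map_entry(struct domain *d, unsigned i, unsigned j, uintptr_t pa)
{
        pthread_mutex_lock(&d->lock);
        if (!d->pgd[i])
                d->pgd[i] = calloc(ENTRIES, sizeof(uintptr_t));
        if (!d->pgd[i]) {
                pthread_mutex_unlock(&d->lock);
                return -1;
        }
        d->pgd[i][j] = pa;
        pthread_mutex_unlock(&d->lock);
        return 0;
}

/* Lookup path: snapshot each level by value; a racing map_entry() can
 * only make the walk see "not present", never a torn entry. */
static uintptr_t lookup(struct domain *d, unsigned i, unsigned j)
{
        uintptr_t *pt = d->pgd[i];      /* read the pointer once */

        return pt ? pt[j] : 0;
}

int main(void)
{
        struct domain d = { .lock = PTHREAD_MUTEX_INITIALIZER };

        map_entry(&d, 1, 2, 0xdead000);
        return lookup(&d, 1, 2) == 0xdead000 ? 0 : 1;
}
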
diff --git a/queue-3.12/mm-memcg-do-not-allow-task-about-to-oom-kill-to-bypass-the-limit.patch b/queue-3.12/mm-memcg-do-not-allow-task-about-to-oom-kill-to-bypass-the-limit.patch
new file mode 100644 (file)
index 0000000..6619ba3
--- /dev/null
@@ -0,0 +1,44 @@
+From 1f14c1ac19aa45118054b6d5425873c5c7fc23a1 Mon Sep 17 00:00:00 2001
+From: Johannes Weiner <hannes@cmpxchg.org>
+Date: Thu, 12 Dec 2013 17:12:35 -0800
+Subject: mm: memcg: do not allow task about to OOM kill to bypass the limit
+
+From: Johannes Weiner <hannes@cmpxchg.org>
+
+commit 1f14c1ac19aa45118054b6d5425873c5c7fc23a1 upstream.
+
+Commit 4942642080ea ("mm: memcg: handle non-error OOM situations more
+gracefully") allowed tasks that already entered a memcg OOM condition to
+bypass the memcg limit on subsequent allocation attempts hoping this
+would expedite finishing the page fault and executing the kill.
+
+David Rientjes is worried that this breaks memcg isolation guarantees,
+and since there is no evidence that the bypass actually speeds up fault
+processing, just change it so that these subsequent charge attempts fail
+outright.  The notable exception is __GFP_NOFAIL charges, which are
+required to bypass the limit regardless.
+
+Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
+Reported-by: David Rientjes <rientjes@google.com>
+Acked-by: Michal Hocko <mhocko@suse.cz>
+Acked-by: David Rientjes <rientjes@google.com>
+Cc: <stable@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/memcontrol.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2675,7 +2675,7 @@ static int __mem_cgroup_try_charge(struc
+               goto bypass;
+       if (unlikely(task_in_memcg_oom(current)))
+-              goto bypass;
++              goto nomem;
+       if (gfp_mask & __GFP_NOFAIL)
+               oom = false;
diff --git a/queue-3.12/mm-memcg-do-not-declare-oom-from-__gfp_nofail-allocations.patch b/queue-3.12/mm-memcg-do-not-declare-oom-from-__gfp_nofail-allocations.patch
new file mode 100644 (file)
index 0000000..3a81b2a
--- /dev/null
@@ -0,0 +1,42 @@
+From a0d8b00a3381f9d75764b3377590451cb0b4fe41 Mon Sep 17 00:00:00 2001
+From: Johannes Weiner <hannes@cmpxchg.org>
+Date: Thu, 12 Dec 2013 17:12:20 -0800
+Subject: mm: memcg: do not declare OOM from __GFP_NOFAIL allocations
+
+From: Johannes Weiner <hannes@cmpxchg.org>
+
+commit a0d8b00a3381f9d75764b3377590451cb0b4fe41 upstream.
+
+Commit 84235de394d9 ("fs: buffer: move allocation failure loop into the
+allocator") started recognizing __GFP_NOFAIL in memory cgroups but
+forgot to disable the OOM killer.
+
+Any task that does not fail allocation will also not enter the OOM
+completion path.  So don't declare an OOM state in this case, or it'll be
+leaked and the task will be able to bypass the limit until the next
+userspace-triggered page fault cleans up the OOM state.
+
+Reported-by: William Dauchy <wdauchy@gmail.com>
+Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
+Acked-by: Michal Hocko <mhocko@suse.cz>
+Cc: David Rientjes <rientjes@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/memcontrol.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2677,6 +2677,9 @@ static int __mem_cgroup_try_charge(struc
+       if (unlikely(task_in_memcg_oom(current)))
+               goto bypass;
++      if (gfp_mask & __GFP_NOFAIL)
++              oom = false;
++
+       /*
+        * We always charge the cgroup the mm_struct belongs to.
+        * The mm_struct's mem_cgroup changes on task migration if the
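
Condensed into one place, a sketch of the charge path's entry checks once both memcontrol patches above are applied (stand-in C with a hypothetical flag value, not the kernel's actual code, which spreads these checks across __mem_cgroup_try_charge and its failure paths).

#include <stdbool.h>
#include <errno.h>

#define GFP_NOFAIL 0x800u               /* hypothetical flag value */

static int charge_entry_checks(bool task_in_memcg_oom,
                               unsigned int gfp_mask, bool *oom)
{
        /* First patch: a task already handling a memcg OOM fails
         * outright instead of breaching the limit; __GFP_NOFAIL is
         * the one exception that may still bypass it. */
        if (task_in_memcg_oom && !(gfp_mask & GFP_NOFAIL))
                return -ENOMEM;         /* was: bypass */

        /* Second patch: never declare a memcg OOM state for
         * __GFP_NOFAIL, or the state leaks until the next
         * userspace-triggered page fault cleans it up. */
        if (gfp_mask & GFP_NOFAIL)
                *oom = false;

        return 0;
}

int main(void)
{
        bool oom = true;

        return charge_entry_checks(false, GFP_NOFAIL, &oom) || oom;
}
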
diff --git a/queue-3.12/mm-memcg-fix-race-condition-between-memcg-teardown-and-swapin.patch b/queue-3.12/mm-memcg-fix-race-condition-between-memcg-teardown-and-swapin.patch
new file mode 100644 (file)
index 0000000..413d1a9
--- /dev/null
@@ -0,0 +1,107 @@
+From 96f1c58d853497a757463e0b57fed140d6858f3a Mon Sep 17 00:00:00 2001
+From: Johannes Weiner <hannes@cmpxchg.org>
+Date: Thu, 12 Dec 2013 17:12:34 -0800
+Subject: mm: memcg: fix race condition between memcg teardown and swapin
+
+From: Johannes Weiner <hannes@cmpxchg.org>
+
+commit 96f1c58d853497a757463e0b57fed140d6858f3a upstream.
+
+There is a race condition between a memcg being torn down and a swapin
+triggered from a different memcg of a page that was recorded to belong
+to the exiting memcg on swapout (with CONFIG_MEMCG_SWAP extension).  The
+result is unreclaimable pages pointing to dead memcgs, which can lead to
+anything from endless loops in later memcg teardown (the page is charged
+to all hierarchical parents but is not on any LRU list) to crashes from
+following the dangling memcg pointer.
+
+Memcgs with tasks in them cannot be torn down, and usually charges don't
+show up in memcgs without tasks.  Swapin with the CONFIG_MEMCG_SWAP
+extension is the notable exception because it charges the cgroup that
+was recorded as owner during swapout, which may be empty and in the
+process of being torn down when a task in another memcg triggers the
+swapin:
+
+  teardown:                 swapin:
+
+                            lookup_swap_cgroup_id()
+                            rcu_read_lock()
+                            mem_cgroup_lookup()
+                            css_tryget()
+                            rcu_read_unlock()
+  disable css_tryget()
+  call_rcu()
+    offline_css()
+      reparent_charges()
+                            res_counter_charge() (hierarchical!)
+                            css_put()
+                              css_free()
+                            pc->mem_cgroup = dead memcg
+                            add page to dead lru
+
+Add a final reparenting step into css_free() to make sure any such raced
+charges are moved out of the memcg before it's finally freed.
+
+In the longer term it would be cleaner to have the css_tryget() and the
+res_counter charge under the same RCU lock section so that the charge
+reparenting is deferred until the last charge whose tryget succeeded is
+visible.  But this will require more invasive changes that will be
+harder to evaluate and backport into stable, so better defer them to a
+separate change set.
+
+Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
+Acked-by: Michal Hocko <mhocko@suse.cz>
+Cc: David Rientjes <rientjes@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/memcontrol.c |   36 ++++++++++++++++++++++++++++++++++++
+ 1 file changed, 36 insertions(+)
+
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -6341,6 +6341,42 @@ static void mem_cgroup_css_offline(struc
+ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
+ {
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
++      /*
++       * XXX: css_offline() would be where we should reparent all
++       * memory to prepare the cgroup for destruction.  However,
++       * memcg does not do css_tryget() and res_counter charging
++       * under the same RCU lock region, which means that charging
++       * could race with offlining.  Offlining only happens to
++       * cgroups with no tasks in them but charges can show up
++       * without any tasks from the swapin path when the target
++       * memcg is looked up from the swapout record and not from the
++       * current task as it usually is.  A race like this can leak
++       * charges and put pages with stale cgroup pointers into
++       * circulation:
++       *
++       * #0                        #1
++       *                           lookup_swap_cgroup_id()
++       *                           rcu_read_lock()
++       *                           mem_cgroup_lookup()
++       *                           css_tryget()
++       *                           rcu_read_unlock()
++       * disable css_tryget()
++       * call_rcu()
++       *   offline_css()
++       *     reparent_charges()
++       *                           res_counter_charge()
++       *                           css_put()
++       *                             css_free()
++       *                           pc->mem_cgroup = dead memcg
++       *                           add page to lru
++       *
++       * The bulk of the charges are still moved in offline_css() to
++       * avoid pinning a lot of pages in case a long-term reference
++       * like a swapout record is deferring the css_free() to long
++       * after offlining.  But this makes sure we catch any charges
++       * made after offlining:
++       */
++      mem_cgroup_reparent_charges(memcg);
+       memcg_destroy_kmem(memcg);
+       __mem_cgroup_free(memcg);
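
The shape of the fix, reduced to a toy example (plain C, stand-in names): the offline path still moves the bulk of the charges, and the free path makes one final pass to catch a charge that raced in after offlining, such as a swapin whose css_tryget() succeeded just before it was disabled.

#include <stdio.h>

static int charges;     /* stand-in for pages charged to this memcg */

static void reparent_charges(const char *where)
{
        if (charges)
                printf("%s: reparented %d straggler charge(s)\n",
                       where, charges);
        charges = 0;
}

static void css_offline(void) { reparent_charges("offline"); }

static void css_free(void)
{
        reparent_charges("free");       /* the pass the patch adds */
}

int main(void)
{
        charges = 3;                    /* normal charges before teardown */
        css_offline();
        charges = 1;                    /* raced-in swapin charge */
        css_free();                     /* without this, it would leak */
        return 0;
}
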
diff --git a/queue-3.12/partially-revert-mtd-nand-pxa3xx-introduce-marvell-armada370-nand-compatible-string.patch b/queue-3.12/partially-revert-mtd-nand-pxa3xx-introduce-marvell-armada370-nand-compatible-string.patch
new file mode 100644 (file)
index 0000000..4091c55
--- /dev/null
@@ -0,0 +1,37 @@
+From 9c59ac616137fb62f6cb3f1219201b09cbcf30be Mon Sep 17 00:00:00 2001
+From: Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
+Date: Mon, 9 Dec 2013 18:36:26 -0300
+Subject: Partially revert "mtd: nand: pxa3xx: Introduce 'marvell,armada370-nand' compatible string"
+
+From: Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
+
+commit 9c59ac616137fb62f6cb3f1219201b09cbcf30be upstream.
+
+This partially reverts commit c0f3b8643a6fa2461d70760ec49d21d2b031d611.
+
+The "armada370-nand" compatible support is not complete, and it was mistake
+to add it. Revert it and postpone the support until the infrastructure is
+in place.
+
+Signed-off-by: Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
+Acked-by: Jason Cooper <jason@lakedaemon.net>
+Signed-off-by: Brian Norris <computersforpeace@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/pxa3xx_nand.c |    4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/drivers/mtd/nand/pxa3xx_nand.c
++++ b/drivers/mtd/nand/pxa3xx_nand.c
+@@ -1241,10 +1241,6 @@ static struct of_device_id pxa3xx_nand_d
+               .compatible = "marvell,pxa3xx-nand",
+               .data       = (void *)PXA3XX_NAND_VARIANT_PXA,
+       },
+-      {
+-              .compatible = "marvell,armada370-nand",
+-              .data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
+-      },
+       {}
+ };
+ MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
diff --git a/queue-3.12/powerpc-fix-pte-page-address-mismatch-in-pgtable-ctor-dtor.patch b/queue-3.12/powerpc-fix-pte-page-address-mismatch-in-pgtable-ctor-dtor.patch
new file mode 100644 (file)
index 0000000..fe7e9bc
--- /dev/null
@@ -0,0 +1,78 @@
+From cf77ee54362a245f9a01f240adce03a06c05eb68 Mon Sep 17 00:00:00 2001
+From: "Hong H. Pham" <hong.pham@windriver.com>
+Date: Sat, 7 Dec 2013 09:06:33 -0500
+Subject: powerpc: Fix PTE page address mismatch in pgtable ctor/dtor
+
+From: "Hong H. Pham" <hong.pham@windriver.com>
+
+commit cf77ee54362a245f9a01f240adce03a06c05eb68 upstream.
+
+In pte_alloc_one(), pgtable_page_ctor() is passed an address that has
+not been converted by page_address() to the newly allocated PTE page.
+
+When the PTE is freed, __pte_free_tlb() calls pgtable_page_dtor()
+with an address to the PTE page that has been converted by page_address().
+The mismatch in the PTE's page address causes pgtable_page_dtor() to access
+invalid memory, so resources for that PTE (such as the page lock) are not
+properly cleaned up.
+
+On PPC32, only SMP kernels are affected.
+
+On PPC64, only SMP kernels with 4K page size are affected.
+
+This bug was introduced by commit d614bb041209fd7cb5e4b35e11a7b2f6ee8f62b8
+"powerpc: Move the pte free routines from common header".
+
+On a preempt-rt kernel, a spinlock is dynamically allocated for each
+PTE in pgtable_page_ctor().  When the PTE is freed, calling
+pgtable_page_dtor() with a mismatched page address causes a memory leak,
+as the pointer to the PTE's spinlock is bogus.
+
+On mainline, there aren't any immediately obvious symptoms, but the
+problem still exists here.
+
+Fixes: d614bb041209fd7c "powerpc: Move the pte free routines from common header"
+Cc: Paul Mackerras <paulus@samba.org>
+Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Hong H. Pham <hong.pham@windriver.com>
+Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/pgalloc-32.h |    6 ++----
+ arch/powerpc/include/asm/pgalloc-64.h |    6 ++----
+ 2 files changed, 4 insertions(+), 8 deletions(-)
+
+--- a/arch/powerpc/include/asm/pgalloc-32.h
++++ b/arch/powerpc/include/asm/pgalloc-32.h
+@@ -84,10 +84,8 @@ static inline void pgtable_free_tlb(stru
+ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+                                 unsigned long address)
+ {
+-      struct page *page = page_address(table);
+-
+       tlb_flush_pgtable(tlb, address);
+-      pgtable_page_dtor(page);
+-      pgtable_free_tlb(tlb, page, 0);
++      pgtable_page_dtor(table);
++      pgtable_free_tlb(tlb, page_address(table), 0);
+ }
+ #endif /* _ASM_POWERPC_PGALLOC_32_H */
+--- a/arch/powerpc/include/asm/pgalloc-64.h
++++ b/arch/powerpc/include/asm/pgalloc-64.h
+@@ -144,11 +144,9 @@ static inline void pgtable_free_tlb(stru
+ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+                                 unsigned long address)
+ {
+-      struct page *page = page_address(table);
+-
+       tlb_flush_pgtable(tlb, address);
+-      pgtable_page_dtor(page);
+-      pgtable_free_tlb(tlb, page, 0);
++      pgtable_page_dtor(table);
++      pgtable_free_tlb(tlb, page_address(table), 0);
+ }
+ #else /* if CONFIG_PPC_64K_PAGES */
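
A toy userspace illustration (hypothetical stand-in types, not kernel code) of the invariant the patch restores: pgtable_page_dtor() must be handed the very same struct page that pgtable_page_ctor() initialised, not its page_address() conversion.

#include <stdio.h>
#include <stdlib.h>

struct page {
        void *vaddr;            /* what page_address() returns */
        int   lock_held;        /* stand-in for the per-page PTE lock state */
};

static void *page_address(struct page *pg) { return pg->vaddr; }
static void pgtable_page_ctor(struct page *pg) { pg->lock_held = 1; }
static void pgtable_page_dtor(struct page *pg) { pg->lock_held = 0; }

int main(void)
{
        struct page pte_page = { .vaddr = malloc(4096), .lock_held = 0 };

        pgtable_page_ctor(&pte_page);   /* ctor: the struct page itself */

        /* The old __pte_free_tlb() effectively did this: it converted the
         * table to a virtual address and handed *that* to the dtor, so the
         * dtor poked at the mapped page, not at the struct page. */
        struct page *bogus = (struct page *)page_address(&pte_page);
        (void)bogus;    /* calling pgtable_page_dtor(bogus) would corrupt
                         * the 4 KiB buffer and leave lock_held set */

        pgtable_page_dtor(&pte_page);   /* fixed: same struct page as ctor */
        printf("lock_held=%d\n", pte_page.lock_held);
        free(pte_page.vaddr);
        return 0;
}
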
diff --git a/queue-3.12/regulator-pfuze100-fix-address-of-fabid.patch b/queue-3.12/regulator-pfuze100-fix-address-of-fabid.patch
new file mode 100644 (file)
index 0000000..b46602b
--- /dev/null
@@ -0,0 +1,31 @@
+From a1b6fa85c639ad0d5447d1a5e7d1463bbe29fcd3 Mon Sep 17 00:00:00 2001
+From: Axel Lin <axel.lin@ingics.com>
+Date: Mon, 9 Dec 2013 15:24:19 +0800
+Subject: regulator: pfuze100: Fix address of FABID
+
+From: Axel Lin <axel.lin@ingics.com>
+
+commit a1b6fa85c639ad0d5447d1a5e7d1463bbe29fcd3 upstream.
+
+According to the datasheet, the address of FABID is 0x4. Fix it.
+
+Signed-off-by: Axel Lin <axel.lin@ingics.com>
+Acked-by: Robin Gong <b38343@freescale.com>
+Signed-off-by: Mark Brown <broonie@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/regulator/pfuze100-regulator.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/regulator/pfuze100-regulator.c
++++ b/drivers/regulator/pfuze100-regulator.c
+@@ -38,7 +38,7 @@
+ #define PFUZE100_DEVICEID     0x0
+ #define PFUZE100_REVID                0x3
+-#define PFUZE100_FABID                0x3
++#define PFUZE100_FABID                0x4
+ #define PFUZE100_SW1ABVOL     0x20
+ #define PFUZE100_SW1CVOL      0x2e
diff --git a/queue-3.12/series b/queue-3.12/series
index a3e3149823b39e13ebd295274443e1d1ed2d83b8..ae980690c0d1f489b34c530599aaa46f6221234b 100644 (file)
@@ -37,3 +37,12 @@ usb-serial-option-blacklist-interface-1-for-huawei-e173s-6.patch
 usb-option-support-new-huawei-devices.patch
 input-usbtouchscreen-separate-report-and-transmit-buffer-size-handling.patch
 media-af9035-fix-broken-i2c-and-usb-i-o.patch
+powerpc-fix-pte-page-address-mismatch-in-pgtable-ctor-dtor.patch
+drivers-rtc-rtc-at91rm9200.c-correct-alarm-over-day-month-wrap.patch
+mm-memcg-do-not-declare-oom-from-__gfp_nofail-allocations.patch
+mm-memcg-do-not-allow-task-about-to-oom-kill-to-bypass-the-limit.patch
+mm-memcg-fix-race-condition-between-memcg-teardown-and-swapin.patch
+regulator-pfuze100-fix-address-of-fabid.patch
+partially-revert-mtd-nand-pxa3xx-introduce-marvell-armada370-nand-compatible-string.patch
+iommu-arm-smmu-use-mutex-instead-of-spinlock-for-locking-page-tables.patch
+drm-i915-fix-pipe-csc-post-offset-calculation.patch