git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.0 patches
author: Greg Kroah-Hartman <gregkh@suse.de>
Tue, 3 Jan 2012 20:03:19 +0000 (12:03 -0800)
committer: Greg Kroah-Hartman <gregkh@suse.de>
Tue, 3 Jan 2012 20:03:19 +0000 (12:03 -0800)
added patches:
arm-7214-1-mmc-mmci-fixup-handling-of-mci_startbiterr.patch
arm-7220-1-mmc-mmci-fixup-error-handling-for-dma.patch
arm-imx-fix-pwm-period-value.patch
drm-radeon-kms-bail-on-btc-parts-if-mc-ucode-is-missing.patch
futex-fix-uninterruptible-loop-due-to-gate_area.patch
mm-hugetlb-fix-non-atomic-enqueue-of-huge-page.patch
oprofile-arm-sh-fix-oprofile_arch_exit-linkage-issue.patch
vfs-fix-race-between-cpu-hotplug-and-lglocks.patch
watchdog-hpwdt-changes-to-handle-nx-secure-bit-in-32bit-path.patch

queue-3.0/arm-7214-1-mmc-mmci-fixup-handling-of-mci_startbiterr.patch [new file with mode: 0644]
queue-3.0/arm-7220-1-mmc-mmci-fixup-error-handling-for-dma.patch [new file with mode: 0644]
queue-3.0/arm-imx-fix-pwm-period-value.patch [new file with mode: 0644]
queue-3.0/drm-radeon-kms-bail-on-btc-parts-if-mc-ucode-is-missing.patch [new file with mode: 0644]
queue-3.0/futex-fix-uninterruptible-loop-due-to-gate_area.patch [new file with mode: 0644]
queue-3.0/mm-hugetlb-fix-non-atomic-enqueue-of-huge-page.patch [new file with mode: 0644]
queue-3.0/oprofile-arm-sh-fix-oprofile_arch_exit-linkage-issue.patch [new file with mode: 0644]
queue-3.0/series
queue-3.0/vfs-fix-race-between-cpu-hotplug-and-lglocks.patch [new file with mode: 0644]
queue-3.0/watchdog-hpwdt-changes-to-handle-nx-secure-bit-in-32bit-path.patch [new file with mode: 0644]

diff --git a/queue-3.0/arm-7214-1-mmc-mmci-fixup-handling-of-mci_startbiterr.patch b/queue-3.0/arm-7214-1-mmc-mmci-fixup-handling-of-mci_startbiterr.patch
new file mode 100644 (file)
index 0000000..2d419f9
--- /dev/null
@@ -0,0 +1,45 @@
+From b63038d6f4ca5d1849ce01d9fc5bb9cb426dec73 Mon Sep 17 00:00:00 2001
+From: Ulf Hansson <ulf.hansson@stericsson.com>
+Date: Tue, 13 Dec 2011 16:51:04 +0100
+Subject: ARM: 7214/1: mmc: mmci: Fixup handling of MCI_STARTBITERR
+
+From: Ulf Hansson <ulf.hansson@stericsson.com>
+
+commit b63038d6f4ca5d1849ce01d9fc5bb9cb426dec73 upstream.
+
+The interrupt was previously enabled and then correctly cleared.
+Now we also handle it correctly.
+
+Tested-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Ulf Hansson <ulf.hansson@stericsson.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/mmc/host/mmci.c |    8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -557,7 +557,8 @@ mmci_data_irq(struct mmci_host *host, st
+             unsigned int status)
+ {
+       /* First check for errors */
+-      if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
++      if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
++                    MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+               u32 remain, success;
+               /* Terminate the DMA transfer */
+@@ -837,8 +838,9 @@ static irqreturn_t mmci_irq(int irq, voi
+               dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
+               data = host->data;
+-              if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
+-                            MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
++              if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
++                            MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
++                            MCI_DATABLOCKEND) && data)
+                       mmci_data_irq(host, data, status);
+               cmd = host->cmd;
diff --git a/queue-3.0/arm-7220-1-mmc-mmci-fixup-error-handling-for-dma.patch b/queue-3.0/arm-7220-1-mmc-mmci-fixup-error-handling-for-dma.patch
new file mode 100644 (file)
index 0000000..91f7bbb
--- /dev/null
@@ -0,0 +1,39 @@
+From 3b6e3c73851a9a4b0e6ed9d378206341dd65e8a5 Mon Sep 17 00:00:00 2001
+From: Ulf Hansson <ulf.hansson@stericsson.com>
+Date: Tue, 13 Dec 2011 16:58:43 +0100
+Subject: ARM: 7220/1: mmc: mmci: Fixup error handling for dma
+
+From: Ulf Hansson <ulf.hansson@stericsson.com>
+
+commit 3b6e3c73851a9a4b0e6ed9d378206341dd65e8a5 upstream.
+
+When getting a cmd irq during an ongoing data transfer
+with dma, the dma job were never terminated. This is now
+corrected.
+
+Tested-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Per Forlin <per.forlin@stericsson.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@stericsson.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/mmc/host/mmci.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -637,8 +637,12 @@ mmci_cmd_irq(struct mmci_host *host, str
+       }
+       if (!cmd->data || cmd->error) {
+-              if (host->data)
++              if (host->data) {
++                      /* Terminate the DMA transfer */
++                      if (dma_inprogress(host))
++                              mmci_dma_data_error(host);
+                       mmci_stop_data(host);
++              }
+               mmci_request_end(host, cmd->mrq);
+       } else if (!(cmd->data->flags & MMC_DATA_READ)) {
+               mmci_start_data(host, cmd->data);
diff --git a/queue-3.0/arm-imx-fix-pwm-period-value.patch b/queue-3.0/arm-imx-fix-pwm-period-value.patch
new file mode 100644 (file)
index 0000000..5aeaa3e
--- /dev/null
@@ -0,0 +1,40 @@
+From 5776ac2eb33164c77cdb4d2b48feee15616eaba3 Mon Sep 17 00:00:00 2001
+From: Jason Chen <jason.chen@linaro.org>
+Date: Mon, 19 Dec 2011 11:23:28 +0800
+Subject: ARM:imx:fix pwm period value
+
+From: Jason Chen <jason.chen@linaro.org>
+
+commit 5776ac2eb33164c77cdb4d2b48feee15616eaba3 upstream.
+
+According to imx pwm RM, the real period value should be
+PERIOD value in PWMPR plus 2.
+
+PWMO (Hz) = PCLK(Hz) / (period +2)
+
+Signed-off-by: Jason Chen <jason.chen@linaro.org>
+Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/arm/plat-mxc/pwm.c |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/arch/arm/plat-mxc/pwm.c
++++ b/arch/arm/plat-mxc/pwm.c
+@@ -77,6 +77,15 @@ int pwm_config(struct pwm_device *pwm, i
+               do_div(c, period_ns);
+               duty_cycles = c;
++              /*
++               * according to imx pwm RM, the real period value should be
++               * PERIOD value in PWMPR plus 2.
++               */
++              if (period_cycles > 2)
++                      period_cycles -= 2;
++              else
++                      period_cycles = 0;
++
+               writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
+               writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
diff --git a/queue-3.0/drm-radeon-kms-bail-on-btc-parts-if-mc-ucode-is-missing.patch b/queue-3.0/drm-radeon-kms-bail-on-btc-parts-if-mc-ucode-is-missing.patch
new file mode 100644 (file)
index 0000000..91cdc95
--- /dev/null
@@ -0,0 +1,44 @@
+From 77e00f2ea94abee1ad13bdfde19cf7aa25992b0e Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 21 Dec 2011 11:58:17 -0500
+Subject: drm/radeon/kms: bail on BTC parts if MC ucode is missing
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 77e00f2ea94abee1ad13bdfde19cf7aa25992b0e upstream.
+
+We already do this for cayman, need to also do it for
+BTC parts.  The default memory and voltage setup is not
+adequate for advanced operation.  Continuing will
+result in an unusable display.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: Jean Delvare <khali@linux-fr.org>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/radeon/evergreen.c |   12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -3257,6 +3257,18 @@ int evergreen_init(struct radeon_device
+                       rdev->accel_working = false;
+               }
+       }
++
++      /* Don't start up if the MC ucode is missing on BTC parts.
++       * The default clocks and voltages before the MC ucode
++       * is loaded are not suffient for advanced operations.
++       */
++      if (ASIC_IS_DCE5(rdev)) {
++              if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
++                      DRM_ERROR("radeon: MC ucode required for NI+.\n");
++                      return -EINVAL;
++              }
++      }
++
+       return 0;
+ }
diff --git a/queue-3.0/futex-fix-uninterruptible-loop-due-to-gate_area.patch b/queue-3.0/futex-fix-uninterruptible-loop-due-to-gate_area.patch
new file mode 100644 (file)
index 0000000..d483d01
--- /dev/null
@@ -0,0 +1,78 @@
+From e6780f7243eddb133cc20ec37fa69317c218b709 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Sat, 31 Dec 2011 11:44:01 -0800
+Subject: futex: Fix uninterruptible loop due to gate_area
+
+From: Hugh Dickins <hughd@google.com>
+
+commit e6780f7243eddb133cc20ec37fa69317c218b709 upstream.
+
+It was found (by Sasha) that if you use a futex located in the gate
+area we get stuck in an uninterruptible infinite loop, much like the
+ZERO_PAGE issue.
+
+While looking at this problem, PeterZ realized you'll get into similar
+trouble when hitting any install_special_pages() mapping.  And are there
+still drivers setting up their own special mmaps without page->mapping,
+and without special VM or pte flags to make get_user_pages fail?
+
+In most cases, if page->mapping is NULL, we do not need to retry at all:
+Linus points out that even /proc/sys/vm/drop_caches poses no problem,
+because it ends up using remove_mapping(), which takes care not to
+interfere when the page reference count is raised.
+
+But there is still one case which does need a retry: if memory pressure
+called shmem_writepage in between get_user_pages_fast dropping page
+table lock and our acquiring page lock, then the page gets switched from
+filecache to swapcache (and ->mapping set to NULL) whatever the refcount.
+Fault it back in to get the page->mapping needed for key->shared.inode.
+
+Reported-by: Sasha Levin <levinsasha928@gmail.com>
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/futex.c |   28 ++++++++++++++++++++--------
+ 1 file changed, 20 insertions(+), 8 deletions(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -314,17 +314,29 @@ again:
+ #endif
+       lock_page(page_head);
++
++      /*
++       * If page_head->mapping is NULL, then it cannot be a PageAnon
++       * page; but it might be the ZERO_PAGE or in the gate area or
++       * in a special mapping (all cases which we are happy to fail);
++       * or it may have been a good file page when get_user_pages_fast
++       * found it, but truncated or holepunched or subjected to
++       * invalidate_complete_page2 before we got the page lock (also
++       * cases which we are happy to fail).  And we hold a reference,
++       * so refcount care in invalidate_complete_page's remove_mapping
++       * prevents drop_caches from setting mapping to NULL beneath us.
++       *
++       * The case we do have to guard against is when memory pressure made
++       * shmem_writepage move it from filecache to swapcache beneath us:
++       * an unlikely race, but we do need to retry for page_head->mapping.
++       */
+       if (!page_head->mapping) {
++              int shmem_swizzled = PageSwapCache(page_head);
+               unlock_page(page_head);
+               put_page(page_head);
+-              /*
+-              * ZERO_PAGE pages don't have a mapping. Avoid a busy loop
+-              * trying to find one. RW mapping would have COW'd (and thus
+-              * have a mapping) so this page is RO and won't ever change.
+-              */
+-              if ((page_head == ZERO_PAGE(address)))
+-                      return -EFAULT;
+-              goto again;
++              if (shmem_swizzled)
++                      goto again;
++              return -EFAULT;
+       }
+       /*
diff --git a/queue-3.0/mm-hugetlb-fix-non-atomic-enqueue-of-huge-page.patch b/queue-3.0/mm-hugetlb-fix-non-atomic-enqueue-of-huge-page.patch
new file mode 100644 (file)
index 0000000..0888843
--- /dev/null
@@ -0,0 +1,41 @@
+From b0365c8d0cb6e79eb5f21418ae61ab511f31b575 Mon Sep 17 00:00:00 2001
+From: Hillf Danton <dhillf@gmail.com>
+Date: Wed, 28 Dec 2011 15:57:16 -0800
+Subject: mm: hugetlb: fix non-atomic enqueue of huge page
+
+From: Hillf Danton <dhillf@gmail.com>
+
+commit b0365c8d0cb6e79eb5f21418ae61ab511f31b575 upstream.
+
+If a huge page is enqueued under the protection of hugetlb_lock, then the
+operation is atomic and safe.
+
+Signed-off-by: Hillf Danton <dhillf@gmail.com>
+Reviewed-by: Michal Hocko <mhocko@suse.cz>
+Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/hugetlb.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -901,7 +901,6 @@ retry:
+       h->resv_huge_pages += delta;
+       ret = 0;
+-      spin_unlock(&hugetlb_lock);
+       /* Free the needed pages to the hugetlb pool */
+       list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+               if ((--needed) < 0)
+@@ -915,6 +914,7 @@ retry:
+               VM_BUG_ON(page_count(page));
+               enqueue_huge_page(h, page);
+       }
++      spin_unlock(&hugetlb_lock);
+       /* Free unnecessary surplus pages to the buddy allocator */
+ free:
diff --git a/queue-3.0/oprofile-arm-sh-fix-oprofile_arch_exit-linkage-issue.patch b/queue-3.0/oprofile-arm-sh-fix-oprofile_arch_exit-linkage-issue.patch
new file mode 100644 (file)
index 0000000..4e2afdb
--- /dev/null
@@ -0,0 +1,64 @@
+From 55205c916e179e09773d98d290334d319f45ac6b Mon Sep 17 00:00:00 2001
+From: Vladimir Zapolskiy <vladimir.zapolskiy@nokia.com>
+Date: Thu, 22 Dec 2011 16:15:40 +0100
+Subject: oprofile, arm/sh: Fix oprofile_arch_exit() linkage issue
+
+From: Vladimir Zapolskiy <vladimir.zapolskiy@nokia.com>
+
+commit 55205c916e179e09773d98d290334d319f45ac6b upstream.
+
+This change fixes a linking problem, which happens if oprofile
+is selected to be compiled as built-in:
+
+  `oprofile_arch_exit' referenced in section `.init.text' of
+  arch/arm/oprofile/built-in.o: defined in discarded section
+  `.exit.text' of arch/arm/oprofile/built-in.o
+
+The problem is appeared after commit 87121ca504, which
+introduced oprofile_arch_exit() calls from __init function. Note
+that the aforementioned commit has been backported to stable
+branches, and the problem is known to be reproduced at least
+with 3.0.13 and 3.1.5 kernels.
+
+Signed-off-by: Vladimir Zapolskiy <vladimir.zapolskiy@nokia.com>
+Signed-off-by: Robert Richter <robert.richter@amd.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: oprofile-list <oprofile-list@lists.sourceforge.net>
+Link: http://lkml.kernel.org/r/20111222151540.GB16765@erda.amd.com
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/arm/oprofile/common.c |    2 +-
+ arch/sh/oprofile/common.c  |    4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/oprofile/common.c
++++ b/arch/arm/oprofile/common.c
+@@ -116,7 +116,7 @@ int __init oprofile_arch_init(struct opr
+       return oprofile_perf_init(ops);
+ }
+-void __exit oprofile_arch_exit(void)
++void oprofile_arch_exit(void)
+ {
+       oprofile_perf_exit();
+ }
+--- a/arch/sh/oprofile/common.c
++++ b/arch/sh/oprofile/common.c
+@@ -49,7 +49,7 @@ int __init oprofile_arch_init(struct opr
+       return oprofile_perf_init(ops);
+ }
+-void __exit oprofile_arch_exit(void)
++void oprofile_arch_exit(void)
+ {
+       oprofile_perf_exit();
+       kfree(sh_pmu_op_name);
+@@ -60,5 +60,5 @@ int __init oprofile_arch_init(struct opr
+       ops->backtrace = sh_backtrace;
+       return -ENODEV;
+ }
+-void __exit oprofile_arch_exit(void) {}
++void oprofile_arch_exit(void) {}
+ #endif /* CONFIG_HW_PERF_EVENTS */
index 94dbd5c9408f2439c90bf778144eb07ad032d8cc..493f73d570490a34d8c2613c7a1cbbb736d7f2e2 100644 (file)
@@ -29,3 +29,12 @@ ath9k-fix-max-phy-rate-at-rate-control-init.patch
 iwlwifi-do-not-set-the-sequence-control-bit-is-not-needed.patch
 iwlwifi-allow-to-switch-to-ht40-if-not-associated.patch
 memcg-keep-root-group-unchanged-if-creation-fails.patch
+vfs-fix-race-between-cpu-hotplug-and-lglocks.patch
+arm-imx-fix-pwm-period-value.patch
+arm-7214-1-mmc-mmci-fixup-handling-of-mci_startbiterr.patch
+arm-7220-1-mmc-mmci-fixup-error-handling-for-dma.patch
+oprofile-arm-sh-fix-oprofile_arch_exit-linkage-issue.patch
+futex-fix-uninterruptible-loop-due-to-gate_area.patch
+watchdog-hpwdt-changes-to-handle-nx-secure-bit-in-32bit-path.patch
+drm-radeon-kms-bail-on-btc-parts-if-mc-ucode-is-missing.patch
+mm-hugetlb-fix-non-atomic-enqueue-of-huge-page.patch
diff --git a/queue-3.0/vfs-fix-race-between-cpu-hotplug-and-lglocks.patch b/queue-3.0/vfs-fix-race-between-cpu-hotplug-and-lglocks.patch
new file mode 100644 (file)
index 0000000..bb0aa34
--- /dev/null
@@ -0,0 +1,153 @@
+From e30e2fdfe56288576ee9e04dbb06b4bd5f282203 Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+Date: Thu, 22 Dec 2011 02:45:29 +0530
+Subject: VFS: Fix race between CPU hotplug and lglocks
+
+From: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
+
+commit e30e2fdfe56288576ee9e04dbb06b4bd5f282203 upstream.
+
+Currently, the *_global_[un]lock_online() routines are not at all synchronized
+with CPU hotplug. Soft-lockups detected as a consequence of this race was
+reported earlier at https://lkml.org/lkml/2011/8/24/185. (Thanks to Cong Meng
+for finding out that the root-cause of this issue is the race condition
+between br_write_[un]lock() and CPU hotplug, which results in the lock states
+getting messed up).
+
+Fixing this race by just adding {get,put}_online_cpus() at appropriate places
+in *_global_[un]lock_online() is not a good option, because, then suddenly
+br_write_[un]lock() would become blocking, whereas they have been kept as
+non-blocking all this time, and we would want to keep them that way.
+
+So, overall, we want to ensure 3 things:
+1. br_write_lock() and br_write_unlock() must remain as non-blocking.
+2. The corresponding lock and unlock of the per-cpu spinlocks must not happen
+   for different sets of CPUs.
+3. Either prevent any new CPU online operation in between this lock-unlock, or
+   ensure that the newly onlined CPU does not proceed with its corresponding
+   per-cpu spinlock unlocked.
+
+To achieve all this:
+(a) We introduce a new spinlock that is taken by the *_global_lock_online()
+    routine and released by the *_global_unlock_online() routine.
+(b) We register a callback for CPU hotplug notifications, and this callback
+    takes the same spinlock as above.
+(c) We maintain a bitmap which is close to the cpu_online_mask, and once it is
+    initialized in the lock_init() code, all future updates to it are done in
+    the callback, under the above spinlock.
+(d) The above bitmap is used (instead of cpu_online_mask) while locking and
+    unlocking the per-cpu locks.
+
+The callback takes the spinlock upon the CPU_UP_PREPARE event. So, if the
+br_write_lock-unlock sequence is in progress, the callback keeps spinning,
+thus preventing the CPU online operation till the lock-unlock sequence is
+complete. This takes care of requirement (3).
+
+The bitmap that we maintain remains unmodified throughout the lock-unlock
+sequence, since all updates to it are managed by the callback, which takes
+the same spinlock as the one taken by the lock code and released only by the
+unlock routine. Combining this with (d) above, satisfies requirement (2).
+
+Overall, since we use a spinlock (mentioned in (a)) to prevent CPU hotplug
+operations from racing with br_write_lock-unlock, requirement (1) is also
+taken care of.
+
+By the way, it is to be noted that a CPU offline operation can actually run
+in parallel with our lock-unlock sequence, because our callback doesn't react
+to notifications earlier than CPU_DEAD (in order to maintain our bitmap
+properly). And this means, since we use our own bitmap (which is stale, on
+purpose) during the lock-unlock sequence, we could end up unlocking the
+per-cpu lock of an offline CPU (because we had locked it earlier, when the
+CPU was online), in order to satisfy requirement (2). But this is harmless,
+though it looks a bit awkward.
+
+Debugged-by: Cong Meng <mc@linux.vnet.ibm.com>
+Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/lglock.h |   36 ++++++++++++++++++++++++++++++++----
+ 1 file changed, 32 insertions(+), 4 deletions(-)
+
+--- a/include/linux/lglock.h
++++ b/include/linux/lglock.h
+@@ -22,6 +22,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/lockdep.h>
+ #include <linux/percpu.h>
++#include <linux/cpu.h>
+ /* can make br locks by using local lock for read side, global lock for write */
+ #define br_lock_init(name)    name##_lock_init()
+@@ -72,9 +73,31 @@
+ #define DEFINE_LGLOCK(name)                                           \
+                                                                       \
++ DEFINE_SPINLOCK(name##_cpu_lock);                                    \
++ cpumask_t name##_cpus __read_mostly;                                 \
+  DEFINE_PER_CPU(arch_spinlock_t, name##_lock);                                \
+  DEFINE_LGLOCK_LOCKDEP(name);                                         \
+                                                                       \
++ static int                                                           \
++ name##_lg_cpu_callback(struct notifier_block *nb,                    \
++                              unsigned long action, void *hcpu)       \
++ {                                                                    \
++      switch (action & ~CPU_TASKS_FROZEN) {                           \
++      case CPU_UP_PREPARE:                                            \
++              spin_lock(&name##_cpu_lock);                            \
++              cpu_set((unsigned long)hcpu, name##_cpus);              \
++              spin_unlock(&name##_cpu_lock);                          \
++              break;                                                  \
++      case CPU_UP_CANCELED: case CPU_DEAD:                            \
++              spin_lock(&name##_cpu_lock);                            \
++              cpu_clear((unsigned long)hcpu, name##_cpus);            \
++              spin_unlock(&name##_cpu_lock);                          \
++      }                                                               \
++      return NOTIFY_OK;                                               \
++ }                                                                    \
++ static struct notifier_block name##_lg_cpu_notifier = {              \
++      .notifier_call = name##_lg_cpu_callback,                        \
++ };                                                                   \
+  void name##_lock_init(void) {                                                \
+       int i;                                                          \
+       LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
+@@ -83,6 +106,11 @@
+               lock = &per_cpu(name##_lock, i);                        \
+               *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;     \
+       }                                                               \
++      register_hotcpu_notifier(&name##_lg_cpu_notifier);              \
++      get_online_cpus();                                              \
++      for_each_online_cpu(i)                                          \
++              cpu_set(i, name##_cpus);                                \
++      put_online_cpus();                                              \
+  }                                                                    \
+  EXPORT_SYMBOL(name##_lock_init);                                     \
+                                                                       \
+@@ -124,9 +152,9 @@
+                                                                       \
+  void name##_global_lock_online(void) {                                       \
+       int i;                                                          \
+-      preempt_disable();                                              \
++      spin_lock(&name##_cpu_lock);                                    \
+       rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
+-      for_each_online_cpu(i) {                                        \
++      for_each_cpu(i, &name##_cpus) {                                 \
+               arch_spinlock_t *lock;                                  \
+               lock = &per_cpu(name##_lock, i);                        \
+               arch_spin_lock(lock);                                   \
+@@ -137,12 +165,12 @@
+  void name##_global_unlock_online(void) {                             \
+       int i;                                                          \
+       rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
+-      for_each_online_cpu(i) {                                        \
++      for_each_cpu(i, &name##_cpus) {                                 \
+               arch_spinlock_t *lock;                                  \
+               lock = &per_cpu(name##_lock, i);                        \
+               arch_spin_unlock(lock);                                 \
+       }                                                               \
+-      preempt_enable();                                               \
++      spin_unlock(&name##_cpu_lock);                                  \
+  }                                                                    \
+  EXPORT_SYMBOL(name##_global_unlock_online);                          \
+                                                                       \
diff --git a/queue-3.0/watchdog-hpwdt-changes-to-handle-nx-secure-bit-in-32bit-path.patch b/queue-3.0/watchdog-hpwdt-changes-to-handle-nx-secure-bit-in-32bit-path.patch
new file mode 100644 (file)
index 0000000..e7df182
--- /dev/null
@@ -0,0 +1,45 @@
+From e67d668e147c3b4fec638c9e0ace04319f5ceccd Mon Sep 17 00:00:00 2001
+From: "Mingarelli, Thomas" <Thomas.Mingarelli@hp.com>
+Date: Mon, 7 Nov 2011 10:59:00 +0100
+Subject: watchdog: hpwdt: Changes to handle NX secure bit in 32bit path
+
+From: "Mingarelli, Thomas" <Thomas.Mingarelli@hp.com>
+
+commit e67d668e147c3b4fec638c9e0ace04319f5ceccd upstream.
+
+This patch makes use of the set_memory_x() kernel API in order
+to make necessary BIOS calls to source NMIs.
+
+This is needed for SLES11 SP2 and the latest upstream kernel as it appears
+the NX Execute Disable has grown in its control.
+
+Signed-off by: Thomas Mingarelli <thomas.mingarelli@hp.com>
+Signed-off by: Wim Van Sebroeck <wim@iguana.be>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/watchdog/hpwdt.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/watchdog/hpwdt.c
++++ b/drivers/watchdog/hpwdt.c
+@@ -216,6 +216,7 @@ static int __devinit cru_detect(unsigned
+       cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
++      set_memory_x((unsigned long)bios32_entrypoint, (2 * PAGE_SIZE));
+       asminline_call(&cmn_regs, bios32_entrypoint);
+       if (cmn_regs.u1.ral != 0) {
+@@ -233,8 +234,10 @@ static int __devinit cru_detect(unsigned
+               if ((physical_bios_base + physical_bios_offset)) {
+                       cru_rom_addr =
+                               ioremap(cru_physical_address, cru_length);
+-                      if (cru_rom_addr)
++                      if (cru_rom_addr) {
++                              set_memory_x((unsigned long)cru_rom_addr, cru_length);
+                               retval = 0;
++                      }
+               }
+               printk(KERN_DEBUG "hpwdt: CRU Base Address:   0x%lx\n",