--- /dev/null
+From 41e2e8fd34fff909a0e40129f6ac4233ecfa67a9 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Fri, 13 Aug 2010 23:33:46 +0100
+Subject: ARM: Tighten check for allowable CPSR values
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit 41e2e8fd34fff909a0e40129f6ac4233ecfa67a9 upstream.
+
+Reviewed-by: Arve Hjønnevåg <arve@android.com>
+Acked-by: Dima Zavin <dima@android.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/arm/include/asm/ptrace.h | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/include/asm/ptrace.h
++++ b/arch/arm/include/asm/ptrace.h
+@@ -150,15 +150,24 @@ struct pt_regs {
+ */
+ static inline int valid_user_regs(struct pt_regs *regs)
+ {
+- if (user_mode(regs) && (regs->ARM_cpsr & PSR_I_BIT) == 0) {
+- regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);
+- return 1;
++ unsigned long mode = regs->ARM_cpsr & MODE_MASK;
++
++ /*
++ * Always clear the F (FIQ) and A (delayed abort) bits
++ */
++ regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);
++
++ if ((regs->ARM_cpsr & PSR_I_BIT) == 0) {
++ if (mode == USR_MODE)
++ return 1;
++ if (elf_hwcap & HWCAP_26BIT && mode == USR26_MODE)
++ return 1;
+ }
+
+ /*
+ * Force CPSR to something logical...
+ */
+- regs->ARM_cpsr &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | PSR_T_BIT | MODE32_BIT;
++ regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;
+ if (!(elf_hwcap & HWCAP_26BIT))
+ regs->ARM_cpsr |= USR_MODE;
+
--- /dev/null
+From 6ccf15a1a76d2ff915cdef6ae4d12d0170087118 Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <maximlevitsky@gmail.com>
+Date: Fri, 13 Aug 2010 11:27:28 -0400
+Subject: ath5k: disable ASPM L0s for all cards
+
+From: Maxim Levitsky <maximlevitsky@gmail.com>
+
+commit 6ccf15a1a76d2ff915cdef6ae4d12d0170087118 upstream.
+
+Atheros PCIe wireless cards handled by ath5k require L0s to be disabled.
+For distributions shipping with CONFIG_PCIEASPM (this will be enabled
+by default in the future, in 2.6.36) this will also mean both L1 and L0s
+will be disabled when a pre 1.1 PCIe device is detected. We do know L1
+works correctly even for all ath5k pre 1.1 PCIe devices, but we cannot
+currently undo the effect of a blacklist; for details you can read
+pcie_aspm_sanity_check() and see how it adjusts the device link
+capability.
+
+It may be possible in the future to implement some PCI API to allow
+drivers to override blacklists for pre 1.1 PCIe but for now it is
+best to accept that both L0s and L1 will be disabled completely for
+distributions shipping with CONFIG_PCIEASPM rather than having this
+issue present. Motivation for adding this new API will be to help
+with power consumption for some of these devices.
+
+Example of issues you'd see:
+
+ - The Acer Aspire One (AOA150, Atheros Communications Inc. AR5001
+   Wireless Network Adapter [168c:001c] (rev 01)) doesn't work well
+   with ASPM enabled: the card will eventually stall on heavy traffic,
+   often with 'unsupported jumbo' warnings appearing. Disabling
+   ASPM L0s in ath5k fixes these problems.
+
+ - On the same card you would see a storm of RXORN interrupts
+   even though the medium is idle.
+
+Credit for root causing and fixing the bug goes to Jussi Kivilinna.
+
+Cc: David Quan <David.Quan@atheros.com>
+Cc: Matthew Garrett <mjg59@srcf.ucam.org>
+Cc: Tim Gardner <tim.gardner@canonical.com>
+Cc: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+Signed-off-by: Luis R. Rodriguez <lrodriguez@atheros.com>
+Signed-off-by: Maxim Levitsky <maximlevitsky@gmail.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/ath/ath5k/base.c | 21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+--- a/drivers/net/wireless/ath/ath5k/base.c
++++ b/drivers/net/wireless/ath/ath5k/base.c
+@@ -48,6 +48,7 @@
+ #include <linux/netdevice.h>
+ #include <linux/cache.h>
+ #include <linux/pci.h>
++#include <linux/pci-aspm.h>
+ #include <linux/ethtool.h>
+ #include <linux/uaccess.h>
+
+@@ -448,6 +449,26 @@ ath5k_pci_probe(struct pci_dev *pdev,
+ int ret;
+ u8 csz;
+
++ /*
++ * L0s needs to be disabled on all ath5k cards.
++ *
++ * For distributions shipping with CONFIG_PCIEASPM (this will be enabled
++ * by default in the future in 2.6.36) this will also mean both L1 and
++ * L0s will be disabled when a pre 1.1 PCIe device is detected. We do
++ * know L1 works correctly even for all ath5k pre 1.1 PCIe devices
++ * though but cannot currently undue the effect of a blacklist, for
++ * details you can read pcie_aspm_sanity_check() and see how it adjusts
++ * the device link capability.
++ *
++ * It may be possible in the future to implement some PCI API to allow
++ * drivers to override blacklists for pre 1.1 PCIe but for now it is
++ * best to accept that both L0s and L1 will be disabled completely for
++ * distributions shipping with CONFIG_PCIEASPM rather than having this
++ * issue present. Motivation for adding this new API will be to help
++ * with power consumption for some of these devices.
++ */
++ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
++
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "can't enable device\n");
--- /dev/null
+From 98f332855effef02aeb738e4d62e9a5b903c52fd Mon Sep 17 00:00:00 2001
+From: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
+Date: Thu, 12 Aug 2010 04:13:55 +0100
+Subject: dm ioctl: release _hash_lock between devices in remove_all
+
+From: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
+
+commit 98f332855effef02aeb738e4d62e9a5b903c52fd upstream.
+
+This patch changes dm_hash_remove_all() to release _hash_lock when
+removing a device. After removing the device, dm_hash_remove_all()
+takes _hash_lock and searches the hash from scratch again.
+
+This patch is a preparation for the next patch, which changes the device
+deletion code to wait for the md reference count to drop to 0. Without
+this patch, the wait in the next patch may cause an AB-BA deadlock:
+ CPU0 CPU1
+ -----------------------------------------------------------------------
+ dm_hash_remove_all()
+ down_write(_hash_lock)
+ table_status()
+ md = find_device()
+ dm_get(md)
+ <increment md->holders>
+ dm_get_live_or_inactive_table()
+ dm_get_inactive_table()
+ down_write(_hash_lock)
+ <in the md deletion code>
+ <wait for md->holders to be 0>
+
+Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
+Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/dm-ioctl.c | 44 +++++++++++++++++++++++++-------------------
+ 1 file changed, 25 insertions(+), 19 deletions(-)
+
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -249,40 +249,46 @@ static void __hash_remove(struct hash_ce
+
+ static void dm_hash_remove_all(int keep_open_devices)
+ {
+- int i, dev_skipped, dev_removed;
++ int i, dev_skipped;
+ struct hash_cell *hc;
+- struct list_head *tmp, *n;
++ struct mapped_device *md;
++
++retry:
++ dev_skipped = 0;
+
+ down_write(&_hash_lock);
+
+-retry:
+- dev_skipped = dev_removed = 0;
+ for (i = 0; i < NUM_BUCKETS; i++) {
+- list_for_each_safe (tmp, n, _name_buckets + i) {
+- hc = list_entry(tmp, struct hash_cell, name_list);
++ list_for_each_entry(hc, _name_buckets + i, name_list) {
++ md = hc->md;
++ dm_get(md);
+
+- if (keep_open_devices &&
+- dm_lock_for_deletion(hc->md)) {
++ if (keep_open_devices && dm_lock_for_deletion(md)) {
++ dm_put(md);
+ dev_skipped++;
+ continue;
+ }
++
+ __hash_remove(hc);
+- dev_removed = 1;
+- }
+- }
+
+- /*
+- * Some mapped devices may be using other mapped devices, so if any
+- * still exist, repeat until we make no further progress.
+- */
+- if (dev_skipped) {
+- if (dev_removed)
+- goto retry;
++ up_write(&_hash_lock);
+
+- DMWARN("remove_all left %d open device(s)", dev_skipped);
++ dm_put(md);
++
++ /*
++ * Some mapped devices may be using other mapped
++ * devices, so repeat until we make no further
++ * progress. If a new mapped device is created
++ * here it will also get removed.
++ */
++ goto retry;
++ }
+ }
+
+ up_write(&_hash_lock);
++
++ if (dev_skipped)
++ DMWARN("remove_all left %d open device(s)", dev_skipped);
+ }
+
+ static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
--- /dev/null
+From 6bbf79a14080a0c61212f53b4b87dc1a99fedf9c Mon Sep 17 00:00:00 2001
+From: Alasdair G Kergon <agk@redhat.com>
+Date: Thu, 12 Aug 2010 04:13:49 +0100
+Subject: dm mpath: fix NULL pointer dereference when path parameters missing
+
+From: Alasdair G Kergon <agk@redhat.com>
+
+commit 6bbf79a14080a0c61212f53b4b87dc1a99fedf9c upstream.
+
+multipath_ctr() forgets to return an error after detecting
+missing path parameters. Fix this.
+
+Signed-off-by: Patrick LoPresti <lopresti@gmail.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/dm-mpath.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -691,6 +691,7 @@ static struct priority_group *parse_prio
+
+ if (as->argc < nr_params) {
+ ti->error = "not enough path parameters";
++ r = -EINVAL;
+ goto bad;
+ }
+
--- /dev/null
+From 5ddb954b9ee50824977d2931e0ff58b3050b337d Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Sat, 7 Aug 2010 11:01:36 +0100
+Subject: drm/i915/edp: Flush the write before waiting for PLLs
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 5ddb954b9ee50824977d2931e0ff58b3050b337d upstream.
+
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/intel_display.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -1402,6 +1402,7 @@ static void igdng_enable_pll_edp (struct
+ dpa_ctl = I915_READ(DP_A);
+ dpa_ctl |= DP_PLL_ENABLE;
+ I915_WRITE(DP_A, dpa_ctl);
++ POSTING_READ(DP_A);
+ udelay(200);
+ }
+
--- /dev/null
+From 845b6cf34150100deb5f58c8a37a372b111f2918 Mon Sep 17 00:00:00 2001
+From: Jiaju Zhang <jjzhang.linux@gmail.com>
+Date: Wed, 28 Jul 2010 13:21:06 +0800
+Subject: Fix the nested PR lock calling issue in ACL
+
+From: Jiaju Zhang <jjzhang.linux@gmail.com>
+
+commit 845b6cf34150100deb5f58c8a37a372b111f2918 upstream.
+
+This patch fixes a deadlock in OCFS2 ACL. We found this bug in an OCFS2
+and Samba integration scenario; the symptom is that several smbd
+processes hang under heavy workload. We finally found out that it
+is the nested PR lock calling that leads to this deadlock:
+
+ node1 node2
+ gr PR
+ |
+ V
+ PR(EX)---> BAST:OCFS2_LOCK_BLOCKED
+ |
+ V
+ rq PR
+ |
+ V
+ wait=1
+
+After requesting the 2nd PR lock, the process "smbd" went into D
+state. It can only be woken up when the 1st PR lock's RO holder equals
+zero. There should be an ocfs2_inode_unlock in the calling path later
+on, which can decrement the RO holder. But since it has been in
+uninterruptible sleep, the unlock function has no chance to be called.
+
+The related stack trace is:
+smbd D ffff8800013d0600 0 9522 5608 0x00000000
+ ffff88002ca7fb18 0000000000000282 ffff88002f964500 ffff88002ca7fa98
+ ffff8800013d0600 ffff88002ca7fae0 ffff88002f964340 ffff88002f964340
+ ffff88002ca7ffd8 ffff88002ca7ffd8 ffff88002f964340 ffff88002f964340
+Call Trace:
+[<ffffffff80350425>] schedule_timeout+0x175/0x210
+[<ffffffff8034f580>] wait_for_common+0xf0/0x210
+[<ffffffffa03e12b9>] __ocfs2_cluster_lock+0x3b9/0xa90 [ocfs2]
+[<ffffffffa03e7665>] ocfs2_inode_lock_full_nested+0x255/0xdb0 [ocfs2]
+[<ffffffffa0446019>] ocfs2_get_acl+0x69/0x120 [ocfs2]
+[<ffffffffa0446368>] ocfs2_check_acl+0x28/0x80 [ocfs2]
+[<ffffffff800e3507>] acl_permission_check+0x57/0xb0
+[<ffffffff800e357d>] generic_permission+0x1d/0xc0
+[<ffffffffa03eecea>] ocfs2_permission+0x10a/0x1d0 [ocfs2]
+[<ffffffff800e3f65>] inode_permission+0x45/0x100
+[<ffffffff800d86b3>] sys_chdir+0x53/0x90
+[<ffffffff80007458>] system_call_fastpath+0x16/0x1b
+[<00007f34a4ef6927>] 0x7f34a4ef6927
+
+For details, please see:
+https://bugzilla.novell.com/show_bug.cgi?id=614332 and
+http://oss.oracle.com/bugzilla/show_bug.cgi?id=1278
+
+Signed-off-by: Jiaju Zhang <jjzhang@suse.de>
+Acked-by: Mark Fasheh <mfasheh@suse.com>
+Signed-off-by: Joel Becker <joel.becker@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ocfs2/acl.c | 24 +++++++++++++++++++++---
+ 1 file changed, 21 insertions(+), 3 deletions(-)
+
+--- a/fs/ocfs2/acl.c
++++ b/fs/ocfs2/acl.c
+@@ -293,12 +293,30 @@ static int ocfs2_set_acl(handle_t *handl
+
+ int ocfs2_check_acl(struct inode *inode, int mask)
+ {
+- struct posix_acl *acl = ocfs2_get_acl(inode, ACL_TYPE_ACCESS);
++ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
++ struct buffer_head *di_bh = NULL;
++ struct posix_acl *acl;
++ int ret = -EAGAIN;
+
+- if (IS_ERR(acl))
++ if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
++ return ret;
++
++ ret = ocfs2_read_inode_block(inode, &di_bh);
++ if (ret < 0) {
++ mlog_errno(ret);
++ return ret;
++ }
++
++ acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, di_bh);
++
++ brelse(di_bh);
++
++ if (IS_ERR(acl)) {
++ mlog_errno(PTR_ERR(acl));
+ return PTR_ERR(acl);
++ }
+ if (acl) {
+- int ret = posix_acl_permission(inode, acl, mask);
++ ret = posix_acl_permission(inode, acl, mask);
+ posix_acl_release(acl);
+ return ret;
+ }
--- /dev/null
+From b9783dcebe952bf73449fe70a19ee4814adc81a0 Mon Sep 17 00:00:00 2001
+From: Jean Delvare <khali@linux-fr.org>
+Date: Sat, 14 Aug 2010 21:08:48 +0200
+Subject: hwmon: (pc87360) Fix device resource declaration
+
+From: Jean Delvare <khali@linux-fr.org>
+
+commit b9783dcebe952bf73449fe70a19ee4814adc81a0 upstream.
+
+It's not OK to call platform_device_add_resources() multiple times
+in a row. Despite its name, this function sets the resources, it
+doesn't add them. So we have to prepare an array with all the
+resources, and then call platform_device_add_resources() once.
+
+Before this fix, only the last I/O resource would actually be
+registered. The other I/O resources were leaked.
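+
+In outline, the fixed pattern is the sketch below (names mirror the driver
+code in the diff that follows; this is an illustration of the intended call
+pattern, not the exact final code):
+
+  struct resource res[3];       /* one slot per possible logical device */
+  int i, err, res_count = 0;
+
+  memset(res, 0, sizeof(res));
+  /* collect every configured I/O range into the array first... */
+  for (i = 0; i < 3; i++) {
+          if (!extra_isa[i])
+                  continue;
+          res[res_count].start = extra_isa[i];
+          res[res_count].end   = extra_isa[i] + PC87360_EXTENT - 1;
+          res[res_count].name  = "pc87360";
+          res[res_count].flags = IORESOURCE_IO;
+          res_count++;
+  }
+
+  /* ...then hand the whole array to the platform core in one call */
+  err = platform_device_add_resources(pdev, res, res_count);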
+
+Signed-off-by: Jean Delvare <khali@linux-fr.org>
+Cc: Jim Cromie <jim.cromie@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/hwmon/pc87360.c | 31 +++++++++++++++++--------------
+ 1 file changed, 17 insertions(+), 14 deletions(-)
+
+--- a/drivers/hwmon/pc87360.c
++++ b/drivers/hwmon/pc87360.c
+@@ -1610,11 +1610,8 @@ static struct pc87360_data *pc87360_upda
+
+ static int __init pc87360_device_add(unsigned short address)
+ {
+- struct resource res = {
+- .name = "pc87360",
+- .flags = IORESOURCE_IO,
+- };
+- int err, i;
++ struct resource res[3];
++ int err, i, res_count;
+
+ pdev = platform_device_alloc("pc87360", address);
+ if (!pdev) {
+@@ -1623,22 +1620,28 @@ static int __init pc87360_device_add(uns
+ goto exit;
+ }
+
++ memset(res, 0, 3 * sizeof(struct resource));
++ res_count = 0;
+ for (i = 0; i < 3; i++) {
+ if (!extra_isa[i])
+ continue;
+- res.start = extra_isa[i];
+- res.end = extra_isa[i] + PC87360_EXTENT - 1;
++ res[res_count].start = extra_isa[i];
++ res[res_count].end = extra_isa[i] + PC87360_EXTENT - 1;
++ res[res_count].name = "pc87360",
++ res[res_count].flags = IORESOURCE_IO,
+
+- err = acpi_check_resource_conflict(&res);
++ err = acpi_check_resource_conflict(&res[res_count]);
+ if (err)
+ goto exit_device_put;
+
+- err = platform_device_add_resources(pdev, &res, 1);
+- if (err) {
+- printk(KERN_ERR "pc87360: Device resource[%d] "
+- "addition failed (%d)\n", i, err);
+- goto exit_device_put;
+- }
++ res_count++;
++ }
++
++ err = platform_device_add_resources(pdev, res, res_count);
++ if (err) {
++ printk(KERN_ERR "pc87360: Device resources addition failed "
++ "(%d)\n", err);
++ goto exit_device_put;
+ }
+
+ err = platform_device_add(pdev);
--- /dev/null
+From 9b00c64318cc337846a7a08a5678f5f19aeff188 Mon Sep 17 00:00:00 2001
+From: Patrick J. LoPresti <lopresti@gmail.com>
+Date: Tue, 10 Aug 2010 17:28:01 -0400
+Subject: nfs: Add "lookupcache" to displayed mount options
+
+From: Patrick J. LoPresti <lopresti@gmail.com>
+
+commit 9b00c64318cc337846a7a08a5678f5f19aeff188 upstream.
+
+Running "cat /proc/mounts" fails to display the "lookupcache" option.
+This oversight cost me a bunch of wasted time recently.
+
+The following simple patch fixes it.
+
+Signed-off-by: Patrick LoPresti <lopresti@gmail.com>
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/super.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -616,6 +616,13 @@ static void nfs_show_mount_options(struc
+
+ if (nfss->options & NFS_OPTION_FSCACHE)
+ seq_printf(m, ",fsc");
++
++ if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) {
++ if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
++ seq_printf(m, ",lookupcache=none");
++ else
++ seq_printf(m, ",lookupcache=pos");
++ }
+ }
+
+ /*
--- /dev/null
+From 8a2e70c40ff58f82dde67770e6623ca45f0cb0c8 Mon Sep 17 00:00:00 2001
+From: Tao Ma <tao.ma@oracle.com>
+Date: Thu, 22 Jul 2010 13:56:45 +0800
+Subject: ocfs2: Count more refcount records in file system fragmentation.
+
+From: Tao Ma <tao.ma@oracle.com>
+
+commit 8a2e70c40ff58f82dde67770e6623ca45f0cb0c8 upstream.
+
+The refcount record calculation in ocfs2_calc_refcount_meta_credits
+is too optimistic: it assumes we can always allocate contiguous clusters
+and handle an already existing refcount rec as a whole. Actually,
+because of file system fragmentation, we may have to split
+a refcount record into 3 parts during the transaction. So consider
+the worst case in the record calculation.
+
+Signed-off-by: Tao Ma <tao.ma@oracle.com>
+Signed-off-by: Joel Becker <joel.becker@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ocfs2/refcounttree.c | 20 +++++++++++++++-----
+ 1 file changed, 15 insertions(+), 5 deletions(-)
+
+--- a/fs/ocfs2/refcounttree.c
++++ b/fs/ocfs2/refcounttree.c
+@@ -2454,16 +2454,26 @@ static int ocfs2_calc_refcount_meta_cred
+ len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
+ le32_to_cpu(rec.r_clusters)) - cpos;
+ /*
+- * If the refcount rec already exist, cool. We just need
+- * to check whether there is a split. Otherwise we just need
+- * to increase the refcount.
+- * If we will insert one, increases recs_add.
+- *
+ * We record all the records which will be inserted to the
+ * same refcount block, so that we can tell exactly whether
+ * we need a new refcount block or not.
++ *
++ * If we will insert a new one, this is easy and only happens
++ * during adding refcounted flag to the extent, so we don't
++ * have a chance of spliting. We just need one record.
++ *
++ * If the refcount rec already exists, that would be a little
++ * complicated. we may have to:
++ * 1) split at the beginning if the start pos isn't aligned.
++ * we need 1 more record in this case.
++ * 2) split int the end if the end pos isn't aligned.
++ * we need 1 more record in this case.
++ * 3) split in the middle because of file system fragmentation.
++ * we need 2 more records in this case(we can't detect this
++ * beforehand, so always think of the worst case).
+ */
+ if (rec.r_refcount) {
++ recs_add += 2;
+ /* Check whether we need a split at the beginning. */
+ if (cpos == start_cpos &&
+ cpos != le64_to_cpu(rec.r_cpos))
--- /dev/null
+From a524812b7eaa7783d7811198921100f079034e61 Mon Sep 17 00:00:00 2001
+From: Wengang Wang <wen.gang.wang@oracle.com>
+Date: Fri, 30 Jul 2010 16:14:44 +0800
+Subject: ocfs2/dlm: avoid incorrect bit set in refmap on recovery master
+
+From: Wengang Wang <wen.gang.wang@oracle.com>
+
+commit a524812b7eaa7783d7811198921100f079034e61 upstream.
+
+In the following situation, an incorrect bit remains in the refmap on the
+recovery master. The recovery master will eventually fail to purge the lockres
+because of that incorrect bit.
+
+1) Node A has no interest in lockres A any longer, so it is purging it.
+2) The owner of lockres A is node B, so node A is sending a de-ref message
+to node B.
+3) At this time, node B crashes and node C becomes the recovery master. It
+recovers lockres A (because the master is the dead node B).
+4) Node A migrates lockres A to node C with a refbit there.
+5) Node A fails to send the de-ref message to node B because node B crashed.
+The failure is ignored and no further action is taken for lockres A.
+
+Normally, re-sending the deref message, this time to the recovery master, could
+fix it. However, ignoring the failure of the deref to the original master and
+not recovering the lockres to the recovery master has the same effect, and the
+latter is simpler.
+
+Signed-off-by: Wengang Wang <wen.gang.wang@oracle.com>
+Acked-by: Srinivas Eeda <srinivas.eeda@oracle.com>
+Signed-off-by: Joel Becker <joel.becker@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ocfs2/dlm/dlmrecovery.c | 22 ++++++++++------------
+ fs/ocfs2/dlm/dlmthread.c | 34 +++++++++++++++++++++-------------
+ 2 files changed, 31 insertions(+), 25 deletions(-)
+
+--- a/fs/ocfs2/dlm/dlmrecovery.c
++++ b/fs/ocfs2/dlm/dlmrecovery.c
+@@ -1941,6 +1941,8 @@ void dlm_move_lockres_to_recovery_list(s
+ struct list_head *queue;
+ struct dlm_lock *lock, *next;
+
++ assert_spin_locked(&dlm->spinlock);
++ assert_spin_locked(&res->spinlock);
+ res->state |= DLM_LOCK_RES_RECOVERING;
+ if (!list_empty(&res->recovering)) {
+ mlog(0,
+@@ -2265,19 +2267,15 @@ static void dlm_do_local_recovery_cleanu
+ /* zero the lvb if necessary */
+ dlm_revalidate_lvb(dlm, res, dead_node);
+ if (res->owner == dead_node) {
+- if (res->state & DLM_LOCK_RES_DROPPING_REF)
+- mlog(0, "%s:%.*s: owned by "
+- "dead node %u, this node was "
+- "dropping its ref when it died. "
+- "continue, dropping the flag.\n",
+- dlm->name, res->lockname.len,
+- res->lockname.name, dead_node);
+-
+- /* the wake_up for this will happen when the
+- * RECOVERING flag is dropped later */
+- res->state &= ~DLM_LOCK_RES_DROPPING_REF;
++ if (res->state & DLM_LOCK_RES_DROPPING_REF) {
++ mlog(ML_NOTICE, "Ignore %.*s for "
++ "recovery as it is being freed\n",
++ res->lockname.len,
++ res->lockname.name);
++ } else
++ dlm_move_lockres_to_recovery_list(dlm,
++ res);
+
+- dlm_move_lockres_to_recovery_list(dlm, res);
+ } else if (res->owner == dlm->node_num) {
+ dlm_free_dead_locks(dlm, res, dead_node);
+ __dlm_lockres_calc_usage(dlm, res);
+--- a/fs/ocfs2/dlm/dlmthread.c
++++ b/fs/ocfs2/dlm/dlmthread.c
+@@ -93,19 +93,27 @@ int __dlm_lockres_has_locks(struct dlm_l
+ * truly ready to be freed. */
+ int __dlm_lockres_unused(struct dlm_lock_resource *res)
+ {
+- if (!__dlm_lockres_has_locks(res) &&
+- (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) {
+- /* try not to scan the bitmap unless the first two
+- * conditions are already true */
+- int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
+- if (bit >= O2NM_MAX_NODES) {
+- /* since the bit for dlm->node_num is not
+- * set, inflight_locks better be zero */
+- BUG_ON(res->inflight_locks != 0);
+- return 1;
+- }
+- }
+- return 0;
++ int bit;
++
++ if (__dlm_lockres_has_locks(res))
++ return 0;
++
++ if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
++ return 0;
++
++ if (res->state & DLM_LOCK_RES_RECOVERING)
++ return 0;
++
++ bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
++ if (bit < O2NM_MAX_NODES)
++ return 0;
++
++ /*
++ * since the bit for dlm->node_num is not set, inflight_locks better
++ * be zero
++ */
++ BUG_ON(res->inflight_locks != 0);
++ return 1;
+ }
+
+
--- /dev/null
+From 6d98c3ccb52f692f1a60339dde7c700686a5568b Mon Sep 17 00:00:00 2001
+From: Wengang Wang <wen.gang.wang@oracle.com>
+Date: Fri, 16 Jul 2010 23:13:33 +0800
+Subject: ocfs2/dlm: fix a dead lock
+
+From: Wengang Wang <wen.gang.wang@oracle.com>
+
+commit 6d98c3ccb52f692f1a60339dde7c700686a5568b upstream.
+
+When we have to take both dlm->master_lock and lockres->spinlock,
+take them in this order:
+
+lockres->spinlock and then dlm->master_lock.
+
+This patch fixes a violation of that rule.
+We can simply move taking dlm->master_lock to after we have dropped res->spinlock,
+since we don't need master_lock's protection when we access res->state and free
+the mle memory.
+
+Signed-off-by: Wengang Wang <wen.gang.wang@oracle.com>
+Signed-off-by: Joel Becker <joel.becker@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ocfs2/dlm/dlmmaster.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/fs/ocfs2/dlm/dlmmaster.c
++++ b/fs/ocfs2/dlm/dlmmaster.c
+@@ -3046,8 +3046,6 @@ int dlm_migrate_request_handler(struct o
+ /* check for pre-existing lock */
+ spin_lock(&dlm->spinlock);
+ res = __dlm_lookup_lockres(dlm, name, namelen, hash);
+- spin_lock(&dlm->master_lock);
+-
+ if (res) {
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_RECOVERING) {
+@@ -3065,14 +3063,15 @@ int dlm_migrate_request_handler(struct o
+ spin_unlock(&res->spinlock);
+ }
+
++ spin_lock(&dlm->master_lock);
+ /* ignore status. only nonzero status would BUG. */
+ ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
+ name, namelen,
+ migrate->new_master,
+ migrate->master);
+
+-unlock:
+ spin_unlock(&dlm->master_lock);
++unlock:
+ spin_unlock(&dlm->spinlock);
+
+ if (oldmle) {
--- /dev/null
+From b11f1f1ab73fd358b1b734a9427744802202ba68 Mon Sep 17 00:00:00 2001
+From: Wengang Wang <wen.gang.wang@oracle.com>
+Date: Fri, 30 Jul 2010 23:18:00 +0800
+Subject: ocfs2/dlm: remove potential deadlock -V3
+
+From: Wengang Wang <wen.gang.wang@oracle.com>
+
+commit b11f1f1ab73fd358b1b734a9427744802202ba68 upstream.
+
+When we need to take both dlm_domain_lock and dlm->spinlock, we should take
+them in order of: dlm_domain_lock then dlm->spinlock.
+
+There are paths that disobey this order, namely calling dlm_lockres_put() with
+dlm->spinlock held in dlm_run_purge_list. dlm_lockres_put() calls dlm_put() when
+the last reference is dropped, and dlm_put() locks dlm_domain_lock.
+
+Fix:
+Don't grab/put the dlm when initialising/releasing a lockres.
+That grab is not required because we don't call dlm_unregister_domain()
+based on refcount.
+
+Signed-off-by: Wengang Wang <wen.gang.wang@oracle.com>
+Signed-off-by: Joel Becker <joel.becker@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ocfs2/dlm/dlmmaster.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/fs/ocfs2/dlm/dlmmaster.c
++++ b/fs/ocfs2/dlm/dlmmaster.c
+@@ -511,8 +511,6 @@ static void dlm_lockres_release(struct k
+
+ atomic_dec(&dlm->res_cur_count);
+
+- dlm_put(dlm);
+-
+ if (!hlist_unhashed(&res->hash_node) ||
+ !list_empty(&res->granted) ||
+ !list_empty(&res->converting) ||
+@@ -585,8 +583,6 @@ static void dlm_init_lockres(struct dlm_
+ res->migration_pending = 0;
+ res->inflight_locks = 0;
+
+- /* put in dlm_lockres_release */
+- dlm_grab(dlm);
+ res->dlm = dlm;
+
+ kref_init(&res->refs);
--- /dev/null
+From 6eda3dd33f8a0ce58ee56a11351758643a698db4 Mon Sep 17 00:00:00 2001
+From: Tiger Yang <tiger.yang@oracle.com>
+Date: Fri, 16 Jul 2010 11:21:23 +0800
+Subject: ocfs2: do not overwrite error codes in ocfs2_init_acl
+
+From: Tiger Yang <tiger.yang@oracle.com>
+
+commit 6eda3dd33f8a0ce58ee56a11351758643a698db4 upstream.
+
+Setting the acl while creating a new inode depends on
+the error codes of posix_acl_create_masq. This patch fixes
+an issue where those error codes were overwritten.
+
+Reported-by: Pawel Zawora <pzawora@gmail.com>
+Signed-off-by: Tiger Yang <tiger.yang@oracle.com>
+Signed-off-by: Joel Becker <joel.becker@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ocfs2/acl.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/fs/ocfs2/acl.c
++++ b/fs/ocfs2/acl.c
+@@ -347,7 +347,7 @@ int ocfs2_init_acl(handle_t *handle,
+ {
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct posix_acl *acl = NULL;
+- int ret = 0;
++ int ret = 0, ret2;
+ mode_t mode;
+
+ if (!S_ISLNK(inode->i_mode)) {
+@@ -384,7 +384,12 @@ int ocfs2_init_acl(handle_t *handle,
+ mode = inode->i_mode;
+ ret = posix_acl_create_masq(clone, &mode);
+ if (ret >= 0) {
+- ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
++ ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
++ if (ret2) {
++ mlog_errno(ret2);
++ ret = ret2;
++ goto cleanup;
++ }
+ if (ret > 0) {
+ ret = ocfs2_set_acl(handle, inode,
+ di_bh, ACL_TYPE_ACCESS,
--- /dev/null
+From 7beaf243787f85a2ef9213ccf13ab4a243283fde Mon Sep 17 00:00:00 2001
+From: Srinivas Eeda <srinivas.eeda@oracle.com>
+Date: Mon, 19 Jul 2010 16:04:12 -0700
+Subject: ocfs2 fix o2dlm dlm run purgelist (rev 3)
+
+From: Srinivas Eeda <srinivas.eeda@oracle.com>
+
+commit 7beaf243787f85a2ef9213ccf13ab4a243283fde upstream.
+
+This patch fixes two problems in dlm_run_purgelist
+
+1. If a lockres is found to be in use, dlm_run_purgelist keeps trying to purge
+the same lockres instead of trying the next lockres.
+
+2. When a lockres is found unused, dlm_run_purgelist releases the lockres
+spinlock before setting DLM_LOCK_RES_DROPPING_REF and calls dlm_purge_lockres.
+The spinlock is reacquired, but in this window the lockres can get reused. This
+leads to a BUG.
+
+This patch modifies dlm_run_purgelist to skip a lockres if it's in use and purge
+the next lockres instead. It also sets DLM_LOCK_RES_DROPPING_REF before
+releasing the lockres spinlock, protecting it from getting reused.
+
+Signed-off-by: Srinivas Eeda <srinivas.eeda@oracle.com>
+Acked-by: Sunil Mushran <sunil.mushran@oracle.com>
+Signed-off-by: Joel Becker <joel.becker@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ocfs2/dlm/dlmthread.c | 80 +++++++++++++++++++----------------------------
+ 1 file changed, 34 insertions(+), 46 deletions(-)
+
+--- a/fs/ocfs2/dlm/dlmthread.c
++++ b/fs/ocfs2/dlm/dlmthread.c
+@@ -153,45 +153,25 @@ void dlm_lockres_calc_usage(struct dlm_c
+ spin_unlock(&dlm->spinlock);
+ }
+
+-static int dlm_purge_lockres(struct dlm_ctxt *dlm,
++static void dlm_purge_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+ {
+ int master;
+ int ret = 0;
+
+- spin_lock(&res->spinlock);
+- if (!__dlm_lockres_unused(res)) {
+- mlog(0, "%s:%.*s: tried to purge but not unused\n",
+- dlm->name, res->lockname.len, res->lockname.name);
+- __dlm_print_one_lock_resource(res);
+- spin_unlock(&res->spinlock);
+- BUG();
+- }
+-
+- if (res->state & DLM_LOCK_RES_MIGRATING) {
+- mlog(0, "%s:%.*s: Delay dropref as this lockres is "
+- "being remastered\n", dlm->name, res->lockname.len,
+- res->lockname.name);
+- /* Re-add the lockres to the end of the purge list */
+- if (!list_empty(&res->purge)) {
+- list_del_init(&res->purge);
+- list_add_tail(&res->purge, &dlm->purge_list);
+- }
+- spin_unlock(&res->spinlock);
+- return 0;
+- }
++ assert_spin_locked(&dlm->spinlock);
++ assert_spin_locked(&res->spinlock);
+
+ master = (res->owner == dlm->node_num);
+
+- if (!master)
+- res->state |= DLM_LOCK_RES_DROPPING_REF;
+- spin_unlock(&res->spinlock);
+
+ mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
+ res->lockname.name, master);
+
+ if (!master) {
++ res->state |= DLM_LOCK_RES_DROPPING_REF;
+ /* drop spinlock... retake below */
++ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->spinlock);
+
+ spin_lock(&res->spinlock);
+@@ -209,31 +189,35 @@ static int dlm_purge_lockres(struct dlm_
+ mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
+ dlm->name, res->lockname.len, res->lockname.name, ret);
+ spin_lock(&dlm->spinlock);
++ spin_lock(&res->spinlock);
+ }
+
+- spin_lock(&res->spinlock);
+ if (!list_empty(&res->purge)) {
+ mlog(0, "removing lockres %.*s:%p from purgelist, "
+ "master = %d\n", res->lockname.len, res->lockname.name,
+ res, master);
+ list_del_init(&res->purge);
+- spin_unlock(&res->spinlock);
+ dlm_lockres_put(res);
+ dlm->purge_count--;
+- } else
+- spin_unlock(&res->spinlock);
++ }
++
++ if (!__dlm_lockres_unused(res)) {
++ mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n",
++ dlm->name, res->lockname.len, res->lockname.name);
++ __dlm_print_one_lock_resource(res);
++ BUG();
++ }
+
+ __dlm_unhash_lockres(res);
+
+ /* lockres is not in the hash now. drop the flag and wake up
+ * any processes waiting in dlm_get_lock_resource. */
+ if (!master) {
+- spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_DROPPING_REF;
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+- }
+- return 0;
++ } else
++ spin_unlock(&res->spinlock);
+ }
+
+ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
+@@ -252,17 +236,7 @@ static void dlm_run_purge_list(struct dl
+ lockres = list_entry(dlm->purge_list.next,
+ struct dlm_lock_resource, purge);
+
+- /* Status of the lockres *might* change so double
+- * check. If the lockres is unused, holding the dlm
+- * spinlock will prevent people from getting and more
+- * refs on it -- there's no need to keep the lockres
+- * spinlock. */
+ spin_lock(&lockres->spinlock);
+- unused = __dlm_lockres_unused(lockres);
+- spin_unlock(&lockres->spinlock);
+-
+- if (!unused)
+- continue;
+
+ purge_jiffies = lockres->last_used +
+ msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);
+@@ -274,15 +248,29 @@ static void dlm_run_purge_list(struct dl
+ * in tail order, we can stop at the first
+ * unpurgable resource -- anyone added after
+ * him will have a greater last_used value */
++ spin_unlock(&lockres->spinlock);
+ break;
+ }
+
++ /* Status of the lockres *might* change so double
++ * check. If the lockres is unused, holding the dlm
++ * spinlock will prevent people from getting and more
++ * refs on it. */
++ unused = __dlm_lockres_unused(lockres);
++ if (!unused ||
++ (lockres->state & DLM_LOCK_RES_MIGRATING)) {
++ mlog(0, "lockres %s:%.*s: is in use or "
++ "being remastered, used %d, state %d\n",
++ dlm->name, lockres->lockname.len,
++ lockres->lockname.name, !unused, lockres->state);
++ list_move_tail(&dlm->purge_list, &lockres->purge);
++ spin_unlock(&lockres->spinlock);
++ continue;
++ }
++
+ dlm_lockres_get(lockres);
+
+- /* This may drop and reacquire the dlm spinlock if it
+- * has to do migration. */
+- if (dlm_purge_lockres(dlm, lockres))
+- BUG();
++ dlm_purge_lockres(dlm, lockres);
+
+ dlm_lockres_put(lockres);
+
--- /dev/null
+From 93b352fce679945845664b56b0c3afbd655a7a12 Mon Sep 17 00:00:00 2001
+From: Axel Lin <axel.lin@gmail.com>
+Date: Mon, 16 Aug 2010 16:09:09 +0800
+Subject: pxa3xx: fix ns2cycle equation
+
+From: Axel Lin <axel.lin@gmail.com>
+
+commit 93b352fce679945845664b56b0c3afbd655a7a12 upstream.
+
+Testing on a PXA310 platform with Samsung K9F2G08X0B NAND flash,
+with tCH=5 and clk=156MHz, ns2cycle(5, 156000000) returns -1.
+
+ns2cycle returning a negative value will break the NDTR0_tXX macros.
+
+After checking the commit log, I found the problem is introduced by
+commit 5b0d4d7c8a67c5ba3d35e6ceb0c5530cc6846db7
+"[MTD] [NAND] pxa3xx: convert from ns to clock ticks more accurately"
+
+To get the number of clock cycles, we use the equation below:
+num of clock cycles = time (ns) / one clock cycle (ns) + 1
+We need to add 1 cycle here because integer division will truncate the result.
+It is possible that developers set the minimum values from the spec for the
+timing settings, so the truncation may cause problems, and it is safe to add
+an extra cycle here.
+
+The various fields in NDTR{01} are in units of clock ticks minus one,
+thus we should subtract 1 cycle then.
+
+Thus the correct equation should be:
+num of clock cycles = time (ns) / one clock cycle (ns) + 1 - 1
+ = time (ns) / one clock cycle (ns)
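+
+As a quick illustration with the numbers from the report above (tCH = 5ns,
+clk = 156MHz, so clk / 1000000 = 156):
+
+  old macro: (5 * 156 / 1000) - 1 = 0 - 1 = -1   (negative, breaks NDTR0_tXX)
+  new macro:  5 * 156 / 1000      = 0            (field value 0 = one clock tick)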
+
+Signed-off-by: Axel Lin <axel.lin@gmail.com>
+Signed-off-by: Lei Wen <leiwen@marvell.com>
+Acked-by: Eric Miao <eric.y.miao@gmail.com>
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/mtd/nand/pxa3xx_nand.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mtd/nand/pxa3xx_nand.c
++++ b/drivers/mtd/nand/pxa3xx_nand.c
+@@ -316,7 +316,7 @@ static struct pxa3xx_nand_flash *builtin
+ #define tAR_NDTR1(r) (((r) >> 0) & 0xf)
+
+ /* convert nano-seconds to nand flash controller clock cycles */
+-#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) - 1)
++#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
+
+ /* convert nand flash controller clock cycles to nano-seconds */
+ #define cycle2ns(c, clk) ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))
alsa-riptide-fix-detection-load-of-firmware-files.patch
alsa-emu10k1-delay-the-pcm-interrupts-add-pcm_irq_delay-parameter.patch
alsa-hda-fix-missing-stream-for-second-adc-on-realtek-alc260-hda-codec.patch
+ocfs2-do-not-overwrite-error-codes-in-ocfs2_init_acl.patch
+ocfs2-dlm-fix-a-dead-lock.patch
+ocfs2-fix-o2dlm-dlm-run-purgelist-rev-3.patch
+ocfs2-count-more-refcount-records-in-file-system-fragmentation.patch
+ocfs2-dlm-avoid-incorrect-bit-set-in-refmap-on-recovery-master.patch
+ocfs2-dlm-remove-potential-deadlock-v3.patch
+x86-hotplug-serialize-cpu-hotplug-to-avoid-bringup-concurrency-issues.patch
+x86-apic-fix-apic-debug-boot-crash.patch
+fix-the-nested-pr-lock-calling-issue-in-acl.patch
+hwmon-pc87360-fix-device-resource-declaration.patch
+arm-tighten-check-for-allowable-cpsr-values.patch
+nfs-add-lookupcache-to-displayed-mount-options.patch
+ath5k-disable-aspm-l0s-for-all-cards.patch
+pxa3xx-fix-ns2cycle-equation.patch
+drm-i915-edp-flush-the-write-before-waiting-for-plls.patch
+dm-mpath-fix-null-pointer-dereference-when-path-parameters-missing.patch
+dm-ioctl-release-_hash_lock-between-devices-in-remove_all.patch
--- /dev/null
+From 05e407603e527f9d808dd3866d3a17c2ce4dfcc5 Mon Sep 17 00:00:00 2001
+From: Daniel Kiper <dkiper@net-space.pl>
+Date: Fri, 20 Aug 2010 00:46:16 +0200
+Subject: x86, apic: Fix apic=debug boot crash
+
+From: Daniel Kiper <dkiper@net-space.pl>
+
+commit 05e407603e527f9d808dd3866d3a17c2ce4dfcc5 upstream.
+
+Fix a boot crash when apic=debug is used and the APIC is
+not properly initialized.
+
+This issue appears during Xen Dom0 kernel boot but the
+fix is generic and the crash could occur on real hardware
+as well.
+
+Signed-off-by: Daniel Kiper <dkiper@net-space.pl>
+Cc: xen-devel@lists.xensource.com
+Cc: konrad.wilk@oracle.com
+Cc: jeremy@goop.org
+LKML-Reference: <20100819224616.GB9967@router-fw-old.local.net-space.pl>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/apic/io_apic.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1736,6 +1736,8 @@ __apicdebuginit(void) print_IO_APIC(void
+ struct irq_pin_list *entry;
+
+ cfg = desc->chip_data;
++ if (!cfg)
++ continue;
+ entry = cfg->irq_2_pin;
+ if (!entry)
+ continue;
--- /dev/null
+From d7c53c9e822a4fefa13a0cae76f3190bfd0d5c11 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@amd64.org>
+Date: Thu, 19 Aug 2010 20:10:29 +0200
+Subject: x86, hotplug: Serialize CPU hotplug to avoid bringup concurrency issues
+
+From: Borislav Petkov <bp@amd64.org>
+
+commit d7c53c9e822a4fefa13a0cae76f3190bfd0d5c11 upstream.
+
+When testing cpu hotplug code on 32-bit we kept hitting the "CPU%d:
+Stuck ??" message due to multiple cores concurrently accessing the
+cpu_callin_mask, among others.
+
+Since these codepaths are not protected from concurrent access (there's
+no sane reason for making already complex code unnecessarily more
+complex - we hit the issue only when insanely switching cores off- and
+online), serialize hotplugging of cores at the sysfs level and be done
+with it.
+[ v2.1: fix !HOTPLUG_CPU build ]
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+LKML-Reference: <20100819181029.GC17171@aftab>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/Kconfig | 5 +++++
+ arch/x86/kernel/smpboot.c | 19 +++++++++++++++++++
+ 2 files changed, 24 insertions(+)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -227,6 +227,11 @@ config X86_32_LAZY_GS
+
+ config KTIME_SCALAR
+ def_bool X86_32
++
++config ARCH_CPU_PROBE_RELEASE
++ def_bool y
++ depends on HOTPLUG_CPU
++
+ source "init/Kconfig"
+ source "kernel/Kconfig.freezer"
+
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -88,6 +88,25 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
+ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
+ #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
+ #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
++
++/*
++ * We need this for trampoline_base protection from concurrent accesses when
++ * off- and onlining cores wildly.
++ */
++static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
++
++void cpu_hotplug_driver_lock()
++{
++ mutex_lock(&x86_cpu_hotplug_driver_mutex);
++}
++
++void cpu_hotplug_driver_unlock()
++{
++ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
++}
++
++ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
++ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
+ #else
+ static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
+ #define get_idle_for_cpu(x) (idle_thread_array[(x)])