--- /dev/null
+From ef077179a2909d3d0d3accf29ad1ea9ebb19019b Mon Sep 17 00:00:00 2001
+From: Marek Vasut <marek.vasut@gmail.com>
+Date: Thu, 12 Aug 2010 02:14:54 +0100
+Subject: ARM: Fix gen_nand probe structures contents
+
+From: Marek Vasut <marek.vasut@gmail.com>
+
+commit ef077179a2909d3d0d3accf29ad1ea9ebb19019b upstream.
+
+These three platforms didn't properly fill nr_chips in gen_nand
+registration and therefore depended on a gen_nand bug fixed by commit
+81cbb0b17796d81cbd92defe113cf2a7c7a21fbb ("mtd: gen_nand: fix support for
+multiple chips").
+
+Signed-off-by: Marek Vasut <marek.vasut@gmail.com>
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/arm/mach-ixp4xx/ixdp425-setup.c | 1 +
+ arch/arm/mach-mx3/mach-qong.c | 1 +
+ arch/arm/mach-orion5x/ts78xx-setup.c | 1 +
+ 3 files changed, 3 insertions(+)
+
+--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
++++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
+@@ -100,6 +100,7 @@ ixdp425_flash_nand_cmd_ctrl(struct mtd_i
+
+ static struct platform_nand_data ixdp425_flash_nand_data = {
+ .chip = {
++ .nr_chips = 1,
+ .chip_delay = 30,
+ .options = NAND_NO_AUTOINCR,
+ #ifdef CONFIG_MTD_PARTITIONS
+--- a/arch/arm/mach-mx3/mach-qong.c
++++ b/arch/arm/mach-mx3/mach-qong.c
+@@ -169,6 +169,7 @@ static void qong_nand_select_chip(struct
+
+ static struct platform_nand_data qong_nand_data = {
+ .chip = {
++ .nr_chips = 1,
+ .chip_delay = 20,
+ .options = 0,
+ },
+--- a/arch/arm/mach-orion5x/ts78xx-setup.c
++++ b/arch/arm/mach-orion5x/ts78xx-setup.c
+@@ -216,6 +216,7 @@ static struct mtd_partition ts78xx_ts_na
+
+ static struct platform_nand_data ts78xx_ts_nand_data = {
+ .chip = {
++ .nr_chips = 1,
+ .part_probe_types = ts_nand_part_probes,
+ .partitions = ts78xx_ts_nand_parts,
+ .nr_partitions = ARRAY_SIZE(ts78xx_ts_nand_parts),
--- /dev/null
+From 41e2e8fd34fff909a0e40129f6ac4233ecfa67a9 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Fri, 13 Aug 2010 23:33:46 +0100
+Subject: ARM: Tighten check for allowable CPSR values
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit 41e2e8fd34fff909a0e40129f6ac4233ecfa67a9 upstream.
+
+Reviewed-by: Arve Hjønnevåg <arve@android.com>
+Acked-by: Dima Zavin <dima@android.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/arm/include/asm/ptrace.h | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/include/asm/ptrace.h
++++ b/arch/arm/include/asm/ptrace.h
+@@ -158,15 +158,24 @@ struct pt_regs {
+ */
+ static inline int valid_user_regs(struct pt_regs *regs)
+ {
+- if (user_mode(regs) && (regs->ARM_cpsr & PSR_I_BIT) == 0) {
+- regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);
+- return 1;
++ unsigned long mode = regs->ARM_cpsr & MODE_MASK;
++
++ /*
++ * Always clear the F (FIQ) and A (delayed abort) bits
++ */
++ regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);
++
++ if ((regs->ARM_cpsr & PSR_I_BIT) == 0) {
++ if (mode == USR_MODE)
++ return 1;
++ if (elf_hwcap & HWCAP_26BIT && mode == USR26_MODE)
++ return 1;
+ }
+
+ /*
+ * Force CPSR to something logical...
+ */
+- regs->ARM_cpsr &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | PSR_T_BIT | MODE32_BIT;
++ regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;
+ if (!(elf_hwcap & HWCAP_26BIT))
+ regs->ARM_cpsr |= USR_MODE;
+
--- /dev/null
+From 6ccf15a1a76d2ff915cdef6ae4d12d0170087118 Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <maximlevitsky@gmail.com>
+Date: Fri, 13 Aug 2010 11:27:28 -0400
+Subject: ath5k: disable ASPM L0s for all cards
+
+From: Maxim Levitsky <maximlevitsky@gmail.com>
+
+commit 6ccf15a1a76d2ff915cdef6ae4d12d0170087118 upstream.
+
+Atheros PCIe wireless cards handled by ath5k do require L0s disabled.
+For distributions shipping with CONFIG_PCIEASPM (this will be enabled
+by default in the future in 2.6.36) this will also mean both L1 and L0s
+will be disabled when a pre 1.1 PCIe device is detected. We do know L1
+works correctly even for all ath5k pre 1.1 PCIe devices, but we cannot
+currently undo the effect of a blacklist; for details you can read
+pcie_aspm_sanity_check() and see how it adjusts the device link
+capability.
+
+It may be possible in the future to implement some PCI API to allow
+drivers to override blacklists for pre 1.1 PCIe but for now it is
+best to accept that both L0s and L1 will be disabled completely for
+distributions shipping with CONFIG_PCIEASPM rather than having this
+issue present. Motivation for adding this new API will be to help
+with power consumption for some of these devices.
+
+Example of issues you'd see:
+
+ - The Acer Aspire One (AOA150, Atheros Communications Inc. AR5001
+   Wireless Network Adapter [168c:001c] (rev 01)) doesn't work well
+   with ASPM enabled: the card will eventually stall on heavy traffic,
+   often with 'unsupported jumbo' warnings appearing. Disabling
+   ASPM L0s in ath5k fixes these problems.
+
+ - On the same card you would see a storm of RXORN interrupts
+   even though the medium is idle.
+
+Credit for root causing and fixing the bug goes to Jussi Kivilinna.
+
+Cc: David Quan <David.Quan@atheros.com>
+Cc: Matthew Garrett <mjg59@srcf.ucam.org>
+Cc: Tim Gardner <tim.gardner@canonical.com>
+Cc: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+Signed-off-by: Luis R. Rodriguez <lrodriguez@atheros.com>
+Signed-off-by: Maxim Levitsky <maximlevitsky@gmail.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/ath/ath5k/base.c | 21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+--- a/drivers/net/wireless/ath/ath5k/base.c
++++ b/drivers/net/wireless/ath/ath5k/base.c
+@@ -48,6 +48,7 @@
+ #include <linux/netdevice.h>
+ #include <linux/cache.h>
+ #include <linux/pci.h>
++#include <linux/pci-aspm.h>
+ #include <linux/ethtool.h>
+ #include <linux/uaccess.h>
+ #include <linux/slab.h>
+@@ -472,6 +473,26 @@ ath5k_pci_probe(struct pci_dev *pdev,
+ int ret;
+ u8 csz;
+
++ /*
++ * L0s needs to be disabled on all ath5k cards.
++ *
++ * For distributions shipping with CONFIG_PCIEASPM (this will be enabled
++ * by default in the future in 2.6.36) this will also mean both L1 and
++ * L0s will be disabled when a pre 1.1 PCIe device is detected. We do
++ * know L1 works correctly even for all ath5k pre 1.1 PCIe devices,
++ * but we cannot currently undo the effect of a blacklist; for
++ * details you can read pcie_aspm_sanity_check() and see how it adjusts
++ * the device link capability.
++ *
++ * It may be possible in the future to implement some PCI API to allow
++ * drivers to override blacklists for pre 1.1 PCIe but for now it is
++ * best to accept that both L0s and L1 will be disabled completely for
++ * distributions shipping with CONFIG_PCIEASPM rather than having this
++ * issue present. Motivation for adding this new API will be to help
++ * with power consumption for some of these devices.
++ */
++ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
++
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "can't enable device\n");
--- /dev/null
+From da93f10684bfba2983a70c10b5d417232b6a5245 Mon Sep 17 00:00:00 2001
+From: Rajkumar Manoharan <rmanoharan@atheros.com>
+Date: Wed, 11 Aug 2010 20:27:43 +0530
+Subject: ath9k_htc: fix panic on packet injection using airbase-ng tool.
+
+From: Rajkumar Manoharan <rmanoharan@atheros.com>
+
+commit da93f10684bfba2983a70c10b5d417232b6a5245 upstream.
+
+This should fix the oops which occurs during the packet injection
+on monitor interface.
+
+EIP is at ath9k_htc_tx_start+0x69/0x220 [ath9k_htc]
+ [<f84dc8ea>] ? invoke_tx_handlers+0xa5a/0xee0 [mac80211]
+ [<f82c84f4>] ? ath9k_htc_tx+0x44/0xe0 [ath9k_htc]
+ [<f84db7b8>] ? __ieee80211_tx+0xf8/0x190 [mac80211]
+ [<f84dce0d>] ? ieee80211_tx+0x9d/0x1a0 [mac80211]
+ [<f84dcfac>] ? ieee80211_xmit+0x9c/0x1c0 [mac80211]
+ [<f84dd1b5>] ? ieee80211_monitor_start_xmit+0x85/0xb0 [mac80211]
+ [<c04c30cd>] ? dev_hard_start_xmit+0x1ad/0x210
+ [<c04b97c2>] ? __alloc_skb+0x52/0x130
+ [<c04d7cd5>] ? sch_direct_xmit+0x105/0x170
+ [<c04c5e9f>] ? dev_queue_xmit+0x37f/0x4b0
+ [<c0567e1e>] ? packet_snd+0x21e/0x250
+ [<c05684a2>] ? packet_sendmsg+0x32/0x40
+ [<c04b4c63>] ? sock_aio_write+0x113/0x130
+ [<c0207934>] ? do_sync_write+0xc4/0x100
+ [<c0167740>] ? autoremove_wake_function+0x0/0x50
+ [<c02f4414>] ? security_file_permission+0x14/0x20
+ [<c0207ad4>] ? rw_verify_area+0x64/0xe0
+ [<c01e6458>] ? handle_mm_fault+0x338/0x390
+ [<c0207cd5>] ? vfs_write+0x185/0x1a0
+ [<c058db20>] ? do_page_fault+0x160/0x3a0
+ [<c0208512>] ? sys_write+0x42/0x70
+ [<c01033ec>] ? syscall_call+0x7/0xb
+
+Signed-off-by: Rajkumar Manoharan <rmanoharan@atheros.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+@@ -68,18 +68,23 @@ int ath9k_htc_tx_start(struct ath9k_htc_
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = tx_info->control.sta;
+ struct ath9k_htc_sta *ista;
+- struct ath9k_htc_vif *avp;
+ struct ath9k_htc_tx_ctl tx_ctl;
+ enum htc_endpoint_id epid;
+ u16 qnum, hw_qnum;
+ __le16 fc;
+ u8 *tx_fhdr;
+- u8 sta_idx;
++ u8 sta_idx, vif_idx;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ fc = hdr->frame_control;
+
+- avp = (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv;
++ if (tx_info->control.vif &&
++ (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv)
++ vif_idx = ((struct ath9k_htc_vif *)
++ tx_info->control.vif->drv_priv)->index;
++ else
++ vif_idx = priv->nvifs;
++
+ if (sta) {
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ sta_idx = ista->index;
+@@ -96,7 +101,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_
+ memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));
+
+ tx_hdr.node_idx = sta_idx;
+- tx_hdr.vif_idx = avp->index;
++ tx_hdr.vif_idx = vif_idx;
+
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+ tx_ctl.type = ATH9K_HTC_AMPDU;
+@@ -156,7 +161,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_
+ tx_ctl.type = ATH9K_HTC_NORMAL;
+
+ mgmt_hdr.node_idx = sta_idx;
+- mgmt_hdr.vif_idx = avp->index;
++ mgmt_hdr.vif_idx = vif_idx;
+ mgmt_hdr.tidno = 0;
+ mgmt_hdr.flags = 0;
+
--- /dev/null
+From ef56609f9c7fdf5baa9d9f86f84a7bd8a717cd25 Mon Sep 17 00:00:00 2001
+From: Marek Vasut <marek.vasut@gmail.com>
+Date: Thu, 12 Aug 2010 03:53:54 +0100
+Subject: BFIN: Fix gen_nand probe structures contents
+
+From: Marek Vasut <marek.vasut@gmail.com>
+
+commit ef56609f9c7fdf5baa9d9f86f84a7bd8a717cd25 upstream.
+
+These two platforms didn't properly fill nr_chips in gen_nand
+registration and therefore depended on a gen_nand bug fixed by commit
+81cbb0b17796d81cbd92defe113cf2a7c7a21fbb ("mtd: gen_nand: fix support for
+multiple chips").
+
+Signed-off-by: Marek Vasut <marek.vasut@gmail.com>
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/blackfin/mach-bf537/boards/stamp.c | 1 +
+ arch/blackfin/mach-bf561/boards/acvilon.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/arch/blackfin/mach-bf537/boards/stamp.c
++++ b/arch/blackfin/mach-bf537/boards/stamp.c
+@@ -400,6 +400,7 @@ static int bfin_plat_nand_dev_ready(stru
+
+ static struct platform_nand_data bfin_plat_nand_data = {
+ .chip = {
++ .nr_chips = 1,
+ .chip_delay = 30,
+ #ifdef CONFIG_MTD_PARTITIONS
+ .part_probe_types = part_probes,
+--- a/arch/blackfin/mach-bf561/boards/acvilon.c
++++ b/arch/blackfin/mach-bf561/boards/acvilon.c
+@@ -284,6 +284,7 @@ static int bfin_plat_nand_dev_ready(stru
+
+ static struct platform_nand_data bfin_plat_nand_data = {
+ .chip = {
++ .nr_chips = 1,
+ .chip_delay = 30,
+ #ifdef CONFIG_MTD_PARTITIONS
+ .part_probe_types = part_probes,
--- /dev/null
+From fe100acddf438591ecf3582cb57241e560da70b7 Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Mon, 9 Aug 2010 15:52:03 +0200
+Subject: cfg80211: fix locking in action frame TX
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit fe100acddf438591ecf3582cb57241e560da70b7 upstream.
+
+Accesses to "wdev->current_bss" must be
+locked with the wdev lock, which action
+frame transmission is missing.
+
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/wireless/mlme.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/net/wireless/mlme.c
++++ b/net/wireless/mlme.c
+@@ -842,12 +842,18 @@ int cfg80211_mlme_action(struct cfg80211
+ return -EINVAL;
+ if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) {
+ /* Verify that we are associated with the destination AP */
++ wdev_lock(wdev);
++
+ if (!wdev->current_bss ||
+ memcmp(wdev->current_bss->pub.bssid, mgmt->bssid,
+ ETH_ALEN) != 0 ||
+ memcmp(wdev->current_bss->pub.bssid, mgmt->da,
+- ETH_ALEN) != 0)
++ ETH_ALEN) != 0) {
++ wdev_unlock(wdev);
+ return -ENOTCONN;
++ }
++ wdev_unlock(wdev);
++
+ }
+
+ if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0)
--- /dev/null
+From 98f332855effef02aeb738e4d62e9a5b903c52fd Mon Sep 17 00:00:00 2001
+From: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
+Date: Thu, 12 Aug 2010 04:13:55 +0100
+Subject: dm ioctl: release _hash_lock between devices in remove_all
+
+From: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
+
+commit 98f332855effef02aeb738e4d62e9a5b903c52fd upstream.
+
+This patch changes dm_hash_remove_all() to release _hash_lock when
+removing a device. After removing the device, dm_hash_remove_all()
+takes _hash_lock and searches the hash from scratch again.
+
+This patch is a preparation for the next patch, which changes the device
+deletion code to wait for the md reference count to drop to 0. Without this
+patch, the wait in the next patch may cause an AB-BA deadlock:
+ CPU0 CPU1
+ -----------------------------------------------------------------------
+ dm_hash_remove_all()
+ down_write(_hash_lock)
+ table_status()
+ md = find_device()
+ dm_get(md)
+ <increment md->holders>
+ dm_get_live_or_inactive_table()
+ dm_get_inactive_table()
+ down_write(_hash_lock)
+ <in the md deletion code>
+ <wait for md->holders to be 0>
+
+Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
+Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/dm-ioctl.c | 44 +++++++++++++++++++++++++-------------------
+ 1 file changed, 25 insertions(+), 19 deletions(-)
+
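+For reference, the locking pattern adopted here is: claim the entry, drop
+the global lock before any operation that may block, then re-take the lock
+and rescan the list from the start. A minimal userspace sketch of that
+pattern follows; all names are made up for illustration and this is not the
+dm code itself.
+
+    #include <pthread.h>
+    #include <stdlib.h>
+
+    struct item { struct item *next; int busy; };
+
+    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
+    static struct item *head;
+
+    static void blocking_destroy(struct item *it)
+    {
+        free(it);                   /* may block; must not run under list_lock */
+    }
+
+    static void remove_all(void)
+    {
+        struct item *it, **pp;
+    retry:
+        pthread_mutex_lock(&list_lock);
+        for (pp = &head; (it = *pp) != NULL; pp = &it->next) {
+            if (it->busy)
+                continue;           /* skipped, like dev_skipped */
+            *pp = it->next;         /* unlink while still holding the lock */
+            pthread_mutex_unlock(&list_lock);
+            blocking_destroy(it);   /* safe: lock not held */
+            goto retry;             /* the list may have changed; rescan */
+        }
+        pthread_mutex_unlock(&list_lock);
+    }
+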
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -249,40 +249,46 @@ static void __hash_remove(struct hash_ce
+
+ static void dm_hash_remove_all(int keep_open_devices)
+ {
+- int i, dev_skipped, dev_removed;
++ int i, dev_skipped;
+ struct hash_cell *hc;
+- struct list_head *tmp, *n;
++ struct mapped_device *md;
++
++retry:
++ dev_skipped = 0;
+
+ down_write(&_hash_lock);
+
+-retry:
+- dev_skipped = dev_removed = 0;
+ for (i = 0; i < NUM_BUCKETS; i++) {
+- list_for_each_safe (tmp, n, _name_buckets + i) {
+- hc = list_entry(tmp, struct hash_cell, name_list);
++ list_for_each_entry(hc, _name_buckets + i, name_list) {
++ md = hc->md;
++ dm_get(md);
+
+- if (keep_open_devices &&
+- dm_lock_for_deletion(hc->md)) {
++ if (keep_open_devices && dm_lock_for_deletion(md)) {
++ dm_put(md);
+ dev_skipped++;
+ continue;
+ }
++
+ __hash_remove(hc);
+- dev_removed = 1;
+- }
+- }
+
+- /*
+- * Some mapped devices may be using other mapped devices, so if any
+- * still exist, repeat until we make no further progress.
+- */
+- if (dev_skipped) {
+- if (dev_removed)
+- goto retry;
++ up_write(&_hash_lock);
+
+- DMWARN("remove_all left %d open device(s)", dev_skipped);
++ dm_put(md);
++
++ /*
++ * Some mapped devices may be using other mapped
++ * devices, so repeat until we make no further
++ * progress. If a new mapped device is created
++ * here it will also get removed.
++ */
++ goto retry;
++ }
+ }
+
+ up_write(&_hash_lock);
++
++ if (dev_skipped)
++ DMWARN("remove_all left %d open device(s)", dev_skipped);
+ }
+
+ static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
--- /dev/null
+From abdc568b0540bec6d3e0afebac496adef1189b77 Mon Sep 17 00:00:00 2001
+From: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
+Date: Thu, 12 Aug 2010 04:13:54 +0100
+Subject: dm: prevent access to md being deleted
+
+From: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
+
+commit abdc568b0540bec6d3e0afebac496adef1189b77 upstream.
+
+This patch prevents access to a mapped_device which is being deleted.
+
+Currently, even after a mapped_device has been removed from the hash,
+it could still be accessed through idr_find() using its minor number.
+That could cause a race and a NULL pointer dereference as below:
+ CPU0 CPU1
+ ------------------------------------------------------------------
+ dev_remove(param)
+ down_write(_hash_lock)
+ dm_lock_for_deletion(md)
+ spin_lock(_minor_lock)
+ set_bit(DMF_DELETING)
+ spin_unlock(_minor_lock)
+ __hash_remove(hc)
+ up_write(_hash_lock)
+ dev_status(param)
+ md = find_device(param)
+ down_read(_hash_lock)
+ __find_device_hash_cell(param)
+ dm_get_md(param->dev)
+ md = dm_find_md(dev)
+ spin_lock(_minor_lock)
+ md = idr_find(MINOR(dev))
+ spin_unlock(_minor_lock)
+ dm_put(md)
+ free_dev(md)
+ dm_get(md)
+ up_read(_hash_lock)
+ __dev_status(md, param)
+ dm_put(md)
+
+This patch fixes such problems.
+
+Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
+Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/dm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2141,6 +2141,7 @@ static struct mapped_device *dm_find_md(
+ md = idr_find(&_minor_idr, minor);
+ if (md && (md == MINOR_ALLOCED ||
+ (MINOR(disk_devt(dm_disk(md))) != minor) ||
++ dm_deleting_md(md) ||
+ test_bit(DMF_FREEING, &md->flags))) {
+ md = NULL;
+ goto out;
--- /dev/null
+From 1e5554c8428bc7209a83e2d07ca724be4d981ce3 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Thu, 12 Aug 2010 04:13:50 +0100
+Subject: dm snapshot: iterate origin and cow devices
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 1e5554c8428bc7209a83e2d07ca724be4d981ce3 upstream.
+
+Iterate both origin and snapshot devices
+
+The iterate_devices method should call the callback for all the devices where
+the bio may be remapped. Thus, snapshot_iterate_devices should call the callback
+for both the snapshot and origin underlying devices because it remaps some bios
+to the snapshot and some to the origin.
+
+snapshot_iterate_devices called the callback only for the origin device.
+This led to badly calculated device limits if snapshot and origin were placed
+on different types of disks.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Reviewed-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/dm-snap.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1899,8 +1899,14 @@ static int snapshot_iterate_devices(stru
+ iterate_devices_callout_fn fn, void *data)
+ {
+ struct dm_snapshot *snap = ti->private;
++ int r;
+
+- return fn(ti, snap->origin, 0, ti->len, data);
++ r = fn(ti, snap->origin, 0, ti->len, data);
++
++ if (!r)
++ r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
++
++ return r;
+ }
+
+
--- /dev/null
+From c24110450650f17f7d3ba4fbe01f01ac5a115456 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Thu, 12 Aug 2010 04:13:51 +0100
+Subject: dm snapshot: test chunk size against both origin and snapshot
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit c24110450650f17f7d3ba4fbe01f01ac5a115456 upstream.
+
+Validate chunk size against both origin and snapshot sector size
+
+Don't allow chunk size smaller than either origin or snapshot logical
+sector size. Reading or writing data not aligned to sector size is not
+allowed and causes immediate errors.
+
+This requires us to open the origin before initialising the
+exception store and to export dm_snap_origin.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Reviewed-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/dm-exception-store.c | 4 +++-
+ drivers/md/dm-exception-store.h | 3 ++-
+ drivers/md/dm-snap.c | 36 +++++++++++++++++++++---------------
+ 3 files changed, 26 insertions(+), 17 deletions(-)
+
+--- a/drivers/md/dm-exception-store.c
++++ b/drivers/md/dm-exception-store.c
+@@ -173,7 +173,9 @@ int dm_exception_store_set_chunk_size(st
+
+ /* Validate the chunk size against the device block size */
+ if (chunk_size %
+- (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9)) {
++ (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9) ||
++ chunk_size %
++ (bdev_logical_block_size(dm_snap_origin(store->snap)->bdev) >> 9)) {
+ *error = "Chunk size is not a multiple of device blocksize";
+ return -EINVAL;
+ }
+--- a/drivers/md/dm-exception-store.h
++++ b/drivers/md/dm-exception-store.h
+@@ -126,8 +126,9 @@ struct dm_exception_store {
+ };
+
+ /*
+- * Obtain the cow device used by a given snapshot.
++ * Obtain the origin or cow device used by a given snapshot.
+ */
++struct dm_dev *dm_snap_origin(struct dm_snapshot *snap);
+ struct dm_dev *dm_snap_cow(struct dm_snapshot *snap);
+
+ /*
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -148,6 +148,12 @@ struct dm_snapshot {
+ #define RUNNING_MERGE 0
+ #define SHUTDOWN_MERGE 1
+
++struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
++{
++ return s->origin;
++}
++EXPORT_SYMBOL(dm_snap_origin);
++
+ struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
+ {
+ return s->cow;
+@@ -1065,10 +1071,6 @@ static int snapshot_ctr(struct dm_target
+ origin_mode = FMODE_WRITE;
+ }
+
+- origin_path = argv[0];
+- argv++;
+- argc--;
+-
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s) {
+ ti->error = "Cannot allocate snapshot context private "
+@@ -1077,6 +1079,16 @@ static int snapshot_ctr(struct dm_target
+ goto bad;
+ }
+
++ origin_path = argv[0];
++ argv++;
++ argc--;
++
++ r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
++ if (r) {
++ ti->error = "Cannot get origin device";
++ goto bad_origin;
++ }
++
+ cow_path = argv[0];
+ argv++;
+ argc--;
+@@ -1097,12 +1109,6 @@ static int snapshot_ctr(struct dm_target
+ argv += args_used;
+ argc -= args_used;
+
+- r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
+- if (r) {
+- ti->error = "Cannot get origin device";
+- goto bad_origin;
+- }
+-
+ s->ti = ti;
+ s->valid = 1;
+ s->active = 0;
+@@ -1212,15 +1218,15 @@ bad_kcopyd:
+ dm_exception_table_exit(&s->complete, exception_cache);
+
+ bad_hash_tables:
+- dm_put_device(ti, s->origin);
+-
+-bad_origin:
+ dm_exception_store_destroy(s->store);
+
+ bad_store:
+ dm_put_device(ti, s->cow);
+
+ bad_cow:
++ dm_put_device(ti, s->origin);
++
++bad_origin:
+ kfree(s);
+
+ bad:
+@@ -1314,12 +1320,12 @@ static void snapshot_dtr(struct dm_targe
+
+ mempool_destroy(s->pending_pool);
+
+- dm_put_device(ti, s->origin);
+-
+ dm_exception_store_destroy(s->store);
+
+ dm_put_device(ti, s->cow);
+
++ dm_put_device(ti, s->origin);
++
+ kfree(s);
+ }
+
--- /dev/null
+From 5ddb954b9ee50824977d2931e0ff58b3050b337d Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Sat, 7 Aug 2010 11:01:36 +0100
+Subject: drm/i915/edp: Flush the write before waiting for PLLs
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 5ddb954b9ee50824977d2931e0ff58b3050b337d upstream.
+
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/intel_display.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -1502,6 +1502,7 @@ static void ironlake_enable_pll_edp (str
+ dpa_ctl = I915_READ(DP_A);
+ dpa_ctl |= DP_PLL_ENABLE;
+ I915_WRITE(DP_A, dpa_ctl);
++ POSTING_READ(DP_A);
+ udelay(200);
+ }
+
--- /dev/null
+From 69d0b96c095468526009cb3104eee561c9252a84 Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Wed, 4 Aug 2010 21:22:09 +0200
+Subject: drm/i915: fixup pageflip ringbuffer commands for i8xx
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+commit 69d0b96c095468526009cb3104eee561c9252a84 upstream.
+
+Add a new path for 2nd gen chips that uses the commands for i81x
+chips (where public docs do exist) augmented with the plane bits
+from i915. It seems to work and doesn't result in a black screen
+like before.
+
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+[anholt: resolved against conflict]
+Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/intel_display.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -4837,11 +4837,17 @@ static int intel_crtc_page_flip(struct d
+ OUT_RING(offset | obj_priv->tiling_mode);
+ pipesrc = I915_READ(pipesrc_reg);
+ OUT_RING(pipesrc & 0x0fff0fff);
+- } else {
++ } else if (IS_GEN3(dev)) {
+ OUT_RING(MI_DISPLAY_FLIP_I915 |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ OUT_RING(offset);
++ OUT_RING(MI_NOOP);
++ } else {
++ OUT_RING(MI_DISPLAY_FLIP |
++ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
++ OUT_RING(fb->pitch);
++ OUT_RING(offset);
+ OUT_RING(MI_NOOP);
+ }
+ ADVANCE_LP_RING();
--- /dev/null
+From 6146b3d61925116e3fecce36c2fd873665bd6614 Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Wed, 4 Aug 2010 21:22:10 +0200
+Subject: drm/i915: i8xx also doesn't like multiple outstanding pageflips
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+commit 6146b3d61925116e3fecce36c2fd873665bd6614 upstream.
+
+My i855GM suffers from an 80k/s interrupt storm without this.
+So add 2nd gen to the list of things that don't like more than
+one outstanding pageflip request.
+
+Furthermore I've changed the busy loop into a ringbuffer wait.
+Busy-loops that don't check whether the chip died are simply evil.
+And performance should actually improve, because there's usually
+a decent amount of rendering queued on the gpu, hopefully rendering
+that MI_WAIT into a noop by the time it's executed.
+
+The current code holds dev->struct_mutex while executing this loop,
+hence stalling all other gem activity anyway.
+
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+[anholt: resolved against conflict]
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/intel_display.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -4816,14 +4816,16 @@ static int intel_crtc_page_flip(struct d
+ work->pending_flip_obj = obj;
+
+ if (intel_crtc->plane)
+- flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
++ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+- flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
++ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
+- /* Wait for any previous flip to finish */
+- if (IS_GEN3(dev))
+- while (I915_READ(ISR) & flip_mask)
+- ;
++ if (IS_GEN3(dev) || IS_GEN2(dev)) {
++ BEGIN_LP_RING(2);
++ OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
++ OUT_RING(0);
++ ADVANCE_LP_RING();
++ }
+
+ /* Offset into the new buffer for cases of shared fbs between CRTCs */
+ offset = obj_priv->gtt_offset;
--- /dev/null
+From 9ea2c4be978d597076ddc6c550557de5d243cea8 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexdeucher@gmail.com>
+Date: Fri, 6 Aug 2010 00:27:44 -0400
+Subject: drm/radeon/kms: add additional quirk for Acer rv620 laptop
+
+From: Alex Deucher <alexdeucher@gmail.com>
+
+commit 9ea2c4be978d597076ddc6c550557de5d243cea8 upstream.
+
+HPD pins are reversed
+
+Fixes:
+https://bugs.freedesktop.org/show_bug.cgi?id=29387
+
+Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/radeon/radeon_atombios.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -206,6 +206,7 @@ static bool radeon_atom_apply_quirks(str
+ uint16_t *line_mux,
+ struct radeon_hpd *hpd)
+ {
++ struct radeon_device *rdev = dev->dev_private;
+
+ /* Asus M2A-VM HDMI board lists the DVI port as HDMI */
+ if ((dev->pdev->device == 0x791e) &&
+@@ -308,13 +309,22 @@ static bool radeon_atom_apply_quirks(str
+ }
+ }
+
+- /* Acer laptop reports DVI-D as DVI-I */
++ /* Acer laptop reports DVI-D as DVI-I and hpd pins reversed */
+ if ((dev->pdev->device == 0x95c4) &&
+ (dev->pdev->subsystem_vendor == 0x1025) &&
+ (dev->pdev->subsystem_device == 0x013c)) {
++ struct radeon_gpio_rec gpio;
++
+ if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
+- (supported_device == ATOM_DEVICE_DFP1_SUPPORT))
++ (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
++ gpio = radeon_lookup_gpio(rdev, 6);
++ *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+ *connector_type = DRM_MODE_CONNECTOR_DVID;
++ } else if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
++ (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
++ gpio = radeon_lookup_gpio(rdev, 7);
++ *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
++ }
+ }
+
+ /* XFX Pine Group device rv730 reports no VGA DDC lines
--- /dev/null
+From d8ab35575098b2d6dc10b2535aeb40545933ae56 Mon Sep 17 00:00:00 2001
+From: Dr. David Alan Gilbert <linux@treblig.org>
+Date: Mon, 2 Aug 2010 09:43:52 +1000
+Subject: drm/radeon/kms: add missing copy from user
+
+From: Dr. David Alan Gilbert <linux@treblig.org>
+
+commit d8ab35575098b2d6dc10b2535aeb40545933ae56 upstream.
+
+This hasn't mattered up until the ioctl started using the value, and it fell
+apart.
+
+fixes fd.o 29340, Ubuntu LP 606081
+
+[airlied: cleaned up whitespace and don't need an error before pushing]
+
+Signed-off-by: Dr. David Alan Gilbert <linux@treblig.org>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/radeon/radeon_kms.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/radeon/radeon_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_kms.c
+@@ -106,7 +106,9 @@ int radeon_info_ioctl(struct drm_device
+
+ info = data;
+ value_ptr = (uint32_t *)((unsigned long)info->value);
+- value = *value_ptr;
++ if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value)))
++ return -EFAULT;
++
+ switch (info->request) {
+ case RADEON_INFO_DEVICE_ID:
+ value = dev->pci_device;
--- /dev/null
+From 845b6cf34150100deb5f58c8a37a372b111f2918 Mon Sep 17 00:00:00 2001
+From: Jiaju Zhang <jjzhang.linux@gmail.com>
+Date: Wed, 28 Jul 2010 13:21:06 +0800
+Subject: Fix the nested PR lock calling issue in ACL
+
+From: Jiaju Zhang <jjzhang.linux@gmail.com>
+
+commit 845b6cf34150100deb5f58c8a37a372b111f2918 upstream.
+
+Hi,
+
+Thanks a lot for all the review and comments so far;) I'd like to send
+the improved (V4) version of this patch.
+
+This patch fixes a deadlock in OCFS2 ACL. We found this bug in an OCFS2
+and Samba integration scenario; the symptom is that several smbd
+processes hang under heavy workload. Finally we found out that it
+is the nested PR lock calling that leads to this deadlock:
+
+ node1 node2
+ gr PR
+ |
+ V
+ PR(EX)---> BAST:OCFS2_LOCK_BLOCKED
+ |
+ V
+ rq PR
+ |
+ V
+ wait=1
+
+After requesting the 2nd PR lock, the process "smbd" went into D
+state. It can only be woken up when the 1st PR lock's RO holder equals
+zero. There should be an ocfs2_inode_unlock in the calling path later
+on, which can decrement the RO holder. But since it has been in
+uninterruptible sleep, the unlock function has no chance to be called.
+
+The related stack trace is:
+smbd D ffff8800013d0600 0 9522 5608 0x00000000
+ ffff88002ca7fb18 0000000000000282 ffff88002f964500 ffff88002ca7fa98
+ ffff8800013d0600 ffff88002ca7fae0 ffff88002f964340 ffff88002f964340
+ ffff88002ca7ffd8 ffff88002ca7ffd8 ffff88002f964340 ffff88002f964340
+Call Trace:
+[<ffffffff80350425>] schedule_timeout+0x175/0x210
+[<ffffffff8034f580>] wait_for_common+0xf0/0x210
+[<ffffffffa03e12b9>] __ocfs2_cluster_lock+0x3b9/0xa90 [ocfs2]
+[<ffffffffa03e7665>] ocfs2_inode_lock_full_nested+0x255/0xdb0 [ocfs2]
+[<ffffffffa0446019>] ocfs2_get_acl+0x69/0x120 [ocfs2]
+[<ffffffffa0446368>] ocfs2_check_acl+0x28/0x80 [ocfs2]
+[<ffffffff800e3507>] acl_permission_check+0x57/0xb0
+[<ffffffff800e357d>] generic_permission+0x1d/0xc0
+[<ffffffffa03eecea>] ocfs2_permission+0x10a/0x1d0 [ocfs2]
+[<ffffffff800e3f65>] inode_permission+0x45/0x100
+[<ffffffff800d86b3>] sys_chdir+0x53/0x90
+[<ffffffff80007458>] system_call_fastpath+0x16/0x1b
+[<00007f34a4ef6927>] 0x7f34a4ef6927
+
+For details, please see:
+https://bugzilla.novell.com/show_bug.cgi?id=614332 and
+http://oss.oracle.com/bugzilla/show_bug.cgi?id=1278
+
+Signed-off-by: Jiaju Zhang <jjzhang@suse.de>
+Acked-by: Mark Fasheh <mfasheh@suse.com>
+Signed-off-by: Joel Becker <joel.becker@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ocfs2/acl.c | 24 +++++++++++++++++++++---
+ 1 file changed, 21 insertions(+), 3 deletions(-)
+
+--- a/fs/ocfs2/acl.c
++++ b/fs/ocfs2/acl.c
+@@ -290,12 +290,30 @@ static int ocfs2_set_acl(handle_t *handl
+
+ int ocfs2_check_acl(struct inode *inode, int mask)
+ {
+- struct posix_acl *acl = ocfs2_get_acl(inode, ACL_TYPE_ACCESS);
++ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
++ struct buffer_head *di_bh = NULL;
++ struct posix_acl *acl;
++ int ret = -EAGAIN;
+
+- if (IS_ERR(acl))
++ if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
++ return ret;
++
++ ret = ocfs2_read_inode_block(inode, &di_bh);
++ if (ret < 0) {
++ mlog_errno(ret);
++ return ret;
++ }
++
++ acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, di_bh);
++
++ brelse(di_bh);
++
++ if (IS_ERR(acl)) {
++ mlog_errno(PTR_ERR(acl));
+ return PTR_ERR(acl);
++ }
+ if (acl) {
+- int ret = posix_acl_permission(inode, acl, mask);
++ ret = posix_acl_permission(inode, acl, mask);
+ posix_acl_release(acl);
+ return ret;
+ }
--- /dev/null
+From b9783dcebe952bf73449fe70a19ee4814adc81a0 Mon Sep 17 00:00:00 2001
+From: Jean Delvare <khali@linux-fr.org>
+Date: Sat, 14 Aug 2010 21:08:48 +0200
+Subject: hwmon: (pc87360) Fix device resource declaration
+
+From: Jean Delvare <khali@linux-fr.org>
+
+commit b9783dcebe952bf73449fe70a19ee4814adc81a0 upstream.
+
+It's not OK to call platform_device_add_resources() multiple times
+in a row. Despite its name, this function sets the resources; it
+doesn't add them. So we have to prepare an array with all the
+resources, and then call platform_device_add_resources() once.
+
+Before this fix, only the last I/O resource would be actually
+registered. The other I/O resources were leaked.
+
+Signed-off-by: Jean Delvare <khali@linux-fr.org>
+Cc: Jim Cromie <jim.cromie@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/hwmon/pc87360.c | 31 +++++++++++++++++--------------
+ 1 file changed, 17 insertions(+), 14 deletions(-)
+
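+As a rough illustration of the pattern used by the fix: collect every I/O
+region in one array first, then hand the complete array to
+platform_device_add_resources() in a single call, because that call sets the
+device's resources rather than appending to them. The sketch below is not
+the driver code; the device name, base addresses and extent are made up.
+
+    #include <linux/platform_device.h>
+    #include <linux/ioport.h>
+    #include <linux/string.h>
+    #include <linux/errno.h>
+
+    static int __init example_device_add(unsigned short base0,
+                                         unsigned short base1)
+    {
+        struct resource res[2];
+        struct platform_device *pdev;
+        int err;
+
+        pdev = platform_device_alloc("example", base0);
+        if (!pdev)
+            return -ENOMEM;
+
+        memset(res, 0, sizeof(res));
+        res[0].start = base0;
+        res[0].end   = base0 + 0x0f;
+        res[0].name  = "example";
+        res[0].flags = IORESOURCE_IO;
+        res[1].start = base1;
+        res[1].end   = base1 + 0x0f;
+        res[1].name  = "example";
+        res[1].flags = IORESOURCE_IO;
+
+        /* one call with the whole array, not one call per region */
+        err = platform_device_add_resources(pdev, res, 2);
+        if (!err)
+            err = platform_device_add(pdev);
+        if (err)
+            platform_device_put(pdev);
+        return err;
+    }
+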
+--- a/drivers/hwmon/pc87360.c
++++ b/drivers/hwmon/pc87360.c
+@@ -1610,11 +1610,8 @@ static struct pc87360_data *pc87360_upda
+
+ static int __init pc87360_device_add(unsigned short address)
+ {
+- struct resource res = {
+- .name = "pc87360",
+- .flags = IORESOURCE_IO,
+- };
+- int err, i;
++ struct resource res[3];
++ int err, i, res_count;
+
+ pdev = platform_device_alloc("pc87360", address);
+ if (!pdev) {
+@@ -1623,22 +1620,28 @@ static int __init pc87360_device_add(uns
+ goto exit;
+ }
+
++ memset(res, 0, 3 * sizeof(struct resource));
++ res_count = 0;
+ for (i = 0; i < 3; i++) {
+ if (!extra_isa[i])
+ continue;
+- res.start = extra_isa[i];
+- res.end = extra_isa[i] + PC87360_EXTENT - 1;
++ res[res_count].start = extra_isa[i];
++ res[res_count].end = extra_isa[i] + PC87360_EXTENT - 1;
++ res[res_count].name = "pc87360",
++ res[res_count].flags = IORESOURCE_IO,
+
+- err = acpi_check_resource_conflict(&res);
++ err = acpi_check_resource_conflict(&res[res_count]);
+ if (err)
+ goto exit_device_put;
+
+- err = platform_device_add_resources(pdev, &res, 1);
+- if (err) {
+- printk(KERN_ERR "pc87360: Device resource[%d] "
+- "addition failed (%d)\n", i, err);
+- goto exit_device_put;
+- }
++ res_count++;
++ }
++
++ err = platform_device_add_resources(pdev, res, res_count);
++ if (err) {
++ printk(KERN_ERR "pc87360: Device resources addition failed "
++ "(%d)\n", err);
++ goto exit_device_put;
+ }
+
+ err = platform_device_add(pdev);
--- /dev/null
+From c81476df1b4241aefba4ff83a7701b3a926bd7ce Mon Sep 17 00:00:00 2001
+From: Ondrej Zary <linux@rainbow-software.org>
+Date: Thu, 19 Aug 2010 14:13:25 -0700
+Subject: matroxfb: fix incorrect use of memcpy_toio()
+
+From: Ondrej Zary <linux@rainbow-software.org>
+
+commit c81476df1b4241aefba4ff83a7701b3a926bd7ce upstream.
+
+Screen is completely corrupted since 2.6.34. Bisection revealed that it's
+caused by commit 6175ddf06b61720 ("x86: Clean up mem*io functions.").
+
+H. Peter Anvin explained that memcpy_toio() does not copy data in 32bit
+chunks anymore on x86.
+
+Signed-off-by: Ondrej Zary <linux@rainbow-software.org>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Petr Vandrovec <vandrove@vc.cvut.cz>
+Cc: Jean Delvare <khali@linux-fr.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/video/matrox/matroxfb_base.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/video/matrox/matroxfb_base.h
++++ b/drivers/video/matrox/matroxfb_base.h
+@@ -151,13 +151,13 @@ static inline void mga_writel(vaddr_t va
+ static inline void mga_memcpy_toio(vaddr_t va, const void* src, int len) {
+ #if defined(__alpha__) || defined(__i386__) || defined(__x86_64__)
+ /*
+- * memcpy_toio works for us if:
++ * iowrite32_rep works for us if:
+ * (1) Copies data as 32bit quantities, not byte after byte,
+ * (2) Performs LE ordered stores, and
+ * (3) It copes with unaligned source (destination is guaranteed to be page
+ * aligned and length is guaranteed to be multiple of 4).
+ */
+- memcpy_toio(va.vaddr, src, len);
++ iowrite32_rep(va.vaddr, src, len >> 2);
+ #else
+ u_int32_t __iomem* addr = va.vaddr;
+
--- /dev/null
+From 8ae664184c45def51ff0b61d4bd6c6671db6cb4f Mon Sep 17 00:00:00 2001
+From: Stefani Seibold <stefani@seibold.net>
+Date: Thu, 5 Aug 2010 09:19:26 +0200
+Subject: mtd: change struct flchip_shared spinlock locking into mutex
+
+From: Stefani Seibold <stefani@seibold.net>
+
+commit 8ae664184c45def51ff0b61d4bd6c6671db6cb4f upstream.
+
+This patch prevents a "scheduling while atomic" bug by changing the
+flchip_shared spinlock into a mutex. This should be safe since no atomic
+path will use this lock.
+
+It was suggested by Arnd Bergmann and Vasiliy Kulikov.
+
+Signed-off-by: Stefani Seibold <stefani@seibold.net>
+Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/mtd/chips/cfi_cmdset_0001.c | 20 ++++++++++----------
+ drivers/mtd/lpddr/lpddr_cmds.c | 20 ++++++++++----------
+ include/linux/mtd/flashchip.h | 2 +-
+ 3 files changed, 21 insertions(+), 21 deletions(-)
+
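+The reason for the conversion, in short: a spinlock holder runs atomically
+and must not sleep, while the sections guarded by flchip_shared->lock can
+end up sleeping (they take per-chip mutexes and wait on wait queues), so the
+guard itself has to be a sleeping lock. A minimal sketch of that shape, with
+made-up names, not the mtd code:
+
+    #include <linux/mutex.h>
+
+    static DEFINE_MUTEX(shared_lock);   /* was a spinlock before the change */
+
+    static void claim_chip(struct mutex *chip_mutex)
+    {
+        mutex_lock(&shared_lock);
+        /*
+         * mutex_lock() may sleep.  Under a spinlock this would be a
+         * "scheduling while atomic" bug; under a mutex it is allowed.
+         */
+        mutex_lock(chip_mutex);
+        mutex_unlock(chip_mutex);
+        mutex_unlock(&shared_lock);
+    }
+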
+--- a/drivers/mtd/chips/cfi_cmdset_0001.c
++++ b/drivers/mtd/chips/cfi_cmdset_0001.c
+@@ -717,7 +717,7 @@ static int cfi_intelext_partition_fixup(
+ chip = &newcfi->chips[0];
+ for (i = 0; i < cfi->numchips; i++) {
+ shared[i].writing = shared[i].erasing = NULL;
+- spin_lock_init(&shared[i].lock);
++ mutex_init(&shared[i].lock);
+ for (j = 0; j < numparts; j++) {
+ *chip = cfi->chips[i];
+ chip->start += j << partshift;
+@@ -886,7 +886,7 @@ static int get_chip(struct map_info *map
+ */
+ struct flchip_shared *shared = chip->priv;
+ struct flchip *contender;
+- spin_lock(&shared->lock);
++ mutex_lock(&shared->lock);
+ contender = shared->writing;
+ if (contender && contender != chip) {
+ /*
+@@ -899,7 +899,7 @@ static int get_chip(struct map_info *map
+ * get_chip returns success we're clear to go ahead.
+ */
+ ret = mutex_trylock(&contender->mutex);
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ if (!ret)
+ goto retry;
+ mutex_unlock(&chip->mutex);
+@@ -914,7 +914,7 @@ static int get_chip(struct map_info *map
+ mutex_unlock(&contender->mutex);
+ return ret;
+ }
+- spin_lock(&shared->lock);
++ mutex_lock(&shared->lock);
+
+ /* We should not own chip if it is already
+ * in FL_SYNCING state. Put contender and retry. */
+@@ -930,7 +930,7 @@ static int get_chip(struct map_info *map
+ * on this chip. Sleep. */
+ if (mode == FL_ERASING && shared->erasing
+ && shared->erasing->oldstate == FL_ERASING) {
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+@@ -944,7 +944,7 @@ static int get_chip(struct map_info *map
+ shared->writing = chip;
+ if (mode == FL_ERASING)
+ shared->erasing = chip;
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ }
+ ret = chip_ready(map, chip, adr, mode);
+ if (ret == -EAGAIN)
+@@ -959,7 +959,7 @@ static void put_chip(struct map_info *ma
+
+ if (chip->priv) {
+ struct flchip_shared *shared = chip->priv;
+- spin_lock(&shared->lock);
++ mutex_lock(&shared->lock);
+ if (shared->writing == chip && chip->oldstate == FL_READY) {
+ /* We own the ability to write, but we're done */
+ shared->writing = shared->erasing;
+@@ -967,7 +967,7 @@ static void put_chip(struct map_info *ma
+ /* give back ownership to who we loaned it from */
+ struct flchip *loaner = shared->writing;
+ mutex_lock(&loaner->mutex);
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ mutex_unlock(&chip->mutex);
+ put_chip(map, loaner, loaner->start);
+ mutex_lock(&chip->mutex);
+@@ -985,11 +985,11 @@ static void put_chip(struct map_info *ma
+ * Don't let the switch below mess things up since
+ * we don't have ownership to resume anything.
+ */
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ wake_up(&chip->wq);
+ return;
+ }
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ }
+
+ switch(chip->oldstate) {
+--- a/drivers/mtd/lpddr/lpddr_cmds.c
++++ b/drivers/mtd/lpddr/lpddr_cmds.c
+@@ -98,7 +98,7 @@ struct mtd_info *lpddr_cmdset(struct map
+ numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
+ for (i = 0; i < numchips; i++) {
+ shared[i].writing = shared[i].erasing = NULL;
+- spin_lock_init(&shared[i].lock);
++ mutex_init(&shared[i].lock);
+ for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
+ *chip = lpddr->chips[i];
+ chip->start += j << lpddr->chipshift;
+@@ -217,7 +217,7 @@ static int get_chip(struct map_info *map
+ */
+ struct flchip_shared *shared = chip->priv;
+ struct flchip *contender;
+- spin_lock(&shared->lock);
++ mutex_lock(&shared->lock);
+ contender = shared->writing;
+ if (contender && contender != chip) {
+ /*
+@@ -230,7 +230,7 @@ static int get_chip(struct map_info *map
+ * get_chip returns success we're clear to go ahead.
+ */
+ ret = mutex_trylock(&contender->mutex);
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ if (!ret)
+ goto retry;
+ mutex_unlock(&chip->mutex);
+@@ -245,7 +245,7 @@ static int get_chip(struct map_info *map
+ mutex_unlock(&contender->mutex);
+ return ret;
+ }
+- spin_lock(&shared->lock);
++ mutex_lock(&shared->lock);
+
+ /* We should not own chip if it is already in FL_SYNCING
+ * state. Put contender and retry. */
+@@ -261,7 +261,7 @@ static int get_chip(struct map_info *map
+ Must sleep in such a case. */
+ if (mode == FL_ERASING && shared->erasing
+ && shared->erasing->oldstate == FL_ERASING) {
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+@@ -275,7 +275,7 @@ static int get_chip(struct map_info *map
+ shared->writing = chip;
+ if (mode == FL_ERASING)
+ shared->erasing = chip;
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ }
+
+ ret = chip_ready(map, chip, mode);
+@@ -348,7 +348,7 @@ static void put_chip(struct map_info *ma
+ {
+ if (chip->priv) {
+ struct flchip_shared *shared = chip->priv;
+- spin_lock(&shared->lock);
++ mutex_lock(&shared->lock);
+ if (shared->writing == chip && chip->oldstate == FL_READY) {
+ /* We own the ability to write, but we're done */
+ shared->writing = shared->erasing;
+@@ -356,7 +356,7 @@ static void put_chip(struct map_info *ma
+ /* give back the ownership */
+ struct flchip *loaner = shared->writing;
+ mutex_lock(&loaner->mutex);
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ mutex_unlock(&chip->mutex);
+ put_chip(map, loaner);
+ mutex_lock(&chip->mutex);
+@@ -374,11 +374,11 @@ static void put_chip(struct map_info *ma
+ * Don't let the switch below mess things up since
+ * we don't have ownership to resume anything.
+ */
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ wake_up(&chip->wq);
+ return;
+ }
+- spin_unlock(&shared->lock);
++ mutex_unlock(&shared->lock);
+ }
+
+ switch (chip->oldstate) {
+--- a/include/linux/mtd/flashchip.h
++++ b/include/linux/mtd/flashchip.h
+@@ -92,7 +92,7 @@ struct flchip {
+ /* This is used to handle contention on write/erase operations
+ between partitions of the same physical chip. */
+ struct flchip_shared {
+- spinlock_t lock;
++ struct mutex lock;
+ struct flchip *writing;
+ struct flchip *erasing;
+ };
--- /dev/null
+From cfe3fdadb16162327773ef01a575a32000b8c7f4 Mon Sep 17 00:00:00 2001
+From: Tilman Sauerbeck <tilman@code-monkey.de>
+Date: Fri, 20 Aug 2010 14:01:47 -0700
+Subject: mtd: nand: Fix probe of Samsung NAND chips
+
+From: Tilman Sauerbeck <tilman@code-monkey.de>
+
+commit cfe3fdadb16162327773ef01a575a32000b8c7f4 upstream.
+
+Apparently, the check for a 6-byte ID string introduced by commit
+426c457a3216fac74e3d44dd39729b0689f4c7ab ("mtd: nand: extend NAND flash
+detection to new MLC chips") is NOT sufficient to determine whether a
+Samsung chip uses their new MLC detection scheme or the old,
+standard scheme. This adds a condition to also check the cell type.
+
+Signed-off-by: Tilman Sauerbeck <tilman@code-monkey.de>
+Signed-off-by: Brian Norris <norris@broadcom.com>
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/mtd/nand/nand_base.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -2852,6 +2852,7 @@ static struct nand_flash_dev *nand_get_f
+ */
+ if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
+ id_data[0] == NAND_MFR_SAMSUNG &&
++ (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+ id_data[5] != 0x00) {
+ /* Calc pagesize */
+ mtd->writesize = 2048 << (extid & 0x03);
--- /dev/null
+From 9b00c64318cc337846a7a08a5678f5f19aeff188 Mon Sep 17 00:00:00 2001
+From: Patrick J. LoPresti <lopresti@gmail.com>
+Date: Tue, 10 Aug 2010 17:28:01 -0400
+Subject: nfs: Add "lookupcache" to displayed mount options
+
+From: Patrick J. LoPresti <lopresti@gmail.com>
+
+commit 9b00c64318cc337846a7a08a5678f5f19aeff188 upstream.
+
+Running "cat /proc/mounts" fails to display the "lookupcache" option.
+This oversight cost me a bunch of wasted time recently.
+
+The following simple patch fixes it.
+
+Signed-off-by: Patrick LoPresti <lopresti@gmail.com>
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/super.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -652,6 +652,13 @@ static void nfs_show_mount_options(struc
+
+ if (nfss->options & NFS_OPTION_FSCACHE)
+ seq_printf(m, ",fsc");
++
++ if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) {
++ if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
++ seq_printf(m, ",lookupcache=none");
++ else
++ seq_printf(m, ",lookupcache=pos");
++ }
+ }
+
+ /*
--- /dev/null
+From 0a377cff9428af2da2b293d11e07bc4dbf064ee5 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Wed, 18 Aug 2010 09:25:42 -0400
+Subject: NFS: Fix an Oops in the NFSv4 atomic open code
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 0a377cff9428af2da2b293d11e07bc4dbf064ee5 upstream.
+
+Adam Lackorzynski reports:
+
+with 2.6.35.2 I'm getting this reproducible Oops:
+
+[ 110.825396] BUG: unable to handle kernel NULL pointer dereference at
+(null)
+[ 110.828638] IP: [<ffffffff811247b7>] encode_attrs+0x1a/0x2a4
+[ 110.828638] PGD be89f067 PUD bf18f067 PMD 0
+[ 110.828638] Oops: 0000 [#1] SMP
+[ 110.828638] last sysfs file: /sys/class/net/lo/operstate
+[ 110.828638] CPU 2
+[ 110.828638] Modules linked in: rtc_cmos rtc_core rtc_lib amd64_edac_mod
+i2c_amd756 edac_core i2c_core dm_mirror dm_region_hash dm_log dm_snapshot
+sg sr_mod usb_storage ohci_hcd mptspi tg3 mptscsih mptbase usbcore nls_base
+[last unloaded: scsi_wait_scan]
+[ 110.828638]
+[ 110.828638] Pid: 11264, comm: setchecksum Not tainted 2.6.35.2 #1
+[ 110.828638] RIP: 0010:[<ffffffff811247b7>] [<ffffffff811247b7>]
+encode_attrs+0x1a/0x2a4
+[ 110.828638] RSP: 0000:ffff88003bf5b878 EFLAGS: 00010296
+[ 110.828638] RAX: ffff8800bddb48a8 RBX: ffff88003bf5bb18 RCX:
+0000000000000000
+[ 110.828638] RDX: ffff8800be258800 RSI: 0000000000000000 RDI:
+ffff88003bf5b9f8
+[ 110.828638] RBP: 0000000000000000 R08: ffff8800bddb48a8 R09:
+0000000000000004
+[ 110.828638] R10: 0000000000000003 R11: ffff8800be779000 R12:
+ffff8800be258800
+[ 110.828638] R13: ffff88003bf5b9f8 R14: ffff88003bf5bb20 R15:
+ffff8800be258800
+[ 110.828638] FS: 0000000000000000(0000) GS:ffff880041e00000(0063)
+knlGS:00000000556bd6b0
+[ 110.828638] CS: 0010 DS: 002b ES: 002b CR0: 000000008005003b
+[ 110.828638] CR2: 0000000000000000 CR3: 00000000be8ef000 CR4:
+00000000000006e0
+[ 110.828638] DR0: 0000000000000000 DR1: 0000000000000000 DR2:
+0000000000000000
+[ 110.828638] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7:
+0000000000000400
+[ 110.828638] Process setchecksum (pid: 11264, threadinfo
+ffff88003bf5a000, task ffff88003f232210)
+[ 110.828638] Stack:
+[ 110.828638] 0000000000000000 ffff8800bfbcf920 0000000000000000
+0000000000000ffe
+[ 110.828638] <0> 0000000000000000 0000000000000000 0000000000000000
+0000000000000000
+[ 110.828638] <0> 0000000000000000 0000000000000000 0000000000000000
+0000000000000000
+[ 110.828638] Call Trace:
+[ 110.828638] [<ffffffff81124c1f>] ? nfs4_xdr_enc_setattr+0x90/0xb4
+[ 110.828638] [<ffffffff81371161>] ? call_transmit+0x1c3/0x24a
+[ 110.828638] [<ffffffff813774d9>] ? __rpc_execute+0x78/0x22a
+[ 110.828638] [<ffffffff81371a91>] ? rpc_run_task+0x21/0x2b
+[ 110.828638] [<ffffffff81371b7e>] ? rpc_call_sync+0x3d/0x5d
+[ 110.828638] [<ffffffff8111e284>] ? _nfs4_do_setattr+0x11b/0x147
+[ 110.828638] [<ffffffff81109466>] ? nfs_init_locked+0x0/0x32
+[ 110.828638] [<ffffffff810ac521>] ? ifind+0x4e/0x90
+[ 110.828638] [<ffffffff8111e2fb>] ? nfs4_do_setattr+0x4b/0x6e
+[ 110.828638] [<ffffffff8111e634>] ? nfs4_do_open+0x291/0x3a6
+[ 110.828638] [<ffffffff8111ed81>] ? nfs4_open_revalidate+0x63/0x14a
+[ 110.828638] [<ffffffff811056c4>] ? nfs_open_revalidate+0xd7/0x161
+[ 110.828638] [<ffffffff810a2de4>] ? do_lookup+0x1a4/0x201
+[ 110.828638] [<ffffffff810a4733>] ? link_path_walk+0x6a/0x9d5
+[ 110.828638] [<ffffffff810a42b6>] ? do_last+0x17b/0x58e
+[ 110.828638] [<ffffffff810a5fbe>] ? do_filp_open+0x1bd/0x56e
+[ 110.828638] [<ffffffff811cd5e0>] ? _atomic_dec_and_lock+0x30/0x48
+[ 110.828638] [<ffffffff810a9b1b>] ? dput+0x37/0x152
+[ 110.828638] [<ffffffff810ae063>] ? alloc_fd+0x69/0x10a
+[ 110.828638] [<ffffffff81099f39>] ? do_sys_open+0x56/0x100
+[ 110.828638] [<ffffffff81027a22>] ? ia32_sysret+0x0/0x5
+[ 110.828638] Code: 83 f1 01 e8 f5 ca ff ff 48 83 c4 50 5b 5d 41 5c c3 41
+57 41 56 41 55 49 89 fd 41 54 49 89 d4 55 48 89 f5 53 48 81 ec 18 01 00 00
+<8b> 06 89 c2 83 e2 08 83 fa 01 19 db 83 e3 f8 83 c3 18 a8 01 8d
+[ 110.828638] RIP [<ffffffff811247b7>] encode_attrs+0x1a/0x2a4
+[ 110.828638] RSP <ffff88003bf5b878>
+[ 110.828638] CR2: 0000000000000000
+[ 112.840396] ---[ end trace 95282e83fd77358f ]---
+
+We need to ensure that the O_EXCL flag is turned off if the user doesn't
+set O_CREAT.
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/dir.c | 2 +-
+ fs/nfs/nfs4proc.c | 8 +++++---
+ 2 files changed, 6 insertions(+), 4 deletions(-)
+
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1103,7 +1103,7 @@ static int nfs_open_revalidate(struct de
+ if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
+ goto no_open_dput;
+ /* We can't create new files, or truncate existing ones here */
+- openflags &= ~(O_CREAT|O_TRUNC);
++ openflags &= ~(O_CREAT|O_EXCL|O_TRUNC);
+
+ /*
+ * Note: we're not holding inode->i_mutex and so may be racing with
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2023,7 +2023,8 @@ nfs4_atomic_open(struct inode *dir, stru
+ struct rpc_cred *cred;
+ struct nfs4_state *state;
+ struct dentry *res;
+- fmode_t fmode = nd->intent.open.flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
++ int open_flags = nd->intent.open.flags;
++ fmode_t fmode = open_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
+
+ if (nd->flags & LOOKUP_CREATE) {
+ attr.ia_mode = nd->intent.open.create_mode;
+@@ -2031,8 +2032,9 @@ nfs4_atomic_open(struct inode *dir, stru
+ if (!IS_POSIXACL(dir))
+ attr.ia_mode &= ~current_umask();
+ } else {
++ open_flags &= ~O_EXCL;
+ attr.ia_valid = 0;
+- BUG_ON(nd->intent.open.flags & O_CREAT);
++ BUG_ON(open_flags & O_CREAT);
+ }
+
+ cred = rpc_lookup_cred();
+@@ -2041,7 +2043,7 @@ nfs4_atomic_open(struct inode *dir, stru
+ parent = dentry->d_parent;
+ /* Protect against concurrent sillydeletes */
+ nfs_block_sillyrename(parent);
+- state = nfs4_do_open(dir, &path, fmode, nd->intent.open.flags, &attr, cred);
++ state = nfs4_do_open(dir, &path, fmode, open_flags, &attr, cred);
+ put_rpccred(cred);
+ if (IS_ERR(state)) {
+ if (PTR_ERR(state) == -ENOENT) {
--- /dev/null
+From af4e36318edb848fcc0a8d5f75000ca00cdc7595 Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
+Date: Fri, 13 Aug 2010 12:42:24 +0900
+Subject: nilfs2: fix list corruption after ifile creation failure
+
+From: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
+
+commit af4e36318edb848fcc0a8d5f75000ca00cdc7595 upstream.
+
+If nilfs_attach_checkpoint() gets a memory allocation failure during
+creation of the ifile, it will return without removing the nilfs_sb_info
+struct from the ns_supers list. When a concurrently mounted snapshot is
+unmounted or another new snapshot is mounted after that, this causes a
+kernel oops as below:
+
+> BUG: unable to handle kernel NULL pointer dereference at (null)
+> IP: [<f83662ff>] nilfs_find_sbinfo+0x74/0xa4 [nilfs2]
+> *pde = 00000000
+> Oops: 0000 [#1] SMP
+<snip>
+> Call Trace:
+> [<f835dc29>] ? nilfs_get_sb+0x165/0x532 [nilfs2]
+> [<c1173c87>] ? ida_get_new_above+0x16d/0x187
+> [<c109a7f8>] ? alloc_vfsmnt+0x7e/0x10a
+> [<c1070790>] ? kstrdup+0x2c/0x40
+> [<c1089041>] ? vfs_kern_mount+0x96/0x14e
+> [<c108913d>] ? do_kern_mount+0x32/0xbd
+> [<c109b331>] ? do_mount+0x642/0x6a1
+> [<c101a415>] ? do_page_fault+0x0/0x2d1
+> [<c1099c00>] ? copy_mount_options+0x80/0xe2
+> [<c10705d8>] ? strndup_user+0x48/0x67
+> [<c109b3f1>] ? sys_mount+0x61/0x90
+> [<c10027cc>] ? sysenter_do_call+0x12/0x22
+
+This fixes the problem.
+
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
+Tested-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nilfs2/super.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/fs/nilfs2/super.c
++++ b/fs/nilfs2/super.c
+@@ -336,9 +336,10 @@ int nilfs_attach_checkpoint(struct nilfs
+ list_add(&sbi->s_list, &nilfs->ns_supers);
+ up_write(&nilfs->ns_super_sem);
+
++ err = -ENOMEM;
+ sbi->s_ifile = nilfs_ifile_new(sbi, nilfs->ns_inode_size);
+ if (!sbi->s_ifile)
+- return -ENOMEM;
++ goto delist;
+
+ down_read(&nilfs->ns_segctor_sem);
+ err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp,
+@@ -369,6 +370,7 @@ int nilfs_attach_checkpoint(struct nilfs
+ nilfs_mdt_destroy(sbi->s_ifile);
+ sbi->s_ifile = NULL;
+
++ delist:
+ down_write(&nilfs->ns_super_sem);
+ list_del_init(&sbi->s_list);
+ up_write(&nilfs->ns_super_sem);
--- /dev/null
+From 8a2e70c40ff58f82dde67770e6623ca45f0cb0c8 Mon Sep 17 00:00:00 2001
+From: Tao Ma <tao.ma@oracle.com>
+Date: Thu, 22 Jul 2010 13:56:45 +0800
+Subject: ocfs2: Count more refcount records in file system fragmentation.
+
+From: Tao Ma <tao.ma@oracle.com>
+
+commit 8a2e70c40ff58f82dde67770e6623ca45f0cb0c8 upstream.
+
+The refcount record calculation in ocfs2_calc_refcount_meta_credits
+is too optimistic: it assumes we can always allocate contiguous
+clusters and handle an already existing refcount record as a whole.
+In reality, because of file system fragmentation, we may have to split
+a refcount record into 3 parts during the transaction, so account for
+the worst case in the record calculation.
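+
+As a worked illustration (hypothetical numbers, only to show the worst
+case): if an existing record covers clusters [0, 100) and the refcount
+of [20, 30) changes within the transaction, the record ends up split
+into [0, 20), [20, 30) and [30, 100), i.e. two extra records on top of
+the one that was already there, which is why the calculation must
+reserve room for them.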
+
+Signed-off-by: Tao Ma <tao.ma@oracle.com>
+Signed-off-by: Joel Becker <joel.becker@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ocfs2/refcounttree.c | 20 +++++++++++++++-----
+ 1 file changed, 15 insertions(+), 5 deletions(-)
+
+--- a/fs/ocfs2/refcounttree.c
++++ b/fs/ocfs2/refcounttree.c
+@@ -2436,16 +2436,26 @@ static int ocfs2_calc_refcount_meta_cred
+ len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
+ le32_to_cpu(rec.r_clusters)) - cpos;
+ /*
+- * If the refcount rec already exist, cool. We just need
+- * to check whether there is a split. Otherwise we just need
+- * to increase the refcount.
+- * If we will insert one, increases recs_add.
+- *
+ * We record all the records which will be inserted to the
+ * same refcount block, so that we can tell exactly whether
+ * we need a new refcount block or not.
++ *
++ * If we will insert a new one, this is easy and only happens
++ * during adding refcounted flag to the extent, so we don't
++ * have a chance of spliting. We just need one record.
++ *
++ * If the refcount rec already exists, that would be a little
++ * complicated. we may have to:
++ * 1) split at the beginning if the start pos isn't aligned.
++ * we need 1 more record in this case.
++ * 2) split int the end if the end pos isn't aligned.
++ * we need 1 more record in this case.
++ * 3) split in the middle because of file system fragmentation.
++ * we need 2 more records in this case(we can't detect this
++ * beforehand, so always think of the worst case).
+ */
+ if (rec.r_refcount) {
++ recs_add += 2;
+ /* Check whether we need a split at the beginning. */
+ if (cpos == start_cpos &&
+ cpos != le64_to_cpu(rec.r_cpos))
--- /dev/null
+From a524812b7eaa7783d7811198921100f079034e61 Mon Sep 17 00:00:00 2001
+From: Wengang Wang <wen.gang.wang@oracle.com>
+Date: Fri, 30 Jul 2010 16:14:44 +0800
+Subject: ocfs2/dlm: avoid incorrect bit set in refmap on recovery master
+
+From: Wengang Wang <wen.gang.wang@oracle.com>
+
+commit a524812b7eaa7783d7811198921100f079034e61 upstream.
+
+In the following situation, an incorrect bit is left in the refmap on the
+recovery master. The recovery master will eventually fail to purge the
+lockres because of that incorrect bit.
+
+1) node A no longer has any interest in lockres A, so it is purging it.
+2) the owner of lockres A is node B, so node A is sending a deref message
+to node B.
+3) at this time, node B crashes. node C becomes the recovery master and
+recovers lockres A (because the master is the dead node B).
+4) node A migrates lockres A to node C with a refbit there.
+5) node A fails to send the deref message to node B because B crashed. The
+failure is ignored and no further action is taken for lockres A.
+
+Normally, re-sending the deref message, this time to the recovery master,
+would fix it. However, ignoring the failed deref to the original master and
+not recovering the lockres onto the recovery master has the same effect,
+and the latter is simpler.
+
+Signed-off-by: Wengang Wang <wen.gang.wang@oracle.com>
+Acked-by: Srinivas Eeda <srinivas.eeda@oracle.com>
+Signed-off-by: Joel Becker <joel.becker@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ocfs2/dlm/dlmrecovery.c | 22 ++++++++++------------
+ fs/ocfs2/dlm/dlmthread.c | 34 +++++++++++++++++++++-------------
+ 2 files changed, 31 insertions(+), 25 deletions(-)
+
+--- a/fs/ocfs2/dlm/dlmrecovery.c
++++ b/fs/ocfs2/dlm/dlmrecovery.c
+@@ -1997,6 +1997,8 @@ void dlm_move_lockres_to_recovery_list(s
+ struct list_head *queue;
+ struct dlm_lock *lock, *next;
+
++ assert_spin_locked(&dlm->spinlock);
++ assert_spin_locked(&res->spinlock);
+ res->state |= DLM_LOCK_RES_RECOVERING;
+ if (!list_empty(&res->recovering)) {
+ mlog(0,
+@@ -2326,19 +2328,15 @@ static void dlm_do_local_recovery_cleanu
+ /* zero the lvb if necessary */
+ dlm_revalidate_lvb(dlm, res, dead_node);
+ if (res->owner == dead_node) {
+- if (res->state & DLM_LOCK_RES_DROPPING_REF)
+- mlog(0, "%s:%.*s: owned by "
+- "dead node %u, this node was "
+- "dropping its ref when it died. "
+- "continue, dropping the flag.\n",
+- dlm->name, res->lockname.len,
+- res->lockname.name, dead_node);
+-
+- /* the wake_up for this will happen when the
+- * RECOVERING flag is dropped later */
+- res->state &= ~DLM_LOCK_RES_DROPPING_REF;
++ if (res->state & DLM_LOCK_RES_DROPPING_REF) {
++ mlog(ML_NOTICE, "Ignore %.*s for "
++ "recovery as it is being freed\n",
++ res->lockname.len,
++ res->lockname.name);
++ } else
++ dlm_move_lockres_to_recovery_list(dlm,
++ res);
+
+- dlm_move_lockres_to_recovery_list(dlm, res);
+ } else if (res->owner == dlm->node_num) {
+ dlm_free_dead_locks(dlm, res, dead_node);
+ __dlm_lockres_calc_usage(dlm, res);
+--- a/fs/ocfs2/dlm/dlmthread.c
++++ b/fs/ocfs2/dlm/dlmthread.c
+@@ -92,19 +92,27 @@ int __dlm_lockres_has_locks(struct dlm_l
+ * truly ready to be freed. */
+ int __dlm_lockres_unused(struct dlm_lock_resource *res)
+ {
+- if (!__dlm_lockres_has_locks(res) &&
+- (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) {
+- /* try not to scan the bitmap unless the first two
+- * conditions are already true */
+- int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
+- if (bit >= O2NM_MAX_NODES) {
+- /* since the bit for dlm->node_num is not
+- * set, inflight_locks better be zero */
+- BUG_ON(res->inflight_locks != 0);
+- return 1;
+- }
+- }
+- return 0;
++ int bit;
++
++ if (__dlm_lockres_has_locks(res))
++ return 0;
++
++ if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
++ return 0;
++
++ if (res->state & DLM_LOCK_RES_RECOVERING)
++ return 0;
++
++ bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
++ if (bit < O2NM_MAX_NODES)
++ return 0;
++
++ /*
++ * since the bit for dlm->node_num is not set, inflight_locks better
++ * be zero
++ */
++ BUG_ON(res->inflight_locks != 0);
++ return 1;
+ }
+
+
--- /dev/null
+From 6d98c3ccb52f692f1a60339dde7c700686a5568b Mon Sep 17 00:00:00 2001
+From: Wengang Wang <wen.gang.wang@oracle.com>
+Date: Fri, 16 Jul 2010 23:13:33 +0800
+Subject: ocfs2/dlm: fix a dead lock
+
+From: Wengang Wang <wen.gang.wang@oracle.com>
+
+commit 6d98c3ccb52f692f1a60339dde7c700686a5568b upstream.
+
+When we have to take both dlm->master_lock and lockres->spinlock,
+take them in this order:
+
+lockres->spinlock first, and then dlm->master_lock.
+
+The patch fixes a violation of this rule. We can simply move the taking of
+dlm->master_lock to after res->spinlock has been dropped, since accessing
+res->state and freeing the mle memory does not need master_lock's
+protection.
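+
+In pseudo-C, the rule amounts to the following nesting (a minimal sketch,
+not code from the driver):
+
+	spin_lock(&res->spinlock);
+	spin_lock(&dlm->master_lock);
+	/* ... work that needs both locks ... */
+	spin_unlock(&dlm->master_lock);
+	spin_unlock(&res->spinlock);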
+
+Signed-off-by: Wengang Wang <wen.gang.wang@oracle.com>
+Signed-off-by: Joel Becker <joel.becker@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ocfs2/dlm/dlmmaster.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/fs/ocfs2/dlm/dlmmaster.c
++++ b/fs/ocfs2/dlm/dlmmaster.c
+@@ -3050,8 +3050,6 @@ int dlm_migrate_request_handler(struct o
+ /* check for pre-existing lock */
+ spin_lock(&dlm->spinlock);
+ res = __dlm_lookup_lockres(dlm, name, namelen, hash);
+- spin_lock(&dlm->master_lock);
+-
+ if (res) {
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_RECOVERING) {
+@@ -3069,14 +3067,15 @@ int dlm_migrate_request_handler(struct o
+ spin_unlock(&res->spinlock);
+ }
+
++ spin_lock(&dlm->master_lock);
+ /* ignore status. only nonzero status would BUG. */
+ ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
+ name, namelen,
+ migrate->new_master,
+ migrate->master);
+
+-unlock:
+ spin_unlock(&dlm->master_lock);
++unlock:
+ spin_unlock(&dlm->spinlock);
+
+ if (oldmle) {
--- /dev/null
+From b11f1f1ab73fd358b1b734a9427744802202ba68 Mon Sep 17 00:00:00 2001
+From: Wengang Wang <wen.gang.wang@oracle.com>
+Date: Fri, 30 Jul 2010 23:18:00 +0800
+Subject: ocfs2/dlm: remove potential deadlock -V3
+
+From: Wengang Wang <wen.gang.wang@oracle.com>
+
+commit b11f1f1ab73fd358b1b734a9427744802202ba68 upstream.
+
+When we need to take both dlm_domain_lock and dlm->spinlock, we should take
+them in this order: dlm_domain_lock first, then dlm->spinlock.
+
+There are paths that disobey this order, namely calling dlm_lockres_put()
+with dlm->spinlock held in dlm_run_purge_list. dlm_lockres_put() calls
+dlm_put() when dropping the ref, and dlm_put() locks dlm_domain_lock.
+
+Fix:
+Don't grab/put the dlm when initialising/releasing a lockres.
+That grab is not required because we don't call dlm_unregister_domain()
+based on refcount.
+
+Signed-off-by: Wengang Wang <wen.gang.wang@oracle.com>
+Signed-off-by: Joel Becker <joel.becker@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ocfs2/dlm/dlmmaster.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/fs/ocfs2/dlm/dlmmaster.c
++++ b/fs/ocfs2/dlm/dlmmaster.c
+@@ -511,8 +511,6 @@ static void dlm_lockres_release(struct k
+
+ atomic_dec(&dlm->res_cur_count);
+
+- dlm_put(dlm);
+-
+ if (!hlist_unhashed(&res->hash_node) ||
+ !list_empty(&res->granted) ||
+ !list_empty(&res->converting) ||
+@@ -585,8 +583,6 @@ static void dlm_init_lockres(struct dlm_
+ res->migration_pending = 0;
+ res->inflight_locks = 0;
+
+- /* put in dlm_lockres_release */
+- dlm_grab(dlm);
+ res->dlm = dlm;
+
+ kref_init(&res->refs);
--- /dev/null
+From 6eda3dd33f8a0ce58ee56a11351758643a698db4 Mon Sep 17 00:00:00 2001
+From: Tiger Yang <tiger.yang@oracle.com>
+Date: Fri, 16 Jul 2010 11:21:23 +0800
+Subject: ocfs2: do not overwrite error codes in ocfs2_init_acl
+
+From: Tiger Yang <tiger.yang@oracle.com>
+
+commit 6eda3dd33f8a0ce58ee56a11351758643a698db4 upstream.
+
+Setting the acl while creating a new inode depends on
+the error codes of posix_acl_create_masq. This patch fixes
+an issue where those error codes were being overwritten.
+
+Reported-by: Pawel Zawora <pzawora@gmail.com>
+Signed-off-by: Tiger Yang <tiger.yang@oracle.com>
+Signed-off-by: Joel Becker <joel.becker@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ocfs2/acl.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/fs/ocfs2/acl.c
++++ b/fs/ocfs2/acl.c
+@@ -344,7 +344,7 @@ int ocfs2_init_acl(handle_t *handle,
+ {
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct posix_acl *acl = NULL;
+- int ret = 0;
++ int ret = 0, ret2;
+ mode_t mode;
+
+ if (!S_ISLNK(inode->i_mode)) {
+@@ -381,7 +381,12 @@ int ocfs2_init_acl(handle_t *handle,
+ mode = inode->i_mode;
+ ret = posix_acl_create_masq(clone, &mode);
+ if (ret >= 0) {
+- ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
++ ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
++ if (ret2) {
++ mlog_errno(ret2);
++ ret = ret2;
++ goto cleanup;
++ }
+ if (ret > 0) {
+ ret = ocfs2_set_acl(handle, inode,
+ di_bh, ACL_TYPE_ACCESS,
--- /dev/null
+From 7beaf243787f85a2ef9213ccf13ab4a243283fde Mon Sep 17 00:00:00 2001
+From: Srinivas Eeda <srinivas.eeda@oracle.com>
+Date: Mon, 19 Jul 2010 16:04:12 -0700
+Subject: ocfs2 fix o2dlm dlm run purgelist (rev 3)
+
+From: Srinivas Eeda <srinivas.eeda@oracle.com>
+
+commit 7beaf243787f85a2ef9213ccf13ab4a243283fde upstream.
+
+This patch fixes two problems in dlm_run_purgelist:
+
+1. If a lockres is found to be in use, dlm_run_purgelist keeps trying to purge
+the same lockres instead of trying the next one.
+
+2. When a lockres is found unused, dlm_run_purgelist releases the lockres
+spinlock before setting DLM_LOCK_RES_DROPPING_REF and calls dlm_purge_lockres.
+The spinlock is reacquired, but in this window the lockres can get reused.
+This leads to a BUG.
+
+This patch modifies dlm_run_purgelist to skip a lockres if it is in use and
+purge the next one instead. It also sets DLM_LOCK_RES_DROPPING_REF before
+releasing the lockres spinlock, protecting the lockres from getting reused.
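+
+A minimal sketch of the key ordering (not the actual function body):
+
+	spin_lock(&res->spinlock);
+	if (!__dlm_lockres_unused(res)) {
+		spin_unlock(&res->spinlock);	/* in use: try the next lockres */
+	} else {
+		res->state |= DLM_LOCK_RES_DROPPING_REF;	/* set before unlocking */
+		spin_unlock(&res->spinlock);	/* now the lockres cannot be reused */
+		/* ... send the deref and finish the purge ... */
+	}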
+
+Signed-off-by: Srinivas Eeda <srinivas.eeda@oracle.com>
+Acked-by: Sunil Mushran <sunil.mushran@oracle.com>
+Signed-off-by: Joel Becker <joel.becker@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ocfs2/dlm/dlmthread.c | 80 +++++++++++++++++++----------------------------
+ 1 file changed, 34 insertions(+), 46 deletions(-)
+
+--- a/fs/ocfs2/dlm/dlmthread.c
++++ b/fs/ocfs2/dlm/dlmthread.c
+@@ -152,45 +152,25 @@ void dlm_lockres_calc_usage(struct dlm_c
+ spin_unlock(&dlm->spinlock);
+ }
+
+-static int dlm_purge_lockres(struct dlm_ctxt *dlm,
++static void dlm_purge_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+ {
+ int master;
+ int ret = 0;
+
+- spin_lock(&res->spinlock);
+- if (!__dlm_lockres_unused(res)) {
+- mlog(0, "%s:%.*s: tried to purge but not unused\n",
+- dlm->name, res->lockname.len, res->lockname.name);
+- __dlm_print_one_lock_resource(res);
+- spin_unlock(&res->spinlock);
+- BUG();
+- }
+-
+- if (res->state & DLM_LOCK_RES_MIGRATING) {
+- mlog(0, "%s:%.*s: Delay dropref as this lockres is "
+- "being remastered\n", dlm->name, res->lockname.len,
+- res->lockname.name);
+- /* Re-add the lockres to the end of the purge list */
+- if (!list_empty(&res->purge)) {
+- list_del_init(&res->purge);
+- list_add_tail(&res->purge, &dlm->purge_list);
+- }
+- spin_unlock(&res->spinlock);
+- return 0;
+- }
++ assert_spin_locked(&dlm->spinlock);
++ assert_spin_locked(&res->spinlock);
+
+ master = (res->owner == dlm->node_num);
+
+- if (!master)
+- res->state |= DLM_LOCK_RES_DROPPING_REF;
+- spin_unlock(&res->spinlock);
+
+ mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
+ res->lockname.name, master);
+
+ if (!master) {
++ res->state |= DLM_LOCK_RES_DROPPING_REF;
+ /* drop spinlock... retake below */
++ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->spinlock);
+
+ spin_lock(&res->spinlock);
+@@ -208,31 +188,35 @@ static int dlm_purge_lockres(struct dlm_
+ mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
+ dlm->name, res->lockname.len, res->lockname.name, ret);
+ spin_lock(&dlm->spinlock);
++ spin_lock(&res->spinlock);
+ }
+
+- spin_lock(&res->spinlock);
+ if (!list_empty(&res->purge)) {
+ mlog(0, "removing lockres %.*s:%p from purgelist, "
+ "master = %d\n", res->lockname.len, res->lockname.name,
+ res, master);
+ list_del_init(&res->purge);
+- spin_unlock(&res->spinlock);
+ dlm_lockres_put(res);
+ dlm->purge_count--;
+- } else
+- spin_unlock(&res->spinlock);
++ }
++
++ if (!__dlm_lockres_unused(res)) {
++ mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n",
++ dlm->name, res->lockname.len, res->lockname.name);
++ __dlm_print_one_lock_resource(res);
++ BUG();
++ }
+
+ __dlm_unhash_lockres(res);
+
+ /* lockres is not in the hash now. drop the flag and wake up
+ * any processes waiting in dlm_get_lock_resource. */
+ if (!master) {
+- spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_DROPPING_REF;
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+- }
+- return 0;
++ } else
++ spin_unlock(&res->spinlock);
+ }
+
+ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
+@@ -251,17 +235,7 @@ static void dlm_run_purge_list(struct dl
+ lockres = list_entry(dlm->purge_list.next,
+ struct dlm_lock_resource, purge);
+
+- /* Status of the lockres *might* change so double
+- * check. If the lockres is unused, holding the dlm
+- * spinlock will prevent people from getting and more
+- * refs on it -- there's no need to keep the lockres
+- * spinlock. */
+ spin_lock(&lockres->spinlock);
+- unused = __dlm_lockres_unused(lockres);
+- spin_unlock(&lockres->spinlock);
+-
+- if (!unused)
+- continue;
+
+ purge_jiffies = lockres->last_used +
+ msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);
+@@ -273,15 +247,29 @@ static void dlm_run_purge_list(struct dl
+ * in tail order, we can stop at the first
+ * unpurgable resource -- anyone added after
+ * him will have a greater last_used value */
++ spin_unlock(&lockres->spinlock);
+ break;
+ }
+
++ /* Status of the lockres *might* change so double
++ * check. If the lockres is unused, holding the dlm
++ * spinlock will prevent people from getting and more
++ * refs on it. */
++ unused = __dlm_lockres_unused(lockres);
++ if (!unused ||
++ (lockres->state & DLM_LOCK_RES_MIGRATING)) {
++ mlog(0, "lockres %s:%.*s: is in use or "
++ "being remastered, used %d, state %d\n",
++ dlm->name, lockres->lockname.len,
++ lockres->lockname.name, !unused, lockres->state);
++ list_move_tail(&dlm->purge_list, &lockres->purge);
++ spin_unlock(&lockres->spinlock);
++ continue;
++ }
++
+ dlm_lockres_get(lockres);
+
+- /* This may drop and reacquire the dlm spinlock if it
+- * has to do migration. */
+- if (dlm_purge_lockres(dlm, lockres))
+- BUG();
++ dlm_purge_lockres(dlm, lockres);
+
+ dlm_lockres_put(lockres);
+
--- /dev/null
+From 351af0725e5222e35741011d1ea62215c1ed06db Mon Sep 17 00:00:00 2001
+From: Zhang, Yanmin <yanmin_zhang@linux.intel.com>
+Date: Fri, 6 Aug 2010 13:39:08 +0800
+Subject: perf, x86: Fix Intel-nhm PMU programming errata workaround
+
+From: Zhang, Yanmin <yanmin_zhang@linux.intel.com>
+
+commit 351af0725e5222e35741011d1ea62215c1ed06db upstream.
+
+Fix the Errata AAK100/AAP53/BD53 workaround: the officially documented
+workaround we implemented in:
+
+ 11164cd: perf, x86: Add Nehelem PMU programming errata workaround
+
+doesn't actually work fully; it causes a stuck PMU state
+under load and non-functioning perf profiling.
+
+A functional workaround was found by trial & error.
+
+Affects all Nehalem-class Intel PMUs.
+
+Signed-off-by: Zhang Yanmin <yanmin_zhang@linux.intel.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+LKML-Reference: <1281073148.2125.63.camel@ymzhang.sh.intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/cpu/perf_event_intel.c | 83 +++++++++++++++++++++++++--------
+ 1 file changed, 64 insertions(+), 19 deletions(-)
+
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -491,33 +491,78 @@ static void intel_pmu_enable_all(int add
+ * Intel Errata AAP53 (model 30)
+ * Intel Errata BD53 (model 44)
+ *
+- * These chips need to be 'reset' when adding counters by programming
+- * the magic three (non counting) events 0x4300D2, 0x4300B1 and 0x4300B5
+- * either in sequence on the same PMC or on different PMCs.
++ * The official story:
++ * These chips need to be 'reset' when adding counters by programming the
++ * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
++ * in sequence on the same PMC or on different PMCs.
++ *
++ * In practise it appears some of these events do in fact count, and
++ * we need to programm all 4 events.
+ */
+-static void intel_pmu_nhm_enable_all(int added)
++static void intel_pmu_nhm_workaround(void)
+ {
+- if (added) {
+- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+- int i;
+-
+- wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2);
+- wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1);
+- wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5);
++ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
++ static const unsigned long nhm_magic[4] = {
++ 0x4300B5,
++ 0x4300D2,
++ 0x4300B1,
++ 0x4300B1
++ };
++ struct perf_event *event;
++ int i;
++
++ /*
++ * The Errata requires below steps:
++ * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
++ * 2) Configure 4 PERFEVTSELx with the magic events and clear
++ * the corresponding PMCx;
++ * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
++ * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
++ * 5) Clear 4 pairs of ERFEVTSELx and PMCx;
++ */
++
++ /*
++ * The real steps we choose are a little different from above.
++ * A) To reduce MSR operations, we don't run step 1) as they
++ * are already cleared before this function is called;
++ * B) Call x86_perf_event_update to save PMCx before configuring
++ * PERFEVTSELx with magic number;
++ * C) With step 5), we do clear only when the PERFEVTSELx is
++ * not used currently.
++ * D) Call x86_perf_event_set_period to restore PMCx;
++ */
++
++ /* We always operate 4 pairs of PERF Counters */
++ for (i = 0; i < 4; i++) {
++ event = cpuc->events[i];
++ if (event)
++ x86_perf_event_update(event);
++ }
+
+- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
+- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
++ for (i = 0; i < 4; i++) {
++ wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
++ wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
++ }
+
+- for (i = 0; i < 3; i++) {
+- struct perf_event *event = cpuc->events[i];
++ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
++ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
+
+- if (!event)
+- continue;
++ for (i = 0; i < 4; i++) {
++ event = cpuc->events[i];
+
++ if (event) {
++ x86_perf_event_set_period(event);
+ __x86_pmu_enable_event(&event->hw,
+- ARCH_PERFMON_EVENTSEL_ENABLE);
+- }
++ ARCH_PERFMON_EVENTSEL_ENABLE);
++ } else
++ wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
+ }
++}
++
++static void intel_pmu_nhm_enable_all(int added)
++{
++ if (added)
++ intel_pmu_nhm_workaround();
+ intel_pmu_enable_all(added);
+ }
+
--- /dev/null
+From 1c250d709fdc8aa5bf42d90be99428a01a256a55 Mon Sep 17 00:00:00 2001
+From: Cyrill Gorcunov <gorcunov@openvz.org>
+Date: Thu, 5 Aug 2010 19:09:17 +0400
+Subject: perf, x86: P4 PMU -- update nmi irq statistics and unmask lvt entry properly
+
+From: Cyrill Gorcunov <gorcunov@openvz.org>
+
+commit 1c250d709fdc8aa5bf42d90be99428a01a256a55 upstream.
+
+If the last active performance counter has not overflowed at the
+moment an NMI is triggered by another counter, the irq
+statistics may miss an update stage. As a more serious
+consequence, the apic quirk may not be triggered, so the apic lvt entry
+stays masked.
+
+Tested-by: Lin Ming <ming.m.lin@intel.com>
+Signed-off-by: Cyrill Gorcunov <gorcunov@openvz.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+LKML-Reference: <20100805150917.GA6311@lenovo>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/cpu/perf_event_p4.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/cpu/perf_event_p4.c
++++ b/arch/x86/kernel/cpu/perf_event_p4.c
+@@ -581,6 +581,7 @@ static int p4_pmu_handle_irq(struct pt_r
+ cpuc = &__get_cpu_var(cpu_hw_events);
+
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
++ int overflow;
+
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+@@ -591,12 +592,14 @@ static int p4_pmu_handle_irq(struct pt_r
+ WARN_ON_ONCE(hwc->idx != idx);
+
+ /* it might be unflagged overflow */
+- handled = p4_pmu_clear_cccr_ovf(hwc);
++ overflow = p4_pmu_clear_cccr_ovf(hwc);
+
+ val = x86_perf_event_update(event);
+- if (!handled && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
++ if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
+ continue;
+
++ handled += overflow;
++
+ /* event overflow for sure */
+ data.period = event->hw.last_period;
+
+@@ -612,7 +615,7 @@ static int p4_pmu_handle_irq(struct pt_r
+ inc_irq_stat(apic_perf_irqs);
+ }
+
+- return handled;
++ return handled > 0;
+ }
+
+ /*
--- /dev/null
+From c3f755e3842108c1cffe570fe9802239810352b6 Mon Sep 17 00:00:00 2001
+From: Victor van den Elzen <victor.vde@gmail.com>
+Date: Sun, 15 Aug 2010 01:19:33 +0200
+Subject: platform/x86: move rfkill for Dell Mini 1012 to compal-laptop
+
+From: Victor van den Elzen <victor.vde@gmail.com>
+
+commit c3f755e3842108c1cffe570fe9802239810352b6 upstream.
+
+Like others in the Mini series, the Dell Mini 1012 does not support
+the smbios hook required by dell-laptop.
+
+Signed-off-by: Victor van den Elzen <victor.vde@gmail.com>
+Signed-off-by: Matthew Garrett <mjg@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/platform/x86/compal-laptop.c | 9 +++++++++
+ drivers/platform/x86/dell-laptop.c | 7 +++++++
+ 2 files changed, 16 insertions(+)
+
+--- a/drivers/platform/x86/compal-laptop.c
++++ b/drivers/platform/x86/compal-laptop.c
+@@ -259,6 +259,14 @@ static struct dmi_system_id __initdata c
+ .callback = dmi_check_cb
+ },
+ {
++ .ident = "Dell Mini 1012",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
++ },
++ .callback = dmi_check_cb
++ },
++ {
+ .ident = "Dell Inspiron 11z",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+@@ -375,5 +383,6 @@ MODULE_ALIAS("dmi:*:rnIFT00:rvrIFT00:*")
+ MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron910:*");
+ MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1010:*");
+ MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1011:*");
++MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1012:*");
+ MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1110:*");
+ MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1210:*");
+--- a/drivers/platform/x86/dell-laptop.c
++++ b/drivers/platform/x86/dell-laptop.c
+@@ -116,6 +116,13 @@ static struct dmi_system_id __devinitdat
+ },
+ },
+ {
++ .ident = "Dell Mini 1012",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
++ },
++ },
++ {
+ .ident = "Dell Inspiron 11z",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
--- /dev/null
+From 93b352fce679945845664b56b0c3afbd655a7a12 Mon Sep 17 00:00:00 2001
+From: Axel Lin <axel.lin@gmail.com>
+Date: Mon, 16 Aug 2010 16:09:09 +0800
+Subject: pxa3xx: fix ns2cycle equation
+
+From: Axel Lin <axel.lin@gmail.com>
+
+commit 93b352fce679945845664b56b0c3afbd655a7a12 upstream.
+
+Testing on a PXA310 platform with Samsung K9F2G08X0B NAND flash,
+with tCH=5 and the clock at 156MHz, ns2cycle(5, 156000000) returns -1.
+
+A negative return value from ns2cycle breaks the NDTR0_tXX macros.
+
+After checking the commit log, I found the problem was introduced by
+commit 5b0d4d7c8a67c5ba3d35e6ceb0c5530cc6846db7
+"[MTD] [NAND] pxa3xx: convert from ns to clock ticks more accurately"
+
+To get the number of clock cycles, we use the equation below:
+num of clock cycles = time (ns) / one clock cycle (ns) + 1
+We need to add 1 cycle here because integer division truncates the result.
+It is possible that developers set the minimum values from the spec for the
+timing settings, so the truncation may cause problems; it is safe to add an
+extra cycle here.
+
+The various fields in NDTR{01} are in units of clock ticks minus one,
+thus we should subtract 1 cycle then.
+
+Thus the correct equation should be:
+num of clock cycles = time (ns) / one clock cycle (ns) + 1 - 1
+ = time (ns) / one clock cycle (ns)
+
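+With the figures from the report above, the macro arithmetic works out as
+follows (a sketch of the integer math only, nothing driver-specific):
+
+	clk / 1000000 = 156
+	old: (5 * 156 / 1000) - 1 = 0 - 1 = -1   /* negative, breaks NDTR0_tXX */
+	new:  5 * 156 / 1000      = 0
+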
+Signed-off-by: Axel Lin <axel.lin@gmail.com>
+Signed-off-by: Lei Wen <leiwen@marvell.com>
+Acked-by: Eric Miao <eric.y.miao@gmail.com>
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/mtd/nand/pxa3xx_nand.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mtd/nand/pxa3xx_nand.c
++++ b/drivers/mtd/nand/pxa3xx_nand.c
+@@ -363,7 +363,7 @@ static struct pxa3xx_nand_flash *builtin
+ #define tAR_NDTR1(r) (((r) >> 0) & 0xf)
+
+ /* convert nano-seconds to nand flash controller clock cycles */
+-#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) - 1)
++#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
+
+ /* convert nand flash controller clock cycles to nano-seconds */
+ #define cycle2ns(c, clk) ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))
alsa-emu10k1-delay-the-pcm-interrupts-add-pcm_irq_delay-parameter.patch
alsa-hda-fix-missing-stream-for-second-adc-on-realtek-alc260-hda-codec.patch
alsa-hda-add-quirk-for-dell-vostro-1220.patch
+ocfs2-do-not-overwrite-error-codes-in-ocfs2_init_acl.patch
+ocfs2-dlm-fix-a-dead-lock.patch
+ocfs2-fix-o2dlm-dlm-run-purgelist-rev-3.patch
+ocfs2-count-more-refcount-records-in-file-system-fragmentation.patch
+ocfs2-dlm-avoid-incorrect-bit-set-in-refmap-on-recovery-master.patch
+ocfs2-dlm-remove-potential-deadlock-v3.patch
+wl1251-fix-trigger-scan-timeout-usage.patch
+nilfs2-fix-list-corruption-after-ifile-creation-failure.patch
+tracing-fix-an-unallocated-memory-access-in-function_graph.patch
+tracing-fix-ring_buffer_read_page-reading-out-of-page-boundary.patch
+cfg80211-fix-locking-in-action-frame-tx.patch
+x86-asm-refactor-atomic64_386_32.s-to-support-old-binutils-and-be-cleaner.patch
+perf-x86-p4-pmu-update-nmi-irq-statistics-and-unmask-lvt-entry-properly.patch
+x86-apic-map-the-local-apic-when-parsing-the-mp-table.patch
+platform-x86-move-rfkill-for-dell-mini-1012-to-compal-laptop.patch
+x86-hotplug-serialize-cpu-hotplug-to-avoid-bringup-concurrency-issues.patch
+perf-x86-fix-intel-nhm-pmu-programming-errata-workaround.patch
+x86-apic-fix-apic-debug-boot-crash.patch
+fix-the-nested-pr-lock-calling-issue-in-acl.patch
+drm-radeon-kms-add-additional-quirk-for-acer-rv620-laptop.patch
+drm-radeon-kms-add-missing-copy-from-user.patch
+hwmon-pc87360-fix-device-resource-declaration.patch
+arm-tighten-check-for-allowable-cpsr-values.patch
+arm-fix-gen_nand-probe-structures-contents.patch
+bfin-fix-gen_nand-probe-structures-contents.patch
+ath9k_htc-fix-panic-on-packet-injection-using-airbase-ng-tool.patch
+nfs-add-lookupcache-to-displayed-mount-options.patch
+nfs-fix-an-oops-in-the-nfsv4-atomic-open-code.patch
+ath5k-disable-aspm-l0s-for-all-cards.patch
+pxa3xx-fix-ns2cycle-equation.patch
+matroxfb-fix-incorrect-use-of-memcpy_toio.patch
+mtd-nand-fix-probe-of-samsung-nand-chips.patch
+mtd-change-struct-flchip_shared-spinlock-locking-into-mutex.patch
+drm-i915-fixup-pageflip-ringbuffer-commands-for-i8xx.patch
+drm-i915-i8xx-also-doesn-t-like-multiple-oustanding-pageflips.patch
+drm-i915-edp-flush-the-write-before-waiting-for-plls.patch
+dm-snapshot-iterate-origin-and-cow-devices.patch
+dm-snapshot-test-chunk-size-against-both-origin-and-snapshot.patch
+dm-prevent-access-to-md-being-deleted.patch
+dm-ioctl-release-_hash_lock-between-devices-in-remove_all.patch
--- /dev/null
+From 575570f02761bd680ba5731c1dfd4701062e7fb2 Mon Sep 17 00:00:00 2001
+From: Shaohua Li <shaohua.li@intel.com>
+Date: Tue, 27 Jul 2010 16:06:34 +0800
+Subject: tracing: Fix an unallocated memory access in function_graph
+
+From: Shaohua Li <shaohua.li@intel.com>
+
+commit 575570f02761bd680ba5731c1dfd4701062e7fb2 upstream.
+
+With CONFIG_DEBUG_PAGEALLOC, I observed an access to unallocated memory in
+the function_graph trace. It appears we find a small entry in the ring
+buffer but access it as a big entry. The access runs past the end of the
+page and touches an unallocated page.
+
+Signed-off-by: Shaohua Li <shaohua.li@intel.com>
+LKML-Reference: <1280217994.32400.76.camel@sli10-desk.sh.intel.com>
+[ Added a comment to explain the problem - SDR ]
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/trace/trace_functions_graph.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/kernel/trace/trace_functions_graph.c
++++ b/kernel/trace/trace_functions_graph.c
+@@ -507,7 +507,15 @@ get_return_for_leaf(struct trace_iterato
+ * if the output fails.
+ */
+ data->ent = *curr;
+- data->ret = *next;
++ /*
++ * If the next event is not a return type, then
++ * we only care about what type it is. Otherwise we can
++ * safely copy the entire event.
++ */
++ if (next->ent.type == TRACE_GRAPH_RET)
++ data->ret = *next;
++ else
++ data->ret.ent.type = next->ent.type;
+ }
+ }
+
--- /dev/null
+From 18fab912d4fa70133df164d2dcf3310be0c38c34 Mon Sep 17 00:00:00 2001
+From: Huang Ying <ying.huang@intel.com>
+Date: Wed, 28 Jul 2010 14:14:01 +0800
+Subject: tracing: Fix ring_buffer_read_page reading out of page boundary
+
+From: Huang Ying <ying.huang@intel.com>
+
+commit 18fab912d4fa70133df164d2dcf3310be0c38c34 upstream.
+
+With the configuration: CONFIG_DEBUG_PAGEALLOC=y and Shaohua's patch:
+
+[PATCH]x86: make spurious_fault check correct pte bit
+
+Function call graph trace with the following will trigger a page fault.
+
+# cd /sys/kernel/debug/tracing/
+# echo function_graph > current_tracer
+# cat per_cpu/cpu1/trace_pipe_raw > /dev/null
+
+BUG: unable to handle kernel paging request at ffff880006e99000
+IP: [<ffffffff81085572>] rb_event_length+0x1/0x3f
+PGD 1b19063 PUD 1b1d063 PMD 3f067 PTE 6e99160
+Oops: 0000 [#1] SMP DEBUG_PAGEALLOC
+last sysfs file: /sys/devices/virtual/net/lo/operstate
+CPU 1
+Modules linked in:
+
+Pid: 1982, comm: cat Not tainted 2.6.35-rc6-aes+ #300 /Bochs
+RIP: 0010:[<ffffffff81085572>] [<ffffffff81085572>] rb_event_length+0x1/0x3f
+RSP: 0018:ffff880006475e38 EFLAGS: 00010006
+RAX: 0000000000000ff0 RBX: ffff88000786c630 RCX: 000000000000001d
+RDX: ffff880006e98000 RSI: 0000000000000ff0 RDI: ffff880006e99000
+RBP: ffff880006475eb8 R08: 000000145d7008bd R09: 0000000000000000
+R10: 0000000000008000 R11: ffffffff815d9336 R12: ffff880006d08000
+R13: ffff880006e605d8 R14: 0000000000000000 R15: 0000000000000018
+FS: 00007f2b83e456f0(0000) GS:ffff880002100000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
+CR2: ffff880006e99000 CR3: 00000000064a8000 CR4: 00000000000006e0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
+Process cat (pid: 1982, threadinfo ffff880006474000, task ffff880006e40770)
+Stack:
+ ffff880006475eb8 ffffffff8108730f 0000000000000ff0 000000145d7008bd
+<0> ffff880006e98010 ffff880006d08010 0000000000000296 ffff88000786c640
+<0> ffffffff81002956 0000000000000000 ffff8800071f4680 ffff8800071f4680
+Call Trace:
+ [<ffffffff8108730f>] ? ring_buffer_read_page+0x15a/0x24a
+ [<ffffffff81002956>] ? return_to_handler+0x15/0x2f
+ [<ffffffff8108a575>] tracing_buffers_read+0xb9/0x164
+ [<ffffffff810debfe>] vfs_read+0xaf/0x150
+ [<ffffffff81002941>] return_to_handler+0x0/0x2f
+ [<ffffffff810248b0>] __bad_area_nosemaphore+0x17e/0x1a1
+ [<ffffffff81002941>] return_to_handler+0x0/0x2f
+ [<ffffffff810248e6>] bad_area_nosemaphore+0x13/0x15
+Code: 80 25 b2 16 b3 00 fe c9 c3 55 48 89 e5 f0 80 0d a4 16 b3 00 02 c9 c3 55 31 c0 48 89 e5 48 83 3d 94 16 b3 00 01 c9 0f 94 c0 c3 55 <8a> 0f 48 89 e5 83 e1 1f b8 08 00 00 00 0f b6 d1 83 fa 1e 74 27
+RIP [<ffffffff81085572>] rb_event_length+0x1/0x3f
+ RSP <ffff880006475e38>
+CR2: ffff880006e99000
+---[ end trace a6877bb92ccb36bb ]---
+
+The root cause is that ring_buffer_read_page() may read past the page
+boundary, because the boundary check is done after the read. Fix this by
+doing the boundary check before reading.
+
+Reported-by: Shaohua Li <shaohua.li@intel.com>
+Signed-off-by: Huang Ying <ying.huang@intel.com>
+LKML-Reference: <1280297641.2771.307.camel@yhuang-dev>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/trace/ring_buffer.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -3868,6 +3868,9 @@ int ring_buffer_read_page(struct ring_bu
+ rpos = reader->read;
+ pos += size;
+
++ if (rpos >= commit)
++ break;
++
+ event = rb_reader_event(cpu_buffer);
+ size = rb_event_length(event);
+ } while (len > size);
--- /dev/null
+From fe0dbcc9d2e941328b3269dab102b94ad697ade5 Mon Sep 17 00:00:00 2001
+From: Yuri Kululin <ext-yuri.kululin@nokia.com>
+Date: Fri, 13 Aug 2010 13:46:12 +0400
+Subject: wl1251: fix trigger scan timeout usage
+
+From: Yuri Kululin <ext-yuri.kululin@nokia.com>
+
+commit fe0dbcc9d2e941328b3269dab102b94ad697ade5 upstream.
+
+Use the appropriate command (CMD_TRIGGER_SCAN_TO) instead of the scan
+command (CMD_SCAN) to configure the trigger scan timeout.
+
+This was broken in commit 3a98c30f3e8bb1f32b5bcb74a39647b3670de275.
+
+This fix addresses the bug reported here:
+
+https://bugzilla.kernel.org/show_bug.cgi?id=16554
+
+Signed-off-by: Yuri Ershov <ext-yuri.ershov@nokia.com>
+Signed-off-by: Yuri Kululin <ext-yuri.kululin@nokia.com>
+Acked-by: Kalle Valo <kvalo@adurom.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/wl12xx/wl1251_cmd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/wl12xx/wl1251_cmd.c
++++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c
+@@ -484,7 +484,7 @@ int wl1251_cmd_trigger_scan_to(struct wl
+
+ cmd->timeout = timeout;
+
+- ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
++ ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, cmd, sizeof(*cmd));
+ if (ret < 0) {
+ wl1251_error("cmd trigger scan to failed: %d", ret);
+ goto out;
--- /dev/null
+From 05e407603e527f9d808dd3866d3a17c2ce4dfcc5 Mon Sep 17 00:00:00 2001
+From: Daniel Kiper <dkiper@net-space.pl>
+Date: Fri, 20 Aug 2010 00:46:16 +0200
+Subject: x86, apic: Fix apic=debug boot crash
+
+From: Daniel Kiper <dkiper@net-space.pl>
+
+commit 05e407603e527f9d808dd3866d3a17c2ce4dfcc5 upstream.
+
+Fix a boot crash when apic=debug is used and the APIC is
+not properly initialized.
+
+This issue appears during Xen Dom0 kernel boot but the
+fix is generic and the crash could occur on real hardware
+as well.
+
+Signed-off-by: Daniel Kiper <dkiper@net-space.pl>
+Cc: xen-devel@lists.xensource.com
+Cc: konrad.wilk@oracle.com
+Cc: jeremy@goop.org
+LKML-Reference: <20100819224616.GB9967@router-fw-old.local.net-space.pl>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/apic/io_apic.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1728,6 +1728,8 @@ __apicdebuginit(void) print_IO_APIC(void
+ struct irq_pin_list *entry;
+
+ cfg = desc->chip_data;
++ if (!cfg)
++ continue;
+ entry = cfg->irq_2_pin;
+ if (!entry)
+ continue;
--- /dev/null
+From 5989cd6a1cbf86587edcc856791f960978087311 Mon Sep 17 00:00:00 2001
+From: Eric W. Biederman <ebiederm@xmission.com>
+Date: Wed, 4 Aug 2010 13:30:27 -0700
+Subject: x86, apic: Map the local apic when parsing the MP table.
+
+From: Eric W. Biederman <ebiederm@xmission.com>
+
+commit 5989cd6a1cbf86587edcc856791f960978087311 upstream.
+
+This fixes a regression in 2.6.35 from 2.6.34, that is
+present for select models of Intel cpus when people are
+using an MP table.
+
+The commit cf7500c0ea133d66f8449d86392d83f840102632
+"x86, ioapic: In mpparse use mp_register_ioapic" started
+calling mp_register_ioapic from MP_ioapic_info. An extremely
+simple change that was obviously correct. Unfortunately
+mp_register_ioapic did just a little more than the previous
+hand crafted code and so we gained this call path.
+
+The problem call path is:
+MP_ioapic_info()
+ mp_register_ioapic()
+ io_apic_unique_id()
+ io_apic_get_unique_id()
+ get_physical_broadcast()
+ modern_apic()
+ lapic_get_version()
+ apic_read(APIC_LVR)
+
+This turned out to be a problem because the local apic
+was not mapped at that point, unlike at the similar point
+in the ACPI parsing code.
+
+This problem is fixed by mapping the local apic when
+parsing the mptable as soon as we reasonably can.
+
+Looking at the number of places we set up the fixmap for
+the local apic, I see some serious simplification opportunities.
+For the moment, apart from not duplicating the setup of the
+fixmap in init_apic_mappings, I have not acted on them.
+
+The regression from 2.6.34 is tracked in bug
+https://bugzilla.kernel.org/show_bug.cgi?id=16173
+
+Reported-by: David Hill <hilld@binarystorm.net>
+Reported-by: Tvrtko Ursulin <tvrtko.ursulin@sophos.com>
+Tested-by: Tvrtko Ursulin <tvrtko.ursulin@sophos.com>
+Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
+LKML-Reference: <m1eiee86jg.fsf_-_@fess.ebiederm.org>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/apic/apic.c | 2 +-
+ arch/x86/kernel/mpparse.c | 16 ++++++++++++++++
+ 2 files changed, 17 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1606,7 +1606,7 @@ void __init init_apic_mappings(void)
+ * acpi lapic path already maps that address in
+ * acpi_register_lapic_address()
+ */
+- if (!acpi_lapic)
++ if (!acpi_lapic && !smp_found_config)
+ set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
+
+ apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n",
+--- a/arch/x86/kernel/mpparse.c
++++ b/arch/x86/kernel/mpparse.c
+@@ -274,6 +274,18 @@ static void __init smp_dump_mptable(stru
+
+ void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }
+
++static void __init smp_register_lapic_address(unsigned long address)
++{
++ mp_lapic_addr = address;
++
++ set_fixmap_nocache(FIX_APIC_BASE, address);
++ if (boot_cpu_physical_apicid == -1U) {
++ boot_cpu_physical_apicid = read_apic_id();
++ apic_version[boot_cpu_physical_apicid] =
++ GET_APIC_VERSION(apic_read(APIC_LVR));
++ }
++}
++
+ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
+ {
+ char str[16];
+@@ -295,6 +307,10 @@ static int __init smp_read_mpc(struct mp
+ if (early)
+ return 1;
+
++ /* Initialize the lapic mapping */
++ if (!acpi_lapic)
++ smp_register_lapic_address(mpc->lapic);
++
+ if (mpc->oemptr)
+ x86_init.mpparse.smp_read_mpc_oem(mpc);
+
--- /dev/null
+From 30246557a06bb20618bed906a06d1e1e0faa8bb4 Mon Sep 17 00:00:00 2001
+From: Luca Barbieri <luca@luca-barbieri.com>
+Date: Fri, 6 Aug 2010 04:04:38 +0200
+Subject: x86, asm: Refactor atomic64_386_32.S to support old binutils and be cleaner
+
+From: Luca Barbieri <luca@luca-barbieri.com>
+
+commit 30246557a06bb20618bed906a06d1e1e0faa8bb4 upstream.
+
+The old code didn't work on binutils 2.12 because setting a symbol to
+a register apparently requires a fairly recent version.
+
+This commit refactors the code to use the C preprocessor instead, and
+in the process makes the whole code a bit easier to understand.
+
+The object code produced is unchanged as expected.
+
+This fixes kernel bugzilla 16506.
+
+Reported-by: Dieter Stussy <kd6lvw+software@kd6lvw.ampr.org>
+Signed-off-by: Luca Barbieri <luca@luca-barbieri.com>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+LKML-Reference: <tip-*@git.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/lib/atomic64_386_32.S | 232 ++++++++++++++++++++++-------------------
+ 1 file changed, 126 insertions(+), 106 deletions(-)
+
+--- a/arch/x86/lib/atomic64_386_32.S
++++ b/arch/x86/lib/atomic64_386_32.S
+@@ -25,150 +25,170 @@
+ CFI_ADJUST_CFA_OFFSET -4
+ .endm
+
+-.macro BEGIN func reg
+-$v = \reg
++#define BEGIN(op) \
++.macro END; \
++ CFI_ENDPROC; \
++ENDPROC(atomic64_##op##_386); \
++.purgem END; \
++.endm; \
++ENTRY(atomic64_##op##_386); \
++ CFI_STARTPROC; \
++ LOCK v;
+
+-ENTRY(atomic64_\func\()_386)
+- CFI_STARTPROC
+- LOCK $v
+-
+-.macro RETURN
+- UNLOCK $v
++#define RET \
++ UNLOCK v; \
+ ret
+-.endm
+-
+-.macro END_
+- CFI_ENDPROC
+-ENDPROC(atomic64_\func\()_386)
+-.purgem RETURN
+-.purgem END_
+-.purgem END
+-.endm
+-
+-.macro END
+-RETURN
+-END_
+-.endm
+-.endm
+-
+-BEGIN read %ecx
+- movl ($v), %eax
+- movl 4($v), %edx
+-END
+-
+-BEGIN set %esi
+- movl %ebx, ($v)
+- movl %ecx, 4($v)
+-END
+-
+-BEGIN xchg %esi
+- movl ($v), %eax
+- movl 4($v), %edx
+- movl %ebx, ($v)
+- movl %ecx, 4($v)
+-END
+-
+-BEGIN add %ecx
+- addl %eax, ($v)
+- adcl %edx, 4($v)
+-END
+
+-BEGIN add_return %ecx
+- addl ($v), %eax
+- adcl 4($v), %edx
+- movl %eax, ($v)
+- movl %edx, 4($v)
+-END
+-
+-BEGIN sub %ecx
+- subl %eax, ($v)
+- sbbl %edx, 4($v)
+-END
++#define RET_END \
++ RET; \
++ END
++
++#define v %ecx
++BEGIN(read)
++ movl (v), %eax
++ movl 4(v), %edx
++RET_END
++#undef v
++
++#define v %esi
++BEGIN(set)
++ movl %ebx, (v)
++ movl %ecx, 4(v)
++RET_END
++#undef v
++
++#define v %esi
++BEGIN(xchg)
++ movl (v), %eax
++ movl 4(v), %edx
++ movl %ebx, (v)
++ movl %ecx, 4(v)
++RET_END
++#undef v
++
++#define v %ecx
++BEGIN(add)
++ addl %eax, (v)
++ adcl %edx, 4(v)
++RET_END
++#undef v
++
++#define v %ecx
++BEGIN(add_return)
++ addl (v), %eax
++ adcl 4(v), %edx
++ movl %eax, (v)
++ movl %edx, 4(v)
++RET_END
++#undef v
++
++#define v %ecx
++BEGIN(sub)
++ subl %eax, (v)
++ sbbl %edx, 4(v)
++RET_END
++#undef v
+
+-BEGIN sub_return %ecx
++#define v %ecx
++BEGIN(sub_return)
+ negl %edx
+ negl %eax
+ sbbl $0, %edx
+- addl ($v), %eax
+- adcl 4($v), %edx
+- movl %eax, ($v)
+- movl %edx, 4($v)
+-END
+-
+-BEGIN inc %esi
+- addl $1, ($v)
+- adcl $0, 4($v)
+-END
+-
+-BEGIN inc_return %esi
+- movl ($v), %eax
+- movl 4($v), %edx
++ addl (v), %eax
++ adcl 4(v), %edx
++ movl %eax, (v)
++ movl %edx, 4(v)
++RET_END
++#undef v
++
++#define v %esi
++BEGIN(inc)
++ addl $1, (v)
++ adcl $0, 4(v)
++RET_END
++#undef v
++
++#define v %esi
++BEGIN(inc_return)
++ movl (v), %eax
++ movl 4(v), %edx
+ addl $1, %eax
+ adcl $0, %edx
+- movl %eax, ($v)
+- movl %edx, 4($v)
+-END
+-
+-BEGIN dec %esi
+- subl $1, ($v)
+- sbbl $0, 4($v)
+-END
+-
+-BEGIN dec_return %esi
+- movl ($v), %eax
+- movl 4($v), %edx
++ movl %eax, (v)
++ movl %edx, 4(v)
++RET_END
++#undef v
++
++#define v %esi
++BEGIN(dec)
++ subl $1, (v)
++ sbbl $0, 4(v)
++RET_END
++#undef v
++
++#define v %esi
++BEGIN(dec_return)
++ movl (v), %eax
++ movl 4(v), %edx
+ subl $1, %eax
+ sbbl $0, %edx
+- movl %eax, ($v)
+- movl %edx, 4($v)
+-END
++ movl %eax, (v)
++ movl %edx, 4(v)
++RET_END
++#undef v
+
+-BEGIN add_unless %ecx
++#define v %ecx
++BEGIN(add_unless)
+ addl %eax, %esi
+ adcl %edx, %edi
+- addl ($v), %eax
+- adcl 4($v), %edx
++ addl (v), %eax
++ adcl 4(v), %edx
+ cmpl %eax, %esi
+ je 3f
+ 1:
+- movl %eax, ($v)
+- movl %edx, 4($v)
++ movl %eax, (v)
++ movl %edx, 4(v)
+ movl $1, %eax
+ 2:
+-RETURN
++ RET
+ 3:
+ cmpl %edx, %edi
+ jne 1b
+ xorl %eax, %eax
+ jmp 2b
+-END_
++END
++#undef v
+
+-BEGIN inc_not_zero %esi
+- movl ($v), %eax
+- movl 4($v), %edx
++#define v %esi
++BEGIN(inc_not_zero)
++ movl (v), %eax
++ movl 4(v), %edx
+ testl %eax, %eax
+ je 3f
+ 1:
+ addl $1, %eax
+ adcl $0, %edx
+- movl %eax, ($v)
+- movl %edx, 4($v)
++ movl %eax, (v)
++ movl %edx, 4(v)
+ movl $1, %eax
+ 2:
+-RETURN
++ RET
+ 3:
+ testl %edx, %edx
+ jne 1b
+ jmp 2b
+-END_
++END
++#undef v
+
+-BEGIN dec_if_positive %esi
+- movl ($v), %eax
+- movl 4($v), %edx
++#define v %esi
++BEGIN(dec_if_positive)
++ movl (v), %eax
++ movl 4(v), %edx
+ subl $1, %eax
+ sbbl $0, %edx
+ js 1f
+- movl %eax, ($v)
+- movl %edx, 4($v)
++ movl %eax, (v)
++ movl %edx, 4(v)
+ 1:
+-END
++RET_END
++#undef v
--- /dev/null
+From d7c53c9e822a4fefa13a0cae76f3190bfd0d5c11 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@amd64.org>
+Date: Thu, 19 Aug 2010 20:10:29 +0200
+Subject: x86, hotplug: Serialize CPU hotplug to avoid bringup concurrency issues
+
+From: Borislav Petkov <bp@amd64.org>
+
+commit d7c53c9e822a4fefa13a0cae76f3190bfd0d5c11 upstream.
+
+When testing cpu hotplug code on 32-bit we kept hitting the "CPU%d:
+Stuck ??" message due to multiple cores concurrently accessing the
+cpu_callin_mask, among others.
+
+These codepaths are not protected from concurrent access because there
+is no sane reason to make already complex code unnecessarily more
+complex, and we hit the issue only when insanely switching cores off-
+and online. So serialize hotplugging of cores at the sysfs level and be
+done with it.
+
+[ v2.1: fix !HOTPLUG_CPU build ]
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+LKML-Reference: <20100819181029.GC17171@aftab>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/Kconfig | 5 +++++
+ arch/x86/kernel/smpboot.c | 19 +++++++++++++++++++
+ 2 files changed, 24 insertions(+)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -247,6 +247,11 @@ config ARCH_HWEIGHT_CFLAGS
+
+ config KTIME_SCALAR
+ def_bool X86_32
++
++config ARCH_CPU_PROBE_RELEASE
++ def_bool y
++ depends on HOTPLUG_CPU
++
+ source "init/Kconfig"
+ source "kernel/Kconfig.freezer"
+
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -91,6 +91,25 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
+ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
+ #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
+ #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
++
++/*
++ * We need this for trampoline_base protection from concurrent accesses when
++ * off- and onlining cores wildly.
++ */
++static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
++
++void cpu_hotplug_driver_lock()
++{
++ mutex_lock(&x86_cpu_hotplug_driver_mutex);
++}
++
++void cpu_hotplug_driver_unlock()
++{
++ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
++}
++
++ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
++ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
+ #else
+ static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
+ #define get_idle_for_cpu(x) (idle_thread_array[(x)])