--- /dev/null
+From a9f042cbe5284f34ccff15f3084477e11b39b17b Mon Sep 17 00:00:00 2001
+From: Ming Lei <tom.leiming@gmail.com>
+Date: Sun, 28 Feb 2010 00:56:24 +0800
+Subject: ath9k: fix lockdep warning when unloading module
+
+From: Ming Lei <tom.leiming@gmail.com>
+
+commit a9f042cbe5284f34ccff15f3084477e11b39b17b upstream.
+
+Since txq->axq_lock may be held in softirq context, it must be
+acquired with spin_lock_bh() instead of spin_lock() if softirq is
+enabled.
+
+The patch fixes the lockdep warning below when unloading ath9k modules.
+
+=================================
+[ INFO: inconsistent lock state ]
+2.6.33-wl #12
+---------------------------------
+inconsistent {IN-SOFTIRQ-W} -> {SOFTIRQ-ON-W} usage.
+rmmod/3642 [HC0[0]:SC0[0]:HE1:SE1] takes:
+ (&(&txq->axq_lock)->rlock){+.?...}, at: [<ffffffffa03568c3>] ath_tx_node_cleanup+0x62/0x180 [ath9k]
+{IN-SOFTIRQ-W} state was registered at:
+ [<ffffffff8107577d>] __lock_acquire+0x2f6/0xd35
+ [<ffffffff81076289>] lock_acquire+0xcd/0xf1
+ [<ffffffff813a7486>] _raw_spin_lock_bh+0x3b/0x6e
+ [<ffffffffa0356b49>] spin_lock_bh+0xe/0x10 [ath9k]
+ [<ffffffffa0358ec7>] ath_tx_tasklet+0xcd/0x391 [ath9k]
+ [<ffffffffa0354f5f>] ath9k_tasklet+0x70/0xc8 [ath9k]
+ [<ffffffff8104e601>] tasklet_action+0x8c/0xf4
+ [<ffffffff8104f459>] __do_softirq+0xf8/0x1cd
+ [<ffffffff8100ab1c>] call_softirq+0x1c/0x30
+ [<ffffffff8100c2cf>] do_softirq+0x4b/0xa3
+ [<ffffffff8104f045>] irq_exit+0x4a/0x8c
+ [<ffffffff813acccc>] do_IRQ+0xac/0xc3
+ [<ffffffff813a7d53>] ret_from_intr+0x0/0x16
+ [<ffffffff81302d52>] cpuidle_idle_call+0x9e/0xf8
+ [<ffffffff81008be7>] cpu_idle+0x62/0x9d
+ [<ffffffff81391c1a>] rest_init+0x7e/0x80
+ [<ffffffff818bbd38>] start_kernel+0x3e8/0x3f3
+ [<ffffffff818bb2bc>] x86_64_start_reservations+0xa7/0xab
+ [<ffffffff818bb3b8>] x86_64_start_kernel+0xf8/0x107
+irq event stamp: 42037
+hardirqs last enabled at (42037): [<ffffffff813a7b21>] _raw_spin_unlock_irqrestore+0x47/0x56
+hardirqs last disabled at (42036): [<ffffffff813a72f4>] _raw_spin_lock_irqsave+0x2b/0x88
+softirqs last enabled at (42000): [<ffffffffa0353ea6>] spin_unlock_bh+0xe/0x10 [ath9k]
+softirqs last disabled at (41998): [<ffffffff813a7463>] _raw_spin_lock_bh+0x18/0x6e
+
+other info that might help us debug this:
+4 locks held by rmmod/3642:
+ #0: (rtnl_mutex){+.+.+.}, at: [<ffffffff8132c10d>] rtnl_lock+0x17/0x19
+ #1: (&wdev->mtx){+.+.+.}, at: [<ffffffffa01e53f2>] cfg80211_netdev_notifier_call+0x28d/0x46d [cfg80211]
+ #2: (&ifmgd->mtx){+.+.+.}, at: [<ffffffffa0260834>] ieee80211_mgd_deauth+0x3f/0x17e [mac80211]
+ #3: (&local->sta_mtx){+.+.+.}, at: [<ffffffffa025a381>] sta_info_destroy_addr+0x2b/0x5e [mac80211]
+
+stack backtrace:
+Pid: 3642, comm: rmmod Not tainted 2.6.33-wl #12
+Call Trace:
+ [<ffffffff81074469>] valid_state+0x178/0x18b
+ [<ffffffff81014f94>] ? save_stack_trace+0x2f/0x4c
+ [<ffffffff81074e08>] ? check_usage_backwards+0x0/0x88
+ [<ffffffff8107458f>] mark_lock+0x113/0x230
+ [<ffffffff810757f1>] __lock_acquire+0x36a/0xd35
+ [<ffffffff8101018d>] ? native_sched_clock+0x2d/0x5f
+ [<ffffffffa03568c3>] ? ath_tx_node_cleanup+0x62/0x180 [ath9k]
+ [<ffffffff81076289>] lock_acquire+0xcd/0xf1
+ [<ffffffffa03568c3>] ? ath_tx_node_cleanup+0x62/0x180 [ath9k]
+ [<ffffffff810732eb>] ? trace_hardirqs_off+0xd/0xf
+ [<ffffffff813a7193>] _raw_spin_lock+0x36/0x69
+ [<ffffffffa03568c3>] ? ath_tx_node_cleanup+0x62/0x180 [ath9k]
+ [<ffffffffa03568c3>] ath_tx_node_cleanup+0x62/0x180 [ath9k]
+ [<ffffffff810749ed>] ? trace_hardirqs_on+0xd/0xf
+ [<ffffffffa0353950>] ath9k_sta_remove+0x22/0x26 [ath9k]
+ [<ffffffffa025a08f>] __sta_info_destroy+0x1ad/0x38c [mac80211]
+ [<ffffffffa025a394>] sta_info_destroy_addr+0x3e/0x5e [mac80211]
+ [<ffffffffa02605d6>] ieee80211_set_disassoc+0x175/0x180 [mac80211]
+ [<ffffffffa026084d>] ieee80211_mgd_deauth+0x58/0x17e [mac80211]
+ [<ffffffff813a60c1>] ? __mutex_lock_common+0x37f/0x3a4
+ [<ffffffffa01e53f2>] ? cfg80211_netdev_notifier_call+0x28d/0x46d [cfg80211]
+ [<ffffffffa026786e>] ieee80211_deauth+0x1e/0x20 [mac80211]
+ [<ffffffffa01f47f9>] __cfg80211_mlme_deauth+0x130/0x13f [cfg80211]
+ [<ffffffffa01e53f2>] ? cfg80211_netdev_notifier_call+0x28d/0x46d [cfg80211]
+ [<ffffffff810732eb>] ? trace_hardirqs_off+0xd/0xf
+ [<ffffffffa01f7eee>] __cfg80211_disconnect+0x111/0x189 [cfg80211]
+ [<ffffffffa01e5433>] cfg80211_netdev_notifier_call+0x2ce/0x46d [cfg80211]
+ [<ffffffff813aa9ea>] notifier_call_chain+0x37/0x63
+ [<ffffffff81068c98>] raw_notifier_call_chain+0x14/0x16
+ [<ffffffff81322e97>] call_netdevice_notifiers+0x1b/0x1d
+ [<ffffffff8132386d>] dev_close+0x6a/0xa6
+ [<ffffffff8132395f>] rollback_registered_many+0xb6/0x2f4
+ [<ffffffff81323bb8>] unregister_netdevice_many+0x1b/0x66
+ [<ffffffffa026494f>] ieee80211_remove_interfaces+0xc5/0xd0 [mac80211]
+ [<ffffffffa02580a2>] ieee80211_unregister_hw+0x47/0xe8 [mac80211]
+ [<ffffffffa035290e>] ath9k_deinit_device+0x7a/0x9b [ath9k]
+ [<ffffffffa035bc26>] ath_pci_remove+0x38/0x76 [ath9k]
+ [<ffffffff8120940a>] pci_device_remove+0x2d/0x51
+ [<ffffffff8129d797>] __device_release_driver+0x7b/0xd1
+ [<ffffffff8129d885>] driver_detach+0x98/0xbe
+ [<ffffffff8129ca7a>] bus_remove_driver+0x94/0xb7
+ [<ffffffff8129ddd6>] driver_unregister+0x6c/0x74
+ [<ffffffff812096d2>] pci_unregister_driver+0x46/0xad
+ [<ffffffffa035bae1>] ath_pci_exit+0x15/0x17 [ath9k]
+ [<ffffffffa035e1a2>] ath9k_exit+0xe/0x2f [ath9k]
+ [<ffffffff8108050a>] sys_delete_module+0x1c7/0x236
+ [<ffffffff813a7df5>] ? retint_swapgs+0x13/0x1b
+ [<ffffffff810749b5>] ? trace_hardirqs_on_caller+0x119/0x144
+ [<ffffffff8109b9f6>] ? audit_syscall_entry+0x11e/0x14a
+ [<ffffffff81009bb2>] system_call_fastpath+0x16/0x1b
+wlan1: deauthenticating from 00:23:cd:e1:f9:b2 by local choice (reason=3)
+PM: Removing info for No Bus:wlan1
+cfg80211: Calling CRDA to update world regulatory domain
+PM: Removing info for No Bus:rfkill2
+PM: Removing info for No Bus:phy1
+ath9k 0000:16:00.0: PCI INT A disabled
+
+Signed-off-by: Ming Lei <tom.leiming@gmail.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/ath/ath9k/xmit.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -2210,7 +2210,7 @@ void ath_tx_node_cleanup(struct ath_soft
+ if (ATH_TXQ_SETUP(sc, i)) {
+ txq = &sc->tx.txq[i];
+
+- spin_lock(&txq->axq_lock);
++ spin_lock_bh(&txq->axq_lock);
+
+ list_for_each_entry_safe(ac,
+ ac_tmp, &txq->axq_acq, list) {
+@@ -2231,7 +2231,7 @@ void ath_tx_node_cleanup(struct ath_soft
+ }
+ }
+
+- spin_unlock(&txq->axq_lock);
++ spin_unlock_bh(&txq->axq_lock);
+ }
+ }
+ }
--- /dev/null
+From Larry.Finger@lwfinger.net Thu Mar 18 17:21:44 2010
+From: Larry Finger <Larry.Finger@lwfinger.net>
+Date: Wed, 10 Mar 2010 22:10:32 -0600
+Subject: b43: Workaround circular locking in hw-tkip key update callback
+To: Greg Kroah-Hartman <gregkh@suse.de>
+Cc: Michael Buesch <mb@bu3sch.de>
+Message-ID: <4b986d38.aOKVIPS3U9/aYsOP%Larry.Finger@lwfinger.net>
+
+From: Michael Buesch <mb@bu3sch.de>
+
+commit 96869a39399269a776a94812e9fff3d38b47d838 upstream
+
+The TKIP key update callback is called from the RX path, where the driver
+mutex is already locked. This results in a circular locking bug.
+Avoid this by removing the lock.
+
+Johannes noted that there is a separate bug: The callback still breaks on SDIO
+hardware, because SDIO hardware access needs to sleep, but we are not allowed
+to sleep in the callback due to mac80211's RCU locking.
+
+Signed-off-by: Michael Buesch <mb@bu3sch.de>
+Tested-by: Larry Finger <Larry.Finger@lwfinger.net>
+Reported-by: kecsa@kutfo.hit.bme.hu
+Cc: Johannes Berg <johannes@sipsolutions.net>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/b43/main.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/wireless/b43/main.c
++++ b/drivers/net/wireless/b43/main.c
+@@ -852,19 +852,19 @@ static void b43_op_update_tkip_key(struc
+ if (B43_WARN_ON(!modparam_hwtkip))
+ return;
+
+- mutex_lock(&wl->mutex);
+-
++ /* This is only called from the RX path through mac80211, where
++ * our mutex is already locked. */
++ B43_WARN_ON(!mutex_is_locked(&wl->mutex));
+ dev = wl->current_dev;
+- if (!dev || b43_status(dev) < B43_STAT_INITIALIZED)
+- goto out_unlock;
++ B43_WARN_ON(!dev || b43_status(dev) < B43_STAT_INITIALIZED);
+
+ keymac_write(dev, index, NULL); /* First zero out mac to avoid race */
+
+ rx_tkip_phase1_write(dev, index, iv32, phase1key);
++ /* only pairwise TKIP keys are supported right now */
++ if (WARN_ON(!sta))
++ return;
+ keymac_write(dev, index, addr);
+-
+-out_unlock:
+- mutex_unlock(&wl->mutex);
+ }
+
+ static void do_key_write(struct b43_wldev *dev,
--- /dev/null
+From 76595f79d76fbe6267a51b3a866a028d150f06d4 Mon Sep 17 00:00:00 2001
+From: Neil Horman <nhorman@tuxdriver.com>
+Date: Fri, 5 Mar 2010 13:44:16 -0800
+Subject: coredump: suppress uid comparison test if core output files are pipes
+
+From: Neil Horman <nhorman@tuxdriver.com>
+
+commit 76595f79d76fbe6267a51b3a866a028d150f06d4 upstream.
+
+Modify uid check in do_coredump so as to not apply it in the case of
+pipes.
+
+This just got noticed in testing. The end of do_coredump validates the
+uid of the inode for the created file against the uid of the crashing
+process to ensure that no one can pre-create a core file with different
+ownership and grab the information contained in the core when they
+shouldn't be able to. This causes failures when using pipes for a core
+dumps if the crashing process is not root, which is the uid of the pipe
+when it is created.
+
+The fix is simple. Since the check for matching uid's isn't relevant for
+pipes (a process can't create a pipe that the usermodehelper code will open
+anyway), we can just skip it in the event ispipe is non-zero
+
+Reverts a pipe-affecting change which was accidentally made in
+
+: commit c46f739dd39db3b07ab5deb4e3ec81e1c04a91af
+: Author: Ingo Molnar <mingo@elte.hu>
+: AuthorDate: Wed Nov 28 13:59:18 2007 +0100
+: Commit: Linus Torvalds <torvalds@woody.linux-foundation.org>
+: CommitDate: Wed Nov 28 10:58:01 2007 -0800
+:
+: vfs: coredumping fix
+
+Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
+Cc: Andi Kleen <andi@firstfloor.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: maximilian attems <max@stro.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/exec.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1913,8 +1913,9 @@ void do_coredump(long signr, int exit_co
+ /*
+ * Dont allow local users get cute and trick others to coredump
+ * into their pre-created files:
++ * Note, this is not relevant for pipes
+ */
+- if (inode->i_uid != current_fsuid())
++ if (!ispipe && (inode->i_uid != current_fsuid()))
+ goto close_fail;
+ if (!file->f_op)
+ goto close_fail;
--- /dev/null
+From 9cf00977da092096c7a983276dad8b3002d23a99 Mon Sep 17 00:00:00 2001
+From: Adam Jackson <ajax@redhat.com>
+Date: Thu, 3 Dec 2009 17:44:36 -0500
+Subject: drm/edid: Unify detailed block parsing between base and extension blocks
+
+From: Adam Jackson <ajax@redhat.com>
+
+commit 9cf00977da092096c7a983276dad8b3002d23a99 upstream.
+
+Also fix an embarrassing bug in standard timing subblock parsing that
+would result in an infinite loop.
+
+Signed-off-by: Adam Jackson <ajax@redhat.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Cc: maximilian attems <max@stro.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/drm_edid.c | 163 ++++++++++++++++-----------------------------
+ 1 file changed, 61 insertions(+), 102 deletions(-)
+
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -834,8 +834,57 @@ static int add_standard_modes(struct drm
+ return modes;
+ }
+
++static int add_detailed_modes(struct drm_connector *connector,
++ struct detailed_timing *timing,
++ struct edid *edid, u32 quirks, int preferred)
++{
++ int i, modes = 0;
++ struct detailed_non_pixel *data = &timing->data.other_data;
++ int timing_level = standard_timing_level(edid);
++ struct drm_display_mode *newmode;
++ struct drm_device *dev = connector->dev;
++
++ if (timing->pixel_clock) {
++ newmode = drm_mode_detailed(dev, edid, timing, quirks);
++ if (!newmode)
++ return 0;
++
++ if (preferred)
++ newmode->type |= DRM_MODE_TYPE_PREFERRED;
++
++ drm_mode_probed_add(connector, newmode);
++ return 1;
++ }
++
++ /* other timing types */
++ switch (data->type) {
++ case EDID_DETAIL_MONITOR_RANGE:
++ /* Get monitor range data */
++ break;
++ case EDID_DETAIL_STD_MODES:
++ /* Six modes per detailed section */
++ for (i = 0; i < 6; i++) {
++ struct std_timing *std;
++ struct drm_display_mode *newmode;
++
++ std = &data->data.timings[i];
++ newmode = drm_mode_std(dev, std, edid->revision,
++ timing_level);
++ if (newmode) {
++ drm_mode_probed_add(connector, newmode);
++ modes++;
++ }
++ }
++ break;
++ default:
++ break;
++ }
++
++ return modes;
++}
++
+ /**
+- * add_detailed_modes - get detailed mode info from EDID data
++ * add_detailed_info - get detailed mode info from EDID data
+ * @connector: attached connector
+ * @edid: EDID block to scan
+ * @quirks: quirks to apply
+@@ -846,67 +895,24 @@ static int add_standard_modes(struct drm
+ static int add_detailed_info(struct drm_connector *connector,
+ struct edid *edid, u32 quirks)
+ {
+- struct drm_device *dev = connector->dev;
+- int i, j, modes = 0;
+- int timing_level;
+-
+- timing_level = standard_timing_level(edid);
++ int i, modes = 0;
+
+ for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
+ struct detailed_timing *timing = &edid->detailed_timings[i];
+- struct detailed_non_pixel *data = &timing->data.other_data;
+- struct drm_display_mode *newmode;
++ int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+
+- /* X server check is version 1.1 or higher */
+- if (edid->version == 1 && edid->revision >= 1 &&
+- !timing->pixel_clock) {
+- /* Other timing or info */
+- switch (data->type) {
+- case EDID_DETAIL_MONITOR_SERIAL:
+- break;
+- case EDID_DETAIL_MONITOR_STRING:
+- break;
+- case EDID_DETAIL_MONITOR_RANGE:
+- /* Get monitor range data */
+- break;
+- case EDID_DETAIL_MONITOR_NAME:
+- break;
+- case EDID_DETAIL_MONITOR_CPDATA:
+- break;
+- case EDID_DETAIL_STD_MODES:
+- for (j = 0; j < 6; i++) {
+- struct std_timing *std;
+- struct drm_display_mode *newmode;
+-
+- std = &data->data.timings[j];
+- newmode = drm_mode_std(dev, std,
+- edid->revision,
+- timing_level);
+- if (newmode) {
+- drm_mode_probed_add(connector, newmode);
+- modes++;
+- }
+- }
+- break;
+- default:
+- break;
+- }
+- } else {
+- newmode = drm_mode_detailed(dev, edid, timing, quirks);
+- if (!newmode)
+- continue;
+-
+- /* First detailed mode is preferred */
+- if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING))
+- newmode->type |= DRM_MODE_TYPE_PREFERRED;
+- drm_mode_probed_add(connector, newmode);
++ /* In 1.0, only timings are allowed */
++ if (!timing->pixel_clock && edid->version == 1 &&
++ edid->revision == 0)
++ continue;
+
+- modes++;
+- }
++ modes += add_detailed_modes(connector, timing, edid, quirks,
++ preferred);
+ }
+
+ return modes;
+ }
++
+ /**
+ * add_detailed_mode_eedid - get detailed mode info from addtional timing
+ * EDID block
+@@ -920,12 +926,9 @@ static int add_detailed_info(struct drm_
+ static int add_detailed_info_eedid(struct drm_connector *connector,
+ struct edid *edid, u32 quirks)
+ {
+- struct drm_device *dev = connector->dev;
+- int i, j, modes = 0;
++ int i, modes = 0;
+ char *edid_ext = NULL;
+ struct detailed_timing *timing;
+- struct detailed_non_pixel *data;
+- struct drm_display_mode *newmode;
+ int edid_ext_num;
+ int start_offset, end_offset;
+ int timing_level;
+@@ -976,51 +979,7 @@ static int add_detailed_info_eedid(struc
+ for (i = start_offset; i < end_offset;
+ i += sizeof(struct detailed_timing)) {
+ timing = (struct detailed_timing *)(edid_ext + i);
+- data = &timing->data.other_data;
+- /* Detailed mode timing */
+- if (timing->pixel_clock) {
+- newmode = drm_mode_detailed(dev, edid, timing, quirks);
+- if (!newmode)
+- continue;
+-
+- drm_mode_probed_add(connector, newmode);
+-
+- modes++;
+- continue;
+- }
+-
+- /* Other timing or info */
+- switch (data->type) {
+- case EDID_DETAIL_MONITOR_SERIAL:
+- break;
+- case EDID_DETAIL_MONITOR_STRING:
+- break;
+- case EDID_DETAIL_MONITOR_RANGE:
+- /* Get monitor range data */
+- break;
+- case EDID_DETAIL_MONITOR_NAME:
+- break;
+- case EDID_DETAIL_MONITOR_CPDATA:
+- break;
+- case EDID_DETAIL_STD_MODES:
+- /* Five modes per detailed section */
+- for (j = 0; j < 5; i++) {
+- struct std_timing *std;
+- struct drm_display_mode *newmode;
+-
+- std = &data->data.timings[j];
+- newmode = drm_mode_std(dev, std,
+- edid->revision,
+- timing_level);
+- if (newmode) {
+- drm_mode_probed_add(connector, newmode);
+- modes++;
+- }
+- }
+- break;
+- default:
+- break;
+- }
++ modes += add_detailed_modes(connector, timing, edid, quirks, 0);
+ }
+
+ return modes;
--- /dev/null
+From 9e135a2e6266eba276f33c404a2478499bc07ff5 Mon Sep 17 00:00:00 2001
+From: Bruce Allan <bruce.w.allan@intel.com>
+Date: Tue, 1 Dec 2009 15:50:31 +0000
+Subject: e1000e: enable new 82567V-3 device
+
+From: Bruce Allan <bruce.w.allan@intel.com>
+
+commit 9e135a2e6266eba276f33c404a2478499bc07ff5 upstream.
+
+This new PCI device ID is for a new combination of MAC and PHY both of
+which already have supporting code in the driver, just not yet in this
+combination. During validation of the device, an intermittent issue was
+discovered with waking it from a suspended state which can be resolved with
+the pre-existing workaround to disable gigabit speed prior to suspending.
+
+Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/e1000e/hw.h | 1 +
+ drivers/net/e1000e/ich8lan.c | 1 +
+ drivers/net/e1000e/netdev.c | 1 +
+ 3 files changed, 3 insertions(+)
+
+--- a/drivers/net/e1000e/hw.h
++++ b/drivers/net/e1000e/hw.h
+@@ -356,6 +356,7 @@ enum e1e_registers {
+ #define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
+ #define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
+
++#define E1000_DEV_ID_ICH8_82567V_3 0x1501
+ #define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
+ #define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
+ #define E1000_DEV_ID_ICH8_IGP_C 0x104B
+--- a/drivers/net/e1000e/ich8lan.c
++++ b/drivers/net/e1000e/ich8lan.c
+@@ -3209,6 +3209,7 @@ void e1000e_disable_gig_wol_ich8lan(stru
+ u32 phy_ctrl;
+
+ switch (hw->mac.type) {
++ case e1000_ich8lan:
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ case e1000_pchlan:
+--- a/drivers/net/e1000e/netdev.c
++++ b/drivers/net/e1000e/netdev.c
+@@ -5360,6 +5360,7 @@ static struct pci_device_id e1000_pci_tb
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
--- /dev/null
+From 89f3f2199084a160a3a45fa6d9af235696321758 Mon Sep 17 00:00:00 2001
+From: Marcin Slusarz <marcin.slusarz@gmail.com>
+Date: Mon, 22 Feb 2010 12:44:22 -0800
+Subject: efifb: fix framebuffer handoff
+
+From: Marcin Slusarz <marcin.slusarz@gmail.com>
+
+commit 89f3f2199084a160a3a45fa6d9af235696321758 upstream.
+
+Commit 4410f3910947dcea8672280b3adecd53cec4e85e ("fbdev: add support for
+handoff from firmware to hw framebuffers") didn't add fb_destroy
+operation to efifb. Fix it and change aperture_size to match size
+passed to request_mem_region.
+
+Addresses http://bugzilla.kernel.org/show_bug.cgi?id=15151
+
+Signed-off-by: Marcin Slusarz <marcin.slusarz@gmail.com>
+Reported-by: Alex Zhavnerchik <alex.vizor@gmail.com>
+Tested-by: Alex Zhavnerchik <alex.vizor@gmail.com>
+Acked-by: Peter Jones <pjones@redhat.com>
+Cc: Huang Ying <ying.huang@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: maximilian attems <max@stro.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/video/efifb.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/video/efifb.c
++++ b/drivers/video/efifb.c
+@@ -161,8 +161,17 @@ static int efifb_setcolreg(unsigned regn
+ return 0;
+ }
+
++static void efifb_destroy(struct fb_info *info)
++{
++ if (info->screen_base)
++ iounmap(info->screen_base);
++ release_mem_region(info->aperture_base, info->aperture_size);
++ framebuffer_release(info);
++}
++
+ static struct fb_ops efifb_ops = {
+ .owner = THIS_MODULE,
++ .fb_destroy = efifb_destroy,
+ .fb_setcolreg = efifb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+@@ -281,7 +290,7 @@ static int __init efifb_probe(struct pla
+ info->par = NULL;
+
+ info->aperture_base = efifb_fix.smem_start;
+- info->aperture_size = size_total;
++ info->aperture_size = size_remap;
+
+ info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
+ if (!info->screen_base) {
--- /dev/null
+From 41d2e494937715d3150e5c75d01f0e75ae899337 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Nov 2009 17:05:44 +0100
+Subject: hrtimer: Tune hrtimer_interrupt hang logic
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 41d2e494937715d3150e5c75d01f0e75ae899337 upstream.
+
+The hrtimer_interrupt hang logic adjusts min_delta_ns based on the
+execution time of the hrtimer callbacks.
+
+This is error-prone for virtual machines, where a guest vcpu can be
+scheduled out during the execution of the callbacks (and the callbacks
+themselves can do operations that translate to blocking operations in
+the hypervisor), which in can lead to large min_delta_ns rendering the
+system unusable.
+
+Replace the current heuristics with something more reliable. Allow the
+interrupt code to try 3 times to catch up with the lost time. If that
+fails use the total time spent in the interrupt handler to defer the
+next timer interrupt so the system can catch up with other things
+which got delayed. Limit that deferment to 100ms.
+
+The retry events and the maximum time spent in the interrupt handler
+are recorded and exposed via /proc/timer_list
+
+Inspired by a patch from Marcelo.
+
+Reported-by: Michael Tokarev <mjt@tls.msk.ru>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Marcelo Tosatti <mtosatti@redhat.com>
+Cc: kvm@vger.kernel.org
+Cc: Jeremy Fitzhardinge <jeremy@goop.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/hrtimer.h | 13 ++++--
+ kernel/hrtimer.c | 96 +++++++++++++++++++++++++++--------------------
+ kernel/time/timer_list.c | 5 +-
+ 3 files changed, 70 insertions(+), 44 deletions(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -162,10 +162,11 @@ struct hrtimer_clock_base {
+ * @expires_next: absolute time of the next event which was scheduled
+ * via clock_set_next_event()
+ * @hres_active: State of high resolution mode
+- * @check_clocks: Indictator, when set evaluate time source and clock
+- * event devices whether high resolution mode can be
+- * activated.
+- * @nr_events: Total number of timer interrupt events
++ * @hang_detected: The last hrtimer interrupt detected a hang
++ * @nr_events: Total number of hrtimer interrupt events
++ * @nr_retries: Total number of hrtimer interrupt retries
++ * @nr_hangs: Total number of hrtimer interrupt hangs
++ * @max_hang_time: Maximum time spent in hrtimer_interrupt
+ */
+ struct hrtimer_cpu_base {
+ spinlock_t lock;
+@@ -173,7 +174,11 @@ struct hrtimer_cpu_base {
+ #ifdef CONFIG_HIGH_RES_TIMERS
+ ktime_t expires_next;
+ int hres_active;
++ int hang_detected;
+ unsigned long nr_events;
++ unsigned long nr_retries;
++ unsigned long nr_hangs;
++ ktime_t max_hang_time;
+ #endif
+ };
+
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -557,7 +557,7 @@ hrtimer_force_reprogram(struct hrtimer_c
+ static int hrtimer_reprogram(struct hrtimer *timer,
+ struct hrtimer_clock_base *base)
+ {
+- ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
++ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+ ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+ int res;
+
+@@ -582,7 +582,16 @@ static int hrtimer_reprogram(struct hrti
+ if (expires.tv64 < 0)
+ return -ETIME;
+
+- if (expires.tv64 >= expires_next->tv64)
++ if (expires.tv64 >= cpu_base->expires_next.tv64)
++ return 0;
++
++ /*
++ * If a hang was detected in the last timer interrupt then we
++ * do not schedule a timer which is earlier than the expiry
++ * which we enforced in the hang detection. We want the system
++ * to make progress.
++ */
++ if (cpu_base->hang_detected)
+ return 0;
+
+ /*
+@@ -590,7 +599,7 @@ static int hrtimer_reprogram(struct hrti
+ */
+ res = tick_program_event(expires, 0);
+ if (!IS_ERR_VALUE(res))
+- *expires_next = expires;
++ cpu_base->expires_next = expires;
+ return res;
+ }
+
+@@ -1217,29 +1226,6 @@ static void __run_hrtimer(struct hrtimer
+
+ #ifdef CONFIG_HIGH_RES_TIMERS
+
+-static int force_clock_reprogram;
+-
+-/*
+- * After 5 iteration's attempts, we consider that hrtimer_interrupt()
+- * is hanging, which could happen with something that slows the interrupt
+- * such as the tracing. Then we force the clock reprogramming for each future
+- * hrtimer interrupts to avoid infinite loops and use the min_delta_ns
+- * threshold that we will overwrite.
+- * The next tick event will be scheduled to 3 times we currently spend on
+- * hrtimer_interrupt(). This gives a good compromise, the cpus will spend
+- * 1/4 of their time to process the hrtimer interrupts. This is enough to
+- * let it running without serious starvation.
+- */
+-
+-static inline void
+-hrtimer_interrupt_hanging(struct clock_event_device *dev,
+- ktime_t try_time)
+-{
+- force_clock_reprogram = 1;
+- dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
+- printk(KERN_WARNING "hrtimer: interrupt too slow, "
+- "forcing clock min delta to %lu ns\n", dev->min_delta_ns);
+-}
+ /*
+ * High resolution timer interrupt
+ * Called with interrupts disabled
+@@ -1248,21 +1234,15 @@ void hrtimer_interrupt(struct clock_even
+ {
+ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+ struct hrtimer_clock_base *base;
+- ktime_t expires_next, now;
+- int nr_retries = 0;
+- int i;
++ ktime_t expires_next, now, entry_time, delta;
++ int i, retries = 0;
+
+ BUG_ON(!cpu_base->hres_active);
+ cpu_base->nr_events++;
+ dev->next_event.tv64 = KTIME_MAX;
+
+- retry:
+- /* 5 retries is enough to notice a hang */
+- if (!(++nr_retries % 5))
+- hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));
+-
+- now = ktime_get();
+-
++ entry_time = now = ktime_get();
++retry:
+ expires_next.tv64 = KTIME_MAX;
+
+ spin_lock(&cpu_base->lock);
+@@ -1324,10 +1304,48 @@ void hrtimer_interrupt(struct clock_even
+ spin_unlock(&cpu_base->lock);
+
+ /* Reprogramming necessary ? */
+- if (expires_next.tv64 != KTIME_MAX) {
+- if (tick_program_event(expires_next, force_clock_reprogram))
+- goto retry;
++ if (expires_next.tv64 == KTIME_MAX ||
++ !tick_program_event(expires_next, 0)) {
++ cpu_base->hang_detected = 0;
++ return;
+ }
++
++ /*
++ * The next timer was already expired due to:
++ * - tracing
++ * - long lasting callbacks
++ * - being scheduled away when running in a VM
++ *
++ * We need to prevent that we loop forever in the hrtimer
++ * interrupt routine. We give it 3 attempts to avoid
++ * overreacting on some spurious event.
++ */
++ now = ktime_get();
++ cpu_base->nr_retries++;
++ if (++retries < 3)
++ goto retry;
++ /*
++ * Give the system a chance to do something else than looping
++ * here. We stored the entry time, so we know exactly how long
++ * we spent here. We schedule the next event this amount of
++ * time away.
++ */
++ cpu_base->nr_hangs++;
++ cpu_base->hang_detected = 1;
++ delta = ktime_sub(now, entry_time);
++ if (delta.tv64 > cpu_base->max_hang_time.tv64)
++ cpu_base->max_hang_time = delta;
++ /*
++ * Limit it to a sensible value as we enforce a longer
++ * delay. Give the CPU at least 100ms to catch up.
++ */
++ if (delta.tv64 > 100 * NSEC_PER_MSEC)
++ expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
++ else
++ expires_next = ktime_add(now, delta);
++ tick_program_event(expires_next, 1);
++ printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
++ ktime_to_ns(delta));
+ }
+
+ /*
+--- a/kernel/time/timer_list.c
++++ b/kernel/time/timer_list.c
+@@ -150,6 +150,9 @@ static void print_cpu(struct seq_file *m
+ P_ns(expires_next);
+ P(hres_active);
+ P(nr_events);
++ P(nr_retries);
++ P(nr_hangs);
++ P_ns(max_hang_time);
+ #endif
+ #undef P
+ #undef P_ns
+@@ -252,7 +255,7 @@ static int timer_list_show(struct seq_fi
+ u64 now = ktime_to_ns(ktime_get());
+ int cpu;
+
+- SEQ_printf(m, "Timer List Version: v0.4\n");
++ SEQ_printf(m, "Timer List Version: v0.5\n");
+ SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
+ SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
+
--- /dev/null
+From 232f5693e5c9483e222528ef81979e42ea2f2908 Mon Sep 17 00:00:00 2001
+From: Ping Cheng <pingc@wacom.com>
+Date: Tue, 15 Dec 2009 00:35:24 -0800
+Subject: Input: wacom - ensure the device is initialized properly upon resume
+
+From: Ping Cheng <pingc@wacom.com>
+
+commit 232f5693e5c9483e222528ef81979e42ea2f2908 upstream.
+
+Call wacom_query_tablet_data() from wacom_resume() so the device will be
+switched to Wacom mode upon resume. Devices that require this are: regular
+tablets and two finger touch devices.
+
+Signed-off-by: Ping Cheng <pingc@wacom.com>
+Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/input/tablet/wacom.h | 7 ++++---
+ drivers/input/tablet/wacom_sys.c | 7 ++++---
+ 2 files changed, 8 insertions(+), 6 deletions(-)
+
+--- a/drivers/input/tablet/wacom.h
++++ b/drivers/input/tablet/wacom.h
+@@ -1,7 +1,7 @@
+ /*
+ * drivers/input/tablet/wacom.h
+ *
+- * USB Wacom Graphire and Wacom Intuos tablet support
++ * USB Wacom tablet support
+ *
+ * Copyright (c) 2000-2004 Vojtech Pavlik <vojtech@ucw.cz>
+ * Copyright (c) 2000 Andreas Bach Aaen <abach@stofanet.dk>
+@@ -69,6 +69,7 @@
+ * v1.49 (pc) - Added support for USB Tablet PC (0x90, 0x93, and 0x9A)
+ * v1.50 (pc) - Fixed a TabletPC touch bug in 2.6.28
+ * v1.51 (pc) - Added support for Intuos4
++ * v1.52 (pc) - Query Wacom data upon system resume
+ */
+
+ /*
+@@ -89,9 +90,9 @@
+ /*
+ * Version Information
+ */
+-#define DRIVER_VERSION "v1.51"
++#define DRIVER_VERSION "v1.52"
+ #define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>"
+-#define DRIVER_DESC "USB Wacom Graphire and Wacom Intuos tablet driver"
++#define DRIVER_DESC "USB Wacom tablet driver"
+ #define DRIVER_LICENSE "GPL"
+
+ MODULE_AUTHOR(DRIVER_AUTHOR);
+--- a/drivers/input/tablet/wacom_sys.c
++++ b/drivers/input/tablet/wacom_sys.c
+@@ -1,7 +1,7 @@
+ /*
+ * drivers/input/tablet/wacom_sys.c
+ *
+- * USB Wacom Graphire and Wacom Intuos tablet support - system specific code
++ * USB Wacom tablet support - system specific code
+ */
+
+ /*
+@@ -562,9 +562,10 @@ static int wacom_resume(struct usb_inter
+ int rv;
+
+ mutex_lock(&wacom->lock);
+- if (wacom->open)
++ if (wacom->open) {
+ rv = usb_submit_urb(wacom->irq, GFP_NOIO);
+- else
++ wacom_query_tablet_data(intf);
++ } else
+ rv = 0;
+ mutex_unlock(&wacom->lock);
+
--- /dev/null
+From 74757d49016a8b06ca028196886641d7aeb78de5 Mon Sep 17 00:00:00 2001
+From: Don Skidmore <donald.c.skidmore@intel.com>
+Date: Tue, 8 Dec 2009 07:22:23 +0000
+Subject: ixgbe: add support for 82599 KR device 0x1517
+
+From: Don Skidmore <donald.c.skidmore@intel.com>
+
+commit 74757d49016a8b06ca028196886641d7aeb78de5 upstream.
+
+Signed-off-by: Don Skidmore <donald.c.skidmore@intel.com>
+Acked-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/ixgbe/ixgbe_82599.c | 1 +
+ drivers/net/ixgbe/ixgbe_main.c | 2 ++
+ drivers/net/ixgbe/ixgbe_type.h | 1 +
+ 3 files changed, 4 insertions(+)
+
+--- a/drivers/net/ixgbe/ixgbe_82599.c
++++ b/drivers/net/ixgbe/ixgbe_82599.c
+@@ -332,6 +332,7 @@ static enum ixgbe_media_type ixgbe_get_m
+ case IXGBE_DEV_ID_82599_KX4:
+ case IXGBE_DEV_ID_82599_KX4_MEZZ:
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
++ case IXGBE_DEV_ID_82599_KR:
+ case IXGBE_DEV_ID_82599_XAUI_LOM:
+ /* Default device ID is mezzanine card KX/KX4 */
+ media_type = ixgbe_media_type_backplane;
+--- a/drivers/net/ixgbe/ixgbe_main.c
++++ b/drivers/net/ixgbe/ixgbe_main.c
+@@ -96,6 +96,8 @@ static struct pci_device_id ixgbe_pci_tb
+ board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
+ board_82599 },
++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
++ board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
+ board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
+--- a/drivers/net/ixgbe/ixgbe_type.h
++++ b/drivers/net/ixgbe/ixgbe_type.h
+@@ -50,6 +50,7 @@
+ #define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
+ #define IXGBE_DEV_ID_82599_KX4 0x10F7
+ #define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
++#define IXGBE_DEV_ID_82599_KR 0x1517
+ #define IXGBE_DEV_ID_82599_CX4 0x10F9
+ #define IXGBE_DEV_ID_82599_SFP 0x10FB
+ #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
--- /dev/null
+From 4294a8eedb17bbc45e1e7447c2a4d05332943248 Mon Sep 17 00:00:00 2001
+From: André Goddard Rosa <andre.goddard@gmail.com>
+Date: Tue, 23 Feb 2010 04:04:28 -0300
+Subject: mqueue: fix mq_open() file descriptor leak on user-space processes
+
+From: André Goddard Rosa <andre.goddard@gmail.com>
+
+commit 4294a8eedb17bbc45e1e7447c2a4d05332943248 upstream.
+
+We leak fd on lookup_one_len() failure
+
+Signed-off-by: André Goddard Rosa <andre.goddard@gmail.com>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ ipc/mqueue.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
+@@ -706,7 +706,7 @@ SYSCALL_DEFINE4(mq_open, const char __us
+ dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
+ if (IS_ERR(dentry)) {
+ error = PTR_ERR(dentry);
+- goto out_err;
++ goto out_putfd;
+ }
+ mntget(ipc_ns->mq_mnt);
+
+@@ -744,7 +744,6 @@ out:
+ mntput(ipc_ns->mq_mnt);
+ out_putfd:
+ put_unused_fd(fd);
+-out_err:
+ fd = error;
+ out_upsem:
+ mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
--- /dev/null
+From 7ec4ad0125db0222e397508c190b01c8f2b5f7cd Mon Sep 17 00:00:00 2001
+From: Srinivas <satyasrinivasp@hcl.in>
+Date: Tue, 24 Nov 2009 20:07:39 +0530
+Subject: [SCSI] mvsas: add support for Adaptec ASC-1045/1405 SAS/SATA HBA
+
+From: Srinivas <satyasrinivasp@hcl.in>
+
+commit 7ec4ad0125db0222e397508c190b01c8f2b5f7cd upstream.
+
+This is support for Adaptec ASC-1045/1405 SAS/SATA HBA on mvsas, which
+is based on Marvell 88SE6440 chipset.
+
+Signed-off-by: Srinivas <satyasrinivasp@hcl.in>
+Cc: Andy Yan <ayan@marvell.com>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Cc: Thomas Voegtle <tv@lio96.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/scsi/mvsas/mv_init.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/scsi/mvsas/mv_init.c
++++ b/drivers/scsi/mvsas/mv_init.c
+@@ -657,6 +657,7 @@ static struct pci_device_id __devinitdat
+ { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
+ { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
+ { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
++ { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
+
+ { } /* terminate list */
+ };
--- /dev/null
+From 7a0deb6bcda98c2a764cb87f1441eef920fd3663 Mon Sep 17 00:00:00 2001
+From: Alexander Duyck <alexander.h.duyck@intel.com>
+Date: Fri, 19 Feb 2010 17:57:46 +0000
+Subject: pci: add support for 82576NS serdes to existing SR-IOV quirk
+
+From: Alexander Duyck <alexander.h.duyck@intel.com>
+
+commit 7a0deb6bcda98c2a764cb87f1441eef920fd3663 upstream.
+
+This patch adds support for the 82576NS Serdes adapter to the existing pci
+quirk for 82576 parts.
+
+Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/pci/quirks.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -2513,6 +2513,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_I
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1518, quirk_i82576_sriov);
+
+ #endif /* CONFIG_PCI_IOV */
+
--- /dev/null
+From 65a80b4c61f5b5f6eb0f5669c8fb120893bfb388 Mon Sep 17 00:00:00 2001
+From: Hisashi Hifumi <hifumi.hisashi@oss.ntt.co.jp>
+Date: Thu, 17 Dec 2009 15:27:26 -0800
+Subject: readahead: add blk_run_backing_dev
+
+From: Hisashi Hifumi <hifumi.hisashi@oss.ntt.co.jp>
+
+commit 65a80b4c61f5b5f6eb0f5669c8fb120893bfb388 upstream.
+
+I added blk_run_backing_dev on page_cache_async_readahead so readahead I/O
+is unplugged to improve throughput, especially in a RAID environment.
+
+The normal case is, if page N become uptodate at time T(N), then T(N) <=
+T(N+1) holds. With RAID (and NFS to some degree), there is no strict
+ordering, the data arrival time depends on runtime status of individual
+disks, which breaks that formula. So in do_generic_file_read(), just
+after submitting the async readahead IO request, the current page may well
+be uptodate, so the page won't be locked, and the block device won't be
+implicitly unplugged:
+
+ if (PageReadahead(page))
+ page_cache_async_readahead()
+ if (!PageUptodate(page))
+ goto page_not_up_to_date;
+ //...
+page_not_up_to_date:
+ lock_page_killable(page);
+
+Therefore explicit unplugging can help.
+
+Following is the test result with dd.
+
+#dd if=testdir/testfile of=/dev/null bs=16384
+
+-2.6.30-rc6
+1048576+0 records in
+1048576+0 records out
+17179869184 bytes (17 GB) copied, 224.182 seconds, 76.6 MB/s
+
+-2.6.30-rc6-patched
+1048576+0 records in
+1048576+0 records out
+17179869184 bytes (17 GB) copied, 206.465 seconds, 83.2 MB/s
+
+(7Disks RAID-0 Array)
+
+-2.6.30-rc6
+1054976+0 records in
+1054976+0 records out
+17284726784 bytes (17 GB) copied, 212.233 seconds, 81.4 MB/s
+
+-2.6.30-rc6-patched
+1054976+0 records out
+17284726784 bytes (17 GB) copied, 198.878 seconds, 86.9 MB/s
+
+(7Disks RAID-5 Array)
+
+The patch was found to improve performance with the SCST scsi target
+driver. See
+http://sourceforge.net/mailarchive/forum.php?thread_name=a0272b440906030714g67eabc5k8f847fb1e538cc62%40mail.gmail.com&forum_name=scst-devel
+
+[akpm@linux-foundation.org: unbust comment layout]
+[akpm@linux-foundation.org: "fix" CONFIG_BLOCK=n]
+Signed-off-by: Hisashi Hifumi <hifumi.hisashi@oss.ntt.co.jp>
+Acked-by: Wu Fengguang <fengguang.wu@intel.com>
+Cc: Jens Axboe <jens.axboe@oracle.com>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Tested-by: Ronald <intercommit@gmail.com>
+Cc: Bart Van Assche <bart.vanassche@gmail.com>
+Cc: Vladislav Bolkhovitin <vst@vlnb.net>
+Cc: Randy Dunlap <randy.dunlap@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/readahead.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -553,5 +553,17 @@ page_cache_async_readahead(struct addres
+
+ /* do read-ahead */
+ ondemand_readahead(mapping, ra, filp, true, offset, req_size);
++
++#ifdef CONFIG_BLOCK
++ /*
++ * Normally the current page is !uptodate and lock_page() will be
++ * immediately called to implicitly unplug the device. However this
++ * is not always true for RAID conifgurations, where data arrives
++ * not strictly in their submission order. In this case we need to
++ * explicitly kick off the IO.
++ */
++ if (PageUptodate(page))
++ blk_run_backing_dev(mapping->backing_dev_info, NULL);
++#endif
+ }
+ EXPORT_SYMBOL_GPL(page_cache_async_readahead);
--- /dev/null
+From peterz@infradead.org Thu Mar 18 17:22:59 2010
+From: Suresh Siddha <suresh.b.siddha@intel.com>
+Date: Thu, 11 Mar 2010 09:45:44 +0100
+Subject: sched: Fix SCHED_MC regression caused by change in sched cpu_power
+To: Greg KH <greg@kroah.com>
+Cc: Ma Ling <ling.ma@intel.com>, Zhang@kroah.com, Yanmin <yanmin_zhang@linux.intel.com>, Suresh Siddha <suresh.b.siddha@intel.com>, Ingo Molnar <mingo@elte.hu>, stable@kernel.org
+Message-ID: <1268297144.5279.940.camel@twins>
+
+From: Suresh Siddha <suresh.b.siddha@intel.com>
+
+commit dd5feea14a7de4edbd9f36db1a2db785de91b88d upstream
+
+On platforms like dual socket quad-core platform, the scheduler load
+balancer is not detecting the load imbalances in certain scenarios. This
+is leading to scenarios like where one socket is completely busy (with
+all the 4 cores running with 4 tasks) and leaving another socket
+completely idle. This causes performance issues as those 4 tasks share
+the memory controller, last-level cache bandwidth etc. Also we won't be
+taking advantage of turbo-mode as much as we would like, etc.
+
+Some of the comparisons in the scheduler load balancing code are
+comparing the "weighted cpu load that is scaled wrt sched_group's
+cpu_power" with the "weighted average load per task that is not scaled
+wrt sched_group's cpu_power". While this has probably been broken for a
+longer time (for multi socket numa nodes etc), the problem got aggravated
+via this recent change:
+
+|
+| commit f93e65c186ab3c05ce2068733ca10e34fd00125e
+| Author: Peter Zijlstra <a.p.zijlstra@chello.nl>
+| Date: Tue Sep 1 10:34:32 2009 +0200
+|
+| sched: Restore __cpu_power to a straight sum of power
+|
+
+Also with this change, the sched group cpu power alone no longer reflects
+the group capacity that is needed to implement MC, MT performance
+(default) and power-savings (user-selectable) policies.
+
+We need to use the computed group capacity (sgs.group_capacity, that is
+computed using the SD_PREFER_SIBLING logic in update_sd_lb_stats()) to
+find out if the group with the max load is above its capacity and how
+much load to move etc.
+
+Reported-by: Ma Ling <ling.ma@intel.com>
+Initial-Analysis-by: Zhang, Yanmin <yanmin_zhang@linux.intel.com>
+Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
+[ -v2: build fix ]
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+LKML-Reference: <1266970432.11588.22.camel@sbs-t61.sc.intel.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+
+---
+ kernel/sched.c | 76 ++++++++++++++++++++++++++++++++-------------------------
+ 1 file changed, 43 insertions(+), 33 deletions(-)
+
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -3402,6 +3402,7 @@ struct sd_lb_stats {
+ unsigned long max_load;
+ unsigned long busiest_load_per_task;
+ unsigned long busiest_nr_running;
++ unsigned long busiest_group_capacity;
+
+ int group_imb; /* Is there imbalance in this sd */
+ #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+@@ -3721,8 +3722,7 @@ static inline void update_sg_lb_stats(st
+ unsigned long load, max_cpu_load, min_cpu_load;
+ int i;
+ unsigned int balance_cpu = -1, first_idle_cpu = 0;
+- unsigned long sum_avg_load_per_task;
+- unsigned long avg_load_per_task;
++ unsigned long avg_load_per_task = 0;
+
+ if (local_group) {
+ balance_cpu = group_first_cpu(group);
+@@ -3731,7 +3731,6 @@ static inline void update_sg_lb_stats(st
+ }
+
+ /* Tally up the load of all CPUs in the group */
+- sum_avg_load_per_task = avg_load_per_task = 0;
+ max_cpu_load = 0;
+ min_cpu_load = ~0UL;
+
+@@ -3761,7 +3760,6 @@ static inline void update_sg_lb_stats(st
+ sgs->sum_nr_running += rq->nr_running;
+ sgs->sum_weighted_load += weighted_cpuload(i);
+
+- sum_avg_load_per_task += cpu_avg_load_per_task(i);
+ }
+
+ /*
+@@ -3779,7 +3777,6 @@ static inline void update_sg_lb_stats(st
+ /* Adjust by relative CPU power of the group */
+ sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
+
+-
+ /*
+ * Consider the group unbalanced when the imbalance is larger
+ * than the average weight of two tasks.
+@@ -3789,8 +3786,8 @@ static inline void update_sg_lb_stats(st
+ * normalized nr_running number somewhere that negates
+ * the hierarchy?
+ */
+- avg_load_per_task = (sum_avg_load_per_task * SCHED_LOAD_SCALE) /
+- group->cpu_power;
++ if (sgs->sum_nr_running)
++ avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
+
+ if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
+ sgs->group_imb = 1;
+@@ -3859,6 +3856,7 @@ static inline void update_sd_lb_stats(st
+ sds->max_load = sgs.avg_load;
+ sds->busiest = group;
+ sds->busiest_nr_running = sgs.sum_nr_running;
++ sds->busiest_group_capacity = sgs.group_capacity;
+ sds->busiest_load_per_task = sgs.sum_weighted_load;
+ sds->group_imb = sgs.group_imb;
+ }
+@@ -3881,6 +3879,7 @@ static inline void fix_small_imbalance(s
+ {
+ unsigned long tmp, pwr_now = 0, pwr_move = 0;
+ unsigned int imbn = 2;
++ unsigned long scaled_busy_load_per_task;
+
+ if (sds->this_nr_running) {
+ sds->this_load_per_task /= sds->this_nr_running;
+@@ -3891,8 +3890,12 @@ static inline void fix_small_imbalance(s
+ sds->this_load_per_task =
+ cpu_avg_load_per_task(this_cpu);
+
+- if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
+- sds->busiest_load_per_task * imbn) {
++ scaled_busy_load_per_task = sds->busiest_load_per_task
++ * SCHED_LOAD_SCALE;
++ scaled_busy_load_per_task /= sds->busiest->cpu_power;
++
++ if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
++ (scaled_busy_load_per_task * imbn)) {
+ *imbalance = sds->busiest_load_per_task;
+ return;
+ }
+@@ -3943,7 +3946,14 @@ static inline void fix_small_imbalance(s
+ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
+ unsigned long *imbalance)
+ {
+- unsigned long max_pull;
++ unsigned long max_pull, load_above_capacity = ~0UL;
++
++ sds->busiest_load_per_task /= sds->busiest_nr_running;
++ if (sds->group_imb) {
++ sds->busiest_load_per_task =
++ min(sds->busiest_load_per_task, sds->avg_load);
++ }
++
+ /*
+ * In the presence of smp nice balancing, certain scenarios can have
+ * max load less than avg load(as we skip the groups at or below
+@@ -3954,9 +3964,29 @@ static inline void calculate_imbalance(s
+ return fix_small_imbalance(sds, this_cpu, imbalance);
+ }
+
+- /* Don't want to pull so many tasks that a group would go idle */
+- max_pull = min(sds->max_load - sds->avg_load,
+- sds->max_load - sds->busiest_load_per_task);
++ if (!sds->group_imb) {
++ /*
++ * Don't want to pull so many tasks that a group would go idle.
++ */
++ load_above_capacity = (sds->busiest_nr_running -
++ sds->busiest_group_capacity);
++
++ load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_LOAD_SCALE);
++
++ load_above_capacity /= sds->busiest->cpu_power;
++ }
++
++ /*
++ * We're trying to get all the cpus to the average_load, so we don't
++ * want to push ourselves above the average load, nor do we wish to
++ * reduce the max loaded cpu below the average load. At the same time,
++ * we also don't want to reduce the group load below the group capacity
++ * (so that we can implement power-savings policies etc). Thus we look
++ * for the minimum possible imbalance.
++ * Be careful of negative numbers as they'll appear as very large values
++ * with unsigned longs.
++ */
++ max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
+
+ /* How much load to actually move to equalise the imbalance */
+ *imbalance = min(max_pull * sds->busiest->cpu_power,
+@@ -4024,7 +4054,6 @@ find_busiest_group(struct sched_domain *
+ * 4) This group is more busy than the avg busieness at this
+ * sched_domain.
+ * 5) The imbalance is within the specified limit.
+- * 6) Any rebalance would lead to ping-pong
+ */
+ if (balance && !(*balance))
+ goto ret;
+@@ -4043,25 +4072,6 @@ find_busiest_group(struct sched_domain *
+ if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
+ goto out_balanced;
+
+- sds.busiest_load_per_task /= sds.busiest_nr_running;
+- if (sds.group_imb)
+- sds.busiest_load_per_task =
+- min(sds.busiest_load_per_task, sds.avg_load);
+-
+- /*
+- * We're trying to get all the cpus to the average_load, so we don't
+- * want to push ourselves above the average load, nor do we wish to
+- * reduce the max loaded cpu below the average load, as either of these
+- * actions would just result in more rebalancing later, and ping-pong
+- * tasks around. Thus we look for the minimum possible imbalance.
+- * Negative imbalances (*we* are more loaded than anyone else) will
+- * be counted as no imbalance for these purposes -- we can't fix that
+- * by pulling tasks to us. Be careful of negative numbers as they'll
+- * appear as very large values with unsigned longs.
+- */
+- if (sds.max_load <= sds.busiest_load_per_task)
+- goto out_balanced;
+-
+ /* Looks like there is an imbalance. Compute it */
+ calculate_imbalance(&sds, this_cpu, imbalance);
+ return sds.busiest;
--- /dev/null
+From 933b0618d8b2a59c7a0742e43836544e02f1e9bd Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Wed, 16 Dec 2009 18:04:31 +0100
+Subject: sched: Mark boot-cpu active before smp_init()
+
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+
+commit 933b0618d8b2a59c7a0742e43836544e02f1e9bd upstream.
+
+A UP machine has 1 active cpu, not having the boot-cpu in the
+active map when starting the scheduler confuses things.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Mike Galbraith <efault@gmx.de>
+LKML-Reference: <20091216170517.423469527@chello.nl>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Cc: Christoph Biedl <linux-kernel.bfrz@manchmal.in-ulm.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ init/main.c | 7 +------
+ 1 file changed, 1 insertion(+), 6 deletions(-)
+
+--- a/init/main.c
++++ b/init/main.c
+@@ -369,12 +369,6 @@ static void __init smp_init(void)
+ {
+ unsigned int cpu;
+
+- /*
+- * Set up the current CPU as possible to migrate to.
+- * The other ones will be done by cpu_up/cpu_down()
+- */
+- set_cpu_active(smp_processor_id(), true);
+-
+ /* FIXME: This should be done in userspace --RR */
+ for_each_present_cpu(cpu) {
+ if (num_online_cpus() >= setup_max_cpus)
+@@ -486,6 +480,7 @@ static void __init boot_cpu_init(void)
+ int cpu = smp_processor_id();
+ /* Mark the boot cpu "present", "online" etc for SMP and UP case */
+ set_cpu_online(cpu, true);
++ set_cpu_active(cpu, true);
+ set_cpu_present(cpu, true);
+ set_cpu_possible(cpu, true);
+ }
tracing-disable-buffer-switching-when-starting-or-stopping-trace.patch
tracing-do-not-record-user-stack-trace-from-nmi-context.patch
pci-unconditionally-clear-aer-uncorr-status-register-during-cleanup.patch
+drm-edid-unify-detailed-block-parsing-between-base-and-extension-blocks.patch
+efifb-fix-framebuffer-handoff.patch
+coredump-suppress-uid-comparison-test-if-core-output-files-are-pipes.patch
+v4l-dvb-13961-em28xx-dvb-fix-memleak-in-dvb_fini.patch
+hrtimer-tune-hrtimer_interrupt-hang-logic.patch
+x86-apic-don-t-use-logical-flat-mode-when-cpu-hotplug-may-exceed-8-cpus.patch
+mvsas-add-support-for-adaptec-asc-1045-1405-sas-sata-hba.patch
+pci-add-support-for-82576ns-serdes-to-existing-sr-iov-quirk.patch
+sched-mark-boot-cpu-active-before-smp_init.patch
+sparc64-make-prom-entry-spinlock-nmi-safe.patch
+sysctl-require-cap_sys_rawio-to-set-mmap_min_addr.patch
+e1000e-enable-new-82567v-3-device.patch
+ixgbe-add-support-for-82599-kr-device-0x1517.patch
+input-wacom-ensure-the-device-is-initialized-properly-upon-resume.patch
+ath9k-fix-lockdep-warning-when-unloading-module.patch
+mqueue-fix-mq_open-file-descriptor-leak-on-user-space-processes.patch
+virtio-fix-out-of-range-array-access.patch
+x86-set_personality_ia32-misses-force_personality32.patch
+b43-workaround-circular-locking-in-hw-tkip-key-update-callback.patch
+sched-fix-sched_mc-regression-caused-by-change-in-sched-cpu_power.patch
+readahead-add-blk_run_backing_dev.patch
+thinkpad-acpi-lock-down-video-output-state-access.patch
--- /dev/null
+From c682420e87cdd8db0212e29eb70c325cdfc0860c Mon Sep 17 00:00:00 2001
+From: David S. Miller <davem@davemloft.net>
+Date: Wed, 3 Mar 2010 09:06:03 -0800
+Subject: [PATCH] sparc64: Make prom entry spinlock NMI safe.
+
+From: David S. Miller <davem@davemloft.net>
+
+[ Upstream commit 8a4fd1e4922413cfdfa6c51a59efb720d904a5eb ]
+
+If we do something like try to print to the OF console from an NMI
+while we're already in OpenFirmware, we'll deadlock on the spinlock.
+
+Use a raw spinlock and disable NMIs when we take it.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/sparc/prom/p1275.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/arch/sparc/prom/p1275.c
++++ b/arch/sparc/prom/p1275.c
+@@ -32,8 +32,7 @@ extern void prom_cif_interface(void);
+ extern void prom_cif_callback(void);
+
+ /*
+- * This provides SMP safety on the p1275buf. prom_callback() drops this lock
+- * to allow recursuve acquisition.
++ * This provides SMP safety on the p1275buf.
+ */
+ DEFINE_SPINLOCK(prom_entry_lock);
+
+@@ -47,7 +46,9 @@ long p1275_cmd(const char *service, long
+
+ p = p1275buf.prom_buffer;
+
+- spin_lock_irqsave(&prom_entry_lock, flags);
++ raw_local_save_flags(flags);
++ raw_local_irq_restore(PIL_NMI);
++ spin_lock(&prom_entry_lock);
+
+ p1275buf.prom_args[0] = (unsigned long)p; /* service */
+ strcpy (p, service);
+@@ -139,7 +140,8 @@ long p1275_cmd(const char *service, long
+ va_end(list);
+ x = p1275buf.prom_args [nargs + 3];
+
+- spin_unlock_irqrestore(&prom_entry_lock, flags);
++ spin_unlock(&prom_entry_lock);
++ raw_local_irq_restore(flags);
+
+ return x;
+ }
--- /dev/null
+From 0e1a6ef2dea88101b056b6d9984f3325c5efced3 Mon Sep 17 00:00:00 2001
+From: Kees Cook <kees.cook@canonical.com>
+Date: Sun, 8 Nov 2009 09:37:00 -0800
+Subject: sysctl: require CAP_SYS_RAWIO to set mmap_min_addr
+
+From: Kees Cook <kees.cook@canonical.com>
+
+commit 0e1a6ef2dea88101b056b6d9984f3325c5efced3 upstream.
+
+Currently the mmap_min_addr value can only be bypassed during mmap when
+the task has CAP_SYS_RAWIO. However, the mmap_min_addr sysctl value itself
+can be adjusted to 0 if euid == 0, allowing a bypass without CAP_SYS_RAWIO.
+This patch adds a check for the capability before allowing mmap_min_addr to
+be changed.
+
+Signed-off-by: Kees Cook <kees.cook@canonical.com>
+Acked-by: Serge Hallyn <serue@us.ibm.com>
+Signed-off-by: James Morris <jmorris@namei.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ security/min_addr.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/security/min_addr.c
++++ b/security/min_addr.c
+@@ -33,6 +33,9 @@ int mmap_min_addr_handler(struct ctl_tab
+ {
+ int ret;
+
++ if (!capable(CAP_SYS_RAWIO))
++ return -EPERM;
++
+ ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+
+ update_mmap_min_addr();
--- /dev/null
+From b525c06cdbd8a3963f0173ccd23f9147d4c384b5 Mon Sep 17 00:00:00 2001
+From: Henrique de Moraes Holschuh <hmh@hmh.eng.br>
+Date: Thu, 25 Feb 2010 22:22:22 -0300
+Subject: thinkpad-acpi: lock down video output state access
+
+From: Henrique de Moraes Holschuh <hmh@hmh.eng.br>
+
+commit b525c06cdbd8a3963f0173ccd23f9147d4c384b5 upstream.
+
+Given the right combination of ThinkPad and X.org, just reading the
+video output control state is enough to hard-crash X.org.
+
+Until the day I somehow find out a model or BIOS cut date to not
+provide this feature to ThinkPads that can do video switching through
+X RandR, change permissions so that only processes with CAP_SYS_ADMIN
+can access any sort of video output control state.
+
+This bug could be considered a local DoS I suppose, as it allows any
+non-privledged local user to cause some versions of X.org to
+hard-crash some ThinkPads.
+
+Reported-by: Jidanni <jidanni@jidanni.org>
+Signed-off-by: Henrique de Moraes Holschuh <hmh@hmh.eng.br>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+--- a/Documentation/laptops/thinkpad-acpi.txt
++++ b/Documentation/laptops/thinkpad-acpi.txt
+@@ -650,6 +650,10 @@ LCD, CRT or DVI (if available). The following commands are available:
+ echo expand_toggle > /proc/acpi/ibm/video
+ echo video_switch > /proc/acpi/ibm/video
+
++NOTE: Access to this feature is restricted to processes owning the
++CAP_SYS_ADMIN capability for safety reasons, as it can interact badly
++enough with some versions of X.org to crash it.
++
+ Each video output device can be enabled or disabled individually.
+ Reading /proc/acpi/ibm/video shows the status of each device.
+
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index f526e73..11fce79 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -319,9 +319,15 @@ config THINKPAD_ACPI_VIDEO
+ server running, phase of the moon, and the current mood of
+ Schroedinger's cat. If you can use X.org's RandR to control
+ your ThinkPad's video output ports instead of this feature,
+- don't think twice: do it and say N here to save some memory.
++ don't think twice: do it and say N here to save memory and avoid
++ bad interactions with X.org.
+
+- If you are not sure, say Y here.
++ NOTE: access to this feature is limited to processes with the
++ CAP_SYS_ADMIN capability, to avoid local DoS issues in platforms
++ where it interacts badly with X.org.
++
++ If you are not sure, say Y here but do try to check if you could
++ be using X.org RandR instead.
+
+ config THINKPAD_ACPI_HOTKEY_POLL
+ bool "Support NVRAM polling for hot keys"
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 3af4628..5d02cc0 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -286,6 +286,7 @@ struct ibm_init_struct {
+ char param[32];
+
+ int (*init) (struct ibm_init_struct *);
++ mode_t base_procfs_mode;
+ struct ibm_struct *data;
+ };
+
+@@ -4629,6 +4630,10 @@ static int video_read(struct seq_file *m)
+ return 0;
+ }
+
++ /* Even reads can crash X.org, so... */
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
+ status = video_outputsw_get();
+ if (status < 0)
+ return status;
+@@ -4662,6 +4667,10 @@ static int video_write(char *buf)
+ if (video_supported == TPACPI_VIDEO_NONE)
+ return -ENODEV;
+
++ /* Even reads can crash X.org, let alone writes... */
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
+ enable = 0;
+ disable = 0;
+
+@@ -8487,9 +8496,10 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
+ "%s installed\n", ibm->name);
+
+ if (ibm->read) {
+- mode_t mode;
++ mode_t mode = iibm->base_procfs_mode;
+
+- mode = S_IRUGO;
++ if (!mode)
++ mode = S_IRUGO;
+ if (ibm->write)
+ mode |= S_IWUSR;
+ entry = proc_create_data(ibm->name, mode, proc_dir,
+@@ -8680,6 +8690,7 @@ static struct ibm_init_struct ibms_init[] __initdata = {
+ #ifdef CONFIG_THINKPAD_ACPI_VIDEO
+ {
+ .init = video_init,
++ .base_procfs_mode = S_IRUSR,
+ .data = &video_driver_data,
+ },
+ #endif
--- /dev/null
+From 19f48cb105b7fa18d0dcab435919a3a29b7a7c4c Mon Sep 17 00:00:00 2001
+From: Francesco Lavra <francescolavra@interfree.it>
+Date: Thu, 31 Dec 2009 08:47:11 -0300
+Subject: V4L/DVB (13961): em28xx-dvb: fix memleak in dvb_fini()
+
+From: Francesco Lavra <francescolavra@interfree.it>
+
+commit 19f48cb105b7fa18d0dcab435919a3a29b7a7c4c upstream.
+
+this patch fixes a memory leak which occurs when an em28xx card with DVB
+extension is unplugged or its DVB extension driver is unloaded. In
+dvb_fini(), dev->dvb must be freed before being set to NULL, as is done
+in dvb_init() in case of error.
+Note that this bug is also present in the latest stable kernel release.
+
+Signed-off-by: Francesco Lavra <francescolavra@interfree.it>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/media/video/em28xx/em28xx-dvb.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/media/video/em28xx/em28xx-dvb.c
++++ b/drivers/media/video/em28xx/em28xx-dvb.c
+@@ -610,6 +610,7 @@ static int dvb_fini(struct em28xx *dev)
+
+ if (dev->dvb) {
+ unregister_dvb(dev->dvb);
++ kfree(dev->dvb);
+ dev->dvb = NULL;
+ }
+
--- /dev/null
+From 3119815912a220bdac943dfbdfee640414c0c611 Mon Sep 17 00:00:00 2001
+From: Michael S. Tsirkin <mst@redhat.com>
+Date: Thu, 25 Feb 2010 19:08:55 +0200
+Subject: virtio: fix out of range array access
+
+From: Michael S. Tsirkin <mst@redhat.com>
+
+commit 3119815912a220bdac943dfbdfee640414c0c611 upstream.
+
+I have observed the following error on virtio-net module unload:
+
+------------[ cut here ]------------
+WARNING: at kernel/irq/manage.c:858 __free_irq+0xa0/0x14c()
+Hardware name: Bochs
+Trying to free already-free IRQ 0
+Modules linked in: virtio_net(-) virtio_blk virtio_pci virtio_ring
+virtio af_packet e1000 shpchp aacraid uhci_hcd ohci_hcd ehci_hcd [last
+unloaded: scsi_wait_scan]
+Pid: 1957, comm: rmmod Not tainted 2.6.33-rc8-vhost #24
+Call Trace:
+ [<ffffffff8103e195>] warn_slowpath_common+0x7c/0x94
+ [<ffffffff8103e204>] warn_slowpath_fmt+0x41/0x43
+ [<ffffffff810a7a36>] ? __free_pages+0x5a/0x70
+ [<ffffffff8107cc00>] __free_irq+0xa0/0x14c
+ [<ffffffff8107cceb>] free_irq+0x3f/0x65
+ [<ffffffffa0081424>] vp_del_vqs+0x81/0xb1 [virtio_pci]
+ [<ffffffffa0091d29>] virtnet_remove+0xda/0x10b [virtio_net]
+ [<ffffffffa0075200>] virtio_dev_remove+0x22/0x4a [virtio]
+ [<ffffffff812709ee>] __device_release_driver+0x66/0xac
+ [<ffffffff81270ab7>] driver_detach+0x83/0xa9
+ [<ffffffff8126fc66>] bus_remove_driver+0x91/0xb4
+ [<ffffffff81270fcf>] driver_unregister+0x6c/0x74
+ [<ffffffffa0075418>] unregister_virtio_driver+0xe/0x10 [virtio]
+ [<ffffffffa0091c4d>] fini+0x15/0x17 [virtio_net]
+ [<ffffffff8106997b>] sys_delete_module+0x1c3/0x230
+ [<ffffffff81007465>] ? old_ich_force_enable_hpet+0x117/0x164
+ [<ffffffff813bb720>] ? do_page_fault+0x29c/0x2cc
+ [<ffffffff81028e58>] sysenter_dispatch+0x7/0x27
+---[ end trace 15e88e4c576cc62b ]---
+
+The bug is in virtio-pci: we use msix_vector as array index to get irq
+entry, but some vqs do not have a dedicated vector so this causes an out
+of bounds access. By chance, we seem to often get 0 value, which
+results in this error.
+
+Fix by verifying that vector is legal before using it as index.
+
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Anthony Liguori <aliguori@us.ibm.com>
+Acked-by: Shirley Ma <xma@us.ibm.com>
+Acked-by: Amit Shah <amit.shah@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/virtio/virtio_pci.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/virtio/virtio_pci.c
++++ b/drivers/virtio/virtio_pci.c
+@@ -473,7 +473,8 @@ static void vp_del_vqs(struct virtio_dev
+
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+ info = vq->priv;
+- if (vp_dev->per_vq_vectors)
++ if (vp_dev->per_vq_vectors &&
++ info->msix_vector != VIRTIO_MSI_NO_VECTOR)
+ free_irq(vp_dev->msix_entries[info->msix_vector].vector,
+ vq);
+ vp_del_vq(vq);
--- /dev/null
+From suresh.b.siddha@intel.com Thu Mar 18 16:25:46 2010
+From: Suresh Siddha <suresh.b.siddha@intel.com>
+Date: Thu, 18 Feb 2010 15:30:55 -0800
+Subject: x86, apic: Don't use logical-flat mode when CPU hotplug may exceed 8 CPUs
+To: Greg KH <greg@kroah.com>
+Cc: "Zheng, Shaohui" <shaohui.zheng@intel.com>, Yinghai Lu <yinghai@kernel.org>, "H. Peter Anvin" <hpa@zytor.com>, "stable@kernel.org" <stable@kernel.org>
+Message-ID: <1266535855.2909.36.camel@sbs-t61.sc.intel.com>
+
+From: Suresh Siddha <suresh.b.siddha@intel.com>
+
+commit 681ee44d40d7c93b42118320e4620d07d8704fd6 upstream
+
+We need to fall back from logical-flat APIC mode to physical-flat mode
+when we have more than 8 CPUs. However, in the presence of CPU
+hotplug (with bios listing not enabled but possible cpus as disabled cpus in
+MADT), we have to consider the number of possible CPUs rather than
+the number of current CPUs; otherwise we may cross the 8-CPU boundary
+when CPUs are added later.
+
+32bit apic code can use more cleanups (like the removal of vendor checks in
+32bit default_setup_apic_routing()) and more unifications with 64bit code.
+Yinghai has some patches in works already. This patch addresses the boot issue
+that is reported in the virtualization guest context.
+
+Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
+Acked-by: Shaohui Zheng <shaohui.zheng@intel.com>
+Reviewed-by: Yinghai Lu <yinghai@kernel.org>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/acpi/boot.c | 5 -----
+ arch/x86/kernel/apic/apic.c | 14 --------------
+ arch/x86/kernel/apic/probe_32.c | 27 ++++++++++++++++++++++++++-
+ arch/x86/kernel/apic/probe_64.c | 13 ++-----------
+ arch/x86/kernel/mpparse.c | 7 -------
+ arch/x86/kernel/smpboot.c | 2 --
+ 6 files changed, 28 insertions(+), 40 deletions(-)
+
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -1191,9 +1191,6 @@ static void __init acpi_process_madt(voi
+ if (!error) {
+ acpi_lapic = 1;
+
+-#ifdef CONFIG_X86_BIGSMP
+- generic_bigsmp_probe();
+-#endif
+ /*
+ * Parse MADT IO-APIC entries
+ */
+@@ -1203,8 +1200,6 @@ static void __init acpi_process_madt(voi
+ acpi_ioapic = 1;
+
+ smp_found_config = 1;
+- if (apic->setup_apic_routing)
+- apic->setup_apic_routing();
+ }
+ }
+ if (error == -EINVAL) {
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1665,9 +1665,7 @@ int __init APIC_init_uniprocessor(void)
+ #endif
+
+ enable_IR_x2apic();
+-#ifdef CONFIG_X86_64
+ default_setup_apic_routing();
+-#endif
+
+ verify_local_APIC();
+ connect_bsp_APIC();
+@@ -1915,18 +1913,6 @@ void __cpuinit generic_processor_info(in
+ if (apicid > max_physical_apicid)
+ max_physical_apicid = apicid;
+
+-#ifdef CONFIG_X86_32
+- switch (boot_cpu_data.x86_vendor) {
+- case X86_VENDOR_INTEL:
+- if (num_processors > 8)
+- def_to_bigsmp = 1;
+- break;
+- case X86_VENDOR_AMD:
+- if (max_physical_apicid >= 8)
+- def_to_bigsmp = 1;
+- }
+-#endif
+-
+ #if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
+ early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
+ early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
+--- a/arch/x86/kernel/apic/probe_32.c
++++ b/arch/x86/kernel/apic/probe_32.c
+@@ -54,6 +54,31 @@ late_initcall(print_ipi_mode);
+
+ void default_setup_apic_routing(void)
+ {
++ int version = apic_version[boot_cpu_physical_apicid];
++
++ if (num_possible_cpus() > 8) {
++ switch (boot_cpu_data.x86_vendor) {
++ case X86_VENDOR_INTEL:
++ if (!APIC_XAPIC(version)) {
++ def_to_bigsmp = 0;
++ break;
++ }
++ /* If P4 and above fall through */
++ case X86_VENDOR_AMD:
++ def_to_bigsmp = 1;
++ }
++ }
++
++#ifdef CONFIG_X86_BIGSMP
++ generic_bigsmp_probe();
++#endif
++
++ if (apic->setup_apic_routing)
++ apic->setup_apic_routing();
++}
++
++void setup_apic_flat_routing(void)
++{
+ #ifdef CONFIG_X86_IO_APIC
+ printk(KERN_INFO
+ "Enabling APIC mode: Flat. Using %d I/O APICs\n",
+@@ -103,7 +128,7 @@ struct apic apic_default = {
+ .init_apic_ldr = default_init_apic_ldr,
+
+ .ioapic_phys_id_map = default_ioapic_phys_id_map,
+- .setup_apic_routing = default_setup_apic_routing,
++ .setup_apic_routing = setup_apic_flat_routing,
+ .multi_timer_check = NULL,
+ .apicid_to_node = default_apicid_to_node,
+ .cpu_to_logical_apicid = default_cpu_to_logical_apicid,
+--- a/arch/x86/kernel/apic/probe_64.c
++++ b/arch/x86/kernel/apic/probe_64.c
+@@ -67,17 +67,8 @@ void __init default_setup_apic_routing(v
+ }
+ #endif
+
+- if (apic == &apic_flat) {
+- switch (boot_cpu_data.x86_vendor) {
+- case X86_VENDOR_INTEL:
+- if (num_processors > 8)
+- apic = &apic_physflat;
+- break;
+- case X86_VENDOR_AMD:
+- if (max_physical_apicid >= 8)
+- apic = &apic_physflat;
+- }
+- }
++ if (apic == &apic_flat && num_possible_cpus() > 8)
++ apic = &apic_physflat;
+
+ printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
+
+--- a/arch/x86/kernel/mpparse.c
++++ b/arch/x86/kernel/mpparse.c
+@@ -359,13 +359,6 @@ static int __init smp_read_mpc(struct mp
+ x86_init.mpparse.mpc_record(1);
+ }
+
+-#ifdef CONFIG_X86_BIGSMP
+- generic_bigsmp_probe();
+-#endif
+-
+- if (apic->setup_apic_routing)
+- apic->setup_apic_routing();
+-
+ if (!num_processors)
+ printk(KERN_ERR "MPTABLE: no processors registered!\n");
+ return num_processors;
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1066,9 +1066,7 @@ void __init native_smp_prepare_cpus(unsi
+ set_cpu_sibling_map(0);
+
+ enable_IR_x2apic();
+-#ifdef CONFIG_X86_64
+ default_setup_apic_routing();
+-#endif
+
+ if (smp_sanity_check(max_cpus) < 0) {
+ printk(KERN_INFO "SMP disabled\n");
--- /dev/null
+From 1252f238db48ec419f40c1bdf30fda649860eed9 Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov <oleg@redhat.com>
+Date: Tue, 16 Feb 2010 15:02:13 +0100
+Subject: x86: set_personality_ia32() misses force_personality32
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+commit 1252f238db48ec419f40c1bdf30fda649860eed9 upstream.
+
+05d43ed8a "x86: get rid of the insane TIF_ABI_PENDING bit" forgot about
+force_personality32. Fix.
+
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/process_64.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -546,6 +546,7 @@ void set_personality_ia32(void)
+
+ /* Make sure to be in 32bit mode */
+ set_thread_flag(TIF_IA32);
++ current->personality |= force_personality32;
+
+ /* Prepare the first "return" to user space */
+ current_thread_info()->status |= TS_COMPAT;