--- /dev/null
+From 6148b130eb84edc76e4fa88da1877b27be6c2f06 Mon Sep 17 00:00:00 2001
+From: Sophie Hamilton <kernel@theblob.org>
+Date: Tue, 8 Sep 2009 10:58:42 +0200
+Subject: ALSA: cs46xx - Fix minimum period size
+
+From: Sophie Hamilton <kernel@theblob.org>
+
+commit 6148b130eb84edc76e4fa88da1877b27be6c2f06 upstream.
+
+Fix minimum period size for cs46xx cards. This fixes a problem in the
+case where neither a period size nor a buffer size is passed to ALSA;
+this is the case in Audacious, OpenAL, and others.
+
+Signed-off-by: Sophie Hamilton <kernel@theblob.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ sound/pci/cs46xx/cs46xx_lib.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/pci/cs46xx/cs46xx_lib.h
++++ b/sound/pci/cs46xx/cs46xx_lib.h
+@@ -35,7 +35,7 @@
+
+
+ #ifdef CONFIG_SND_CS46XX_NEW_DSP
+-#define CS46XX_MIN_PERIOD_SIZE 1
++#define CS46XX_MIN_PERIOD_SIZE 64
+ #define CS46XX_MAX_PERIOD_SIZE 1024*1024
+ #else
+ #define CS46XX_MIN_PERIOD_SIZE 2048
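+
+For context, these period limits normally reach the ALSA core through the
+snd_pcm_hardware descriptor a driver installs in its open callback; when an
+application passes neither a period size nor a buffer size, the core picks
+values from exactly these bounds.  The following is only an abbreviated
+sketch with a hypothetical chip name, not the actual cs46xx code:
+
+	#include <sound/pcm.h>
+
+	static const struct snd_pcm_hardware chip_pcm_hw = {
+		/* other fields (info, formats, rates, ...) omitted */
+		.period_bytes_min = CS46XX_MIN_PERIOD_SIZE,	/* 64 after this fix */
+		.period_bytes_max = CS46XX_MAX_PERIOD_SIZE,
+		.buffer_bytes_max = 256 * 1024,
+	};
+
+	static int chip_pcm_open(struct snd_pcm_substream *substream)
+	{
+		substream->runtime->hw = chip_pcm_hw;	/* advertise the limits */
+		return 0;
+	}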
--- /dev/null
+From 7929eb9cf643ae416e5081b2a6fa558d37b9854c Mon Sep 17 00:00:00 2001
+From: Nicolas Pitre <nico@cam.org>
+Date: Thu, 3 Sep 2009 21:45:59 +0100
+Subject: ARM: 5691/1: fix cache aliasing issues between kmap() and kmap_atomic() with highmem
+
+From: Nicolas Pitre <nico@cam.org>
+
+commit 7929eb9cf643ae416e5081b2a6fa558d37b9854c upstream.
+
+Let's suppose a highmem page is kmap'd with kmap(). A pkmap entry is
+used, the page mapped to it, and the virtual cache is dirtied. Then
+kunmap() is used which does virtually nothing except for decrementing a
+usage count.
+
+Then, let's suppose the _same_ page gets mapped using kmap_atomic().
+It is therefore mapped onto a fixmap entry instead, which has a
+different virtual address and knows nothing about the dirty cache data
+for that page still sitting in the pkmap mapping.
+
+Fortunately it is easy to know if a pkmap mapping still exists for that
+page and use it directly with kmap_atomic(), thanks to kmap_high_get().
+
+Actual testing with a printk in the added code path shows that this
+condition is met *extremely* frequently. It seems we have been quite
+lucky that things have worked so well with highmem so far.
+
+Signed-off-by: Nicolas Pitre <nico@marvell.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/arm/mm/highmem.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/arm/mm/highmem.c
++++ b/arch/arm/mm/highmem.c
+@@ -40,11 +40,16 @@ void *kmap_atomic(struct page *page, enu
+ {
+ unsigned int idx;
+ unsigned long vaddr;
++ void *kmap;
+
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
++ kmap = kmap_high_get(page);
++ if (kmap)
++ return kmap;
++
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ #ifdef CONFIG_DEBUG_HIGHMEM
+@@ -80,6 +85,9 @@ void kunmap_atomic(void *kvaddr, enum km
+ #else
+ (void) idx; /* to kill a warning */
+ #endif
++ } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
++ /* this address was obtained through kmap_high_get() */
++ kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
+ }
+ pagefault_enable();
+ }
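+
+The aliasing scenario described above fits in a few lines; this is a
+hypothetical caller illustrating the problem, not code from the patch
+(2.6.31-era API, where kmap_atomic() still takes a km_type):
+
+	#include <linux/highmem.h>
+
+	static void alias_demo(struct page *page)
+	{
+		char *p, *q;
+
+		p = kmap(page);			/* pkmap virtual address */
+		p[0] = 0x42;			/* dirties the VIVT cache at p */
+		kunmap(page);			/* only drops a use count */
+
+		q = kmap_atomic(page, KM_USER0);/* without this fix: a distinct
+						   fixmap address, not p */
+		/* reading q[0] here may miss the dirty data still sitting in
+		   the cache lines of p; kmap_high_get() avoids the alias */
+		kunmap_atomic(q, KM_USER0);
+	}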
--- /dev/null
+From 87831cb660954356d68cebdb1406f3be09e784e9 Mon Sep 17 00:00:00 2001
+From: Mark Brown <broonie@opensource.wolfsonmicro.com>
+Date: Mon, 7 Sep 2009 18:09:58 +0100
+Subject: ASoC: Fix WM835x Out4 capture enumeration
+
+From: Mark Brown <broonie@opensource.wolfsonmicro.com>
+
+commit 87831cb660954356d68cebdb1406f3be09e784e9 upstream.
+
+It's the 8th enum of a zero-indexed array. This is why I don't let
+new drivers use these arrays of enums...
+
+Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ sound/soc/codecs/wm8350.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/soc/codecs/wm8350.c
++++ b/sound/soc/codecs/wm8350.c
+@@ -613,7 +613,7 @@ SOC_DAPM_SINGLE("Switch", WM8350_BEEP_VO
+
+ /* Out4 Capture Mux */
+ static const struct snd_kcontrol_new wm8350_out4_capture_controls =
+-SOC_DAPM_ENUM("Route", wm8350_enum[8]);
++SOC_DAPM_ENUM("Route", wm8350_enum[7]);
+
+ static const struct snd_soc_dapm_widget wm8350_dapm_widgets[] = {
+
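+
+The fix is plain C array indexing: the 8th element of a zero-indexed array
+lives at index 7, so wm8350_enum[8] selected the 9th enum instead of the
+intended 8th.  A trivial illustration with a made-up array:
+
+	static const char *mux[8] = { "a", "b", "c", "d", "e", "f", "g", "h" };
+	/* the 8th entry is mux[7]; mux[8] would read past the end */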
--- /dev/null
+From ac8672ea922bde59acf50eaa1eaa1640a6395fd2 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <htejun@gmail.com>
+Date: Sun, 16 Aug 2009 21:21:21 +0900
+Subject: libata: fix off-by-one error in ata_tf_read_block()
+
+From: Tejun Heo <htejun@gmail.com>
+
+commit ac8672ea922bde59acf50eaa1eaa1640a6395fd2 upstream.
+
+ata_tf_read_block() has an off-by-one error when converting a CHS
+address to LBA. The bug isn't very visible because ata_tf_read_block() is
+used only when generating sense data for a failed RW command and CHS
+addressing isn't used too often these days.
+
+This problem was spotted by Atsushi Nemoto.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -709,7 +709,13 @@ u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
+ head = tf->device & 0xf;
+ sect = tf->lbal;
+
+- block = (cyl * dev->heads + head) * dev->sectors + sect;
++ if (!sect) {
++ ata_dev_printk(dev, KERN_WARNING, "device reported "
++ "invalid CHS sector 0\n");
++ sect = 1; /* oh well */
++ }
++
++ block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
+ }
+
+ return block;
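+
+The conversion being corrected is the standard CHS-to-LBA formula,
+LBA = (C * heads + H) * sectors_per_track + (S - 1); CHS sector numbers
+are 1-based, so dropping the "- 1" shifts every address up by one.  A
+self-contained illustration (not the libata code):
+
+	#include <stdio.h>
+
+	static unsigned long long chs_to_lba(unsigned int c, unsigned int h,
+					     unsigned int s,
+					     unsigned int heads,
+					     unsigned int sectors)
+	{
+		return ((unsigned long long)c * heads + h) * sectors + s - 1;
+	}
+
+	int main(void)
+	{
+		/* the first sector of a disk, C/H/S = 0/0/1, is LBA 0 */
+		printf("%llu\n", chs_to_lba(0, 0, 1, 16, 63));	/* prints 0 */
+		return 0;
+	}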
--- /dev/null
+From fa0681d2129732027355d6b7083dd8932b9b799d Mon Sep 17 00:00:00 2001
+From: Roland Dreier <rolandd@cisco.com>
+Date: Sat, 5 Sep 2009 20:24:49 -0700
+Subject: mlx4_core: Allocate and map sufficient ICM memory for EQ context
+
+From: Roland Dreier <rolandd@cisco.com>
+
+commit fa0681d2129732027355d6b7083dd8932b9b799d upstream.
+
+The current implementation allocates a single host page for EQ context
+memory, which was OK when we only allocated a few EQs. However, since
+we now allocate an EQ for each CPU core, this patch removes the
+hard-coded limit (which we exceed with 4 KB pages and 128 byte EQ
+context entries with 32 CPUs) and uses the same ICM table code as all
+other context tables, which ends up simplifying the code quite a bit
+while fixing the problem.
+
+This problem was actually hit in practice on a dual-socket Nehalem box
+with 16 real hardware threads and sufficiently odd ACPI tables that it
+shows on boot
+
+ SMP: Allowing 32 CPUs, 16 hotplug CPUs
+
+so num_possible_cpus() ends up 32, and mlx4 ends up creating 33 MSI-X
+interrupts and 33 EQs. This mlx4 bug means that mlx4 can't even
+initialize at all on this quite mainstream system.
+
+Reported-by: Eli Cohen <eli@mellanox.co.il>
+Tested-by: Christoph Lameter <cl@linux-foundation.org>
+Signed-off-by: Roland Dreier <rolandd@cisco.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/mlx4/eq.c | 42 ------------------------------------------
+ drivers/net/mlx4/main.c | 9 ++++++---
+ drivers/net/mlx4/mlx4.h | 7 +------
+ 3 files changed, 7 insertions(+), 51 deletions(-)
+
+--- a/drivers/net/mlx4/eq.c
++++ b/drivers/net/mlx4/eq.c
+@@ -524,48 +524,6 @@ static void mlx4_unmap_clr_int(struct ml
+ iounmap(priv->clr_base);
+ }
+
+-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
+-{
+- struct mlx4_priv *priv = mlx4_priv(dev);
+- int ret;
+-
+- /*
+- * We assume that mapping one page is enough for the whole EQ
+- * context table. This is fine with all current HCAs, because
+- * we only use 32 EQs and each EQ uses 64 bytes of context
+- * memory, or 1 KB total.
+- */
+- priv->eq_table.icm_virt = icm_virt;
+- priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
+- if (!priv->eq_table.icm_page)
+- return -ENOMEM;
+- priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
+- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+- if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
+- __free_page(priv->eq_table.icm_page);
+- return -ENOMEM;
+- }
+-
+- ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
+- if (ret) {
+- pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
+- PCI_DMA_BIDIRECTIONAL);
+- __free_page(priv->eq_table.icm_page);
+- }
+-
+- return ret;
+-}
+-
+-void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
+-{
+- struct mlx4_priv *priv = mlx4_priv(dev);
+-
+- mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
+- pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
+- PCI_DMA_BIDIRECTIONAL);
+- __free_page(priv->eq_table.icm_page);
+-}
+-
+ int mlx4_alloc_eq_table(struct mlx4_dev *dev)
+ {
+ struct mlx4_priv *priv = mlx4_priv(dev);
+--- a/drivers/net/mlx4/main.c
++++ b/drivers/net/mlx4/main.c
+@@ -520,7 +520,10 @@ static int mlx4_init_icm(struct mlx4_dev
+ goto err_unmap_aux;
+ }
+
+- err = mlx4_map_eq_icm(dev, init_hca->eqc_base);
++ err = mlx4_init_icm_table(dev, &priv->eq_table.table,
++ init_hca->eqc_base, dev_cap->eqc_entry_sz,
++ dev->caps.num_eqs, dev->caps.num_eqs,
++ 0, 0);
+ if (err) {
+ mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
+ goto err_unmap_cmpt;
+@@ -663,7 +666,7 @@ err_unmap_mtt:
+ mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
+
+ err_unmap_eq:
+- mlx4_unmap_eq_icm(dev);
++ mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
+
+ err_unmap_cmpt:
+ mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
+@@ -693,11 +696,11 @@ static void mlx4_free_icms(struct mlx4_d
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
+ mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
++ mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
+ mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
+- mlx4_unmap_eq_icm(dev);
+
+ mlx4_UNMAP_ICM_AUX(dev);
+ mlx4_free_icm(dev, priv->fw.aux_icm, 0);
+--- a/drivers/net/mlx4/mlx4.h
++++ b/drivers/net/mlx4/mlx4.h
+@@ -205,9 +205,7 @@ struct mlx4_eq_table {
+ void __iomem **uar_map;
+ u32 clr_mask;
+ struct mlx4_eq *eq;
+- u64 icm_virt;
+- struct page *icm_page;
+- dma_addr_t icm_dma;
++ struct mlx4_icm_table table;
+ struct mlx4_icm_table cmpt_table;
+ int have_irq;
+ u8 inta_pin;
+@@ -373,9 +371,6 @@ u64 mlx4_make_profile(struct mlx4_dev *d
+ struct mlx4_dev_cap *dev_cap,
+ struct mlx4_init_hca_param *init_hca);
+
+-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt);
+-void mlx4_unmap_eq_icm(struct mlx4_dev *dev);
+-
+ int mlx4_cmd_init(struct mlx4_dev *dev);
+ void mlx4_cmd_cleanup(struct mlx4_dev *dev);
+ void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
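+
+The arithmetic behind the failure, using the numbers from the changelog:
+
+	old assumption: a few dozen EQs at 64 bytes of context each,
+	                comfortably inside the single 4096-byte page that
+	                mlx4_map_eq_icm() mapped
+	this system:    33 EQs * 128 bytes of context = 4224 bytes, which
+	                does not fit in one 4096-byte page
+
+Sizing the mapping with mlx4_init_icm_table() from dev->caps.num_eqs
+removes the implicit one-page limit entirely.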
--- /dev/null
+From 6dab62ee5a3bf4f71b8320c09db2e6022a19f40e Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Tue, 21 Jul 2009 16:08:43 -0700
+Subject: PCI: apply nv_msi_ht_cap_quirk on resume too
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 6dab62ee5a3bf4f71b8320c09db2e6022a19f40e upstream.
+
+http://bugzilla.kernel.org/show_bug.cgi?id=12542 reports that with the
+quirk not applied on resume, MSI stops working after resume and the
+MCP78S AHCI controller fails due to IRQ mis-delivery. Apply it on
+resume too.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Cc: Peer Chen <pchen@nvidia.com>
+Cc: Tj <linux@tjworld.net>
+Reported-by: Nicolas Derive <kalon33@ubuntu.com>
+Cc: Greg KH <greg@kroah.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/pci/quirks.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -2353,8 +2353,10 @@ static void __devinit nv_msi_ht_cap_quir
+ }
+
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
++DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
+
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
++DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
+
+ static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev)
+ {
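+
+For reference, a quirk registered with DECLARE_PCI_FIXUP_FINAL runs when
+the device is enumerated, not when it resumes from suspend; registering
+the same function with DECLARE_PCI_FIXUP_RESUME_EARLY makes the PCI core
+run it again early in the resume path.  A sketch of the pattern with a
+made-up quirk and placeholder IDs, not part of this patch:
+
+	#include <linux/pci.h>
+
+	static void mychip_quirk(struct pci_dev *dev)
+	{
+		/* re-apply whatever setting the suspend/resume cycle lost */
+	}
+	DECLARE_PCI_FIXUP_FINAL(0x10de, PCI_ANY_ID, mychip_quirk);
+	DECLARE_PCI_FIXUP_RESUME_EARLY(0x10de, PCI_ANY_ID, mychip_quirk);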
--- /dev/null
+From 6b5096e4d4496e185cd1ada5d1b8e1d941c805ed Mon Sep 17 00:00:00 2001
+From: Jean Delvare <khali@linux-fr.org>
+Date: Tue, 28 Jul 2009 11:49:19 +0200
+Subject: PCI: Unhide the SMBus on the Compaq Evo D510 USDT
+
+From: Jean Delvare <khali@linux-fr.org>
+
+commit 6b5096e4d4496e185cd1ada5d1b8e1d941c805ed upstream.
+
+One more form factor for Compaq Evo D510, which needs the same quirk
+as the other form factors. Apparently there's no hardware monitoring
+chip on that one, but SPD EEPROMs, so it's still worth unhiding the
+SMBus.
+
+Signed-off-by: Jean Delvare <khali@linux-fr.org>
+Tested-by: Nuzhna Pomoshch <nuzhna_pomoshch@yahoo.com>
+Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -1201,6 +1201,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
+ switch(dev->subsystem_device) {
+ case 0x00b8: /* Compaq Evo D510 CMT */
+ case 0x00b9: /* Compaq Evo D510 SFF */
++ case 0x00ba: /* Compaq Evo D510 USDT */
+ /* Motherboard doesn't have Host bridge
+ * subvendor/subdevice IDs and on-board VGA
+ * controller is disabled if an AGP card is
powerpc-ps3-workaround-for-flash-memory-i-o-error.patch
tpm-fixup-boot-probe-timeout-for-tpm_tis-driver.patch
udf-use-device-size-when-drive-reported-bogus-number-of-written-blocks.patch
+alsa-cs46xx-fix-minimum-period-size.patch
+arm-5691-1-fix-cache-aliasing-issues-between-kmap-and-kmap_atomic-with-highmem.patch
+asoc-fix-wm835x-out4-capture-enumeration.patch
+mlx4_core-allocate-and-map-sufficient-icm-memory-for-eq-context.patch
+pci-apply-nv_msi_ht_cap_quirk-on-resume-too.patch
+sound-oxygen-work-around-mce-when-changing-volume.patch
+x86-fix-x86_model-test-in-es7000_apic_is_cluster.patch
+x86-i386-make-sure-stack-protector-segment-base-is-cache-aligned.patch
+x86-pat-fix-cacheflush-address-in-change_page_attr_set_clr.patch
+v4l-em28xx-set-up-tda9887_conf-in-em28xx_card_setup.patch
+virtio_blk-don-t-bounce-highmem-requests.patch
+libata-fix-off-by-one-error-in-ata_tf_read_block.patch
+pci-unhide-the-smbus-on-the-compaq-evo-d510-usdt.patch
--- /dev/null
+From f1bc07af9a9edc5c1d4bdd971f7099316ed2e405 Mon Sep 17 00:00:00 2001
+From: Clemens Ladisch <clemens@ladisch.de>
+Date: Mon, 7 Sep 2009 10:18:54 +0200
+Subject: sound: oxygen: work around MCE when changing volume
+
+From: Clemens Ladisch <clemens@ladisch.de>
+
+commit f1bc07af9a9edc5c1d4bdd971f7099316ed2e405 upstream.
+
+When the volume is changed continuously (e.g., when the user drags a
+volume slider with the mouse), the driver does lots of I2C writes.
+Apparently, the sound chip can get confused when we poll the I2C status
+register too much, and fails to complete a read from it. On the PCI-E
+models, the PCI-E/PCI bridge gets upset by this and generates a machine
+check exception.
+
+To avoid this, this patch replaces the polling with an unconditional
+wait that is guaranteed to be long enough.
+
+Signed-off-by: Clemens Ladisch <clemens@ladisch.de>
+Tested-by: Johann Messner <johann.messner at jku.at>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ sound/pci/oxygen/oxygen_io.c | 11 +----------
+ 1 file changed, 1 insertion(+), 10 deletions(-)
+
+--- a/sound/pci/oxygen/oxygen_io.c
++++ b/sound/pci/oxygen/oxygen_io.c
+@@ -215,17 +215,8 @@ EXPORT_SYMBOL(oxygen_write_spi);
+
+ void oxygen_write_i2c(struct oxygen *chip, u8 device, u8 map, u8 data)
+ {
+- unsigned long timeout;
+-
+ /* should not need more than about 300 us */
+- timeout = jiffies + msecs_to_jiffies(1);
+- do {
+- if (!(oxygen_read16(chip, OXYGEN_2WIRE_BUS_STATUS)
+- & OXYGEN_2WIRE_BUSY))
+- break;
+- udelay(1);
+- cond_resched();
+- } while (time_after_eq(timeout, jiffies));
++ msleep(1);
+
+ oxygen_write8(chip, OXYGEN_2WIRE_MAP, map);
+ oxygen_write8(chip, OXYGEN_2WIRE_DATA, data);
--- /dev/null
+From mkrufky@linuxtv.org Wed Sep 16 14:36:28 2009
+From: Michael Krufky <mkrufky@linuxtv.org>
+Date: Sat, 12 Sep 2009 10:31:05 -0400
+Subject: V4L: em28xx: set up tda9887_conf in em28xx_card_setup()
+To: stable@kernel.org
+Cc: Larry Finger <Larry.Finger@lwfinger.net>, linux-media <linux-media@vger.kernel.org>, Mauro Carvalho Chehab <mchehab@redhat.com>, Douglas Schilling Landgraf <dougsland@redhat.com>, Franklin Meng <fmeng2002@yahoo.com>
+Message-ID: <37219a840909120731j1166b2b0r8c51dc7ba8dbea6a@mail.gmail.com>
+
+From: Franklin Meng <fmeng2002@yahoo.com>
+
+V4L: em28xx: set up tda9887_conf in em28xx_card_setup()
+
+(cherry picked from commit ae3340cbf59ea362c2016eea762456cc0969fd9e)
+
+Added tda9887_conf setup in em28xx_card_setup()
+
+Signed-off-by: Franklin Meng <fmeng2002@yahoo.com>
+Signed-off-by: Douglas Schilling Landgraf <dougsland@redhat.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
+Tested-by: Larry Finger <Larry.Finger@lwfinger.net>
+Signed-off-by: Michael Krufky <mkrufky@linuxtv.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/media/video/em28xx/em28xx-cards.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/media/video/em28xx/em28xx-cards.c
++++ b/drivers/media/video/em28xx/em28xx-cards.c
+@@ -1886,6 +1886,9 @@ void em28xx_card_setup(struct em28xx *de
+ if (em28xx_boards[dev->model].tuner_addr)
+ dev->tuner_addr = em28xx_boards[dev->model].tuner_addr;
+
++ if (em28xx_boards[dev->model].tda9887_conf)
++ dev->tda9887_conf = em28xx_boards[dev->model].tda9887_conf;
++
+ /* request some modules */
+ switch (dev->model) {
+ case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2:
--- /dev/null
+From cebbert@redhat.com Wed Sep 16 14:36:57 2009
+From: Christoph Hellwig <hch@lst.de>
+Date: Fri, 11 Sep 2009 18:49:19 -0400
+Subject: virtio_blk: don't bounce highmem requests
+To: stable@kernel.org
+Cc: Christoph Hellwig <hch@lst.de>
+Message-ID: <20090911184919.6602f379@dhcp-100-2-144.bos.redhat.com>
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit 4eff3cae9c9809720c636e64bc72f212258e0bd5 upstream
+
+virtio_blk: don't bounce highmem requests
+
+By default a block driver bounces highmem requests, but virtio-blk is
+perfectly fine with any request that fits into its 64-bit addressing
+scheme, whether it is mapped into kernel virtual space or not.
+
+Besides improving performance on highmem systems, this also makes the
+reproducible oops in __bounce_end_io go away (though it only hides the
+real cause).
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
+Cc: Chuck Ebbert <cebbert@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+
+---
+ drivers/block/virtio_blk.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -308,6 +308,9 @@ static int virtblk_probe(struct virtio_d
+ else
+ blk_queue_max_segment_size(vblk->disk->queue, -1U);
+
++ /* No need to bounce any requests */
++ blk_queue_bounce_limit(vblk->disk->queue, BLK_BOUNCE_ANY);
++
+ /* Host can optionally specify the block size of the device */
+ err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
+ offsetof(struct virtio_blk_config, blk_size),
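+
+blk_queue_bounce_limit() tells the block layer the highest address the
+driver can DMA to; BLK_BOUNCE_ANY means no request ever needs to be
+copied through a lowmem bounce buffer.  A minimal sketch of the pattern
+(hypothetical driver, not virtio_blk itself):
+
+	#include <linux/blkdev.h>
+
+	static void mydrv_init_queue(struct request_queue *q)
+	{
+		/* device addresses any page directly, highmem included */
+		blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
+	}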
--- /dev/null
+From 005155b1f626d2b2d7932e4afdf4fead168c6888 Mon Sep 17 00:00:00 2001
+From: Roel Kluin <roel.kluin@gmail.com>
+Date: Tue, 25 Aug 2009 15:35:12 +0200
+Subject: x86: Fix x86_model test in es7000_apic_is_cluster()
+
+From: Roel Kluin <roel.kluin@gmail.com>
+
+commit 005155b1f626d2b2d7932e4afdf4fead168c6888 upstream.
+
+Testing whether x86_model is greater than 6 *or* less than 12 is
+logically always true; selecting models 7 through 11 requires a
+logical AND.
+
+Signed-off-by: Roel Kluin <roel.kluin@gmail.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/apic/es7000_32.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/apic/es7000_32.c
++++ b/arch/x86/kernel/apic/es7000_32.c
+@@ -167,7 +167,7 @@ static int es7000_apic_is_cluster(void)
+ {
+ /* MPENTIUMIII */
+ if (boot_cpu_data.x86 == 6 &&
+- (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11))
++ (boot_cpu_data.x86_model >= 7 && boot_cpu_data.x86_model <= 11))
+ return 1;
+
+ return 0;
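+
+The broken test is pure boolean logic: every model number is either
+greater than 6 or less than 12, so the OR form matches everything.  A
+self-contained demonstration:
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned int m;
+
+		for (m = 0; m < 16; m++) {
+			int broken = (m >= 7 || m <= 11);	/* always 1 */
+			int fixed  = (m >= 7 && m <= 11);	/* 7..11 only */
+			printf("model %2u: broken=%d fixed=%d\n", m, broken, fixed);
+		}
+		return 0;
+	}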
--- /dev/null
+From 1ea0d14e480c245683927eecc03a70faf06e80c8 Mon Sep 17 00:00:00 2001
+From: Jeremy Fitzhardinge <jeremy@goop.org>
+Date: Thu, 3 Sep 2009 12:27:15 -0700
+Subject: x86/i386: Make sure stack-protector segment base is cache aligned
+
+From: Jeremy Fitzhardinge <jeremy@goop.org>
+
+commit 1ea0d14e480c245683927eecc03a70faf06e80c8 upstream.
+
+The Intel Optimization Reference Guide says:
+
+ In Intel Atom microarchitecture, the address generation unit
+ assumes that the segment base will be 0 by default. Non-zero
+ segment base will cause load and store operations to experience
+ a delay.
+ - If the segment base isn't aligned to a cache line
+ boundary, the max throughput of memory operations is
+ reduced to one [e]very 9 cycles.
+ [...]
+ Assembly/Compiler Coding Rule 15. (H impact, ML generality)
+ For Intel Atom processors, use segments with base set to 0
+ whenever possible; avoid non-zero segment base address that is
+ not aligned to cache line boundary at all cost.
+
+We can't avoid having a non-zero base for the stack-protector
+segment, but we can make it cache-aligned.
+
+Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
+LKML-Reference: <4AA01893.6000507@goop.org>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/processor.h | 12 +++++++++++-
+ arch/x86/include/asm/stackprotector.h | 4 ++--
+ arch/x86/include/asm/system.h | 2 +-
+ arch/x86/kernel/cpu/common.c | 2 +-
+ arch/x86/kernel/head_32.S | 1 -
+ 5 files changed, 15 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -402,7 +402,17 @@ extern unsigned long kernel_eflags;
+ extern asmlinkage void ignore_sysret(void);
+ #else /* X86_64 */
+ #ifdef CONFIG_CC_STACKPROTECTOR
+-DECLARE_PER_CPU(unsigned long, stack_canary);
++/*
++ * Make sure stack canary segment base is cache-aligned:
++ * "For Intel Atom processors, avoid non zero segment base address
++ * that is not aligned to cache line boundary at all cost."
++ * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
++ */
++struct stack_canary {
++ char __pad[20]; /* canary at %gs:20 */
++ unsigned long canary;
++};
++DECLARE_PER_CPU(struct stack_canary, stack_canary) ____cacheline_aligned;
+ #endif
+ #endif /* X86_64 */
+
+--- a/arch/x86/include/asm/stackprotector.h
++++ b/arch/x86/include/asm/stackprotector.h
+@@ -78,14 +78,14 @@ static __always_inline void boot_init_st
+ #ifdef CONFIG_X86_64
+ percpu_write(irq_stack_union.stack_canary, canary);
+ #else
+- percpu_write(stack_canary, canary);
++ percpu_write(stack_canary.canary, canary);
+ #endif
+ }
+
+ static inline void setup_stack_canary_segment(int cpu)
+ {
+ #ifdef CONFIG_X86_32
+- unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu) - 20;
++ unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu);
+ struct desc_struct *gdt_table = get_cpu_gdt_table(cpu);
+ struct desc_struct desc;
+
+--- a/arch/x86/include/asm/system.h
++++ b/arch/x86/include/asm/system.h
+@@ -31,7 +31,7 @@ void __switch_to_xtra(struct task_struct
+ "movl %P[task_canary](%[next]), %%ebx\n\t" \
+ "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
+ #define __switch_canary_oparam \
+- , [stack_canary] "=m" (per_cpu_var(stack_canary))
++ , [stack_canary] "=m" (per_cpu_var(stack_canary.canary))
+ #define __switch_canary_iparam \
+ , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
+ #else /* CC_STACKPROTECTOR */
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1033,7 +1033,7 @@ DEFINE_PER_CPU(struct orig_ist, orig_ist
+ #else /* CONFIG_X86_64 */
+
+ #ifdef CONFIG_CC_STACKPROTECTOR
+-DEFINE_PER_CPU(unsigned long, stack_canary);
++DEFINE_PER_CPU(struct stack_canary, stack_canary) ____cacheline_aligned;
+ #endif
+
+ /* Make sure %fs and %gs are initialized properly in idle threads */
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -442,7 +442,6 @@ is386: movl $2,%ecx # set MP
+ jne 1f
+ movl $per_cpu__gdt_page,%eax
+ movl $per_cpu__stack_canary,%ecx
+- subl $20, %ecx
+ movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
+ shrl $16, %ecx
+ movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
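+
+The layout requirement can be checked in isolation: on i386, gcc's
+stack-protector code reads the canary from %gs:20, so the padded struct
+must keep the canary member at offset 20 while the per-cpu variable as a
+whole gets cache-line alignment.  A stand-alone check (C11, not kernel
+code):
+
+	#include <stddef.h>
+
+	struct stack_canary {
+		char __pad[20];		/* canary must stay at %gs:20 */
+		unsigned long canary;
+	};
+
+	_Static_assert(offsetof(struct stack_canary, canary) == 20,
+		       "canary would no longer be where gcc expects it");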
--- /dev/null
+From fa526d0d641b5365676a1fb821ce359e217c9b85 Mon Sep 17 00:00:00 2001
+From: Jack Steiner <steiner@sgi.com>
+Date: Thu, 3 Sep 2009 12:56:02 -0500
+Subject: x86, pat: Fix cacheflush address in change_page_attr_set_clr()
+
+From: Jack Steiner <steiner@sgi.com>
+
+commit fa526d0d641b5365676a1fb821ce359e217c9b85 upstream.
+
+Fix address passed to cpa_flush_range() when changing page
+attributes from WB to UC. The address (*addr) is
+modified by __change_page_attr_set_clr(). The result is that
+the pages being flushed start at the _end_ of the changed range
+instead of the beginning.
+
+This should be considered for 2.6.30-stable and 2.6.31-stable.
+
+Signed-off-by: Jack Steiner <steiner@sgi.com>
+Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/mm/pageattr.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -807,6 +807,7 @@ static int change_page_attr_set_clr(unsi
+ {
+ struct cpa_data cpa;
+ int ret, cache, checkalias;
++ unsigned long baddr = 0;
+
+ /*
+ * Check, if we are requested to change a not supported
+@@ -838,6 +839,11 @@ static int change_page_attr_set_clr(unsi
+ */
+ WARN_ON_ONCE(1);
+ }
++ /*
++ * Save address for cache flush. *addr is modified in the call
++ * to __change_page_attr_set_clr() below.
++ */
++ baddr = *addr;
+ }
+
+ /* Must avoid aliasing mappings in the highmem code */
+@@ -892,7 +898,7 @@ static int change_page_attr_set_clr(unsi
+ cpa_flush_array(addr, numpages, cache,
+ cpa.flags, pages);
+ } else
+- cpa_flush_range(*addr, numpages, cache);
++ cpa_flush_range(baddr, numpages, cache);
+ } else
+ cpa_flush_all(cache);
+
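+
+The fix is the usual save-before-the-callee-modifies pattern; a generic
+illustration with hypothetical helper names, not the pageattr code:
+
+	void walk_and_change(unsigned long *addr, int numpages);	/* hypothetical */
+	void flush_range(unsigned long start, int numpages);		/* hypothetical */
+
+	static void change_and_flush(unsigned long *addr, int numpages)
+	{
+		unsigned long baddr = *addr;	/* remember the range start */
+
+		/* the helper advances *addr as it walks the range */
+		walk_and_change(addr, numpages);
+
+		/* flushing from *addr here would start past the end of the
+		   changed range; flush from the saved start instead */
+		flush_range(baddr, numpages);
+	}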