--- /dev/null
+From c0ff17720ec5f42205b3d2ca03a18da0a8272976 Mon Sep 17 00:00:00 2001
+From: Alexey Starikovskiy <astarikovskiy@suse.de>
+Date: Thu, 16 Oct 2008 02:02:33 +0400
+Subject: ACPI: EC: Check for IBF=0 periodically if not in GPE mode
+
+From: Alexey Starikovskiy <astarikovskiy@suse.de>
+
+commit c0ff17720ec5f42205b3d2ca03a18da0a8272976 upstream.
+
+Signed-off-by: Alexey Starikovskiy <astarikovskiy@suse.de>
+Tested-by: Alan Jenkins <alan-jenkins@tuffmail.co.uk>
+Signed-off-by: Len Brown <len.brown@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/acpi/ec.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -298,6 +298,18 @@ static int ec_check_ibf0(struct acpi_ec
+ return (status & ACPI_EC_FLAG_IBF) == 0;
+ }
+
++static int ec_wait_ibf0(struct acpi_ec *ec)
++{
++ unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
++ /* interrupt wait manually if GPE mode is not active */
++ unsigned long timeout = test_bit(EC_FLAGS_GPE_MODE, &ec->flags) ?
++ msecs_to_jiffies(ACPI_EC_DELAY) : msecs_to_jiffies(1);
++ while (time_before(jiffies, delay))
++ if (wait_event_timeout(ec->wait, ec_check_ibf0(ec), timeout))
++ return 0;
++ return -ETIME;
++}
++
+ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t,
+ int force_poll)
+ {
+@@ -315,8 +327,7 @@ static int acpi_ec_transaction(struct ac
+ goto unlock;
+ }
+ }
+- if (!wait_event_timeout(ec->wait, ec_check_ibf0(ec),
+- msecs_to_jiffies(ACPI_EC_DELAY))) {
++ if (ec_wait_ibf0(ec)) {
+ pr_err(PREFIX "input buffer is not empty, "
+ "aborting transaction\n");
+ status = -ETIME;
--- /dev/null
+From 7c6db4e050601f359081fde418ca6dc4fc2d0011 Mon Sep 17 00:00:00 2001
+From: Alexey Starikovskiy <astarikovskiy@suse.de>
+Date: Thu, 25 Sep 2008 21:00:31 +0400
+Subject: ACPI: EC: do transaction from interrupt context
+
+From: Alexey Starikovskiy <astarikovskiy@suse.de>
+
+commit 7c6db4e050601f359081fde418ca6dc4fc2d0011 upstream.
+
+It is easier and faster to do the transaction directly from interrupt
+context rather than waking a control thread.
+Also, a cleaner GPE storm avoidance scheme is implemented.
+References: http://bugzilla.kernel.org/show_bug.cgi?id=9998
+ http://bugzilla.kernel.org/show_bug.cgi?id=10724
+ http://bugzilla.kernel.org/show_bug.cgi?id=10919
+ http://bugzilla.kernel.org/show_bug.cgi?id=11309
+ http://bugzilla.kernel.org/show_bug.cgi?id=11549
+
+Signed-off-by: Alexey Starikovskiy <astarikovskiy@suse.de>
+Tested-by: Sitsofe Wheeler <sitsofe@yahoo.com>
+Signed-off-by: Len Brown <len.brown@intel.com>
+Cc: Rafael J. Wysocki <rjw@sisk.pl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
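+The storm avoidance added here boils down to the sketch below: every GPE
+that arrives without moving the transaction forward bumps t->irq_count,
+and once that count exceeds ACPI_EC_STORM_THRESHOLD the EC is flagged as
+storming, so later transactions disable the GPE and rely on the poll loop
+instead.  This is illustrative only; start_and_wait() is a stand-in for
+the real write-cmd/wait/poll sequence in acpi_ec_transaction_unlocked()
+below.
+
+static int ec_do_transaction(struct acpi_ec *ec, struct transaction_data *t)
+{
+	int ret;
+
+	/* while a storm is flagged, run the transaction with the GPE off */
+	if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags))
+		acpi_disable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
+
+	ret = start_and_wait(ec, t);	/* GPE handler and/or 1ms poll loop */
+
+	if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags))
+		/* safe to re-enable the GPE once the transaction is over */
+		acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
+	else if (t->irq_count > ACPI_EC_STORM_THRESHOLD)
+		/* too many GPEs that made no progress: poll from now on */
+		set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
+
+	return ret;
+}
+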
+---
+ drivers/acpi/ec.c | 309 ++++++++++++++++++++++++++----------------------------
+ 1 file changed, 149 insertions(+), 160 deletions(-)
+
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1,7 +1,7 @@
+ /*
+- * ec.c - ACPI Embedded Controller Driver (v2.0)
++ * ec.c - ACPI Embedded Controller Driver (v2.1)
+ *
+- * Copyright (C) 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
++ * Copyright (C) 2006-2008 Alexey Starikovskiy <astarikovskiy@suse.de>
+ * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
+ * Copyright (C) 2004 Luming Yu <luming.yu@intel.com>
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+@@ -26,7 +26,7 @@
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+-/* Uncomment next line to get verbose print outs*/
++/* Uncomment next line to get verbose printout */
+ /* #define DEBUG */
+
+ #include <linux/kernel.h>
+@@ -38,6 +38,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/interrupt.h>
+ #include <linux/list.h>
++#include <linux/spinlock.h>
+ #include <asm/io.h>
+ #include <acpi/acpi_bus.h>
+ #include <acpi/acpi_drivers.h>
+@@ -65,22 +66,21 @@ enum ec_command {
+ ACPI_EC_COMMAND_QUERY = 0x84,
+ };
+
+-/* EC events */
+-enum ec_event {
+- ACPI_EC_EVENT_OBF_1 = 1, /* Output buffer full */
+- ACPI_EC_EVENT_IBF_0, /* Input buffer empty */
+-};
+-
+ #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
+ #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
+ #define ACPI_EC_UDELAY 100 /* Wait 100us before polling EC again */
+
++#define ACPI_EC_STORM_THRESHOLD 20 /* number of false interrupts
++ per one transaction */
++
+ enum {
+- EC_FLAGS_WAIT_GPE = 0, /* Don't check status until GPE arrives */
+ EC_FLAGS_QUERY_PENDING, /* Query is pending */
+- EC_FLAGS_GPE_MODE, /* Expect GPE to be sent for status change */
++ EC_FLAGS_GPE_MODE, /* Expect GPE to be sent
++ * for status change */
+ EC_FLAGS_NO_GPE, /* Don't use GPE mode */
+- EC_FLAGS_RESCHEDULE_POLL /* Re-schedule poll */
++ EC_FLAGS_GPE_STORM, /* GPE storm detected */
++ EC_FLAGS_HANDLERS_INSTALLED /* Handlers for GPE and
++ * OpReg are installed */
+ };
+
+ /* If we find an EC via the ECDT, we need to keep a ptr to its context */
+@@ -95,6 +95,14 @@ struct acpi_ec_query_handler {
+ u8 query_bit;
+ };
+
++struct transaction_data {
++ const u8 *wdata;
++ u8 *rdata;
++ unsigned short irq_count;
++ u8 wlen;
++ u8 rlen;
++};
++
+ static struct acpi_ec {
+ acpi_handle handle;
+ unsigned long gpe;
+@@ -105,9 +113,8 @@ static struct acpi_ec {
+ struct mutex lock;
+ wait_queue_head_t wait;
+ struct list_head list;
+- struct delayed_work work;
+- atomic_t irq_count;
+- u8 handlers_installed;
++ struct transaction_data *t;
++ spinlock_t t_lock;
+ } *boot_ec, *first_ec;
+
+ /*
+@@ -150,7 +157,7 @@ static inline u8 acpi_ec_read_data(struc
+ {
+ u8 x = inb(ec->data_addr);
+ pr_debug(PREFIX "---> data = 0x%2.2x\n", x);
+- return inb(ec->data_addr);
++ return x;
+ }
+
+ static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
+@@ -165,68 +172,79 @@ static inline void acpi_ec_write_data(st
+ outb(data, ec->data_addr);
+ }
+
+-static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event)
++static int ec_transaction_done(struct acpi_ec *ec)
+ {
+- if (test_bit(EC_FLAGS_WAIT_GPE, &ec->flags))
+- return 0;
+- if (event == ACPI_EC_EVENT_OBF_1) {
+- if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_OBF)
+- return 1;
+- } else if (event == ACPI_EC_EVENT_IBF_0) {
+- if (!(acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF))
+- return 1;
+- }
+-
+- return 0;
++ unsigned long flags;
++ int ret = 0;
++ spin_lock_irqsave(&ec->t_lock, flags);
++ if (!ec->t || (!ec->t->wlen && !ec->t->rlen))
++ ret = 1;
++ spin_unlock_irqrestore(&ec->t_lock, flags);
++ return ret;
+ }
+
+-static void ec_schedule_ec_poll(struct acpi_ec *ec)
++static void gpe_transaction(struct acpi_ec *ec, u8 status)
+ {
+- if (test_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags))
+- schedule_delayed_work(&ec->work,
+- msecs_to_jiffies(ACPI_EC_DELAY));
++ unsigned long flags;
++ spin_lock_irqsave(&ec->t_lock, flags);
++ if (!ec->t)
++ goto unlock;
++ if (ec->t->wlen > 0) {
++ if ((status & ACPI_EC_FLAG_IBF) == 0) {
++ acpi_ec_write_data(ec, *(ec->t->wdata++));
++ --ec->t->wlen;
++ } else
++ /* false interrupt, state didn't change */
++ ++ec->t->irq_count;
++
++ } else if (ec->t->rlen > 0) {
++ if ((status & ACPI_EC_FLAG_OBF) == 1) {
++ *(ec->t->rdata++) = acpi_ec_read_data(ec);
++ --ec->t->rlen;
++ } else
++ /* false interrupt, state didn't change */
++ ++ec->t->irq_count;
++ }
++unlock:
++ spin_unlock_irqrestore(&ec->t_lock, flags);
+ }
+
+-static void ec_switch_to_poll_mode(struct acpi_ec *ec)
++static int acpi_ec_wait(struct acpi_ec *ec)
+ {
++ if (wait_event_timeout(ec->wait, ec_transaction_done(ec),
++ msecs_to_jiffies(ACPI_EC_DELAY)))
++ return 0;
++ /* missing GPEs, switch back to poll mode */
++ if (printk_ratelimit())
++ pr_info(PREFIX "missing confirmations, "
++ "switch off interrupt mode.\n");
+ set_bit(EC_FLAGS_NO_GPE, &ec->flags);
+ clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
+- acpi_disable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
+- set_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags);
++ return 1;
+ }
+
+-static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
++static void acpi_ec_gpe_query(void *ec_cxt);
++
++static int ec_check_sci(struct acpi_ec *ec, u8 state)
+ {
+- atomic_set(&ec->irq_count, 0);
+- if (likely(test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) &&
+- likely(!force_poll)) {
+- if (wait_event_timeout(ec->wait, acpi_ec_check_status(ec, event),
+- msecs_to_jiffies(ACPI_EC_DELAY)))
+- return 0;
+- clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
+- if (acpi_ec_check_status(ec, event)) {
+- /* missing GPEs, switch back to poll mode */
+- if (printk_ratelimit())
+- pr_info(PREFIX "missing confirmations, "
+- "switch off interrupt mode.\n");
+- ec_switch_to_poll_mode(ec);
+- ec_schedule_ec_poll(ec);
+- return 0;
+- }
+- } else {
+- unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
+- clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
+- while (time_before(jiffies, delay)) {
+- if (acpi_ec_check_status(ec, event))
+- return 0;
+- msleep(1);
+- }
+- if (acpi_ec_check_status(ec,event))
++ if (state & ACPI_EC_FLAG_SCI) {
++ if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
++ return acpi_os_execute(OSL_EC_BURST_HANDLER,
++ acpi_ec_gpe_query, ec);
++ }
++ return 0;
++}
++
++static int ec_poll(struct acpi_ec *ec)
++{
++ unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
++ msleep(1);
++ while (time_before(jiffies, delay)) {
++ gpe_transaction(ec, acpi_ec_read_status(ec));
++ msleep(1);
++ if (ec_transaction_done(ec))
+ return 0;
+ }
+- pr_err(PREFIX "acpi_ec_wait timeout, status = 0x%2.2x, event = %s\n",
+- acpi_ec_read_status(ec),
+- (event == ACPI_EC_EVENT_OBF_1) ? "\"b0=1\"" : "\"b1=0\"");
+ return -ETIME;
+ }
+
+@@ -235,45 +253,51 @@ static int acpi_ec_transaction_unlocked(
+ u8 * rdata, unsigned rdata_len,
+ int force_poll)
+ {
+- int result = 0;
+- set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
++ unsigned long tmp;
++ struct transaction_data t = {.wdata = wdata, .rdata = rdata,
++ .wlen = wdata_len, .rlen = rdata_len,
++ .irq_count = 0};
++ int ret = 0;
+ pr_debug(PREFIX "transaction start\n");
++ /* disable GPE during transaction if storm is detected */
++ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
++ clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
++ acpi_disable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
++ }
++ /* start transaction */
++ spin_lock_irqsave(&ec->t_lock, tmp);
++ /* following two actions should be kept atomic */
++ ec->t = &t;
+ acpi_ec_write_cmd(ec, command);
+- for (; wdata_len > 0; --wdata_len) {
+- result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, force_poll);
+- if (result) {
+- pr_err(PREFIX
+- "write_cmd timeout, command = %d\n", command);
+- goto end;
+- }
+- set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
+- acpi_ec_write_data(ec, *(wdata++));
+- }
+-
+- if (!rdata_len) {
+- result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, force_poll);
+- if (result) {
+- pr_err(PREFIX
+- "finish-write timeout, command = %d\n", command);
+- goto end;
+- }
+- } else if (command == ACPI_EC_COMMAND_QUERY)
++ if (command == ACPI_EC_COMMAND_QUERY)
+ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+-
+- for (; rdata_len > 0; --rdata_len) {
+- result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, force_poll);
+- if (result) {
+- pr_err(PREFIX "read timeout, command = %d\n", command);
+- goto end;
+- }
+- /* Don't expect GPE after last read */
+- if (rdata_len > 1)
+- set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
+- *(rdata++) = acpi_ec_read_data(ec);
+- }
+- end:
++ spin_unlock_irqrestore(&ec->t_lock, tmp);
++ /* if we selected poll mode or failed in GPE-mode do a poll loop */
++ if (force_poll ||
++ !test_bit(EC_FLAGS_GPE_MODE, &ec->flags) ||
++ acpi_ec_wait(ec))
++ ret = ec_poll(ec);
+ pr_debug(PREFIX "transaction end\n");
+- return result;
++ spin_lock_irqsave(&ec->t_lock, tmp);
++ ec->t = NULL;
++ spin_unlock_irqrestore(&ec->t_lock, tmp);
++ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
++ /* check if we received SCI during transaction */
++ ec_check_sci(ec, acpi_ec_read_status(ec));
++ /* it is safe to enable GPE outside of transaction */
++ acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
++ } else if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags) &&
++ t.irq_count > ACPI_EC_STORM_THRESHOLD) {
++ pr_debug(PREFIX "GPE storm detected\n");
++ set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
++ }
++ return ret;
++}
++
++static int ec_check_ibf0(struct acpi_ec *ec)
++{
++ u8 status = acpi_ec_read_status(ec);
++ return (status & ACPI_EC_FLAG_IBF) == 0;
+ }
+
+ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
+@@ -283,40 +307,34 @@ static int acpi_ec_transaction(struct ac
+ {
+ int status;
+ u32 glk;
+-
+ if (!ec || (wdata_len && !wdata) || (rdata_len && !rdata))
+ return -EINVAL;
+-
+ if (rdata)
+ memset(rdata, 0, rdata_len);
+-
+ mutex_lock(&ec->lock);
+ if (ec->global_lock) {
+ status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
+ if (ACPI_FAILURE(status)) {
+- mutex_unlock(&ec->lock);
+- return -ENODEV;
++ status = -ENODEV;
++ goto unlock;
+ }
+ }
+-
+- status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0);
+- if (status) {
++ if (!wait_event_timeout(ec->wait, ec_check_ibf0(ec),
++ msecs_to_jiffies(ACPI_EC_DELAY))) {
+ pr_err(PREFIX "input buffer is not empty, "
+ "aborting transaction\n");
++ status = -ETIME;
+ goto end;
+ }
+-
+ status = acpi_ec_transaction_unlocked(ec, command,
+ wdata, wdata_len,
+ rdata, rdata_len,
+ force_poll);
+-
+- end:
+-
++end:
+ if (ec->global_lock)
+ acpi_release_global_lock(glk);
++unlock:
+ mutex_unlock(&ec->lock);
+-
+ return status;
+ }
+
+@@ -332,7 +350,9 @@ int acpi_ec_burst_enable(struct acpi_ec
+
+ int acpi_ec_burst_disable(struct acpi_ec *ec)
+ {
+- return acpi_ec_transaction(ec, ACPI_EC_BURST_DISABLE, NULL, 0, NULL, 0, 0);
++ return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
++ acpi_ec_transaction(ec, ACPI_EC_BURST_DISABLE,
++ NULL, 0, NULL, 0, 0) : 0;
+ }
+
+ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data)
+@@ -513,46 +533,26 @@ static void acpi_ec_gpe_query(void *ec_c
+
+ static u32 acpi_ec_gpe_handler(void *data)
+ {
+- acpi_status status = AE_OK;
+ struct acpi_ec *ec = data;
+- u8 state = acpi_ec_read_status(ec);
++ u8 status;
+
+ pr_debug(PREFIX "~~~> interrupt\n");
+- atomic_inc(&ec->irq_count);
+- if (atomic_read(&ec->irq_count) > 5) {
+- pr_err(PREFIX "GPE storm detected, disabling EC GPE\n");
+- ec_switch_to_poll_mode(ec);
+- goto end;
+- }
+- clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
+- if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags))
++ status = acpi_ec_read_status(ec);
++
++ gpe_transaction(ec, status);
++ if (ec_transaction_done(ec) && (status & ACPI_EC_FLAG_IBF) == 0)
+ wake_up(&ec->wait);
+
+- if (state & ACPI_EC_FLAG_SCI) {
+- if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
+- status = acpi_os_execute(OSL_EC_BURST_HANDLER,
+- acpi_ec_gpe_query, ec);
+- } else if (!test_bit(EC_FLAGS_GPE_MODE, &ec->flags) &&
+- !test_bit(EC_FLAGS_NO_GPE, &ec->flags) &&
+- in_interrupt()) {
++ ec_check_sci(ec, status);
++ if (!test_bit(EC_FLAGS_GPE_MODE, &ec->flags) &&
++ !test_bit(EC_FLAGS_NO_GPE, &ec->flags)) {
+ /* this is non-query, must be confirmation */
+ if (printk_ratelimit())
+ pr_info(PREFIX "non-query interrupt received,"
+ " switching to interrupt mode\n");
+ set_bit(EC_FLAGS_GPE_MODE, &ec->flags);
+- clear_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags);
+ }
+-end:
+- ec_schedule_ec_poll(ec);
+- return ACPI_SUCCESS(status) ?
+- ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED;
+-}
+-
+-static void do_ec_poll(struct work_struct *work)
+-{
+- struct acpi_ec *ec = container_of(work, struct acpi_ec, work.work);
+- atomic_set(&ec->irq_count, 0);
+- (void)acpi_ec_gpe_handler(ec);
++ return ACPI_INTERRUPT_HANDLED;
+ }
+
+ /* --------------------------------------------------------------------------
+@@ -696,8 +696,7 @@ static struct acpi_ec *make_acpi_ec(void
+ mutex_init(&ec->lock);
+ init_waitqueue_head(&ec->wait);
+ INIT_LIST_HEAD(&ec->list);
+- INIT_DELAYED_WORK_DEFERRABLE(&ec->work, do_ec_poll);
+- atomic_set(&ec->irq_count, 0);
++ spin_lock_init(&ec->t_lock);
+ return ec;
+ }
+
+@@ -736,22 +735,15 @@ ec_parse_device(acpi_handle handle, u32
+ return AE_CTRL_TERMINATE;
+ }
+
+-static void ec_poll_stop(struct acpi_ec *ec)
+-{
+- clear_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags);
+- cancel_delayed_work(&ec->work);
+-}
+-
+ static void ec_remove_handlers(struct acpi_ec *ec)
+ {
+- ec_poll_stop(ec);
+ if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
+ ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
+ pr_err(PREFIX "failed to remove space handler\n");
+ if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
+ &acpi_ec_gpe_handler)))
+ pr_err(PREFIX "failed to remove gpe handler\n");
+- ec->handlers_installed = 0;
++ clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
+ }
+
+ static int acpi_ec_add(struct acpi_device *device)
+@@ -846,17 +838,15 @@ ec_parse_io_ports(struct acpi_resource *
+ static int ec_install_handlers(struct acpi_ec *ec)
+ {
+ acpi_status status;
+- if (ec->handlers_installed)
++ if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
+ return 0;
+ status = acpi_install_gpe_handler(NULL, ec->gpe,
+- ACPI_GPE_EDGE_TRIGGERED,
+- &acpi_ec_gpe_handler, ec);
++ ACPI_GPE_EDGE_TRIGGERED,
++ &acpi_ec_gpe_handler, ec);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+-
+ acpi_set_gpe_type(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
+ acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
+-
+ status = acpi_install_address_space_handler(ec->handle,
+ ACPI_ADR_SPACE_EC,
+ &acpi_ec_space_handler,
+@@ -866,7 +856,7 @@ static int ec_install_handlers(struct ac
+ return -ENODEV;
+ }
+
+- ec->handlers_installed = 1;
++ set_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
+ return 0;
+ }
+
+@@ -887,7 +877,6 @@ static int acpi_ec_start(struct acpi_dev
+
+ /* EC is fully operational, allow queries */
+ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+- ec_schedule_ec_poll(ec);
+ return ret;
+ }
+
+@@ -906,7 +895,7 @@ static int acpi_ec_stop(struct acpi_devi
+
+ int __init acpi_boot_ec_enable(void)
+ {
+- if (!boot_ec || boot_ec->handlers_installed)
++ if (!boot_ec || test_bit(EC_FLAGS_HANDLERS_INSTALLED, &boot_ec->flags))
+ return 0;
+ if (!ec_install_handlers(boot_ec)) {
+ first_ec = boot_ec;
--- /dev/null
+From 8463200a00fe2aea938b40173198a0983f2929ef Mon Sep 17 00:00:00 2001
+From: Alexey Starikovskiy <astarikovskiy@suse.de>
+Date: Fri, 26 Sep 2008 00:54:28 +0400
+Subject: ACPI: EC: Rename some variables
+
+From: Alexey Starikovskiy <astarikovskiy@suse.de>
+
+commit 8463200a00fe2aea938b40173198a0983f2929ef upstream
+(needed by the next patch)
+
+No functional changes.
+
+Signed-off-by: Alexey Starikovskiy <astarikovskiy@suse.de>
+Acked-by: Rafael J. Wysocki <rjw@suse.com>
+Signed-off-by: Len Brown <len.brown@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/acpi/ec.c | 118 ++++++++++++++++++++++++++++--------------------------
+ 1 file changed, 63 insertions(+), 55 deletions(-)
+
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -95,10 +95,11 @@ struct acpi_ec_query_handler {
+ u8 query_bit;
+ };
+
+-struct transaction_data {
++struct transaction {
+ const u8 *wdata;
+ u8 *rdata;
+ unsigned short irq_count;
++ u8 command;
+ u8 wlen;
+ u8 rlen;
+ };
+@@ -113,8 +114,8 @@ static struct acpi_ec {
+ struct mutex lock;
+ wait_queue_head_t wait;
+ struct list_head list;
+- struct transaction_data *t;
+- spinlock_t t_lock;
++ struct transaction *curr;
++ spinlock_t curr_lock;
+ } *boot_ec, *first_ec;
+
+ /*
+@@ -176,37 +177,37 @@ static int ec_transaction_done(struct ac
+ {
+ unsigned long flags;
+ int ret = 0;
+- spin_lock_irqsave(&ec->t_lock, flags);
+- if (!ec->t || (!ec->t->wlen && !ec->t->rlen))
++ spin_lock_irqsave(&ec->curr_lock, flags);
++ if (!ec->curr || (!ec->curr->wlen && !ec->curr->rlen))
+ ret = 1;
+- spin_unlock_irqrestore(&ec->t_lock, flags);
++ spin_unlock_irqrestore(&ec->curr_lock, flags);
+ return ret;
+ }
+
+ static void gpe_transaction(struct acpi_ec *ec, u8 status)
+ {
+ unsigned long flags;
+- spin_lock_irqsave(&ec->t_lock, flags);
+- if (!ec->t)
++ spin_lock_irqsave(&ec->curr_lock, flags);
++ if (!ec->curr)
+ goto unlock;
+- if (ec->t->wlen > 0) {
++ if (ec->curr->wlen > 0) {
+ if ((status & ACPI_EC_FLAG_IBF) == 0) {
+- acpi_ec_write_data(ec, *(ec->t->wdata++));
+- --ec->t->wlen;
++ acpi_ec_write_data(ec, *(ec->curr->wdata++));
++ --ec->curr->wlen;
+ } else
+ /* false interrupt, state didn't change */
+- ++ec->t->irq_count;
++ ++ec->curr->irq_count;
+
+- } else if (ec->t->rlen > 0) {
++ } else if (ec->curr->rlen > 0) {
+ if ((status & ACPI_EC_FLAG_OBF) == 1) {
+- *(ec->t->rdata++) = acpi_ec_read_data(ec);
+- --ec->t->rlen;
++ *(ec->curr->rdata++) = acpi_ec_read_data(ec);
++ --ec->curr->rlen;
+ } else
+ /* false interrupt, state didn't change */
+- ++ec->t->irq_count;
++ ++ec->curr->irq_count;
+ }
+ unlock:
+- spin_unlock_irqrestore(&ec->t_lock, flags);
++ spin_unlock_irqrestore(&ec->curr_lock, flags);
+ }
+
+ static int acpi_ec_wait(struct acpi_ec *ec)
+@@ -248,15 +249,11 @@ static int ec_poll(struct acpi_ec *ec)
+ return -ETIME;
+ }
+
+-static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
+- const u8 * wdata, unsigned wdata_len,
+- u8 * rdata, unsigned rdata_len,
++static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
++ struct transaction *t,
+ int force_poll)
+ {
+ unsigned long tmp;
+- struct transaction_data t = {.wdata = wdata, .rdata = rdata,
+- .wlen = wdata_len, .rlen = rdata_len,
+- .irq_count = 0};
+ int ret = 0;
+ pr_debug(PREFIX "transaction start\n");
+ /* disable GPE during transaction if storm is detected */
+@@ -265,29 +262,30 @@ static int acpi_ec_transaction_unlocked(
+ acpi_disable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
+ }
+ /* start transaction */
+- spin_lock_irqsave(&ec->t_lock, tmp);
++ spin_lock_irqsave(&ec->curr_lock, tmp);
+ /* following two actions should be kept atomic */
+- ec->t = &t;
+- acpi_ec_write_cmd(ec, command);
+- if (command == ACPI_EC_COMMAND_QUERY)
++ t->irq_count = 0;
++ ec->curr = t;
++ acpi_ec_write_cmd(ec, ec->curr->command);
++ if (ec->curr->command == ACPI_EC_COMMAND_QUERY)
+ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+- spin_unlock_irqrestore(&ec->t_lock, tmp);
++ spin_unlock_irqrestore(&ec->curr_lock, tmp);
+ /* if we selected poll mode or failed in GPE-mode do a poll loop */
+ if (force_poll ||
+ !test_bit(EC_FLAGS_GPE_MODE, &ec->flags) ||
+ acpi_ec_wait(ec))
+ ret = ec_poll(ec);
+ pr_debug(PREFIX "transaction end\n");
+- spin_lock_irqsave(&ec->t_lock, tmp);
+- ec->t = NULL;
+- spin_unlock_irqrestore(&ec->t_lock, tmp);
++ spin_lock_irqsave(&ec->curr_lock, tmp);
++ ec->curr = NULL;
++ spin_unlock_irqrestore(&ec->curr_lock, tmp);
+ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+ /* check if we received SCI during transaction */
+ ec_check_sci(ec, acpi_ec_read_status(ec));
+ /* it is safe to enable GPE outside of transaction */
+ acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
+ } else if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags) &&
+- t.irq_count > ACPI_EC_STORM_THRESHOLD) {
++ t->irq_count > ACPI_EC_STORM_THRESHOLD) {
+ pr_debug(PREFIX "GPE storm detected\n");
+ set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
+ }
+@@ -300,17 +298,15 @@ static int ec_check_ibf0(struct acpi_ec
+ return (status & ACPI_EC_FLAG_IBF) == 0;
+ }
+
+-static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
+- const u8 * wdata, unsigned wdata_len,
+- u8 * rdata, unsigned rdata_len,
++static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t,
+ int force_poll)
+ {
+ int status;
+ u32 glk;
+- if (!ec || (wdata_len && !wdata) || (rdata_len && !rdata))
++ if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
+ return -EINVAL;
+- if (rdata)
+- memset(rdata, 0, rdata_len);
++ if (t->rdata)
++ memset(t->rdata, 0, t->rlen);
+ mutex_lock(&ec->lock);
+ if (ec->global_lock) {
+ status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
+@@ -326,10 +322,7 @@ static int acpi_ec_transaction(struct ac
+ status = -ETIME;
+ goto end;
+ }
+- status = acpi_ec_transaction_unlocked(ec, command,
+- wdata, wdata_len,
+- rdata, rdata_len,
+- force_poll);
++ status = acpi_ec_transaction_unlocked(ec, t, force_poll);
+ end:
+ if (ec->global_lock)
+ acpi_release_global_lock(glk);
+@@ -345,23 +338,32 @@ unlock:
+ int acpi_ec_burst_enable(struct acpi_ec *ec)
+ {
+ u8 d;
+- return acpi_ec_transaction(ec, ACPI_EC_BURST_ENABLE, NULL, 0, &d, 1, 0);
++ struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
++ .wdata = NULL, .rdata = &d,
++ .wlen = 0, .rlen = 1};
++
++ return acpi_ec_transaction(ec, &t, 0);
+ }
+
+ int acpi_ec_burst_disable(struct acpi_ec *ec)
+ {
++ struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
++ .wdata = NULL, .rdata = NULL,
++ .wlen = 0, .rlen = 0};
++
+ return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
+- acpi_ec_transaction(ec, ACPI_EC_BURST_DISABLE,
+- NULL, 0, NULL, 0, 0) : 0;
++ acpi_ec_transaction(ec, &t, 0) : 0;
+ }
+
+ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data)
+ {
+ int result;
+ u8 d;
++ struct transaction t = {.command = ACPI_EC_COMMAND_READ,
++ .wdata = &address, .rdata = &d,
++ .wlen = 1, .rlen = 1};
+
+- result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_READ,
+- &address, 1, &d, 1, 0);
++ result = acpi_ec_transaction(ec, &t, 0);
+ *data = d;
+ return result;
+ }
+@@ -369,8 +371,11 @@ static int acpi_ec_read(struct acpi_ec *
+ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
+ {
+ u8 wdata[2] = { address, data };
+- return acpi_ec_transaction(ec, ACPI_EC_COMMAND_WRITE,
+- wdata, 2, NULL, 0, 0);
++ struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
++ .wdata = wdata, .rdata = NULL,
++ .wlen = 2, .rlen = 0};
++
++ return acpi_ec_transaction(ec, &t, 0);
+ }
+
+ /*
+@@ -432,12 +437,13 @@ int ec_transaction(u8 command,
+ u8 * rdata, unsigned rdata_len,
+ int force_poll)
+ {
++ struct transaction t = {.command = command,
++ .wdata = wdata, .rdata = rdata,
++ .wlen = wdata_len, .rlen = rdata_len};
+ if (!first_ec)
+ return -ENODEV;
+
+- return acpi_ec_transaction(first_ec, command, wdata,
+- wdata_len, rdata, rdata_len,
+- force_poll);
++ return acpi_ec_transaction(first_ec, &t, force_poll);
+ }
+
+ EXPORT_SYMBOL(ec_transaction);
+@@ -446,7 +452,9 @@ static int acpi_ec_query(struct acpi_ec
+ {
+ int result;
+ u8 d;
+-
++ struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
++ .wdata = NULL, .rdata = &d,
++ .wlen = 0, .rlen = 1};
+ if (!ec || !data)
+ return -EINVAL;
+
+@@ -456,7 +464,7 @@ static int acpi_ec_query(struct acpi_ec
+ * bit to be cleared (and thus clearing the interrupt source).
+ */
+
+- result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_QUERY, NULL, 0, &d, 1, 0);
++ result = acpi_ec_transaction(ec, &t, 0);
+ if (result)
+ return result;
+
+@@ -696,7 +704,7 @@ static struct acpi_ec *make_acpi_ec(void
+ mutex_init(&ec->lock);
+ init_waitqueue_head(&ec->wait);
+ INIT_LIST_HEAD(&ec->list);
+- spin_lock_init(&ec->t_lock);
++ spin_lock_init(&ec->curr_lock);
+ return ec;
+ }
+
--- /dev/null
+From cebbert@redhat.com Tue Nov 4 14:29:45 2008
+From: Bjorn Helgaas <bjorn.helgaas@hp.com>
+Date: Sun, 26 Oct 2008 18:56:04 -0400
+Subject: rtc-cmos: look for PNP RTC first, then for platform RTC
+To: stable@kernel.org
+Cc: David Brownell <dbrownell@users.sourceforge.net>, Bjorn Helgaas <bjorn.helgaas@hp.com>
+Message-ID: <20081026185604.7c631e52@redhat.com>
+
+
+From: Bjorn Helgaas <bjorn.helgaas@hp.com>
+
+commit 72f22b1eb6ca5e4676a632a04d40d46cb61d4562 upstream
+
+rtc-cmos: look for PNP RTC first, then for platform RTC
+
+We shouldn't rely on "pnp_platform_devices" to tell us whether there
+is a PNP RTC device.
+
+I introduced "pnp_platform_devices", but I think it was a mistake.
+All it tells us is whether we found any PNPBIOS or PNPACPI devices.
+Many machines have some PNP devices, but do not describe the RTC
+via PNP. On those machines, we need to do the platform driver probe
+to find the RTC.
+
+We should just register the PNP driver and see whether it claims anything.
+If we don't find a PNP RTC, fall back to the platform driver probe.
+
+This (in conjunction with the arch/x86/kernel/rtc.c patch to add
+a platform RTC device when PNP doesn't have one) should resolve
+these issues:
+
+ http://bugzilla.kernel.org/show_bug.cgi?id=11580
+ https://bugzilla.redhat.com/show_bug.cgi?id=451188
+
+Signed-off-by: Bjorn Helgaas <bjorn.helgaas@hp.com>
+Acked-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: David Brownell <dbrownell@users.sourceforge.net>
+Reported-by: Rik Theys <rik.theys@esat.kuleuven.be>
+Reported-by: shr_msn@yahoo.com.tw
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Chuck Ebbert <cebbert@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
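+The new probe order is a two-step fallback.  The restatement below is the
+cmos_init() hunk from this patch with comments spelling out each step; it
+assumes cmos_rtc.dev is set by the probe routine once either path binds
+the device.
+
+static int __init cmos_init(void)
+{
+	int retval = 0;
+
+#ifdef CONFIG_PNP
+	/* 1) always register the PNP driver and let it claim a PNP RTC */
+	pnp_register_driver(&cmos_pnp_driver);
+#endif
+	/* 2) nothing was claimed -> fall back to the platform device probe */
+	if (!cmos_rtc.dev)
+		retval = platform_driver_probe(&cmos_platform_driver,
+					       cmos_platform_probe);
+	if (retval == 0)
+		return 0;
+
+#ifdef CONFIG_PNP
+	/* neither path found an RTC: undo the PNP registration */
+	pnp_unregister_driver(&cmos_pnp_driver);
+#endif
+	return retval;
+}
+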
+---
+ drivers/rtc/rtc-cmos.c | 33 ++++++++++++++++++---------------
+ 1 file changed, 18 insertions(+), 15 deletions(-)
+
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -1030,29 +1030,32 @@ static struct platform_driver cmos_platf
+
+ static int __init cmos_init(void)
+ {
++ int retval = 0;
++
++#ifdef CONFIG_PNP
++ pnp_register_driver(&cmos_pnp_driver);
++#endif
++
++ if (!cmos_rtc.dev)
++ retval = platform_driver_probe(&cmos_platform_driver,
++ cmos_platform_probe);
++
++ if (retval == 0)
++ return 0;
++
+ #ifdef CONFIG_PNP
+- if (pnp_platform_devices)
+- return pnp_register_driver(&cmos_pnp_driver);
+- else
+- return platform_driver_probe(&cmos_platform_driver,
+- cmos_platform_probe);
+-#else
+- return platform_driver_probe(&cmos_platform_driver,
+- cmos_platform_probe);
+-#endif /* CONFIG_PNP */
++ pnp_unregister_driver(&cmos_pnp_driver);
++#endif
++ return retval;
+ }
+ module_init(cmos_init);
+
+ static void __exit cmos_exit(void)
+ {
+ #ifdef CONFIG_PNP
+- if (pnp_platform_devices)
+- pnp_unregister_driver(&cmos_pnp_driver);
+- else
+- platform_driver_unregister(&cmos_platform_driver);
+-#else
++ pnp_unregister_driver(&cmos_pnp_driver);
++#endif
+ platform_driver_unregister(&cmos_platform_driver);
+-#endif /* CONFIG_PNP */
+ }
+ module_exit(cmos_exit);
+
--- /dev/null
+From 3c324283e6cdb79210cf7975c3e40d3ba3e672b2 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 3 Nov 2008 12:37:49 +0900
+Subject: sata_nv: fix generic, nf2/3 detection regression
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 3c324283e6cdb79210cf7975c3e40d3ba3e672b2 upstream
+
+All three flavors of sata_nv's are different in how their hardreset
+behaves.
+
+* generic: Hardreset is not reliable. Link often doesn't come online
+ after hardreset.
+
+* nf2/3: A little bit better - link comes online with longer debounce
+ timing. However, nf2/3 can't reliable wait for the first D2H
+  timing. However, nf2/3 can't reliably wait for the first D2H
+ device after hardreset. Follow-up SRST required.
+
+* ck804: Hardreset finally works.
+
+The core-layer change to prefer hardreset and the follow-up changes
+exposed the above issues and caused various detection regressions for
+all three flavors. This patch, hopefully, fixes all the known issues
+and should make sata_nv error handling more reliable.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
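+The key piece for nf2/3 is the new hardreset; the annotated copy below
+just spells out the reasoning from the changelog in comments (hotplug
+debounce timing so the link comes online, -EAGAIN to request the
+follow-up SRST because nf2/3 cannot classify the device itself).
+
+static int nv_nf2_hardreset(struct ata_link *link, unsigned int *class,
+			    unsigned long deadline)
+{
+	bool online;
+	int rc;
+
+	/* nf2/3 needs the longer hotplug debounce timing to come online */
+	rc = sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
+				 &online, NULL);
+
+	/* the link is up, but the first D2H Register FIS can't be waited
+	 * for here, so return -EAGAIN to ask EH for a follow-up SRST to
+	 * classify the device */
+	return online ? -EAGAIN : rc;
+}
+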
+---
+ drivers/ata/sata_nv.c | 53 +++++++++++++++++++++++---------------------------
+ 1 file changed, 25 insertions(+), 28 deletions(-)
+
+--- a/drivers/ata/sata_nv.c
++++ b/drivers/ata/sata_nv.c
+@@ -307,10 +307,10 @@ static int nv_scr_write(struct ata_port
+
+ static void nv_nf2_freeze(struct ata_port *ap);
+ static void nv_nf2_thaw(struct ata_port *ap);
++static int nv_nf2_hardreset(struct ata_link *link, unsigned int *class,
++ unsigned long deadline);
+ static void nv_ck804_freeze(struct ata_port *ap);
+ static void nv_ck804_thaw(struct ata_port *ap);
+-static int nv_hardreset(struct ata_link *link, unsigned int *class,
+- unsigned long deadline);
+ static int nv_adma_slave_config(struct scsi_device *sdev);
+ static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
+ static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
+@@ -405,17 +405,8 @@ static struct scsi_host_template nv_swnc
+ .slave_configure = nv_swncq_slave_config,
+ };
+
+-/* OSDL bz3352 reports that some nv controllers can't determine device
+- * signature reliably and nv_hardreset is implemented to work around
+- * the problem. This was reported on nf3 and it's unclear whether any
+- * other controllers are affected. However, the workaround has been
+- * applied to all variants and there isn't much to gain by trying to
+- * find out exactly which ones are affected at this point especially
+- * because NV has moved over to ahci for newer controllers.
+- */
+ static struct ata_port_operations nv_common_ops = {
+ .inherits = &ata_bmdma_port_ops,
+- .hardreset = nv_hardreset,
+ .scr_read = nv_scr_read,
+ .scr_write = nv_scr_write,
+ };
+@@ -429,12 +420,22 @@ static struct ata_port_operations nv_gen
+ .hardreset = ATA_OP_NULL,
+ };
+
++/* OSDL bz3352 reports that nf2/3 controllers can't determine device
++ * signature reliably. Also, the following thread reports detection
++ * failure on cold boot with the standard debouncing timing.
++ *
++ * http://thread.gmane.org/gmane.linux.ide/34098
++ *
++ * Debounce with hotplug timing and request follow-up SRST.
++ */
+ static struct ata_port_operations nv_nf2_ops = {
+ .inherits = &nv_common_ops,
+ .freeze = nv_nf2_freeze,
+ .thaw = nv_nf2_thaw,
++ .hardreset = nv_nf2_hardreset,
+ };
+
++/* CK804 finally gets hardreset right */
+ static struct ata_port_operations nv_ck804_ops = {
+ .inherits = &nv_common_ops,
+ .freeze = nv_ck804_freeze,
+@@ -443,7 +444,7 @@ static struct ata_port_operations nv_ck8
+ };
+
+ static struct ata_port_operations nv_adma_ops = {
+- .inherits = &nv_common_ops,
++ .inherits = &nv_ck804_ops,
+
+ .check_atapi_dma = nv_adma_check_atapi_dma,
+ .sff_tf_read = nv_adma_tf_read,
+@@ -467,7 +468,7 @@ static struct ata_port_operations nv_adm
+ };
+
+ static struct ata_port_operations nv_swncq_ops = {
+- .inherits = &nv_common_ops,
++ .inherits = &nv_generic_ops,
+
+ .qc_defer = ata_std_qc_defer,
+ .qc_prep = nv_swncq_qc_prep,
+@@ -1553,6 +1554,17 @@ static void nv_nf2_thaw(struct ata_port
+ iowrite8(mask, scr_addr + NV_INT_ENABLE);
+ }
+
++static int nv_nf2_hardreset(struct ata_link *link, unsigned int *class,
++ unsigned long deadline)
++{
++ bool online;
++ int rc;
++
++ rc = sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
++ &online, NULL);
++ return online ? -EAGAIN : rc;
++}
++
+ static void nv_ck804_freeze(struct ata_port *ap)
+ {
+ void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
+@@ -1605,21 +1617,6 @@ static void nv_mcp55_thaw(struct ata_por
+ ata_sff_thaw(ap);
+ }
+
+-static int nv_hardreset(struct ata_link *link, unsigned int *class,
+- unsigned long deadline)
+-{
+- int rc;
+-
+- /* SATA hardreset fails to retrieve proper device signature on
+- * some controllers. Request follow up SRST. For more info,
+- * see http://bugzilla.kernel.org/show_bug.cgi?id=3352
+- */
+- rc = sata_sff_hardreset(link, class, deadline);
+- if (rc)
+- return rc;
+- return -EAGAIN;
+-}
+-
+ static void nv_adma_error_handler(struct ata_port *ap)
+ {
+ struct nv_adma_port_priv *pp = ap->private_data;
--- /dev/null
+From cadef677e4a9b9c1d069675043767df486782986 Mon Sep 17 00:00:00 2001
+From: Mikael Pettersson <mikpe@it.uu.se>
+Date: Fri, 31 Oct 2008 08:03:55 +0100
+Subject: sata_promise: add ATA engine reset to reset ops
+
+From: Mikael Pettersson <mikpe@it.uu.se>
+
+commit cadef677e4a9b9c1d069675043767df486782986 upstream
+
+Promise ATA engines need to be reset when errors occur.
+That's currently done for errors detected by sata_promise itself,
+but it's not done for errors like timeouts detected outside of
+the low-level driver.
+
+The effect of this omission is that a timeout tends to result
+in a sequence of failed COMRESETs after which libata EH gives
+up and disables the port. At that point the port's ATA engine
+hangs and even reloading the driver will not resume it.
+
+To fix this, make sata_promise override ->hardreset on SATA
+ports with code which calls pdc_reset_port() on the port in
+question before calling libata's hardreset. PATA ports don't
+use ->hardreset, so for those we override ->softreset instead.
+
+Signed-off-by: Mikael Pettersson <mikpe@it.uu.se>
+Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/ata/sata_promise.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+--- a/drivers/ata/sata_promise.c
++++ b/drivers/ata/sata_promise.c
+@@ -153,6 +153,10 @@ static void pdc_freeze(struct ata_port *
+ static void pdc_sata_freeze(struct ata_port *ap);
+ static void pdc_thaw(struct ata_port *ap);
+ static void pdc_sata_thaw(struct ata_port *ap);
++static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
++ unsigned long deadline);
++static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
++ unsigned long deadline);
+ static void pdc_error_handler(struct ata_port *ap);
+ static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
+ static int pdc_pata_cable_detect(struct ata_port *ap);
+@@ -186,6 +190,7 @@ static struct ata_port_operations pdc_sa
+ .scr_read = pdc_sata_scr_read,
+ .scr_write = pdc_sata_scr_write,
+ .port_start = pdc_sata_port_start,
++ .hardreset = pdc_sata_hardreset,
+ };
+
+ /* First-generation chips need a more restrictive ->check_atapi_dma op */
+@@ -200,6 +205,7 @@ static struct ata_port_operations pdc_pa
+ .freeze = pdc_freeze,
+ .thaw = pdc_thaw,
+ .port_start = pdc_common_port_start,
++ .softreset = pdc_pata_softreset,
+ };
+
+ static const struct ata_port_info pdc_port_info[] = {
+@@ -691,6 +697,20 @@ static void pdc_sata_thaw(struct ata_por
+ readl(host_mmio + hotplug_offset); /* flush */
+ }
+
++static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
++ unsigned long deadline)
++{
++ pdc_reset_port(link->ap);
++ return ata_sff_softreset(link, class, deadline);
++}
++
++static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
++ unsigned long deadline)
++{
++ pdc_reset_port(link->ap);
++ return sata_sff_hardreset(link, class, deadline);
++}
++
+ static void pdc_error_handler(struct ata_port *ap)
+ {
+ if (!(ap->pflags & ATA_PFLAG_FROZEN))
--- /dev/null
+From cebbert@redhat.com Tue Nov 4 14:25:46 2008
+From: Ingo Molnar <mingo@elte.hu>
+Date: Sun, 26 Oct 2008 18:21:40 -0400
+Subject: sched: disable the hrtick for now
+To: stable@kernel.org
+Cc: Peter Zijlstra <peterz@infradead.org>, Ingo Molnar <mingo@elte.hu>
+Message-ID: <20081026182140.371373c0@redhat.com>
+
+
+From: Ingo Molnar <mingo@elte.hu>
+
+commit 0c4b83da58ec2e96ce9c44c211d6eac5f9dae478 upstream
+
+sched: disable the hrtick for now
+
+David Miller reported that hrtick update overhead has tripled the
+wakeup overhead on Sparc64.
+
+That is too much - disable the HRTICK feature for now by default,
+until a faster implementation is found.
+
+Reported-by: David Miller <davem@davemloft.net>
+Acked-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Cc: Chuck Ebbert <cebbert@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
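+For context, sched_features.h is an x-macro list; kernel/sched.c expands
+it roughly as below (simplified), so flipping the second argument to 0
+only drops HRTICK from the default feature mask.  With CONFIG_SCHED_DEBUG
+the feature can still be re-enabled at runtime through
+/sys/kernel/debug/sched_features.
+
+/* simplified view of how kernel/sched.c consumes sched_features.h */
+#define SCHED_FEAT(name, enabled)	__SCHED_FEAT_##name,
+enum {
+#include "sched_features.h"
+};
+#undef SCHED_FEAT
+
+/* each entry contributes (1UL << bit) * enabled to the default mask */
+#define SCHED_FEAT(name, enabled)	(1UL << __SCHED_FEAT_##name) * enabled |
+const_debug unsigned int sysctl_sched_features =
+#include "sched_features.h"
+	0;
+#undef SCHED_FEAT
+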
+---
+ kernel/sched_features.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/sched_features.h
++++ b/kernel/sched_features.h
+@@ -5,7 +5,7 @@ SCHED_FEAT(START_DEBIT, 1)
+ SCHED_FEAT(AFFINE_WAKEUPS, 1)
+ SCHED_FEAT(CACHE_HOT_BUDDY, 1)
+ SCHED_FEAT(SYNC_WAKEUPS, 1)
+-SCHED_FEAT(HRTICK, 1)
++SCHED_FEAT(HRTICK, 0)
+ SCHED_FEAT(DOUBLE_TICK, 0)
+ SCHED_FEAT(ASYM_GRAN, 1)
+ SCHED_FEAT(LB_BIAS, 1)
--- /dev/null
+From cebbert@redhat.com Tue Nov 4 14:26:49 2008
+From: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
+Date: Sun, 26 Oct 2008 18:20:14 -0400
+Subject: sched_clock: prevent scd->clock from moving backwards
+To: stable@kernel.org
+Cc: Peter Zijlstra <peterz@infradead.org>, Ingo Molnar <mingo@elte.hu>
+Message-ID: <20081026182014.3706d944@redhat.com>
+
+From: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
+
+commit 5b7dba4ff834259a5623e03a565748704a8fe449 upstream
+
+sched_clock: prevent scd->clock from moving backwards
+
+When sched_clock_cpu() couples the clocks between two cpus, it may
+increment scd->clock beyond the GTOD tick window that __update_sched_clock()
+uses to clamp the clock. A later call to __update_sched_clock() may move
+the clock back to scd->tick_gtod + TICK_NSEC, violating the clock's
+monotonic property.
+
+This patch ensures that scd->clock will not be set backward.
+
+Signed-off-by: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
+Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Cc: Chuck Ebbert <cebbert@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
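+A worked example with made-up numbers shows the hole being closed:
+
+/*
+ * Illustration (values in ns, numbers invented):
+ *
+ *   scd->tick_gtod = 1000, TICK_NSEC = 1000
+ *   a remote-cpu coupling already pushed scd->clock to 2500
+ *
+ * old clamp: max_clock = tick_gtod + TICK_NSEC = 2000
+ *            -> __update_sched_clock() can return 2000, i.e. less than
+ *               the 2500 already stored: the clock moves backwards
+ *
+ * new clamp: max_clock = max(scd->clock, tick_gtod + TICK_NSEC) = 2500
+ *            -> the result can never drop below the last stored value
+ */
+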
+---
+ kernel/sched_clock.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/kernel/sched_clock.c
++++ b/kernel/sched_clock.c
+@@ -118,13 +118,13 @@ static u64 __update_sched_clock(struct s
+
+ /*
+ * scd->clock = clamp(scd->tick_gtod + delta,
+- * max(scd->tick_gtod, scd->clock),
+- * scd->tick_gtod + TICK_NSEC);
++ * max(scd->tick_gtod, scd->clock),
++ * max(scd->clock, scd->tick_gtod + TICK_NSEC));
+ */
+
+ clock = scd->tick_gtod + delta;
+ min_clock = wrap_max(scd->tick_gtod, scd->clock);
+- max_clock = scd->tick_gtod + TICK_NSEC;
++ max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC);
+
+ clock = wrap_max(clock, min_clock);
+ clock = wrap_min(clock, max_clock);
dvb-s5h1411-perform-s5h1411-soft-reset-after-tuning.patch
dvb-s5h1411-power-down-s5h1411-when-not-in-use.patch
pci-fix-64-vbit-prefetchable-memory-resource-bars.patch
+sched-disable-the-hrtick-for-now.patch
+sched_clock-prevent-scd-clock-from-moving-backwards.patch
+x86-avoid-dereferencing-beyond-stack-thread_size.patch
+rtc-cmos-look-for-pnp-rtc-first-then-for-platform-rtc.patch
+usb-storage-avoid-i-o-errors-when-issuing-scsi-ioctls-to-jmicron-usb-ata-bridge.patch
+x86-register-a-platform-rtc-device-if-pnp-doesn-t-describe-it.patch
+sata_promise-add-ata-engine-reset-to-reset-ops.patch
+sata_nv-fix-generic-nf2-3-detection-regression.patch
+acpi-ec-do-transaction-from-interrupt-context.patch
+acpi-ec-rename-some-variables.patch
+acpi-ec-check-for-ibf-0-periodically-if-not-in-gpe-mode.patch
--- /dev/null
+From cebbert@redhat.com Tue Nov 4 14:30:30 2008
+From: Phil Dibowitz <phil@ipom.com>
+Date: Sun, 26 Oct 2008 18:25:10 -0400
+Subject: USB: storage: Avoid I/O errors when issuing SCSI ioctls to JMicron USB/ATA bridge
+To: stable@kernel.org
+Message-ID: <20081026182510.5a42b391@redhat.com>
+
+From: Phil Dibowitz <phil@ipom.com>
+
+commit 3030ca4cf4abbdd2dd850a14d20e9fca5937ffb5 upstream
+
+USB: storage: Avoid I/O errors when issuing SCSI ioctls to JMicron USB/ATA bridge
+
+Here's the patch that implements the fix you suggested to avoid the
+I/O errors I was running into with my new USB enclosure with a
+JMicron USB/ATA bridge when issuing scsi-io USN or other such
+queries used by Fedora's mkinitrd.
+http://bugzilla.kernel.org/show_bug.cgi?id=9638#c85
+
+
+/proc/bus/usb/devices:
+T: Bus=01 Lev=01 Prnt=01 Port=07 Cnt=04 Dev#= 5 Spd=480 MxCh= 0
+D: Ver= 2.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs= 1
+P: Vendor=152d ProdID=2329 Rev= 1.00
+S: Manufacturer=JMicron
+S: Product=USB to ATA/ATAPI Bridge
+S: SerialNumber=DE5088854FFF
+C:* #Ifs= 1 Cfg#= 1 Atr=c0 MxPwr= 2mA
+I:* If#= 0 Alt= 0 #EPs= 2 Cls=08(stor.) Sub=06 Prot=50 Driver=usb-storage
+E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+
+(patch applied and retested on a modified 2.6.27.2-libre.24.rc1.fc10)
+
+Signed-off-by: Phil Dibowitz <phil@ipom.com>
+Cc: Alexandre Oliva <oliva@lsd.ic.unicamp.br>
+Cc: Chuck Ebbert <cebbert@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/usb/storage/unusual_devs.h | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1745,6 +1745,15 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
++/* Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
++ * JMicron responds to USN and several other SCSI ioctls with a
++ * residue that causes subsequent I/O requests to fail. */
++UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100,
++ "JMicron",
++ "USB to ATA/ATAPI Bridge",
++ US_SC_DEVICE, US_PR_DEVICE, NULL,
++ US_FL_IGNORE_RESIDUE ),
++
+ /* Reported by Robert Schedel <r.schedel@yahoo.de>
+ * Note: this is a 'super top' device like the above 14cd/6600 device */
+ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
--- /dev/null
+From cebbert@redhat.com Tue Nov 4 14:28:40 2008
+From: David Rientjes <rientjes@google.com>
+Date: Sun, 26 Oct 2008 18:13:59 -0400
+Subject: x86: avoid dereferencing beyond stack + THREAD_SIZE
+To: stable@kernel.org
+Cc: Ingo Molnar <mingo@elte.hu>
+Message-ID: <20081026181359.246d413d@redhat.com>
+
+
+From: David Rientjes <rientjes@google.com>
+
+commit e1e23bb0513520035ec934fa3483507cb6648b7c upstream
+
+x86: avoid dereferencing beyond stack + THREAD_SIZE
+
+It's possible for get_wchan() to dereference past task->stack + THREAD_SIZE
+while iterating through instruction pointers if fp equals the upper boundary,
+causing a kernel panic.
+
+Signed-off-by: David Rientjes <rientjes@google.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Cc: Chuck Ebbert <cebbert@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
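+The off-by-one is easiest to see with concrete bounds (illustrative
+numbers, x86-64 with THREAD_SIZE = 8192):
+
+/*
+ * stack              = 0xffff810012340000
+ * valid stack bytes  = stack ... stack + 8191
+ *
+ * old test:  fp > stack + THREAD_SIZE
+ *            -> fp == stack + 8192 is accepted, and the following
+ *               ip = *(u64 *)(fp + 8) reads 8..16 bytes past the stack
+ * new test:  fp >= stack + THREAD_SIZE
+ *            -> the boundary value is rejected before the dereference
+ */
+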
+---
+ arch/x86/kernel/process_64.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -729,12 +729,12 @@ unsigned long get_wchan(struct task_stru
+ if (!p || p == current || p->state==TASK_RUNNING)
+ return 0;
+ stack = (unsigned long)task_stack_page(p);
+- if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE)
++ if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
+ return 0;
+ fp = *(u64 *)(p->thread.sp);
+ do {
+ if (fp < (unsigned long)stack ||
+- fp > (unsigned long)stack+THREAD_SIZE)
++ fp >= (unsigned long)stack+THREAD_SIZE)
+ return 0;
+ ip = *(u64 *)(fp+8);
+ if (!in_sched_functions(ip))
--- /dev/null
+From cebbert@redhat.com Tue Nov 4 14:31:31 2008
+From: Bjorn Helgaas <bjorn.helgaas@hp.com>
+Date: Sun, 26 Oct 2008 18:57:33 -0400
+Subject: x86: register a platform RTC device if PNP doesn't describe it
+To: stable@kernel.org
+Cc: David Brownell <dbrownell@users.sourceforge.net>, Bjorn Helgaas <bjorn.helgaas@hp.com>
+Message-ID: <20081026185733.6ad3746f@redhat.com>
+
+From: Bjorn Helgaas <bjorn.helgaas@hp.com>
+
+commit 758a7f7bb86b520aadc484f23da85e547b3bf3d8 upstream
+
+x86: register a platform RTC device if PNP doesn't describe it
+
+Most if not all x86 platforms have an RTC device, but sometimes the RTC
+is not exposed as a PNP0b00/PNP0b01/PNP0b02 device in PNPBIOS or ACPI:
+
+ http://bugzilla.kernel.org/show_bug.cgi?id=11580
+ https://bugzilla.redhat.com/show_bug.cgi?id=451188
+
+It's best if we can discover the RTC via PNP because then we know
+which flavor of device it is, where it lives, and which IRQ it uses.
+
+But if we can't, we should register a platform device using the
+compiled-in RTC_PORT/RTC_IRQ resource assumptions.
+
+Signed-off-by: Bjorn Helgaas <bjorn.helgaas@hp.com>
+Acked-by: Rafael J. Wysocki <rjw@sisk.pl>
+Acked-by: David Brownell <dbrownell@users.sourceforge.net>
+Reported-by: Rik Theys <rik.theys@esat.kuleuven.be>
+Reported-by: shr_msn@yahoo.com.tw
+Cc: Chuck Ebbert <cebbert@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
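+The PNP scan in add_rtc_cmos() is equivalent to the hypothetical helper
+below (a refactoring shown only to make the nested loops easier to read);
+the platform device is registered only when this would return false, i.e.
+when no PNP device advertises one of the RTC ids.
+
+static bool __init pnp_describes_rtc(void)
+{
+	static const char *ids[] = { "PNP0b00", "PNP0b01", "PNP0b02" };
+	struct pnp_dev *dev;
+	struct pnp_id *id;
+	int i;
+
+	pnp_for_each_dev(dev)
+		for (id = dev->id; id; id = id->next)
+			for (i = 0; i < ARRAY_SIZE(ids); i++)
+				if (compare_pnp_id(id, ids[i]))
+					return true;	/* PNP owns the RTC */
+	return false;
+}
+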
+---
+ arch/x86/kernel/rtc.c | 22 ++++++++++++++++++----
+ 1 file changed, 18 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/rtc.c
++++ b/arch/x86/kernel/rtc.c
+@@ -223,11 +223,25 @@ static struct platform_device rtc_device
+ static __init int add_rtc_cmos(void)
+ {
+ #ifdef CONFIG_PNP
+- if (!pnp_platform_devices)
+- platform_device_register(&rtc_device);
+-#else
++ static const char *ids[] __initconst =
++ { "PNP0b00", "PNP0b01", "PNP0b02", };
++ struct pnp_dev *dev;
++ struct pnp_id *id;
++ int i;
++
++ pnp_for_each_dev(dev) {
++ for (id = dev->id; id; id = id->next) {
++ for (i = 0; i < ARRAY_SIZE(ids); i++) {
++ if (compare_pnp_id(id, ids[i]) != 0)
++ return 0;
++ }
++ }
++ }
++#endif
++
+ platform_device_register(&rtc_device);
+-#endif /* CONFIG_PNP */
++ dev_info(&rtc_device.dev,
++ "registered platform RTC device (no PNP device found)\n");
+ return 0;
+ }
+ device_initcall(add_rtc_cmos);