--- /dev/null
+From 40599072dca3ec7d4c9ff8271978be169f974638 Mon Sep 17 00:00:00 2001
+From: Pavel Machek <pavel@suse.cz>
+Date: Tue, 25 Nov 2008 12:05:08 +0100
+Subject: ACPI: scheduling in atomic via acpi_evaluate_integer()
+
+From: Pavel Machek <pavel@suse.cz>
+
+commit 40599072dca3ec7d4c9ff8271978be169f974638 upstream.
+
+Now I know why I had strange "scheduling in atomic" problems:
+acpi_evaluate_integer() does malloc(..., irqs_disabled() ? GFP_ATOMIC
+: GFP_KERNEL)... which is (of course) broken.
+
+There's no way to reliably tell from code whether we need GFP_ATOMIC or
+not; this check, for example, fails to detect held spinlocks.
+
+Fortunately, the allocation (a single union acpi_object) is small
+enough to be done on the stack.
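+
+A minimal sketch (not part of this patch) of why the irqs_disabled()
+test is unreliable: spin_lock() forbids sleeping but leaves interrupts
+enabled, so the old code would still pick GFP_KERNEL inside such a
+critical section and could sleep there:
+
+        spin_lock(&lock);       /* atomic context, but IRQs stay enabled */
+        /* irqs_disabled() is false, so the old code chose GFP_KERNEL... */
+        obj = kzalloc(sizeof(*obj), GFP_KERNEL);    /* ...and may sleep */
+        spin_unlock(&lock);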
+
+Signed-off-by: Pavel Machek <pavel@suse.cz>
+Acked-by: Bob Moore <robert.moore@intel.com>
+Signed-off-by: Len Brown <len.brown@intel.com>
+Cc: Rafael J. Wysocki <rjw@sisk.pl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/acpi/utils.c | 16 ++++------------
+ 1 file changed, 4 insertions(+), 12 deletions(-)
+
+--- a/drivers/acpi/utils.c
++++ b/drivers/acpi/utils.c
+@@ -259,34 +259,26 @@ acpi_evaluate_integer(acpi_handle handle
+ struct acpi_object_list *arguments, unsigned long *data)
+ {
+ acpi_status status = AE_OK;
+- union acpi_object *element;
++ union acpi_object element;
+ struct acpi_buffer buffer = { 0, NULL };
+
+-
+ if (!data)
+ return AE_BAD_PARAMETER;
+
+- element = kzalloc(sizeof(union acpi_object), irqs_disabled() ? GFP_ATOMIC: GFP_KERNEL);
+- if (!element)
+- return AE_NO_MEMORY;
+-
+ buffer.length = sizeof(union acpi_object);
+- buffer.pointer = element;
++ buffer.pointer = &element;
+ status = acpi_evaluate_object(handle, pathname, arguments, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_util_eval_error(handle, pathname, status);
+- kfree(element);
+ return status;
+ }
+
+- if (element->type != ACPI_TYPE_INTEGER) {
++ if (element.type != ACPI_TYPE_INTEGER) {
+ acpi_util_eval_error(handle, pathname, AE_BAD_DATA);
+- kfree(element);
+ return AE_BAD_DATA;
+ }
+
+- *data = element->integer.value;
+- kfree(element);
++ *data = element.integer.value;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Return value [%lu]\n", *data));
+
--- /dev/null
+From 65df78473ffbf3bff5e2034df1638acc4f3ddd50 Mon Sep 17 00:00:00 2001
+From: Rafael J. Wysocki <rjw@sisk.pl>
+Date: Wed, 26 Nov 2008 17:53:13 -0500
+Subject: ACPI suspend: Blacklist boxes that require us to set SCI_EN directly on resume
+
+From: Rafael J. Wysocki <rjw@sisk.pl>
+
+commit 65df78473ffbf3bff5e2034df1638acc4f3ddd50 upstream.
+
+Some Apple boxes evidently require us to set SCI_EN directly on
+resume, because if we don't do that, they hang somewhere in the
+resume code path. Moreover, on these boxes it is not sufficient to
+use acpi_enable() to turn ACPI on during resume. All of this is
+against the ACPI specification, which states that (1) the BIOS is
+supposed to return from the S3 sleep state with ACPI enabled
+(SCI_EN set) and (2) the SCI_EN bit is owned by the hardware and we
+are not supposed to change it.
+
+For this reason, blacklist the affected systems so that the SCI_EN
+bit is set during resume on them.
+
+[NOTE: Unconditionally setting SCI_EN for all systems on resume doesn't
+ work, because it makes some other systems crash (that's to be
+ expected). Also, it is not entirely clear right now if all of the
+ Apple boxes require this workaround.]
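+
+The fix reuses the existing DMI quirk machinery; adding another
+affected model would look like this (hypothetical entry, not part of
+this patch):
+
+        {
+         .callback = init_set_sci_en_on_resume,
+         .ident = "Apple MacBook Pro 1,1",      /* hypothetical */
+         .matches = {
+                DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
+                DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,1"),
+                },
+        },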
+
+This patch fixes the recent regression tracked as
+http://bugzilla.kernel.org/show_bug.cgi?id=12038
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Tested-by: Tino Keitel <tino.keitel@gmx.de>
+Tested-by: Bob Copeland <me@bobcopeland.com>
+Signed-off-by: Len Brown <len.brown@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/acpi/sleep/main.c | 40 +++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 39 insertions(+), 1 deletion(-)
+
+--- a/drivers/acpi/sleep/main.c
++++ b/drivers/acpi/sleep/main.c
+@@ -60,6 +60,18 @@ void __init acpi_old_suspend_ordering(vo
+ old_suspend_ordering = true;
+ }
+
++/*
++ * According to the ACPI specification the BIOS should make sure that ACPI is
++ * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
++ * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
++ * on such systems during resume. Unfortunately that doesn't help in
++ * particularly pathological cases in which SCI_EN has to be set directly on
++ * resume, although the specification states very clearly that this flag is
++ * owned by the hardware. The set_sci_en_on_resume variable will be set in such
++ * cases.
++ */
++static bool set_sci_en_on_resume;
++
+ /**
+ * acpi_pm_disable_gpes - Disable the GPEs.
+ */
+@@ -201,7 +213,11 @@ static int acpi_suspend_enter(suspend_st
+ }
+
+ /* If ACPI is not enabled by the BIOS, we need to enable it here. */
+- acpi_enable();
++ if (set_sci_en_on_resume)
++ acpi_set_register(ACPI_BITREG_SCI_ENABLE, 1);
++ else
++ acpi_enable();
++
+ /* Reprogram control registers and execute _BFS */
+ acpi_leave_sleep_state_prep(acpi_state);
+
+@@ -289,6 +305,12 @@ static int __init init_old_suspend_order
+ return 0;
+ }
+
++static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d)
++{
++ set_sci_en_on_resume = true;
++ return 0;
++}
++
+ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
+ {
+ .callback = init_old_suspend_ordering,
+@@ -306,6 +328,22 @@ static struct dmi_system_id __initdata a
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
+ },
+ },
++ {
++ .callback = init_set_sci_en_on_resume,
++ .ident = "Apple MacBook 1,1",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
++ },
++ },
++ {
++ .callback = init_set_sci_en_on_resume,
++ .ident = "Apple MacMini 1,1",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
++ },
++ },
+ {},
+ };
+ #endif /* CONFIG_SUSPEND */
--- /dev/null
+From f2f1fa78a155524b849edf359e42a3001ea652c0 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Fri, 5 Dec 2008 14:49:18 -0800
+Subject: Enforce a minimum SG_IO timeout
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit f2f1fa78a155524b849edf359e42a3001ea652c0 upstream.
+
+There's no point in having very short SG_IO timeouts, since if the
+command does end up timing out, we'll end up going through the reset
+sequence, which is several seconds long, in order to abort the command
+that timed out.
+
+As a result, shorter timeouts than a few seconds simply do not make
+sense, as the recovery would be longer than the timeout itself.
+
+Add a BLK_MIN_SG_TIMEOUT to match the existing BLK_DEFAULT_SG_TIMEOUT.
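+
+A short illustration (not part of the patch, and assuming the usual
+millisecond-to-jiffies conversion in the SG_IO path): a userspace
+request asking for a 1-second timeout now runs with the 7-second floor:
+
+        rq->timeout = msecs_to_jiffies(hdr->timeout);   /* e.g. 1000 ms */
+        if (rq->timeout < BLK_MIN_SG_TIMEOUT)           /* 7 * HZ */
+                rq->timeout = BLK_MIN_SG_TIMEOUT;       /* raised to 7s */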
+
+Suggested-by: Alan Cox <alan@lxorguk.ukuu.org.uk>
+Acked-by: Tejun Heo <tj@kernel.org>
+Acked-by: Jens Axboe <jens.axboe@oracle.com>
+Cc: Jeff Garzik <jeff@garzik.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/bsg.c | 2 ++
+ block/scsi_ioctl.c | 2 ++
+ include/linux/blkdev.h | 1 +
+ 3 files changed, 5 insertions(+)
+
+--- a/block/bsg.c
++++ b/block/bsg.c
+@@ -202,6 +202,8 @@ static int blk_fill_sgv4_hdr_rq(struct r
+ rq->timeout = q->sg_timeout;
+ if (!rq->timeout)
+ rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
++ if (rq->timeout < BLK_MIN_SG_TIMEOUT)
++ rq->timeout = BLK_MIN_SG_TIMEOUT;
+
+ return 0;
+ }
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -208,6 +208,8 @@ static int blk_fill_sghdr_rq(struct requ
+ rq->timeout = q->sg_timeout;
+ if (!rq->timeout)
+ rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
++ if (rq->timeout < BLK_MIN_SG_TIMEOUT)
++ rq->timeout = BLK_MIN_SG_TIMEOUT;
+
+ return 0;
+ }
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -623,6 +623,7 @@ extern unsigned long blk_max_low_pfn, bl
+ * default timeout for SG_IO if none specified
+ */
+ #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
++#define BLK_MIN_SG_TIMEOUT (7 * HZ)
+
+ #ifdef CONFIG_BOUNCE
+ extern int init_emergency_isa_pool(void);
--- /dev/null
+From 218d11a8b071b23b76c484fd5f72a4fe3306801e Mon Sep 17 00:00:00 2001
+From: Jonathan Corbet <corbet@lwn.net>
+Date: Fri, 5 Dec 2008 16:12:48 -0700
+Subject: Fix a race condition in FASYNC handling
+
+From: Jonathan Corbet <corbet@lwn.net>
+
+commit 218d11a8b071b23b76c484fd5f72a4fe3306801e upstream.
+
+Changeset a238b790d5f99c7832f9b73ac8847025815b85f7 (Call fasync()
+functions without the BKL) introduced a race which could leave
+file->f_flags in a state inconsistent with what the underlying
+driver/filesystem believes. Revert that change, and also fix the same
+races in ioctl_fioasync() and ioctl_fionbio().
+
+This is a minimal, short-term fix; the real fix will not involve the
+BKL.
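+
+Roughly, the interleaving being closed off (sketch, not from the
+patch): two tasks updating the same struct file without the BKL can
+race as
+
+        CPU0 (sets FASYNC)                 CPU1 (clears FASYNC)
+        ->fasync(fd, filp, 1);
+                                           ->fasync(fd, filp, 0);
+                                           filp->f_flags &= ~FASYNC;
+        filp->f_flags |= FASYNC;
+
+leaving f_flags claiming FASYNC is set while the driver's fasync list
+no longer contains the file.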
+
+Reported-by: Oleg Nesterov <oleg@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Jonathan Corbet <corbet@lwn.net>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/fcntl.c | 7 +++++++
+ fs/ioctl.c | 12 ++++++++----
+ 2 files changed, 15 insertions(+), 4 deletions(-)
+
+--- a/fs/fcntl.c
++++ b/fs/fcntl.c
+@@ -19,6 +19,7 @@
+ #include <linux/signal.h>
+ #include <linux/rcupdate.h>
+ #include <linux/pid_namespace.h>
++#include <linux/smp_lock.h>
+
+ #include <asm/poll.h>
+ #include <asm/siginfo.h>
+@@ -175,6 +176,11 @@ static int setfl(int fd, struct file * f
+ if (error)
+ return error;
+
++ /*
++ * We still need a lock here for now to keep multiple FASYNC calls
++ * from racing with each other.
++ */
++ lock_kernel();
+ if ((arg ^ filp->f_flags) & FASYNC) {
+ if (filp->f_op && filp->f_op->fasync) {
+ error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
+@@ -185,6 +191,7 @@ static int setfl(int fd, struct file * f
+
+ filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
+ out:
++ unlock_kernel();
+ return error;
+ }
+
+--- a/fs/ioctl.c
++++ b/fs/ioctl.c
+@@ -123,11 +123,9 @@ static int ioctl_fioasync(unsigned int f
+
+ /* Did FASYNC state change ? */
+ if ((flag ^ filp->f_flags) & FASYNC) {
+- if (filp->f_op && filp->f_op->fasync) {
+- lock_kernel();
++ if (filp->f_op && filp->f_op->fasync)
+ error = filp->f_op->fasync(fd, filp, on);
+- unlock_kernel();
+- } else
++ else
+ error = -ENOTTY;
+ }
+ if (error)
+@@ -163,11 +161,17 @@ int do_vfs_ioctl(struct file *filp, unsi
+ break;
+
+ case FIONBIO:
++ /* BKL needed to avoid races tweaking f_flags */
++ lock_kernel();
+ error = ioctl_fionbio(filp, argp);
++ unlock_kernel();
+ break;
+
+ case FIOASYNC:
++ /* BKL needed to avoid races tweaking f_flags */
++ lock_kernel();
+ error = ioctl_fioasync(fd, filp, argp);
++ unlock_kernel();
+ break;
+
+ case FIOQSIZE:
--- /dev/null
+From 4afe978530702c934dfdb11f54073136818b2119 Mon Sep 17 00:00:00 2001
+From: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
+Date: Wed, 22 Oct 2008 14:15:00 -0700
+Subject: jbd: fix error handling for checkpoint io
+
+From: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
+
+commit 4afe978530702c934dfdb11f54073136818b2119 upstream.
+
+When a checkpointing IO fails, the current JBD code doesn't check the
+error and continues journaling. This means the latest metadata can be
+lost from both the journal and the filesystem.
+
+This patch leaves the failed metadata blocks in the journal space and
+aborts journaling in the case of log_do_checkpoint(). To achieve this,
+we need to:
+
+1. don't remove the failed buffer from the checkpoint list in the
+   case of __try_to_free_cp_buf(), because it may be released or
+   overwritten by a later transaction
+2. in log_do_checkpoint(), which is the last chance, remove the failed
+   buffer from the checkpoint list and abort the journal
+3. when checkpointing fails, don't update the journal super block to
+ prevent the journaled contents from being cleaned. For safety,
+ don't update j_tail and j_tail_sequence either
+4. when checkpointing fails, report the error to the ext3 layer so
+   that ext3 doesn't clear the needs_recovery flag; otherwise the
+   journaled contents are ignored and cleaned up in the recovery
+   phase (see the caller sketch after this list)
+5. if the recovery fails, keep the needs_recovery flag
+6. prevent cleanup_journal_tail() from being called between
+   __journal_drop_transaction() and journal_abort() (a race issue
+   between journal_flush() and __log_wait_for_space())
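+
+A minimal caller sketch for the new journal_destroy() contract
+(assumed ext3-style usage, not part of this patch):
+
+        /* on unmount, only a cleanly destroyed journal may drop
+         * the needs_recovery flag */
+        if (journal_destroy(sbi->s_journal) < 0)
+                ext3_abort(sb, __func__, "couldn't clean up the journal");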
+
+Signed-off-by: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
+Acked-by: Jan Kara <jack@suse.cz>
+Cc: <linux-ext4@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/jbd/checkpoint.c | 49 +++++++++++++++++++++++++++++++++++++------------
+ fs/jbd/journal.c | 28 ++++++++++++++++++++++------
+ fs/jbd/recovery.c | 7 +++++--
+ include/linux/jbd.h | 2 +-
+ 4 files changed, 65 insertions(+), 21 deletions(-)
+
+--- a/fs/jbd/checkpoint.c
++++ b/fs/jbd/checkpoint.c
+@@ -93,7 +93,8 @@ static int __try_to_free_cp_buf(struct j
+ int ret = 0;
+ struct buffer_head *bh = jh2bh(jh);
+
+- if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh)) {
++ if (jh->b_jlist == BJ_None && !buffer_locked(bh) &&
++ !buffer_dirty(bh) && buffer_uptodate(bh)) {
+ JBUFFER_TRACE(jh, "remove from checkpoint list");
+ ret = __journal_remove_checkpoint(jh) + 1;
+ jbd_unlock_bh_state(bh);
+@@ -160,21 +161,25 @@ static void jbd_sync_bh(journal_t *journ
+ * buffers. Note that we take the buffers in the opposite ordering
+ * from the one in which they were submitted for IO.
+ *
++ * Return 0 on success, and return <0 if some buffers have failed
++ * to be written out.
++ *
+ * Called with j_list_lock held.
+ */
+-static void __wait_cp_io(journal_t *journal, transaction_t *transaction)
++static int __wait_cp_io(journal_t *journal, transaction_t *transaction)
+ {
+ struct journal_head *jh;
+ struct buffer_head *bh;
+ tid_t this_tid;
+ int released = 0;
++ int ret = 0;
+
+ this_tid = transaction->t_tid;
+ restart:
+ /* Did somebody clean up the transaction in the meanwhile? */
+ if (journal->j_checkpoint_transactions != transaction ||
+ transaction->t_tid != this_tid)
+- return;
++ return ret;
+ while (!released && transaction->t_checkpoint_io_list) {
+ jh = transaction->t_checkpoint_io_list;
+ bh = jh2bh(jh);
+@@ -194,6 +199,9 @@ restart:
+ spin_lock(&journal->j_list_lock);
+ goto restart;
+ }
++ if (unlikely(!buffer_uptodate(bh)))
++ ret = -EIO;
++
+ /*
+ * Now in whatever state the buffer currently is, we know that
+ * it has been written out and so we can drop it from the list
+@@ -203,6 +211,8 @@ restart:
+ journal_remove_journal_head(bh);
+ __brelse(bh);
+ }
++
++ return ret;
+ }
+
+ #define NR_BATCH 64
+@@ -226,7 +236,8 @@ __flush_batch(journal_t *journal, struct
+ * Try to flush one buffer from the checkpoint list to disk.
+ *
+ * Return 1 if something happened which requires us to abort the current
+- * scan of the checkpoint list.
++ * scan of the checkpoint list. Return <0 if the buffer has failed to
++ * be written out.
+ *
+ * Called with j_list_lock held and drops it if 1 is returned
+ * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
+@@ -256,6 +267,9 @@ static int __process_buffer(journal_t *j
+ log_wait_commit(journal, tid);
+ ret = 1;
+ } else if (!buffer_dirty(bh)) {
++ ret = 1;
++ if (unlikely(!buffer_uptodate(bh)))
++ ret = -EIO;
+ J_ASSERT_JH(jh, !buffer_jbddirty(bh));
+ BUFFER_TRACE(bh, "remove from checkpoint");
+ __journal_remove_checkpoint(jh);
+@@ -263,7 +277,6 @@ static int __process_buffer(journal_t *j
+ jbd_unlock_bh_state(bh);
+ journal_remove_journal_head(bh);
+ __brelse(bh);
+- ret = 1;
+ } else {
+ /*
+ * Important: we are about to write the buffer, and
+@@ -295,6 +308,7 @@ static int __process_buffer(journal_t *j
+ * to disk. We submit larger chunks of data at once.
+ *
+ * The journal should be locked before calling this function.
++ * Called with j_checkpoint_mutex held.
+ */
+ int log_do_checkpoint(journal_t *journal)
+ {
+@@ -318,6 +332,7 @@ int log_do_checkpoint(journal_t *journal
+ * OK, we need to start writing disk blocks. Take one transaction
+ * and write it.
+ */
++ result = 0;
+ spin_lock(&journal->j_list_lock);
+ if (!journal->j_checkpoint_transactions)
+ goto out;
+@@ -334,7 +349,7 @@ restart:
+ int batch_count = 0;
+ struct buffer_head *bhs[NR_BATCH];
+ struct journal_head *jh;
+- int retry = 0;
++ int retry = 0, err;
+
+ while (!retry && transaction->t_checkpoint_list) {
+ struct buffer_head *bh;
+@@ -347,6 +362,8 @@ restart:
+ break;
+ }
+ retry = __process_buffer(journal, jh, bhs,&batch_count);
++ if (retry < 0 && !result)
++ result = retry;
+ if (!retry && (need_resched() ||
+ spin_needbreak(&journal->j_list_lock))) {
+ spin_unlock(&journal->j_list_lock);
+@@ -371,14 +388,18 @@ restart:
+ * Now we have cleaned up the first transaction's checkpoint
+ * list. Let's clean up the second one
+ */
+- __wait_cp_io(journal, transaction);
++ err = __wait_cp_io(journal, transaction);
++ if (!result)
++ result = err;
+ }
+ out:
+ spin_unlock(&journal->j_list_lock);
+- result = cleanup_journal_tail(journal);
+ if (result < 0)
+- return result;
+- return 0;
++ journal_abort(journal, result);
++ else
++ result = cleanup_journal_tail(journal);
++
++ return (result < 0) ? result : 0;
+ }
+
+ /*
+@@ -394,8 +415,9 @@ out:
+ * This is the only part of the journaling code which really needs to be
+ * aware of transaction aborts. Checkpointing involves writing to the
+ * main filesystem area rather than to the journal, so it can proceed
+- * even in abort state, but we must not update the journal superblock if
+- * we have an abort error outstanding.
++ * even in abort state, but we must not update the super block if
++ * checkpointing may have failed. Otherwise, we would lose some metadata
++ * buffers which should be written-back to the filesystem.
+ */
+
+ int cleanup_journal_tail(journal_t *journal)
+@@ -404,6 +426,9 @@ int cleanup_journal_tail(journal_t *jour
+ tid_t first_tid;
+ unsigned long blocknr, freed;
+
++ if (is_journal_aborted(journal))
++ return 1;
++
+ /* OK, work out the oldest transaction remaining in the log, and
+ * the log block it starts at.
+ *
+--- a/fs/jbd/journal.c
++++ b/fs/jbd/journal.c
+@@ -1121,9 +1121,12 @@ recovery_error:
+ *
+ * Release a journal_t structure once it is no longer in use by the
+ * journaled object.
++ * Return <0 if we couldn't clean up the journal.
+ */
+-void journal_destroy(journal_t *journal)
++int journal_destroy(journal_t *journal)
+ {
++ int err = 0;
++
+ /* Wait for the commit thread to wake up and die. */
+ journal_kill_thread(journal);
+
+@@ -1146,11 +1149,16 @@ void journal_destroy(journal_t *journal)
+ J_ASSERT(journal->j_checkpoint_transactions == NULL);
+ spin_unlock(&journal->j_list_lock);
+
+- /* We can now mark the journal as empty. */
+- journal->j_tail = 0;
+- journal->j_tail_sequence = ++journal->j_transaction_sequence;
+ if (journal->j_sb_buffer) {
+- journal_update_superblock(journal, 1);
++ if (!is_journal_aborted(journal)) {
++ /* We can now mark the journal as empty. */
++ journal->j_tail = 0;
++ journal->j_tail_sequence =
++ ++journal->j_transaction_sequence;
++ journal_update_superblock(journal, 1);
++ } else {
++ err = -EIO;
++ }
+ brelse(journal->j_sb_buffer);
+ }
+
+@@ -1160,6 +1168,8 @@ void journal_destroy(journal_t *journal)
+ journal_destroy_revoke(journal);
+ kfree(journal->j_wbuf);
+ kfree(journal);
++
++ return err;
+ }
+
+
+@@ -1359,10 +1369,16 @@ int journal_flush(journal_t *journal)
+ spin_lock(&journal->j_list_lock);
+ while (!err && journal->j_checkpoint_transactions != NULL) {
+ spin_unlock(&journal->j_list_lock);
++ mutex_lock(&journal->j_checkpoint_mutex);
+ err = log_do_checkpoint(journal);
++ mutex_unlock(&journal->j_checkpoint_mutex);
+ spin_lock(&journal->j_list_lock);
+ }
+ spin_unlock(&journal->j_list_lock);
++
++ if (is_journal_aborted(journal))
++ return -EIO;
++
+ cleanup_journal_tail(journal);
+
+ /* Finally, mark the journal as really needing no recovery.
+@@ -1384,7 +1400,7 @@ int journal_flush(journal_t *journal)
+ J_ASSERT(journal->j_head == journal->j_tail);
+ J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
+ spin_unlock(&journal->j_state_lock);
+- return err;
++ return 0;
+ }
+
+ /**
+--- a/fs/jbd/recovery.c
++++ b/fs/jbd/recovery.c
+@@ -223,7 +223,7 @@ do { \
+ */
+ int journal_recover(journal_t *journal)
+ {
+- int err;
++ int err, err2;
+ journal_superblock_t * sb;
+
+ struct recovery_info info;
+@@ -261,7 +261,10 @@ int journal_recover(journal_t *journal)
+ journal->j_transaction_sequence = ++info.end_transaction;
+
+ journal_clear_revoke(journal);
+- sync_blockdev(journal->j_fs_dev);
++ err2 = sync_blockdev(journal->j_fs_dev);
++ if (!err)
++ err = err2;
++
+ return err;
+ }
+
+--- a/include/linux/jbd.h
++++ b/include/linux/jbd.h
+@@ -908,7 +908,7 @@ extern int journal_set_features
+ (journal_t *, unsigned long, unsigned long, unsigned long);
+ extern int journal_create (journal_t *);
+ extern int journal_load (journal_t *journal);
+-extern void journal_destroy (journal_t *);
++extern int journal_destroy (journal_t *);
+ extern int journal_recover (journal_t *journal);
+ extern int journal_wipe (journal_t *, int);
+ extern int journal_skip_recovery (journal_t *);
--- /dev/null
+From 9f818b4ac04f53458d0354950b4f229f54be4dbf Mon Sep 17 00:00:00 2001
+From: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
+Date: Wed, 22 Oct 2008 14:15:02 -0700
+Subject: jbd: test BH_Write_EIO to detect errors on metadata buffers
+
+From: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
+
+commit 9f818b4ac04f53458d0354950b4f229f54be4dbf upstream.
+
+__try_to_free_cp_buf(), __process_buffer(), and __wait_cp_io() test the
+BH_Uptodate flag to detect write I/O errors on metadata buffers. But
+since commit 95450f5a7e53d5752ce1a0d0b8282e10fe745ae0 "ext3: don't read
+inode block if the buffer has a write error"(*), the BH_Uptodate flag
+can be set on inode buffers that have BH_Write_EIO set, in order to
+avoid reading old inode data. So now we have to test the BH_Write_EIO
+flag of checkpointing inode buffers instead of BH_Uptodate. This patch
+does that.
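+
+In buffer-head terms (sketch, not part of the patch), a failed metadata
+write can now leave the buffer in this state:
+
+        buffer_uptodate(bh)       == 1   /* kept to avoid rereading stale data */
+        buffer_write_io_error(bh) == 1   /* records the failed write */
+
+so only buffer_write_io_error() reliably reports the I/O failure.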
+
+Signed-off-by: Hidehiro Kawai <hidehiro.kawai.ez@hitachi.com>
+Acked-by: Jan Kara <jack@suse.cz>
+Acked-by: Eric Sandeen <sandeen@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/jbd/checkpoint.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/fs/jbd/checkpoint.c
++++ b/fs/jbd/checkpoint.c
+@@ -94,7 +94,7 @@ static int __try_to_free_cp_buf(struct j
+ struct buffer_head *bh = jh2bh(jh);
+
+ if (jh->b_jlist == BJ_None && !buffer_locked(bh) &&
+- !buffer_dirty(bh) && buffer_uptodate(bh)) {
++ !buffer_dirty(bh) && !buffer_write_io_error(bh)) {
+ JBUFFER_TRACE(jh, "remove from checkpoint list");
+ ret = __journal_remove_checkpoint(jh) + 1;
+ jbd_unlock_bh_state(bh);
+@@ -199,7 +199,7 @@ restart:
+ spin_lock(&journal->j_list_lock);
+ goto restart;
+ }
+- if (unlikely(!buffer_uptodate(bh)))
++ if (unlikely(buffer_write_io_error(bh)))
+ ret = -EIO;
+
+ /*
+@@ -268,7 +268,7 @@ static int __process_buffer(journal_t *j
+ ret = 1;
+ } else if (!buffer_dirty(bh)) {
+ ret = 1;
+- if (unlikely(!buffer_uptodate(bh)))
++ if (unlikely(buffer_write_io_error(bh)))
+ ret = -EIO;
+ J_ASSERT_JH(jh, !buffer_jbddirty(bh));
+ BUFFER_TRACE(bh, "remove from checkpoint");
0004-sparc64-Fix-bug-in-PTRACE_SETFPREGS64-handling.patch
0005-sparc64-Fix-VIS-emulation-bugs.patch
0006-sparc64-Sync-FPU-state-in-VIS-emulation-handler.patch
+enforce-a-minimum-sg_io-timeout.patch
+fix-a-race-condition-in-fasync-handling.patch
+acpi-scheduling-in-atomic-via-acpi_evaluate_integer.patch
+acpi-suspend-blacklist-boxes-that-require-us-to-set-sci_en-directly-on-resume.patch
+jbd-fix-error-handling-for-checkpoint-io.patch
+jbd-test-bh_write_eio-to-detect-errors-on-metadata-buffers.patch