2.6.39 patches
author Greg Kroah-Hartman <gregkh@suse.de>
Mon, 30 May 2011 00:25:50 +0000 (08:25 +0800)
committer Greg Kroah-Hartman <gregkh@suse.de>
Mon, 30 May 2011 00:25:50 +0000 (08:25 +0800)
queue-2.6.39/block-add-a-non-queueable-flush-flag.patch [new file with mode: 0644]
queue-2.6.39/block-add-proper-state-guards-to-__elv_next_request.patch [new file with mode: 0644]
queue-2.6.39/block-always-allocate-genhd-ev-if-check_events-is.patch [new file with mode: 0644]
queue-2.6.39/block-fix-discard-topology-stacking-and-reporting.patch [new file with mode: 0644]
queue-2.6.39/block-hold-queue-if-flush-is-running-for-non-queueable.patch [new file with mode: 0644]
queue-2.6.39/block-move-bd_set_size-above-rescan_partitions-in.patch [new file with mode: 0644]
queue-2.6.39/ext4-don-t-set-pageuptodate-in-ext4_end_bio.patch [new file with mode: 0644]
queue-2.6.39/mtd-mtdconcat-fix-nand-oob-write.patch [new file with mode: 0644]
queue-2.6.39/mtd-omap-fix-subpage-ecc-issue-with-prefetch.patch [new file with mode: 0644]
queue-2.6.39/mtd-return-badblockbits-back.patch [new file with mode: 0644]
queue-2.6.39/series

diff --git a/queue-2.6.39/block-add-a-non-queueable-flush-flag.patch b/queue-2.6.39/block-add-a-non-queueable-flush-flag.patch
new file mode 100644 (file)
index 0000000..44b51f3
--- /dev/null
@@ -0,0 +1,70 @@
+From f3876930952390a31c3a7fd68dd621464a36eb80 Mon Sep 17 00:00:00 2001
+From: "shaohua.li@intel.com" <shaohua.li@intel.com>
+Date: Fri, 6 May 2011 11:34:32 -0600
+Subject: block: add a non-queueable flush flag
+
+From: "shaohua.li@intel.com" <shaohua.li@intel.com>
+
+commit f3876930952390a31c3a7fd68dd621464a36eb80 upstream.
+
+A flush request isn't queueable in some drives. Add a flag to let the
+driver notify the block layer about this. We can optimize flush
+performance with this knowledge.
+
+Stable: 2.6.39 only
+
+Signed-off-by: Shaohua Li <shaohua.li@intel.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/blk-settings.c   |    6 ++++++
+ include/linux/blkdev.h |    7 +++++++
+ 2 files changed, 13 insertions(+)
+
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -790,6 +790,12 @@ void blk_queue_flush(struct request_queu
+ }
+ EXPORT_SYMBOL_GPL(blk_queue_flush);
++void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
++{
++      q->flush_not_queueable = !queueable;
++}
++EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
++
+ static int __init blk_settings_init(void)
+ {
+       blk_max_low_pfn = max_low_pfn - 1;
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -364,6 +364,7 @@ struct request_queue
+        * for flush operations
+        */
+       unsigned int            flush_flags;
++      unsigned int            flush_not_queueable:1;
+       unsigned int            flush_pending_idx:1;
+       unsigned int            flush_running_idx:1;
+       unsigned long           flush_pending_since;
+@@ -843,6 +844,7 @@ extern void blk_queue_softirq_done(struc
+ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
+ extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
+ extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
++extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
+ extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
+ extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
+@@ -1111,6 +1113,11 @@ static inline unsigned int block_size(st
+       return bdev->bd_block_size;
+ }
++static inline bool queue_flush_queueable(struct request_queue *q)
++{
++      return !q->flush_not_queueable;
++}
++
+ typedef struct {struct page *v;} Sector;
+ unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
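
For context, a driver whose device cannot queue flush commands would advertise that with the helper added above during queue setup. A minimal sketch follows, assuming a hypothetical "mydrv" driver; blk_queue_flush() and blk_queue_flush_queueable() are the block layer interfaces referenced by this patch.

/*
 * Illustrative sketch only: "mydrv" is a hypothetical driver.
 */
#include <linux/blkdev.h>

static void mydrv_init_queue(struct request_queue *q)
{
        /* This device supports cache flush and FUA writes. */
        blk_queue_flush(q, REQ_FLUSH | REQ_FUA);

        /*
         * The device cannot accept a flush while other commands are
         * queued, so tell the block layer that flush is not queueable.
         * The flush machinery can then hold the queue while a flush
         * is in flight (see the related patch in this series).
         */
        blk_queue_flush_queueable(q, false);
}
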
diff --git a/queue-2.6.39/block-add-proper-state-guards-to-__elv_next_request.patch b/queue-2.6.39/block-add-proper-state-guards-to-__elv_next_request.patch
new file mode 100644 (file)
index 0000000..dce9a29
--- /dev/null
@@ -0,0 +1,36 @@
+From 0a58e077eb600d1efd7e54ad9926a75a39d7f8ae Mon Sep 17 00:00:00 2001
+From: James Bottomley <James.Bottomley@suse.de>
+Date: Wed, 18 May 2011 16:20:10 +0200
+Subject: block: add proper state guards to __elv_next_request
+
+From: James Bottomley <James.Bottomley@suse.de>
+
+commit 0a58e077eb600d1efd7e54ad9926a75a39d7f8ae upstream.
+
+blk_cleanup_queue() calls elevator_exit() and after this, we can't
+touch the elevator without oopsing.  __elv_next_request() must check
+for this state because in the refcounted queue model, we can still
+call it after blk_cleanup_queue() has been called.
+
+This was reported as causing an oops attributable to scsi.
+
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/blk.h |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -81,7 +81,8 @@ static inline struct request *__elv_next
+                       q->flush_queue_delayed = 1;
+                       return NULL;
+               }
+-              if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
++              if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags) ||
++                  !q->elevator->ops->elevator_dispatch_fn(q, 0))
+                       return NULL;
+       }
+ }
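
The pattern being guarded against is general: a refcounted object can outlive its own teardown, so late callers must test a "dead" flag before touching the part that teardown freed. Below is a minimal user-space sketch of that pattern with invented, simplified types; it is an illustration of the idea, not kernel code.

#include <stdbool.h>
#include <stdlib.h>

struct elevator { int dummy; };

struct queue {
        int refcount;
        bool dead;              /* analogous to QUEUE_FLAG_DEAD        */
        struct elevator *elv;   /* freed by cleanup, like q->elevator  */
};

/* Teardown frees the sub-object while references to q may remain. */
static void queue_cleanup(struct queue *q)
{
        free(q->elv);
        q->elv = NULL;
        q->dead = true;
}

/* Late callers must check the dead flag before touching q->elv. */
static struct elevator *queue_next(struct queue *q)
{
        if (q->dead || !q->elv)
                return NULL;    /* guard, like the QUEUE_FLAG_DEAD test */
        return q->elv;
}

int main(void)
{
        struct queue q = { .refcount = 2, .dead = false,
                           .elv = malloc(sizeof(struct elevator)) };

        queue_cleanup(&q);          /* teardown while a reference remains */
        return queue_next(&q) == NULL ? 0 : 1;   /* guard keeps this safe */
}
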
diff --git a/queue-2.6.39/block-always-allocate-genhd-ev-if-check_events-is.patch b/queue-2.6.39/block-always-allocate-genhd-ev-if-check_events-is.patch
new file mode 100644 (file)
index 0000000..7114e40
--- /dev/null
@@ -0,0 +1,50 @@
+From 75e3f3ee3c64968d42f4843ec49e579f84b5aa0c Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Thu, 26 May 2011 21:06:50 +0200
+Subject: block: always allocate genhd->ev if check_events is
+ implemented
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 75e3f3ee3c64968d42f4843ec49e579f84b5aa0c upstream.
+
+9fd097b149 (block: unexport DISK_EVENT_MEDIA_CHANGE for legacy/fringe
+drivers) removed DISK_EVENT_MEDIA_CHANGE from legacy/fringe block
+drivers which have inadequate ->check_events().  Combined with earlier
+change 7c88a168da (block: don't propagate unlisted DISK_EVENTs to
+userland), this enables using ->check_events() for internal processing
+while avoiding enabling in-kernel block event polling which can lead
+to infinite event loop.
+
+Unfortunately, this left many drivers, including floppy, without any bit
+set in disk->events and ->async_events, in which case disk_add_events()
+simply skipped allocation of disk->ev, which disables event handling
+entirely.  As ->check_events() is still used during open processing
+for revalidation, this can lead to open failure.
+
+This patch always allocates disk->ev if ->check_events is implemented.
+In the long term, it would make sense to simply include the event
+structure inline into genhd as it's now used by virtually all block
+devices.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Ondrej Zary <linux@rainbow-software.org>
+Reported-by: Alex Villacis Lasso <avillaci@ceibo.fiec.espol.edu.ec>
+Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/genhd.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -1728,7 +1728,7 @@ static void disk_add_events(struct gendi
+ {
+       struct disk_events *ev;
+-      if (!disk->fops->check_events || !(disk->events | disk->async_events))
++      if (!disk->fops->check_events)
+               return;
+       ev = kzalloc(sizeof(*ev), GFP_KERNEL);
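
To see how the old condition misfired, consider a legacy driver that still implements ->check_events() but, after 9fd097b149, no longer sets any DISK_EVENT_* bit. A hedged sketch of such a driver's disk setup is below; the driver itself is hypothetical, while the gendisk and block_device_operations fields are the ones the commit message refers to.

/* Hypothetical legacy driver, sketched for illustration only. */
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/module.h>

static unsigned int legacy_check_events(struct gendisk *disk,
                                        unsigned int clearing)
{
        /* poll the hardware for media change; details omitted */
        return 0;
}

static const struct block_device_operations legacy_fops = {
        .owner        = THIS_MODULE,
        .check_events = legacy_check_events,
};

static void legacy_setup_disk(struct gendisk *disk)
{
        disk->fops = &legacy_fops;
        /*
         * After 9fd097b149 this driver sets no DISK_EVENT_* bits, so
         * disk->events == 0 and disk->async_events == 0.  With the old
         * condition in disk_add_events() that meant disk->ev was never
         * allocated; this patch allocates it whenever ->check_events
         * is implemented.
         */
        disk->events = 0;
        disk->async_events = 0;
}
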
diff --git a/queue-2.6.39/block-fix-discard-topology-stacking-and-reporting.patch b/queue-2.6.39/block-fix-discard-topology-stacking-and-reporting.patch
new file mode 100644 (file)
index 0000000..0c37fbe
--- /dev/null
@@ -0,0 +1,91 @@
+From a934a00a69e940b126b9bdbf83e630ef5fe43523 Mon Sep 17 00:00:00 2001
+From: "Martin K. Petersen" <martin.petersen@oracle.com>
+Date: Wed, 18 May 2011 10:37:35 +0200
+Subject: block: Fix discard topology stacking and reporting
+
+From: "Martin K. Petersen" <martin.petersen@oracle.com>
+
+commit a934a00a69e940b126b9bdbf83e630ef5fe43523 upstream.
+
+In some cases we would end up stacking discard_zeroes_data incorrectly.
+Fix this by enabling the feature by default for stacking drivers and
+clearing it for low-level drivers. Incorporating a device that does not
+support dzd will then cause the feature to be disabled in the stacking
+driver.
+
+Also ensure that the maximum discard value does not overflow when
+exported in sysfs and return 0 in the alignment and dzd fields for
+devices that don't support discard.
+
+Reported-by: Lukas Czerner <lczerner@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Acked-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/blk-settings.c   |    3 ++-
+ block/blk-sysfs.c      |    3 ++-
+ include/linux/blkdev.h |    7 +++++--
+ 3 files changed, 9 insertions(+), 4 deletions(-)
+
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -120,7 +120,7 @@ void blk_set_default_limits(struct queue
+       lim->discard_granularity = 0;
+       lim->discard_alignment = 0;
+       lim->discard_misaligned = 0;
+-      lim->discard_zeroes_data = -1;
++      lim->discard_zeroes_data = 1;
+       lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
+       lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
+       lim->alignment_offset = 0;
+@@ -166,6 +166,7 @@ void blk_queue_make_request(struct reque
+       blk_set_default_limits(&q->limits);
+       blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
++      q->limits.discard_zeroes_data = 0;
+       /*
+        * by default assume old behaviour and bounce for any highmem page
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -152,7 +152,8 @@ static ssize_t queue_discard_granularity
+ static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
+ {
+-      return queue_var_show(q->limits.max_discard_sectors << 9, page);
++      return sprintf(page, "%llu\n",
++                     (unsigned long long)q->limits.max_discard_sectors << 9);
+ }
+ static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -257,7 +257,7 @@ struct queue_limits {
+       unsigned char           misaligned;
+       unsigned char           discard_misaligned;
+       unsigned char           cluster;
+-      signed char             discard_zeroes_data;
++      unsigned char           discard_zeroes_data;
+ };
+ struct request_queue
+@@ -1069,13 +1069,16 @@ static inline int queue_limit_discard_al
+ {
+       unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
++      if (!lim->max_discard_sectors)
++              return 0;
++
+       return (lim->discard_granularity + lim->discard_alignment - alignment)
+               & (lim->discard_granularity - 1);
+ }
+ static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
+ {
+-      if (q->limits.discard_zeroes_data == 1)
++      if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
+               return 1;
+       return 0;
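
The sysfs hunk above addresses an arithmetic overflow: max_discard_sectors is an unsigned int counted in 512-byte sectors, so shifting it left by 9 to report bytes can wrap before the value is widened. The small standalone program below demonstrates the wrap and the fix with an illustrative value; it is not kernel code.

#include <stdio.h>

int main(void)
{
        /* e.g. a device advertising 8 GiB worth of discard per command */
        unsigned int max_discard_sectors = 16u * 1024 * 1024;  /* sectors */

        /* Shift performed in 32-bit arithmetic wraps around to zero. */
        unsigned int wrong = max_discard_sectors << 9;

        /* Widening to 64 bits before the shift, as the patch does. */
        unsigned long long right =
                (unsigned long long)max_discard_sectors << 9;

        printf("wrong: %u bytes\n", wrong);     /* prints 0          */
        printf("right: %llu bytes\n", right);   /* prints 8589934592 */
        return 0;
}
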
diff --git a/queue-2.6.39/block-hold-queue-if-flush-is-running-for-non-queueable.patch b/queue-2.6.39/block-hold-queue-if-flush-is-running-for-non-queueable.patch
new file mode 100644 (file)
index 0000000..87cc868
--- /dev/null
@@ -0,0 +1,111 @@
+From 3ac0cc4508709d42ec9aa351086c7d38bfc0660c Mon Sep 17 00:00:00 2001
+From: "shaohua.li@intel.com" <shaohua.li@intel.com>
+Date: Fri, 6 May 2011 11:34:41 -0600
+Subject: block: hold queue if flush is running for non-queueable
+ flush drive
+
+From: "shaohua.li@intel.com" <shaohua.li@intel.com>
+
+commit 3ac0cc4508709d42ec9aa351086c7d38bfc0660c upstream.
+
+In some drives, flush requests are non-queueable. While a flush request is
+running, normal read/write requests can't run. If the block layer dispatches
+such a request, the driver can't handle it and must requeue it.  Tejun
+suggested we hold the queue while a flush is running, which avoids the
+unnecessary requeue and also improves performance. For example, given the
+requests flush1, write1, flush2: flush1 is dispatched, then the queue is
+held, so write1 isn't inserted into the queue. After flush1 finishes, flush2
+is dispatched. Since the disk cache is already clean, flush2 finishes very
+quickly, so effectively flush2 is folded into flush1.
+
+In my test, the queue holding completely solves a regression introduced by
+commit 53d63e6b0dfb95882ec0219ba6bbd50cde423794:
+
+    block: make the flush insertion use the tail of the dispatch list
+
+    It's not a preempt type request, in fact we have to insert it
+    behind requests that do specify INSERT_FRONT.
+
+which causes about a 20% regression when running a sysbench fileio
+workload.
+
+Stable: 2.6.39 only
+
+Signed-off-by: Shaohua Li <shaohua.li@intel.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/blk-flush.c      |   16 +++++++++++-----
+ block/blk.h            |   21 ++++++++++++++++++++-
+ include/linux/blkdev.h |    1 +
+ 3 files changed, 32 insertions(+), 6 deletions(-)
+
+--- a/block/blk-flush.c
++++ b/block/blk-flush.c
+@@ -212,13 +212,19 @@ static void flush_end_io(struct request
+       }
+       /*
+-       * Moving a request silently to empty queue_head may stall the
+-       * queue.  Kick the queue in those cases.  This function is called
+-       * from request completion path and calling directly into
+-       * request_fn may confuse the driver.  Always use kblockd.
++       * Kick the queue to avoid stall for two cases:
++       * 1. Moving a request silently to empty queue_head may stall the
++       * queue.
++       * 2. When flush request is running in non-queueable queue, the
++       * queue is hold. Restart the queue after flush request is finished
++       * to avoid stall.
++       * This function is called from request completion path and calling
++       * directly into request_fn may confuse the driver.  Always use
++       * kblockd.
+        */
+-      if (queued)
++      if (queued || q->flush_queue_delayed)
+               blk_run_queue_async(q);
++      q->flush_queue_delayed = 0;
+ }
+ /**
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -61,7 +61,26 @@ static inline struct request *__elv_next
+                       rq = list_entry_rq(q->queue_head.next);
+                       return rq;
+               }
+-
++              /*
++               * Flush request is running and flush request isn't queueable
++               * in the drive, we can hold the queue till flush request is
++               * finished. Even we don't do this, driver can't dispatch next
++               * requests and will requeue them. And this can improve
++               * throughput too. For example, we have request flush1, write1,
++               * flush 2. flush1 is dispatched, then queue is hold, write1
++               * isn't inserted to queue. After flush1 is finished, flush2
++               * will be dispatched. Since disk cache is already clean,
++               * flush2 will be finished very soon, so looks like flush2 is
++               * folded to flush1.
++               * Since the queue is hold, a flag is set to indicate the queue
++               * should be restarted later. Please see flush_end_io() for
++               * details.
++               */
++              if (q->flush_pending_idx != q->flush_running_idx &&
++                              !queue_flush_queueable(q)) {
++                      q->flush_queue_delayed = 1;
++                      return NULL;
++              }
+               if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
+                       return NULL;
+       }
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -365,6 +365,7 @@ struct request_queue
+        */
+       unsigned int            flush_flags;
+       unsigned int            flush_not_queueable:1;
++      unsigned int            flush_queue_delayed:1;
+       unsigned int            flush_pending_idx:1;
+       unsigned int            flush_running_idx:1;
+       unsigned long           flush_pending_since;
diff --git a/queue-2.6.39/block-move-bd_set_size-above-rescan_partitions-in.patch b/queue-2.6.39/block-move-bd_set_size-above-rescan_partitions-in.patch
new file mode 100644 (file)
index 0000000..a96b9d6
--- /dev/null
@@ -0,0 +1,70 @@
+From 7e69723fef8771a9d57bd27d36281d756130b4b5 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 23 May 2011 13:26:07 +0200
+Subject: block: move bd_set_size() above rescan_partitions() in
+ __blkdev_get()
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 7e69723fef8771a9d57bd27d36281d756130b4b5 upstream.
+
+02e352287a4 (block: rescan partitions on invalidated devices on
+-ENOMEDIA too) relocated partition rescan above explicit bd_set_size()
+to simplify condition check.  As rescan_partitions() does its own bdev
+size setting, this doesn't break anything; however,
+rescan_partitions() prints out the following messages when adjusting
+bdev size, which can be confusing.
+
+  sda: detected capacity change from 0 to 146815737856
+  sdb: detected capacity change from 0 to 146815737856
+
+This patch restores the original order and removes the warning
+messages.
+
+stable: Please apply together with 02e352287a4 (block: rescan
+        partitions on invalidated devices on -ENOMEDIA too).
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Tony Luck <tony.luck@gmail.com>
+Tested-by: Tony Luck <tony.luck@gmail.com>
+Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+
+---
+ fs/block_dev.c |   17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1120,6 +1120,15 @@ static int __blkdev_get(struct block_dev
+                                       goto restart;
+                               }
+                       }
++
++                      if (!ret && !bdev->bd_openers) {
++                              bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
++                              bdi = blk_get_backing_dev_info(bdev);
++                              if (bdi == NULL)
++                                      bdi = &default_backing_dev_info;
++                              bdev_inode_switch_bdi(bdev->bd_inode, bdi);
++                      }
++
+                       /*
+                        * If the device is invalidated, rescan partition
+                        * if open succeeded or failed with -ENOMEDIUM.
+@@ -1130,14 +1139,6 @@ static int __blkdev_get(struct block_dev
+                               rescan_partitions(disk, bdev);
+                       if (ret)
+                               goto out_clear;
+-
+-                      if (!bdev->bd_openers) {
+-                              bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
+-                              bdi = blk_get_backing_dev_info(bdev);
+-                              if (bdi == NULL)
+-                                      bdi = &default_backing_dev_info;
+-                              bdev_inode_switch_bdi(bdev->bd_inode, bdi);
+-                      }
+               } else {
+                       struct block_device *whole;
+                       whole = bdget_disk(disk, 0);
diff --git a/queue-2.6.39/ext4-don-t-set-pageuptodate-in-ext4_end_bio.patch b/queue-2.6.39/ext4-don-t-set-pageuptodate-in-ext4_end_bio.patch
new file mode 100644 (file)
index 0000000..f83ce26
--- /dev/null
@@ -0,0 +1,103 @@
+From 39db00f1c45e770856264bdb3ceca27980b01965 Mon Sep 17 00:00:00 2001
+From: Curt Wohlgemuth <curtw@google.com>
+Date: Sat, 30 Apr 2011 13:26:26 -0400
+Subject: ext4: don't set PageUptodate in ext4_end_bio()
+
+From: Curt Wohlgemuth <curtw@google.com>
+
+commit 39db00f1c45e770856264bdb3ceca27980b01965 upstream.
+
+In the bio completion routine, we should not be setting
+PageUptodate at all -- it's set at sys_write() time, and is
+unaffected by success/failure of the write to disk.
+
+This can cause a page corruption bug when the file system's
+block size is less than the architecture's VM page size.
+
+If we have only written a single block, we might still end up
+setting the page's PageUptodate flag, indicating that the page
+is completely read into memory, which may not be true.
+This could cause subsequent reads to get bad data.
+
+This commit also takes the opportunity to clean up error
+handling in ext4_end_bio(), and remove some extraneous code:
+
+   - fixes ext4_end_bio() to set AS_EIO in the
+     page->mapping->flags on error, which was left out by
+     mistake.  This is needed so that fsync() will
+     return an error if there was an I/O error.
+   - remove the clear_buffer_dirty() call on unmapped
+     buffers for each page.
+   - consolidate page/buffer error handling in a single
+     section.
+
+Signed-off-by: Curt Wohlgemuth <curtw@google.com>
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Reported-by: Jim Meyering <jim@meyering.net>
+Reported-by: Hugh Dickins <hughd@google.com>
+Cc: Mingming Cao <cmm@us.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/ext4/page-io.c |   37 ++++++++++---------------------------
+ 1 file changed, 10 insertions(+), 27 deletions(-)
+
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -203,46 +203,29 @@ static void ext4_end_bio(struct bio *bio
+       for (i = 0; i < io_end->num_io_pages; i++) {
+               struct page *page = io_end->pages[i]->p_page;
+               struct buffer_head *bh, *head;
+-              int partial_write = 0;
++              loff_t offset;
++              loff_t io_end_offset;
+-              head = page_buffers(page);
+-              if (error)
++              if (error) {
+                       SetPageError(page);
+-              BUG_ON(!head);
+-              if (head->b_size != PAGE_CACHE_SIZE) {
+-                      loff_t offset;
+-                      loff_t io_end_offset = io_end->offset + io_end->size;
++                      set_bit(AS_EIO, &page->mapping->flags);
++                      head = page_buffers(page);
++                      BUG_ON(!head);
++
++                      io_end_offset = io_end->offset + io_end->size;
+                       offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
+                       bh = head;
+                       do {
+                               if ((offset >= io_end->offset) &&
+-                                  (offset+bh->b_size <= io_end_offset)) {
+-                                      if (error)
+-                                              buffer_io_error(bh);
++                                  (offset+bh->b_size <= io_end_offset))
++                                      buffer_io_error(bh);
+-                              }
+-                              if (buffer_delay(bh))
+-                                      partial_write = 1;
+-                              else if (!buffer_mapped(bh))
+-                                      clear_buffer_dirty(bh);
+-                              else if (buffer_dirty(bh))
+-                                      partial_write = 1;
+                               offset += bh->b_size;
+                               bh = bh->b_this_page;
+                       } while (bh != head);
+               }
+-              /*
+-               * If this is a partial write which happened to make
+-               * all buffers uptodate then we can optimize away a
+-               * bogus readpage() for the next read(). Here we
+-               * 'discover' whether the page went uptodate as a
+-               * result of this (potentially partial) write.
+-               */
+-              if (!partial_write)
+-                      SetPageUptodate(page);
+-
+               put_io_page(io_end->pages[i]);
+       }
+       io_end->num_io_pages = 0;
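
The window for corruption comes from the block size being smaller than the page size, so a completed write may cover only part of a page. The toy calculation below illustrates the proportions with assumed sizes (4 KiB pages, 1 KiB blocks); it is only a worked example of the commit message's argument, not file system code.

#include <stdio.h>

int main(void)
{
        unsigned int page_size  = 4096;   /* typical VM page size   */
        unsigned int block_size = 1024;   /* ext4 with 1 KiB blocks */

        unsigned int blocks_per_page = page_size / block_size;   /* 4 */
        unsigned int bytes_written   = block_size;       /* one block */

        printf("blocks per page: %u\n", blocks_per_page);
        printf("page covered by this write: %u%%\n",
               100 * bytes_written / page_size);

        /* Only a quarter of the page is backed by this write, so setting
         * PageUptodate here would let a later read() return stale data
         * for the other three blocks. */
        return 0;
}
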
diff --git a/queue-2.6.39/mtd-mtdconcat-fix-nand-oob-write.patch b/queue-2.6.39/mtd-mtdconcat-fix-nand-oob-write.patch
new file mode 100644 (file)
index 0000000..810a5a2
--- /dev/null
@@ -0,0 +1,42 @@
+From 431e1ecabddcd7cbba237182ddf431771f98bb4c Mon Sep 17 00:00:00 2001
+From: Felix Radensky <felix@embedded-sol.com>
+Date: Mon, 25 Apr 2011 01:57:12 +0300
+Subject: mtd: mtdconcat: fix NAND OOB write
+
+From: Felix Radensky <felix@embedded-sol.com>
+
+commit 431e1ecabddcd7cbba237182ddf431771f98bb4c upstream.
+
+Currently mtdconcat is broken for NAND. An attempt to create a
+JFFS2 filesystem on a concatenation of several NAND devices fails
+with OOB write errors. This patch fixes that problem.
+
+Signed-off-by: Felix Radensky <felix@embedded-sol.com>
+Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/mtd/mtdconcat.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/mtd/mtdconcat.c
++++ b/drivers/mtd/mtdconcat.c
+@@ -319,7 +319,7 @@ concat_write_oob(struct mtd_info *mtd, l
+       if (!(mtd->flags & MTD_WRITEABLE))
+               return -EROFS;
+-      ops->retlen = 0;
++      ops->retlen = ops->oobretlen = 0;
+       for (i = 0; i < concat->num_subdev; i++) {
+               struct mtd_info *subdev = concat->subdev[i];
+@@ -334,7 +334,7 @@ concat_write_oob(struct mtd_info *mtd, l
+                       devops.len = subdev->size - to;
+               err = subdev->write_oob(subdev, to, &devops);
+-              ops->retlen += devops.retlen;
++              ops->retlen += devops.oobretlen;
+               if (err)
+                       return err;
diff --git a/queue-2.6.39/mtd-omap-fix-subpage-ecc-issue-with-prefetch.patch b/queue-2.6.39/mtd-omap-fix-subpage-ecc-issue-with-prefetch.patch
new file mode 100644 (file)
index 0000000..63d5265
--- /dev/null
@@ -0,0 +1,72 @@
+From c5d8c0cae4af7d78823d32fcd1c458ee1a1b5489 Mon Sep 17 00:00:00 2001
+From: Kishore Kadiyala <kishore.kadiyala@ti.com>
+Date: Wed, 11 May 2011 21:17:27 +0530
+Subject: mtd: omap: fix subpage ecc issue with prefetch
+
+From: Kishore Kadiyala <kishore.kadiyala@ti.com>
+
+commit c5d8c0cae4af7d78823d32fcd1c458ee1a1b5489 upstream.
+
+When reading/writing a subpage (when HW ECC is not available/enabled)
+for a number of bytes not aligned to 4, the misaligned bytes are handled
+first (by the cpu copy method) before enabling the prefetch engine to/from
+'p' (the start of buffer 'buf'). The rest of the bytes are then read/written
+with the help of the prefetch engine, if available, or again using the cpu
+copy method. Currently, reading/writing the rest of the bytes is not done
+correctly, since it tries to read/write again to/from the beginning of
+buffer 'buf', overwriting the misaligned bytes.
+
+Read & write using the prefetch engine got broken in commit '2c01946c'.
+We never hit a scenario where the 'gpmc_prefetch_enable' call fails, so
+the problem went unnoticed.
+
+Signed-off-by: Kishore Kadiyala <kishore.kadiyala@ti.com>
+Signed-off-by: Vimal Singh <vimal.newwork@gmail.com>
+Reported-by: Bryan DE FARIA <bdefaria@adeneo-embedded.com>
+Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/mtd/nand/omap2.c |   12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+--- a/drivers/mtd/nand/omap2.c
++++ b/drivers/mtd/nand/omap2.c
+@@ -263,11 +263,10 @@ static void omap_read_buf_pref(struct mt
+       if (ret) {
+               /* PFPW engine is busy, use cpu copy method */
+               if (info->nand.options & NAND_BUSWIDTH_16)
+-                      omap_read_buf16(mtd, buf, len);
++                      omap_read_buf16(mtd, (u_char *)p, len);
+               else
+-                      omap_read_buf8(mtd, buf, len);
++                      omap_read_buf8(mtd, (u_char *)p, len);
+       } else {
+-              p = (u32 *) buf;
+               do {
+                       r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+                       r_count = r_count >> 2;
+@@ -293,7 +292,7 @@ static void omap_write_buf_pref(struct m
+                                               struct omap_nand_info, mtd);
+       uint32_t w_count = 0;
+       int i = 0, ret = 0;
+-      u16 *p;
++      u16 *p = (u16 *)buf;
+       unsigned long tim, limit;
+       /* take care of subpage writes */
+@@ -309,11 +308,10 @@ static void omap_write_buf_pref(struct m
+       if (ret) {
+               /* PFPW engine is busy, use cpu copy method */
+               if (info->nand.options & NAND_BUSWIDTH_16)
+-                      omap_write_buf16(mtd, buf, len);
++                      omap_write_buf16(mtd, (u_char *)p, len);
+               else
+-                      omap_write_buf8(mtd, buf, len);
++                      omap_write_buf8(mtd, (u_char *)p, len);
+       } else {
+-              p = (u16 *) buf;
+               while (len) {
+                       w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+                       w_count = w_count >> 1;
diff --git a/queue-2.6.39/mtd-return-badblockbits-back.patch b/queue-2.6.39/mtd-return-badblockbits-back.patch
new file mode 100644 (file)
index 0000000..da7cef0
--- /dev/null
@@ -0,0 +1,39 @@
+From 26d9be11485ea8c1102c3e8eaa7667412eef4950 Mon Sep 17 00:00:00 2001
+From: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
+Date: Thu, 28 Apr 2011 20:26:59 +0300
+Subject: mtd: return badblockbits back
+
+From: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
+
+commit 26d9be11485ea8c1102c3e8eaa7667412eef4950 upstream.
+
+In commit c7b28e25cb9beb943aead770ff14551b55fa8c79 the initialization of
+badblockbits was accidentally removed. This patch restores it,
+because otherwise some NAND drivers are broken.
+
+This problem was reported by "Saxena, Parth <parth.saxena@ti.com>" here:
+http://lists.infradead.org/pipermail/linux-mtd/2011-April/035221.html
+
+Reported-by: Saxena, Parth <parth.saxena@ti.com>
+Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
+Tested-by: Saxena, Parth <parth.saxena@ti.com>
+Acked-by: Saxena, Parth <parth.saxena@ti.com>
+Acked-by: Brian Norris <computersforpeace@gmail.com>
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/mtd/nand/nand_base.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -3112,6 +3112,8 @@ ident_done:
+               chip->chip_shift += 32 - 1;
+       }
++      chip->badblockbits = 8;
++
+       /* Set the bad block position */
+       if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
+               chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
diff --git a/queue-2.6.39/series b/queue-2.6.39/series
index 30951c7540a70201436869a4735039b6bca768e8..47ccaa28abf9e1d554656df91365acc000d4bb68 100644 (file)
--- a/queue-2.6.39/series
@@ -20,3 +20,13 @@ ath9k_hw-fix-dual-band-assumption-for-xb113.patch
 ath9k_hw-fix-sta-connection-issues-with-ar9380-xb113.patch
 powerpc-oprofile-handle-events-that-raise-an-exception-without-overflowing.patch
 iwlagn-fix-iwl_is_any_associated.patch
+ext4-don-t-set-pageuptodate-in-ext4_end_bio.patch
+block-move-bd_set_size-above-rescan_partitions-in.patch
+block-add-a-non-queueable-flush-flag.patch
+block-hold-queue-if-flush-is-running-for-non-queueable.patch
+block-fix-discard-topology-stacking-and-reporting.patch
+block-add-proper-state-guards-to-__elv_next_request.patch
+block-always-allocate-genhd-ev-if-check_events-is.patch
+mtd-mtdconcat-fix-nand-oob-write.patch
+mtd-return-badblockbits-back.patch
+mtd-omap-fix-subpage-ecc-issue-with-prefetch.patch