--- /dev/null
+From 9284bcf4e335e5f18a8bc7b26461c33ab60d0689 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <jaxboe@fusionio.com>
+Date: Fri, 29 Oct 2010 08:10:18 -0600
+Subject: block: check for proper length of iov entries in blk_rq_map_user_iov()
+
+From: Jens Axboe <jaxboe@fusionio.com>
+
+commit 9284bcf4e335e5f18a8bc7b26461c33ab60d0689 upstream.
+
+Ensure that we pass down properly validated iov segments before
+calling into the mapping or copy functions.
+
+Reported-by: Dan Rosenberg <drosenberg@vsecurity.com>
+Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/blk-map.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/block/blk-map.c
++++ b/block/blk-map.c
+@@ -205,6 +205,8 @@ int blk_rq_map_user_iov(struct request_q
+ unaligned = 1;
+ break;
+ }
++ if (!iov[i].iov_len)
++ return -EINVAL;
+ }
+
+ if (unaligned || (q->dma_pad_mask & len) || map_data)
--- /dev/null
+From 892b6f90db81cccb723d5d92f4fddc2d68b206e1 Mon Sep 17 00:00:00 2001
+From: Martin K. Petersen <martin.petersen@oracle.com>
+Date: Wed, 13 Oct 2010 21:18:03 +0200
+Subject: block: Ensure physical block size is unsigned int
+
+From: Martin K. Petersen <martin.petersen@oracle.com>
+
+commit 892b6f90db81cccb723d5d92f4fddc2d68b206e1 upstream.
+
+Physical block size was declared unsigned int to accommodate the maximum
+size reported by READ CAPACITY(16). Make sure we use the right type in
+the related functions.
+
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Acked-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/blk-settings.c | 2 +-
+ include/linux/blkdev.h | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -343,7 +343,7 @@ EXPORT_SYMBOL(blk_queue_logical_block_si
+ * hardware can operate on without reverting to read-modify-write
+ * operations.
+ */
+-void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
++void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
+ {
+ q->limits.physical_block_size = size;
+
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -851,7 +851,7 @@ extern void blk_queue_max_segment_size(s
+ extern void blk_queue_max_discard_sectors(struct request_queue *q,
+ unsigned int max_discard_sectors);
+ extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+-extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
++extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
+ extern void blk_queue_alignment_offset(struct request_queue *q,
+ unsigned int alignment);
+ extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
+@@ -1004,7 +1004,7 @@ static inline unsigned int queue_physica
+ return q->limits.physical_block_size;
+ }
+
+-static inline int bdev_physical_block_size(struct block_device *bdev)
++static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
+ {
+ return queue_physical_block_size(bdev_get_queue(bdev));
+ }
--- /dev/null
+From 7681bfeeccff5efa9eb29bf09249a3c400b15327 Mon Sep 17 00:00:00 2001
+From: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
+Date: Tue, 19 Oct 2010 09:05:00 +0200
+Subject: block: fix accounting bug on cross partition merges
+
+From: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
+
+commit 7681bfeeccff5efa9eb29bf09249a3c400b15327 upstream.
+
+/proc/diskstats would display a strange output as follows.
+
+$ cat /proc/diskstats |grep sda
+ 8 0 sda 90524 7579 102154 20464 0 0 0 0 0 14096 20089
+ 8 1 sda1 19085 1352 21841 4209 0 0 0 0 4294967064 15689 4293424691
+ ~~~~~~~~~~
+ 8 2 sda2 71252 3624 74891 15950 0 0 0 0 232 23995 1562390
+ 8 3 sda3 54 487 2188 92 0 0 0 0 0 88 92
+ 8 4 sda4 4 0 8 0 0 0 0 0 0 0 0
+ 8 5 sda5 81 2027 2130 138 0 0 0 0 0 87 137
+
+Its reason is the wrong way of accounting hd_struct->in_flight. When a bio is
+merged into a request that belongs to a different partition by ELEVATOR_FRONT_MERGE.
+
+The detailed root cause is as follows.
+
+Assuming that there are two partitions, sda1 and sda2.
+
+1. A request for sda2 is in request_queue. Hence sda1's hd_struct->in_flight
+ is 0 and sda2's one is 1.
+
+ | hd_struct->in_flight
+ ---------------------------
+ sda1 | 0
+ sda2 | 1
+ ---------------------------
+
+2. A bio belonging to sda1 is issued and is merged into the request mentioned in
+ step1 by ELEVATOR_BACK_MERGE. The first sector of the request is changed
+ from sda2 region to sda1 region. However the two partition's
+ hd_struct->in_flight are not changed.
+
+ | hd_struct->in_flight
+ ---------------------------
+ sda1 | 0
+ sda2 | 1
+ ---------------------------
+
+3. The request is finished and blk_account_io_done() is called. In this case,
+   sda2's hd_struct->in_flight, not sda1's, is decremented.
+
+ | hd_struct->in_flight
+ ---------------------------
+ sda1 | -1
+ sda2 | 1
+ ---------------------------
+
+The patch fixes the problem by caching the partition lookup
+inside the request structure, hence making sure that the increment
+and decrement will always happen on the same partition struct. This
+also speeds up IO with accounting enabled, since it cuts down on
+the number of lookups we have to do.
+
+When reloading partition tables, quiesce IO to ensure that no
+request references to the partition struct exists. When it is safe
+to free the partition table, the IO for that device is restarted
+again.
+
+Signed-off-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
+Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/blk-core.c | 24 ++++++++++++++++--------
+ block/blk-merge.c | 2 +-
+ block/blk.h | 4 ----
+ block/genhd.c | 14 ++++++++++++++
+ fs/partitions/check.c | 12 ++++++++++++
+ include/linux/blkdev.h | 1 +
+ include/linux/elevator.h | 2 ++
+ include/linux/genhd.h | 1 +
+ 8 files changed, 47 insertions(+), 13 deletions(-)
+
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -64,13 +64,15 @@ static void drive_stat_acct(struct reque
+ return;
+
+ cpu = part_stat_lock();
+- part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
+
+- if (!new_io)
++ if (!new_io) {
++ part = rq->part;
+ part_stat_inc(cpu, part, merges[rw]);
+- else {
++ } else {
++ part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
+ part_round_stats(cpu, part);
+ part_inc_in_flight(part, rw);
++ rq->part = part;
+ }
+
+ part_stat_unlock();
+@@ -128,6 +130,7 @@ void blk_rq_init(struct request_queue *q
+ rq->ref_count = 1;
+ rq->start_time = jiffies;
+ set_start_time_ns(rq);
++ rq->part = NULL;
+ }
+ EXPORT_SYMBOL(blk_rq_init);
+
+@@ -796,11 +799,16 @@ static struct request *get_request(struc
+ rl->starved[is_sync] = 0;
+
+ priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+- if (priv)
++ if (priv) {
+ rl->elvpriv++;
+
+- if (blk_queue_io_stat(q))
+- rw_flags |= REQ_IO_STAT;
++ /*
++ * Don't do stats for non-priv requests
++ */
++ if (blk_queue_io_stat(q))
++ rw_flags |= REQ_IO_STAT;
++ }
++
+ spin_unlock_irq(q->queue_lock);
+
+ rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
+@@ -1759,7 +1767,7 @@ static void blk_account_io_completion(st
+ int cpu;
+
+ cpu = part_stat_lock();
+- part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
++ part = req->part;
+ part_stat_add(cpu, part, sectors[rw], bytes >> 9);
+ part_stat_unlock();
+ }
+@@ -1779,7 +1787,7 @@ static void blk_account_io_done(struct r
+ int cpu;
+
+ cpu = part_stat_lock();
+- part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
++ part = req->part;
+
+ part_stat_inc(cpu, part, ios[rw]);
+ part_stat_add(cpu, part, ticks[rw], duration);
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -343,7 +343,7 @@ static void blk_account_io_merge(struct
+ int cpu;
+
+ cpu = part_stat_lock();
+- part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
++ part = req->part;
+
+ part_round_stats(cpu, part);
+ part_dec_in_flight(part, rq_data_dir(req));
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -110,10 +110,6 @@ void blk_queue_congestion_threshold(stru
+
+ int blk_dev_init(void);
+
+-void elv_quiesce_start(struct request_queue *q);
+-void elv_quiesce_end(struct request_queue *q);
+-
+-
+ /*
+ * Return the threshold (number of used requests) at which the queue is
+ * considered to be congested. It include a little hysteresis to keep the
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -925,8 +925,15 @@ static void disk_free_ptbl_rcu_cb(struct
+ {
+ struct disk_part_tbl *ptbl =
+ container_of(head, struct disk_part_tbl, rcu_head);
++ struct gendisk *disk = ptbl->disk;
++ struct request_queue *q = disk->queue;
++ unsigned long flags;
+
+ kfree(ptbl);
++
++ spin_lock_irqsave(q->queue_lock, flags);
++ elv_quiesce_end(q);
++ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+
+ /**
+@@ -944,11 +951,17 @@ static void disk_replace_part_tbl(struct
+ struct disk_part_tbl *new_ptbl)
+ {
+ struct disk_part_tbl *old_ptbl = disk->part_tbl;
++ struct request_queue *q = disk->queue;
+
+ rcu_assign_pointer(disk->part_tbl, new_ptbl);
+
+ if (old_ptbl) {
+ rcu_assign_pointer(old_ptbl->last_lookup, NULL);
++
++ spin_lock_irq(q->queue_lock);
++ elv_quiesce_start(q);
++ spin_unlock_irq(q->queue_lock);
++
+ call_rcu(&old_ptbl->rcu_head, disk_free_ptbl_rcu_cb);
+ }
+ }
+@@ -989,6 +1002,7 @@ int disk_expand_part_tbl(struct gendisk
+ return -ENOMEM;
+
+ new_ptbl->len = target;
++ new_ptbl->disk = disk;
+
+ for (i = 0; i < len; i++)
+ rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]);
+--- a/fs/partitions/check.c
++++ b/fs/partitions/check.c
+@@ -364,17 +364,25 @@ struct device_type part_type = {
+ static void delete_partition_rcu_cb(struct rcu_head *head)
+ {
+ struct hd_struct *part = container_of(head, struct hd_struct, rcu_head);
++ struct gendisk *disk = part_to_disk(part);
++ struct request_queue *q = disk->queue;
++ unsigned long flags;
+
+ part->start_sect = 0;
+ part->nr_sects = 0;
+ part_stat_set_all(part, 0);
+ put_device(part_to_dev(part));
++
++ spin_lock_irqsave(q->queue_lock, flags);
++ elv_quiesce_end(q);
++ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+
+ void delete_partition(struct gendisk *disk, int partno)
+ {
+ struct disk_part_tbl *ptbl = disk->part_tbl;
+ struct hd_struct *part;
++ struct request_queue *q = disk->queue;
+
+ if (partno >= ptbl->len)
+ return;
+@@ -389,6 +397,10 @@ void delete_partition(struct gendisk *di
+ kobject_put(part->holder_dir);
+ device_del(part_to_dev(part));
+
++ spin_lock_irq(q->queue_lock);
++ elv_quiesce_start(q);
++ spin_unlock_irq(q->queue_lock);
++
+ call_rcu(&part->rcu_head, delete_partition_rcu_cb);
+ }
+
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -115,6 +115,7 @@ struct request {
+ void *elevator_private3;
+
+ struct gendisk *rq_disk;
++ struct hd_struct *part;
+ unsigned long start_time;
+ #ifdef CONFIG_BLK_CGROUP
+ unsigned long long start_time_ns;
+--- a/include/linux/elevator.h
++++ b/include/linux/elevator.h
+@@ -122,6 +122,8 @@ extern void elv_completed_request(struct
+ extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
+ extern void elv_put_request(struct request_queue *, struct request *);
+ extern void elv_drain_elevator(struct request_queue *);
++extern void elv_quiesce_start(struct request_queue *);
++extern void elv_quiesce_end(struct request_queue *);
+
+ /*
+ * io scheduler registration
+--- a/include/linux/genhd.h
++++ b/include/linux/genhd.h
+@@ -130,6 +130,7 @@ struct disk_part_tbl {
+ struct rcu_head rcu_head;
+ int len;
+ struct hd_struct *last_lookup;
++ struct gendisk *disk;
+ struct hd_struct *part[];
+ };
+
--- /dev/null
+From 01ea50638bc04ca5259f5711fcdedefcdde1cf43 Mon Sep 17 00:00:00 2001
+From: Signed-off-by: Jan Kara <jack@suse.cz>
+Date: Thu, 16 Sep 2010 20:36:36 +0200
+Subject: block: Fix race during disk initialization
+
+From: Signed-off-by: Jan Kara <jack@suse.cz>
+
+commit 01ea50638bc04ca5259f5711fcdedefcdde1cf43 upstream.
+
+When a new disk is being discovered, add_disk() first ties the bdev to gendisk
+(via register_disk()->blkdev_get()) and only after that calls
+bdi_register_bdev(). Because register_disk() also creates disk's kobject, it
+can happen that userspace manages to open and modify the device's data (or
+inode) before its BDI is properly initialized leading to a warning in
+__mark_inode_dirty().
+
+Fix the problem by registering BDI early enough.
+
+This patch addresses https://bugzilla.kernel.org/show_bug.cgi?id=16312
+
+Reported-by: Larry Finger <Larry.Finger@lwfinger.net>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/genhd.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -541,13 +541,15 @@ void add_disk(struct gendisk *disk)
+ disk->major = MAJOR(devt);
+ disk->first_minor = MINOR(devt);
+
++ /* Register BDI before referencing it from bdev */
++ bdi = &disk->queue->backing_dev_info;
++ bdi_register_dev(bdi, disk_devt(disk));
++
+ blk_register_region(disk_devt(disk), disk->minors, NULL,
+ exact_match, exact_lock, disk);
+ register_disk(disk);
+ blk_register_queue(disk);
+
+- bdi = &disk->queue->backing_dev_info;
+- bdi_register_dev(bdi, disk_devt(disk));
+ retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
+ "bdi");
+ WARN_ON(retval);
--- /dev/null
+From f3f63c1c28bc861a931fac283b5bc3585efb8967 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <jaxboe@fusionio.com>
+Date: Fri, 29 Oct 2010 11:46:56 -0600
+Subject: block: limit vec count in bio_kmalloc() and bio_alloc_map_data()
+
+From: Jens Axboe <jaxboe@fusionio.com>
+
+commit f3f63c1c28bc861a931fac283b5bc3585efb8967 upstream.
+
+Reported-by: Dan Rosenberg <drosenberg@vsecurity.com>
+Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/bio.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/fs/bio.c
++++ b/fs/bio.c
+@@ -370,6 +370,9 @@ struct bio *bio_kmalloc(gfp_t gfp_mask,
+ {
+ struct bio *bio;
+
++ if (nr_iovecs > UIO_MAXIOV)
++ return NULL;
++
+ bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
+ gfp_mask);
+ if (unlikely(!bio))
+@@ -697,8 +700,12 @@ static void bio_free_map_data(struct bio
+ static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
+ gfp_t gfp_mask)
+ {
+- struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask);
++ struct bio_map_data *bmd;
++
++ if (iov_count > UIO_MAXIOV)
++ return NULL;
+
++ bmd = kmalloc(sizeof(*bmd), gfp_mask);
+ if (!bmd)
+ return NULL;
+
--- /dev/null
+From 9f864c80913467312c7b8690e41fb5ebd1b50e92 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <jaxboe@fusionio.com>
+Date: Fri, 29 Oct 2010 11:31:42 -0600
+Subject: block: take care not to overflow when calculating total iov length
+
+From: Jens Axboe <jaxboe@fusionio.com>
+
+commit 9f864c80913467312c7b8690e41fb5ebd1b50e92 upstream.
+
+Reported-by: Dan Rosenberg <drosenberg@vsecurity.com>
+Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ block/scsi_ioctl.c | 34 ++++++++++++++++++++++++----------
+ 1 file changed, 24 insertions(+), 10 deletions(-)
+
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -321,33 +321,47 @@ static int sg_io(struct request_queue *q
+ if (hdr->iovec_count) {
+ const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
+ size_t iov_data_len;
+- struct sg_iovec *iov;
++ struct sg_iovec *sg_iov;
++ struct iovec *iov;
++ int i;
+
+- iov = kmalloc(size, GFP_KERNEL);
+- if (!iov) {
++ sg_iov = kmalloc(size, GFP_KERNEL);
++ if (!sg_iov) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+- if (copy_from_user(iov, hdr->dxferp, size)) {
+- kfree(iov);
++ if (copy_from_user(sg_iov, hdr->dxferp, size)) {
++ kfree(sg_iov);
+ ret = -EFAULT;
+ goto out;
+ }
+
++ /*
++ * Sum up the vecs, making sure they don't overflow
++ */
++ iov = (struct iovec *) sg_iov;
++ iov_data_len = 0;
++ for (i = 0; i < hdr->iovec_count; i++) {
++ if (iov_data_len + iov[i].iov_len < iov_data_len) {
++ kfree(sg_iov);
++ ret = -EINVAL;
++ goto out;
++ }
++ iov_data_len += iov[i].iov_len;
++ }
++
+ /* SG_IO howto says that the shorter of the two wins */
+- iov_data_len = iov_length((struct iovec *)iov,
+- hdr->iovec_count);
+ if (hdr->dxfer_len < iov_data_len) {
+- hdr->iovec_count = iov_shorten((struct iovec *)iov,
++ hdr->iovec_count = iov_shorten(iov,
+ hdr->iovec_count,
+ hdr->dxfer_len);
+ iov_data_len = hdr->dxfer_len;
+ }
+
+- ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
++ ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
+ iov_data_len, GFP_KERNEL);
+- kfree(iov);
++ kfree(sg_iov);
+ } else if (hdr->dxfer_len)
+ ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
+ GFP_KERNEL);
--- /dev/null
+From a0ae5864d42b41c411368bd689462bf063c029c8 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexdeucher@gmail.com>
+Date: Tue, 2 Nov 2010 05:26:48 +0000
+Subject: drm/radeon/kms: don't disable shared encoders on pre-DCE3 display blocks
+
+From: Alex Deucher <alexdeucher@gmail.com>
+
+commit a0ae5864d42b41c411368bd689462bf063c029c8 upstream.
+
+The A/B links aren't independently usable on these blocks so when
+we disable the encoders, make sure to only disable the encoder when
+there is no connector using it.
+
+Should fix:
+https://bugs.freedesktop.org/show_bug.cgi?id=18564
+
+Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/radeon/radeon_encoders.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+--- a/drivers/gpu/drm/radeon/radeon_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_encoders.c
+@@ -1547,6 +1547,23 @@ static void radeon_atom_encoder_disable(
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig;
++
++ /* check for pre-DCE3 cards with shared encoders;
++ * can't really use the links individually, so don't disable
++ * the encoder if it's in use by another connector
++ */
++ if (!ASIC_IS_DCE3(rdev)) {
++ struct drm_encoder *other_encoder;
++ struct radeon_encoder *other_radeon_encoder;
++
++ list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
++ other_radeon_encoder = to_radeon_encoder(other_encoder);
++ if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
++ drm_helper_encoder_in_use(other_encoder))
++ goto disable_done;
++ }
++ }
++
+ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ switch (radeon_encoder->encoder_id) {
+@@ -1586,6 +1603,7 @@ static void radeon_atom_encoder_disable(
+ break;
+ }
+
++disable_done:
+ if (radeon_encoder_is_digital(encoder)) {
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+ r600_hdmi_disable(encoder);
--- /dev/null
+From 37f9fc452d138dfc4da2ee1ce5ae85094efc3606 Mon Sep 17 00:00:00 2001
+From: Samuel Ortiz <samuel@sortiz.org>
+Date: Wed, 6 Oct 2010 01:03:12 +0200
+Subject: irda: Fix heap memory corruption in iriap.c
+
+From: Samuel Ortiz <samuel@sortiz.org>
+
+commit 37f9fc452d138dfc4da2ee1ce5ae85094efc3606 upstream.
+
+While parsing the GetValuebyClass command frame, we could potentially write
+past the skb->data pointer.
+
+Reported-by: Ilja Van Sprundel <ivansprundel@ioactive.com>
+Signed-off-by: Samuel Ortiz <samuel@sortiz.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/irda/iriap.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/irda/iriap.c
++++ b/net/irda/iriap.c
+@@ -502,7 +502,8 @@ static void iriap_getvaluebyclass_confir
+ IRDA_DEBUG(4, "%s(), strlen=%d\n", __func__, value_len);
+
+ /* Make sure the string is null-terminated */
+- fp[n+value_len] = 0x00;
++ if (n + value_len < skb->len)
++ fp[n + value_len] = 0x00;
+ IRDA_DEBUG(4, "Got string %s\n", fp+n);
+
+ /* Will truncate to IAS_MAX_STRING bytes */
--- /dev/null
+From efc463eb508798da4243625b08c7396462cabf9f Mon Sep 17 00:00:00 2001
+From: Samuel Ortiz <samuel@sortiz.org>
+Date: Mon, 11 Oct 2010 01:17:56 +0200
+Subject: irda: Fix parameter extraction stack overflow
+
+From: Samuel Ortiz <samuel@sortiz.org>
+
+commit efc463eb508798da4243625b08c7396462cabf9f upstream.
+
+Reported-by: Ilja Van Sprundel <ivansprundel@ioactive.com>
+Signed-off-by: Samuel Ortiz <samuel@sortiz.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/irda/parameters.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/irda/parameters.c
++++ b/net/irda/parameters.c
+@@ -298,6 +298,8 @@ static int irda_extract_string(void *sel
+
+ p.pi = pi; /* In case handler needs to know */
+ p.pl = buf[1]; /* Extract length of value */
++ if (p.pl > 32)
++ p.pl = 32;
+
+ IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d\n", __func__,
+ p.pi, p.pl);
+@@ -318,7 +320,7 @@ static int irda_extract_string(void *sel
+ (__u8) str[0], (__u8) str[1]);
+
+ /* Null terminate string */
+- str[p.pl+1] = '\0';
++ str[p.pl] = '\0';
+
+ p.pv.c = str; /* Handler will need to take a copy */
+
--- /dev/null
+From c8a8684d5cfb0f110a962c93586630c0bf91ebc1 Mon Sep 17 00:00:00 2001
+From: Guo-Fu Tseng <cooldavid@cooldavid.org>
+Date: Mon, 18 Oct 2010 14:10:40 +0000
+Subject: jme: Fix PHY power-off error
+
+From: Guo-Fu Tseng <cooldavid@cooldavid.org>
+
+commit c8a8684d5cfb0f110a962c93586630c0bf91ebc1 upstream.
+
+Adding phy_on in opposition to phy_off.
+
+Signed-off-by: Guo-Fu Tseng <cooldavid@cooldavid.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/jme.c | 22 ++++++++++++++++++----
+ 1 file changed, 18 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/jme.c
++++ b/drivers/net/jme.c
+@@ -1575,6 +1575,16 @@ jme_free_irq(struct jme_adapter *jme)
+ }
+ }
+
++static inline void
++jme_phy_on(struct jme_adapter *jme)
++{
++ u32 bmcr;
++
++ bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
++ bmcr &= ~BMCR_PDOWN;
++ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
++}
++
+ static int
+ jme_open(struct net_device *netdev)
+ {
+@@ -1595,10 +1605,12 @@ jme_open(struct net_device *netdev)
+
+ jme_start_irq(jme);
+
+- if (test_bit(JME_FLAG_SSET, &jme->flags))
++ if (test_bit(JME_FLAG_SSET, &jme->flags)) {
++ jme_phy_on(jme);
+ jme_set_settings(netdev, &jme->old_ecmd);
+- else
++ } else {
+ jme_reset_phy_processor(jme);
++ }
+
+ jme_reset_link(jme);
+
+@@ -3006,10 +3018,12 @@ jme_resume(struct pci_dev *pdev)
+ jme_clear_pm(jme);
+ pci_restore_state(pdev);
+
+- if (test_bit(JME_FLAG_SSET, &jme->flags))
++ if (test_bit(JME_FLAG_SSET, &jme->flags)) {
++ jme_phy_on(jme);
+ jme_set_settings(netdev, &jme->old_ecmd);
+- else
++ } else {
+ jme_reset_phy_processor(jme);
++ }
+
+ jme_start_irq(jme);
+ netif_device_attach(netdev);