--- /dev/null
+From 084457f284abf6789d90509ee11dae383842b23b Mon Sep 17 00:00:00 2001
+From: Li Zefan <lizefan@huawei.com>
+Date: Tue, 18 Jun 2013 18:40:19 +0800
+Subject: cgroup: fix umount vs cgroup_cfts_commit() race
+
+From: Li Zefan <lizefan@huawei.com>
+
+commit 084457f284abf6789d90509ee11dae383842b23b upstream.
+
+cgroup_cfts_commit() uses dget() to keep cgroup alive after cgroup_mutex
+is dropped, but dget() won't prevent cgroupfs from being umounted. When
+the race happens, vfs will see some dentries with non-zero refcnt while
+umount is in process.
+
+Keep running this:
+ mount -t cgroup -o blkio xxx /cgroup
+ umount /cgroup
+
+And this:
+ modprobe cfq-iosched
+ rmmod cfq-iosched
+
+After a while, the BUG() in shrink_dcache_for_umount_subtree() may
+be triggered:
+
+ BUG: Dentry xxx{i=0,n=blkio.yyy} still in use (1) [umount of cgroup cgroup]
+
+Signed-off-by: Li Zefan <lizefan@huawei.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cgroup.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -2769,13 +2769,17 @@ static void cgroup_cfts_commit(struct cg
+ {
+ LIST_HEAD(pending);
+ struct cgroup *cgrp, *n;
++ struct super_block *sb = ss->root->sb;
+
+ /* %NULL @cfts indicates abort and don't bother if @ss isn't attached */
+- if (cfts && ss->root != &rootnode) {
++ if (cfts && ss->root != &rootnode &&
++ atomic_inc_not_zero(&sb->s_active)) {
+ list_for_each_entry(cgrp, &ss->root->allcg_list, allcg_node) {
+ dget(cgrp->dentry);
+ list_add_tail(&cgrp->cft_q_node, &pending);
+ }
++ } else {
++ sb = NULL;
+ }
+
+ mutex_unlock(&cgroup_mutex);
+@@ -2798,6 +2802,9 @@ static void cgroup_cfts_commit(struct cg
+ dput(cgrp->dentry);
+ }
+
++ if (sb)
++ deactivate_super(sb);
++
+ mutex_unlock(&cgroup_cft_mutex);
+ }
+
--- /dev/null
+From bcf53de4e60d9000b82f541d654529e2902a4c2c Mon Sep 17 00:00:00 2001
+From: Stéphane Marchesin <marcheu@chromium.org>
+Date: Fri, 12 Jul 2013 13:54:41 -0700
+Subject: drm/i915: Preserve the DDI_A_4_LANES bit from the bios
+
+From: Stéphane Marchesin <marcheu@chromium.org>
+
+commit bcf53de4e60d9000b82f541d654529e2902a4c2c upstream.
+
+Otherwise the DDI_A_4_LANES bit gets lost and we can't use > 2 lanes
+on eDP. This fixes eDP on hsw with > 2 lanes.
+
+Also s/port_reversal/saved_port_bits/ since the current name is
+confusing.
+
+Signed-off-by: Stéphane Marchesin <marcheu@chromium.org>
+Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: Zhouping Liu <zliu@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/intel_ddi.c | 10 ++++++----
+ drivers/gpu/drm/i915/intel_drv.h | 2 +-
+ 2 files changed, 7 insertions(+), 5 deletions(-)
+
+--- a/drivers/gpu/drm/i915/intel_ddi.c
++++ b/drivers/gpu/drm/i915/intel_ddi.c
+@@ -684,7 +684,7 @@ static void intel_ddi_mode_set(struct dr
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
+
+- intel_dp->DP = intel_dig_port->port_reversal |
++ intel_dp->DP = intel_dig_port->saved_port_bits |
+ DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+ switch (intel_dp->lane_count) {
+ case 1:
+@@ -1324,7 +1324,8 @@ static void intel_enable_ddi(struct inte
+ * enabling the port.
+ */
+ I915_WRITE(DDI_BUF_CTL(port),
+- intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
++ intel_dig_port->saved_port_bits |
++ DDI_BUF_CTL_ENABLE);
+ } else if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+@@ -1543,8 +1544,9 @@ void intel_ddi_init(struct drm_device *d
+ intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+
+ intel_dig_port->port = port;
+- intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
+- DDI_BUF_PORT_REVERSAL;
++ intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
++ (DDI_BUF_PORT_REVERSAL |
++ DDI_A_4_LANES);
+ if (hdmi_connector)
+ intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
+ intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -426,7 +426,7 @@ struct intel_dp {
+ struct intel_digital_port {
+ struct intel_encoder base;
+ enum port port;
+- u32 port_reversal;
++ u32 saved_port_bits;
+ struct intel_dp dp;
+ struct intel_hdmi hdmi;
+ };
--- /dev/null
+From de1e0c40aceb9d5bff09c3a3b97b2f1b178af53f Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Mon, 8 Jul 2013 15:59:40 -0700
+Subject: fanotify: info leak in copy_event_to_user()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit de1e0c40aceb9d5bff09c3a3b97b2f1b178af53f upstream.
+
+The ->reserved field isn't cleared so we leak one byte of stack
+information to userspace.
+
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Cc: Eric Paris <eparis@redhat.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Luis Henriques <luis.henriques@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/notify/fanotify/fanotify_user.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -122,6 +122,7 @@ static int fill_event_metadata(struct fs
+ metadata->event_len = FAN_EVENT_METADATA_LEN;
+ metadata->metadata_len = FAN_EVENT_METADATA_LEN;
+ metadata->vers = FANOTIFY_METADATA_VERSION;
++ metadata->reserved = 0;
+ metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
+ metadata->pid = pid_vnr(event->tgid);
+ if (unlikely(event->mask & FAN_Q_OVERFLOW))
pci-pciehp-fix-null-pointer-deref-when-hot-removing-sr-iov-device.patch
pci-retry-allocation-of-only-the-resource-type-that-failed.patch
drm-radeon-disable-dma-rings-for-bo-moves-on-r6xx.patch
+xen-blkfront-use-a-different-scatterlist-for-each-request.patch
+drm-i915-preserve-the-ddi_a_4_lanes-bit-from-the-bios.patch
+fanotify-info-leak-in-copy_event_to_user.patch
+cgroup-fix-umount-vs-cgroup_cfts_commit-race.patch
--- /dev/null
+From b7649158a0d241f8d53d13ff7441858539e16656 Mon Sep 17 00:00:00 2001
+From: Roger Pau Monne <roger.pau@citrix.com>
+Date: Thu, 2 May 2013 10:58:50 +0200
+Subject: xen-blkfront: use a different scatterlist for each request
+
+From: Roger Pau Monne <roger.pau@citrix.com>
+
+commit b7649158a0d241f8d53d13ff7441858539e16656 upstream.
+
+In blkif_queue_request blkfront iterates over the scatterlist in order
+to set the segments of the request, and in blkif_completion blkfront
+iterates over the raw request, which makes it hard to know the exact
+position of the source and destination memory positions.
+
+This can be solved by allocating a scatterlist for each request, that
+will be kept until the request is finished, allowing us to copy the
+data back to the original memory without having to iterate over the
+raw request.
+
+Oracle-Bug: 16660413 - LARGE ASYNCHRONOUS READS APPEAR BROKEN ON 2.6.39-400
+CC: stable@vger.kernel.org
+Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
+Reported-and-Tested-by: Anne Milicia <anne.milicia@oracle.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/block/xen-blkfront.c | 36 +++++++++++++++++-------------------
+ 1 file changed, 17 insertions(+), 19 deletions(-)
+
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -75,6 +75,7 @@ struct blk_shadow {
+ struct blkif_request req;
+ struct request *request;
+ struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++ struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ };
+
+ static DEFINE_MUTEX(blkfront_mutex);
+@@ -98,7 +99,6 @@ struct blkfront_info
+ enum blkif_state connected;
+ int ring_ref;
+ struct blkif_front_ring ring;
+- struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ unsigned int evtchn, irq;
+ struct request_queue *rq;
+ struct work_struct work;
+@@ -422,11 +422,11 @@ static int blkif_queue_request(struct re
+ ring_req->u.discard.flag = 0;
+ } else {
+ ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req,
+- info->sg);
++ info->shadow[id].sg);
+ BUG_ON(ring_req->u.rw.nr_segments >
+ BLKIF_MAX_SEGMENTS_PER_REQUEST);
+
+- for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) {
++ for_each_sg(info->shadow[id].sg, sg, ring_req->u.rw.nr_segments, i) {
+ fsect = sg->offset >> 9;
+ lsect = fsect + (sg->length >> 9) - 1;
+
+@@ -867,12 +867,12 @@ static void blkif_completion(struct blk_
+ struct blkif_response *bret)
+ {
+ int i = 0;
+- struct bio_vec *bvec;
+- struct req_iterator iter;
+- unsigned long flags;
++ struct scatterlist *sg;
+ char *bvec_data;
+ void *shared_data;
+- unsigned int offset = 0;
++ int nseg;
++
++ nseg = s->req.u.rw.nr_segments;
+
+ if (bret->operation == BLKIF_OP_READ) {
+ /*
+@@ -881,19 +881,16 @@ static void blkif_completion(struct blk_
+ * than PAGE_SIZE, we have to keep track of the current offset,
+ * to be sure we are copying the data from the right shared page.
+ */
+- rq_for_each_segment(bvec, s->request, iter) {
+- BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE);
+- if (bvec->bv_offset < offset)
+- i++;
+- BUG_ON(i >= s->req.u.rw.nr_segments);
++ for_each_sg(s->sg, sg, nseg, i) {
++ BUG_ON(sg->offset + sg->length > PAGE_SIZE);
+ shared_data = kmap_atomic(
+ pfn_to_page(s->grants_used[i]->pfn));
+- bvec_data = bvec_kmap_irq(bvec, &flags);
+- memcpy(bvec_data, shared_data + bvec->bv_offset,
+- bvec->bv_len);
+- bvec_kunmap_irq(bvec_data, &flags);
++ bvec_data = kmap_atomic(sg_page(sg));
++ memcpy(bvec_data + sg->offset,
++ shared_data + sg->offset,
++ sg->length);
++ kunmap_atomic(bvec_data);
+ kunmap_atomic(shared_data);
+- offset = bvec->bv_offset + bvec->bv_len;
+ }
+ }
+ /* Add the persistent grant into the list of free grants */
+@@ -1022,7 +1019,7 @@ static int setup_blkring(struct xenbus_d
+ struct blkfront_info *info)
+ {
+ struct blkif_sring *sring;
+- int err;
++ int err, i;
+
+ info->ring_ref = GRANT_INVALID_REF;
+
+@@ -1034,7 +1031,8 @@ static int setup_blkring(struct xenbus_d
+ SHARED_RING_INIT(sring);
+ FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
+
+- sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
++ for (i = 0; i < BLK_RING_SIZE; i++)
++ sg_init_table(info->shadow[i].sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+
+ /* Allocate memory for grants */
+ err = fill_grant_buffer(info, BLK_RING_SIZE *