--- /dev/null
+From 49d45a31b71d7d9da74485922bdb63faf3dc9684 Mon Sep 17 00:00:00 2001
+From: Rafał Miłecki <zajec5@gmail.com>
+Date: Sat, 7 Dec 2013 13:22:42 +0100
+Subject: drm/edid: add quirk for BPC in Samsung NP700G7A-S01PL notebook
+
+From: Rafał Miłecki <zajec5@gmail.com>
+
+commit 49d45a31b71d7d9da74485922bdb63faf3dc9684 upstream.
+
+This bug in EDID was exposed by:
+
+commit eccea7920cfb009c2fa40e9ecdce8c36f61cab66
+Author: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon Mar 26 15:12:54 2012 -0400
+
+ drm/radeon/kms: improve bpc handling (v2)
+
+Which resulted in a kind of regression in 3.5. This fixes
+https://bugs.freedesktop.org/show_bug.cgi?id=70934
+
+Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_edid.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -68,6 +68,8 @@
+ #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
+ /* Force reduced-blanking timings for detailed modes */
+ #define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
++/* Force 8bpc */
++#define EDID_QUIRK_FORCE_8BPC (1 << 8)
+
+ struct detailed_mode_closure {
+ struct drm_connector *connector;
+@@ -128,6 +130,9 @@ static struct edid_quirk {
+
+ /* Medion MD 30217 PG */
+ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
++
++ /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
++ { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
+ };
+
+ /*
+@@ -3236,6 +3241,9 @@ int drm_add_edid_modes(struct drm_connec
+
+ drm_add_display_info(edid, &connector->display_info);
+
++ if (quirks & EDID_QUIRK_FORCE_8BPC)
++ connector->display_info.bpc = 8;
++
+ return num_modes;
+ }
+ EXPORT_SYMBOL(drm_add_edid_modes);
--- /dev/null
+From 6c719faca2aceca72f1bf5b1645c1734ed3e9447 Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Tue, 10 Dec 2013 13:20:59 +0100
+Subject: drm/i915: don't update the dri1 breadcrumb with modesetting
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+commit 6c719faca2aceca72f1bf5b1645c1734ed3e9447 upstream.
+
+The update is horribly racy since it doesn't protect at all against
+concurrent closing of the master fd. And it can't really since that
+requires us to grab a mutex.
+
+Instead of jumping through hoops and offloading this to a worker
+thread just block this bit of code for the modesetting driver.
+
+Note that the race is fairly easy to hit since we call the breadcrumb
+function for any interrupt. So the vblank interrupt (which usually
+keeps going for a bit) is enough. But even if we'd block this and only
+update the breadcrumb for user interrupts from the CS we could hit
+this race with kms/gem userspace: If a non-master is waiting somewhere
+(and hence has interrupts enabled) and the master closes its fd
+(probably due to crashing).
+
+v2: Add a code comment to explain why fixing this for real isn't
+really worth it. Also improve the commit message a bit.
+
+v3: Fix the spelling in the comment.
+
+Reported-by: Eugene Shatokhin <eugene.shatokhin@rosalab.ru>
+Cc: Eugene Shatokhin <eugene.shatokhin@rosalab.ru>
+Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
+Tested-by: Eugene Shatokhin <eugene.shatokhin@rosalab.ru>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_dma.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -83,6 +83,14 @@ void i915_update_dri1_breadcrumb(struct
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv;
+
++ /*
++ * The dri breadcrumb update races against the drm master disappearing.
++ * Instead of trying to fix this (this is by far not the only ums issue)
++ * just don't do the update in kms mode.
++ */
++ if (drm_core_check_feature(dev, DRIVER_MODESET))
++ return;
++
+ if (dev->primary->master) {
+ master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
--- /dev/null
+From acc240d41ea1ab9c488a79219fb313b5b46265ae Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Thu, 5 Dec 2013 15:42:34 +0100
+Subject: drm/i915: Fix use-after-free in do_switch
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+commit acc240d41ea1ab9c488a79219fb313b5b46265ae upstream.
+
+So apparently under ridiculous amounts of memory pressure we can get
+into trouble in do_switch when we try to move the old hw context
+backing storage object onto the active lists.
+
+With list debugging enabled that usually results in us chasing a
+poisoned pointer - which means we've hit upon a vma that has been
+removed from all lrus with list_del (and then deallocated, so it's a
+real use-after free).
+
+Ian Lister has done some great callchain chasing and noticed that we
+can reenter do_switch:
+
+i915_gem_do_execbuffer()
+
+i915_switch_context()
+
+do_switch()
+ from = ring->last_context;
+ i915_gem_object_pin()
+
+ i915_gem_object_bind_to_gtt()
+ ret = drm_mm_insert_node_in_range_generic();
+ // If the above call fails then it will try i915_gem_evict_something()
+ // If that fails it will call i915_gem_evict_everything() ...
+ i915_gem_evict_everything()
+ i915_gpu_idle()
+ i915_switch_context(DEFAULT_CONTEXT)
+
+Like with everything else where the shrinker or eviction code can
+invalidate pointers we need to reload relevant state.
+
+Note that there's no need to recheck whether a context switch is still
+required because:
+
+- Doing a switch to the same context is harmless (besides wasting a
+ bit of energy).
+
+- This can only happen with the default context. But since that one's
+ pinned we'll never call down into evict_everything under normal
+ circumstances. Note that there's a little driver bringup fun
+ involved namely that we could recourse into do_switch for the
+ initial switch. Atm we're fine since we assign the context pointer
+ only after the call to do_switch at driver load or resume time. And
+ in the gpu reset case we skip the entire setup sequence (which might
+ be a bug on its own, but definitely not this one here).
+
+Cc'ing stable since apparently ChromeOS guys are seeing this in the
+wild (and not just on artificial stress tests), see the reference.
+
+Note that the upstream code doesn't call evict_everything directly
+from evict_something, that's an extension in this product branch. But
+we can still hit upon this bug (and apparently we do, see the linked
+backtraces). I've noticed this while trying to construct a testcase
+for this bug and utterly failed to provoke it. It looks like we need
+to drive the system squarely into the lowmem wall and provoke the
+shrinker to evict the context object by doing the last-ditch
+evict_everything call.
+
+Aside: There's currently no means to get a badly-fragmenting hw
+context object away from a bad spot in the upstream code. We should
+fix this by at least adding some code to evict_something to handle hw
+contexts.
+
+References: https://code.google.com/p/chromium/issues/detail?id=248191
+Reported-by: Ian Lister <ian.lister@intel.com>
+Cc: Ian Lister <ian.lister@intel.com>
+Cc: Ben Widawsky <benjamin.widawsky@intel.com>
+Cc: Stéphane Marchesin <marcheu@chromium.org>
+Cc: Bloomfield, Jon <jon.bloomfield@intel.com>
+Tested-by: Rafael Barbalho <rafael.barbalho@intel.com>
+Reviewed-by: Ian Lister <ian.lister@intel.com>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_gem_context.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/i915_gem_context.c
+@@ -402,11 +402,21 @@ static int do_switch(struct i915_hw_cont
+ if (ret)
+ return ret;
+
+- /* Clear this page out of any CPU caches for coherent swap-in/out. Note
++ /*
++ * Pin can switch back to the default context if we end up calling into
++ * evict_everything - as a last ditch gtt defrag effort that also
++ * switches to the default context. Hence we need to reload from here.
++ */
++ from = ring->last_context;
++
++ /*
++ * Clear this page out of any CPU caches for coherent swap-in/out. Note
+ * that thanks to write = false in this call and us not setting any gpu
+ * write domains when putting a context object onto the active list
+ * (when switching away from it), this won't block.
+- * XXX: We need a real interface to do this instead of trickery. */
++ *
++ * XXX: We need a real interface to do this instead of trickery.
++ */
+ ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
+ if (ret) {
+ i915_gem_object_unpin(to->obj);
--- /dev/null
+From 0d1430a3f4b7cfd8779b78740a4182321f3ca7f3 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Wed, 4 Dec 2013 14:52:06 +0000
+Subject: drm/i915: Hold mutex across i915_gem_release
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 0d1430a3f4b7cfd8779b78740a4182321f3ca7f3 upstream.
+
+In order to serialise the closing of the file descriptor and its
+subsequent release of client requests with i915_gem_free_request(), we
+need to hold the struct_mutex in i915_gem_release(). Failing to do so
+has the potential to trigger an OOPS, later with a use-after-free.
+
+Testcase: igt/gem_close_race
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=70874
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=71029
+Reported-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_dma.c | 2 ++
+ drivers/gpu/drm/i915/i915_gem_context.c | 2 --
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -1848,8 +1848,10 @@ void i915_driver_lastclose(struct drm_de
+
+ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+ {
++ mutex_lock(&dev->struct_mutex);
+ i915_gem_context_close(dev, file_priv);
+ i915_gem_release(dev, file_priv);
++ mutex_unlock(&dev->struct_mutex);
+ }
+
+ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
+--- a/drivers/gpu/drm/i915/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/i915_gem_context.c
+@@ -328,10 +328,8 @@ void i915_gem_context_close(struct drm_d
+ {
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+- mutex_lock(&dev->struct_mutex);
+ idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
+ idr_destroy(&file_priv->context_idr);
+- mutex_unlock(&dev->struct_mutex);
+ }
+
+ static struct i915_hw_context *
--- /dev/null
+From 027476642811f8559cbe00ef6cc54db230e48a20 Mon Sep 17 00:00:00 2001
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Date: Mon, 2 Dec 2013 11:08:06 +0200
+Subject: drm/i915: Take modeset locks around intel_modeset_setup_hw_state()
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit 027476642811f8559cbe00ef6cc54db230e48a20 upstream.
+
+Some lower level things get angry if we don't have modeset locks
+during intel_modeset_setup_hw_state(). Actually the resume and
+lid_notify codepaths already hold the locks, but the init codepath
+doesn't, so fix that.
+
+Note: This slipped through since we only disable pipes if the
+plane/pipe linking doesn't match. Which is only relevant on older
+gen3 mobile machines, if the BIOS fails to set up our preferred
+linking.
+
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Tested-and-reported-by: Paul Bolle <pebolle@tiscali.nl>
+[danvet: Add note now that I could confirm my theory with the log
+files Paul Bolle provided.]
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/intel_display.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -10592,7 +10592,9 @@ void intel_modeset_gem_init(struct drm_d
+
+ intel_setup_overlay(dev);
+
++ drm_modeset_lock_all(dev);
+ intel_modeset_setup_hw_state(dev, false);
++ drm_modeset_unlock_all(dev);
+ }
+
+ void intel_modeset_cleanup(struct drm_device *dev)
--- /dev/null
+From 227ae10f17a5f2fd1307b7e582b603ef7bbb7e97 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 11 Dec 2013 11:43:58 -0500
+Subject: drm/radeon: add missing display tiling setup for oland
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 227ae10f17a5f2fd1307b7e582b603ef7bbb7e97 upstream.
+
+Fixes improperly set up display params for 2D tiling on
+oland.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/atombios_crtc.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -1196,7 +1196,9 @@ static int dce4_crtc_do_set_base(struct
+ } else if ((rdev->family == CHIP_TAHITI) ||
+ (rdev->family == CHIP_PITCAIRN))
+ fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
+- else if (rdev->family == CHIP_VERDE)
++ else if ((rdev->family == CHIP_VERDE) ||
++ (rdev->family == CHIP_OLAND) ||
++ (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */
+ fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
+
+ switch (radeon_crtc->crtc_id) {
--- /dev/null
+From 8333f0fe133be420ce3fcddfd568c3a559ab274e Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 2 Dec 2013 18:15:51 -0500
+Subject: drm/radeon: Fix sideport problems on certain RS690 boards
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 8333f0fe133be420ce3fcddfd568c3a559ab274e upstream.
+
+Some RS690 boards with 64MB of sideport memory show up as
+having 128MB sideport + 256MB of UMA. In this case,
+just skip the sideport memory and use UMA. This fixes
+rendering corruption and should improve performance.
+
+bug:
+https://bugs.freedesktop.org/show_bug.cgi?id=35457
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/rs690.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/gpu/drm/radeon/rs690.c
++++ b/drivers/gpu/drm/radeon/rs690.c
+@@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_
+ base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
+ base = G_000100_MC_FB_START(base) << 16;
+ rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
++ /* Some boards seem to be configured for 128MB of sideport memory,
++ * but really only have 64MB. Just skip the sideport and use
++ * UMA memory.
++ */
++ if (rdev->mc.igp_sideport_enabled &&
++ (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
++ base += 128 * 1024 * 1024;
++ rdev->mc.real_vram_size -= 128 * 1024 * 1024;
++ rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
++ }
+
+ /* Use K8 direct mapping for fast fb access. */
+ rdev->fastfb_working = false;
--- /dev/null
+From 1b3abef830db98c11d7f916a483abaf2501f3323 Mon Sep 17 00:00:00 2001
+From: Christian König <christian.koenig@amd.com>
+Date: Tue, 10 Dec 2013 17:57:37 +0100
+Subject: drm/radeon: fix typo in cik_copy_dma
+
+From: Christian König <christian.koenig@amd.com>
+
+commit 1b3abef830db98c11d7f916a483abaf2501f3323 upstream.
+
+Otherwise we end up with a rather strange looking result.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Tested-by: Tom Stellard <thomas.stellard@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/cik_sdma.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/radeon/cik_sdma.c
++++ b/drivers/gpu/drm/radeon/cik_sdma.c
+@@ -468,7 +468,7 @@ int cik_copy_dma(struct radeon_device *r
+ radeon_ring_write(ring, 0); /* src/dst endian swap */
+ radeon_ring_write(ring, src_offset & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
+- radeon_ring_write(ring, dst_offset & 0xfffffffc);
++ radeon_ring_write(ring, dst_offset & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
+ src_offset += cur_size_in_bytes;
+ dst_offset += cur_size_in_bytes;
--- /dev/null
+From d386735588c3e22129c2bc6eb64fc1d37a8f805c Mon Sep 17 00:00:00 2001
+From: Thomas Hellstrom <thellstrom@vmware.com>
+Date: Sun, 8 Dec 2013 23:23:57 -0800
+Subject: drm/ttm: Fix accesses through vmas with only partial coverage
+
+From: Thomas Hellstrom <thellstrom@vmware.com>
+
+commit d386735588c3e22129c2bc6eb64fc1d37a8f805c upstream.
+
+VMAs covering a bo but that didn't start at the same address space offset as
+the bo they were mapping were incorrectly generating SEGFAULT errors in
+the fault handler.
+
+Reported-by: Joseph Dolinak <kanilo2@yahoo.com>
+Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
+Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/ttm/ttm_bo_vm.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+@@ -116,9 +116,9 @@ static int ttm_bo_vm_fault(struct vm_are
+ }
+
+ page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
+- drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
+- page_last = vma_pages(vma) +
+- drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
++ vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
++ page_last = vma_pages(vma) + vma->vm_pgoff -
++ drm_vma_node_start(&bo->vma_node);
+
+ if (unlikely(page_offset >= bo->num_pages)) {
+ retval = VM_FAULT_SIGBUS;
--- /dev/null
+From ce027ed98fd176710fb14be9d6015697b62436f0 Mon Sep 17 00:00:00 2001
+From: Stefan Richter <stefanr@s5r6.in-berlin.de>
+Date: Sun, 15 Dec 2013 16:18:01 +0100
+Subject: firewire: sbp2: bring back WRITE SAME support
+
+From: Stefan Richter <stefanr@s5r6.in-berlin.de>
+
+commit ce027ed98fd176710fb14be9d6015697b62436f0 upstream.
+
+Commit 54b2b50c20a6 "[SCSI] Disable WRITE SAME for RAID and virtual
+host adapter drivers" disabled WRITE SAME support for all SBP-2 attached
+targets. But as described in the changelog of commit b0ea5f19d3d8
+"firewire: sbp2: allow WRITE SAME and REPORT SUPPORTED OPERATION CODES",
+it is not required to blacklist WRITE SAME.
+
+Bring the feature back by reverting the sbp2.c hunk of commit 54b2b50c20a6.
+
+Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/firewire/sbp2.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/firewire/sbp2.c
++++ b/drivers/firewire/sbp2.c
+@@ -1623,7 +1623,6 @@ static struct scsi_host_template scsi_dr
+ .cmd_per_lun = 1,
+ .can_queue = 1,
+ .sdev_attrs = sbp2_scsi_sysfs_attrs,
+- .no_write_same = 1,
+ };
+
+ MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
--- /dev/null
+From 77873803363c9e831fc1d1e6895c084279090c22 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Tue, 17 Dec 2013 10:09:32 -0800
+Subject: net_dma: mark broken
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit 77873803363c9e831fc1d1e6895c084279090c22 upstream.
+
+net_dma can cause data to be copied to a stale mapping if a
+copy-on-write fault occurs during dma. The application sees missing
+data.
+
+The following trace is triggered by modifying the kernel to WARN if it
+ever triggers copy-on-write on a page that is undergoing dma:
+
+ WARNING: CPU: 24 PID: 2529 at lib/dma-debug.c:485 debug_dma_assert_idle+0xd2/0x120()
+ ioatdma 0000:00:04.0: DMA-API: cpu touching an active dma mapped page [pfn=0x16bcd9]
+ Modules linked in: iTCO_wdt iTCO_vendor_support ioatdma lpc_ich pcspkr dca
+ CPU: 24 PID: 2529 Comm: linbug Tainted: G W 3.13.0-rc1+ #353
+ 00000000000001e5 ffff88016f45f688 ffffffff81751041 ffff88017ab0ef70
+ ffff88016f45f6d8 ffff88016f45f6c8 ffffffff8104ed9c ffffffff810f3646
+ ffff8801768f4840 0000000000000282 ffff88016f6cca10 00007fa2bb699349
+ Call Trace:
+ [<ffffffff81751041>] dump_stack+0x46/0x58
+ [<ffffffff8104ed9c>] warn_slowpath_common+0x8c/0xc0
+ [<ffffffff810f3646>] ? ftrace_pid_func+0x26/0x30
+ [<ffffffff8104ee86>] warn_slowpath_fmt+0x46/0x50
+ [<ffffffff8139c062>] debug_dma_assert_idle+0xd2/0x120
+ [<ffffffff81154a40>] do_wp_page+0xd0/0x790
+ [<ffffffff811582ac>] handle_mm_fault+0x51c/0xde0
+ [<ffffffff813830b9>] ? copy_user_enhanced_fast_string+0x9/0x20
+ [<ffffffff8175fc2c>] __do_page_fault+0x19c/0x530
+ [<ffffffff8175c196>] ? _raw_spin_lock_bh+0x16/0x40
+ [<ffffffff810f3539>] ? trace_clock_local+0x9/0x10
+ [<ffffffff810fa1f4>] ? rb_reserve_next_event+0x64/0x310
+ [<ffffffffa0014c00>] ? ioat2_dma_prep_memcpy_lock+0x60/0x130 [ioatdma]
+ [<ffffffff8175ffce>] do_page_fault+0xe/0x10
+ [<ffffffff8175c862>] page_fault+0x22/0x30
+ [<ffffffff81643991>] ? __kfree_skb+0x51/0xd0
+ [<ffffffff813830b9>] ? copy_user_enhanced_fast_string+0x9/0x20
+ [<ffffffff81388ea2>] ? memcpy_toiovec+0x52/0xa0
+ [<ffffffff8164770f>] skb_copy_datagram_iovec+0x5f/0x2a0
+ [<ffffffff8169d0f4>] tcp_rcv_established+0x674/0x7f0
+ [<ffffffff816a68c5>] tcp_v4_do_rcv+0x2e5/0x4a0
+ [..]
+ ---[ end trace e30e3b01191b7617 ]---
+ Mapped at:
+ [<ffffffff8139c169>] debug_dma_map_page+0xb9/0x160
+ [<ffffffff8142bf47>] dma_async_memcpy_pg_to_pg+0x127/0x210
+ [<ffffffff8142cce9>] dma_memcpy_pg_to_iovec+0x119/0x1f0
+ [<ffffffff81669d3c>] dma_skb_copy_datagram_iovec+0x11c/0x2b0
+ [<ffffffff8169d1ca>] tcp_rcv_established+0x74a/0x7f0:
+
+...the problem is that the receive path falls back to cpu-copy in
+several locations and this trace is just one of the areas. A few
+options were considered to fix this:
+
+1/ sync all dma whenever a cpu copy branch is taken
+
+2/ modify the page fault handler to hold off while dma is in-flight
+
+Option 1 adds yet more cpu overhead to an "offload" that struggles to compete
+with cpu-copy. Option 2 adds checks for behavior that is already documented as
+broken when using get_user_pages(). At a minimum a debug mode is warranted to
+catch and flag these violations of the dma-api vs get_user_pages().
+
+Thanks to David for his reproducer.
+
+Cc: Dave Jiang <dave.jiang@intel.com>
+Cc: Vinod Koul <vinod.koul@intel.com>
+Cc: Alexander Duyck <alexander.h.duyck@intel.com>
+Reported-by: David Whipple <whipple@securedatainnovations.ch>
+Acked-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -339,6 +339,7 @@ config NET_DMA
+ bool "Network: TCP receive copy offload"
+ depends on DMA_ENGINE && NET
+ default (INTEL_IOATDMA || FSL_DMA)
++ depends on BROKEN
+ help
+ This enables the use of DMA engines in the network stack to
+ offload receive copy-to-user operations, freeing CPU cycles.
--- /dev/null
+From 3c67f474558748b604e247d92b55dfe89654c81d Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 18 Dec 2013 17:08:40 -0800
+Subject: sched: numa: skip inaccessible VMAs
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit 3c67f474558748b604e247d92b55dfe89654c81d upstream.
+
+Inaccessible VMA should not be trapping NUMA hint faults. Skip them.
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Cc: Alex Thorlton <athorlton@sgi.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/fair.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -974,6 +974,13 @@ void task_numa_work(struct callback_head
+ if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
+ continue;
+
++ /*
++ * Skip inaccessible VMAs to avoid any confusion between
++ * PROT_NONE and NUMA hinting ptes
++ */
++ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
++ continue;
++
+ do {
+ start = max(start, vma->vm_start);
+ end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
--- /dev/null
+From 757dfcaa41844595964f1220f1d33182dae49976 Mon Sep 17 00:00:00 2001
+From: Kirill Tkhai <tkhai@yandex.ru>
+Date: Wed, 27 Nov 2013 19:59:13 +0400
+Subject: sched/rt: Fix rq's cpupri leak while enqueue/dequeue child RT entities
+
+From: Kirill Tkhai <tkhai@yandex.ru>
+
+commit 757dfcaa41844595964f1220f1d33182dae49976 upstream.
+
+This patch touches the RT group scheduling case.
+
+Functions inc_rt_prio_smp() and dec_rt_prio_smp() change (global) rq's
+priority, while rt_rq passed to them may be not the top-level rt_rq.
+This is wrong, because changing of priority on a child level does not
+guarantee that the priority is the highest all over the rq. So, this
+leak makes RT balancing unusable.
+
+The short example: the task having the highest priority among all rq's
+RT tasks (no one other task has the same priority) are waking on a
+throttle rt_rq. The rq's cpupri is set to the task's priority
+equivalent, but real rq->rt.highest_prio.curr is less.
+
+The patch below fixes the problem.
+
+Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+CC: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/49231385567953@web4m.yandex.ru
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/rt.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -899,6 +899,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int
+ {
+ struct rq *rq = rq_of_rt_rq(rt_rq);
+
++#ifdef CONFIG_RT_GROUP_SCHED
++ /*
++ * Change rq's cpupri only if rt_rq is the top queue.
++ */
++ if (&rq->rt != rt_rq)
++ return;
++#endif
+ if (rq->online && prio < prev_prio)
+ cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
+ }
+@@ -908,6 +915,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int
+ {
+ struct rq *rq = rq_of_rt_rq(rt_rq);
+
++#ifdef CONFIG_RT_GROUP_SCHED
++ /*
++ * Change rq's cpupri only if rt_rq is the top queue.
++ */
++ if (&rq->rt != rt_rq)
++ return;
++#endif
+ if (rq->online && rt_rq->highest_prio.curr != prev_prio)
+ cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
+ }
ext4-fix-deadlock-when-writing-in-enospc-conditions.patch
ext4-add-explicit-casts-when-masking-cluster-sizes.patch
ext4-fix-fitrim-in-no-journal-mode.patch
+sched-numa-skip-inaccessible-vmas.patch
+sched-rt-fix-rq-s-cpupri-leak-while-enqueue-dequeue-child-rt-entities.patch
+firewire-sbp2-bring-back-write-same-support.patch
+net_dma-mark-broken.patch
+drm-edid-add-quirk-for-bpc-in-samsung-np700g7a-s01pl-notebook.patch
+drm-ttm-fix-accesses-through-vmas-with-only-partial-coverage.patch
+drm-radeon-fix-sideport-problems-on-certain-rs690-boards.patch
+drm-radeon-fix-typo-in-cik_copy_dma.patch
+drm-radeon-add-missing-display-tiling-setup-for-oland.patch
+drm-i915-take-modeset-locks-around-intel_modeset_setup_hw_state.patch
+drm-i915-hold-mutex-across-i915_gem_release.patch
+drm-i915-fix-use-after-free-in-do_switch.patch
+drm-i915-don-t-update-the-dri1-breadcrumb-with-modesetting.patch