--- /dev/null
+From cc261738add93947d138d2fabad9f4dbed4e5c00 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Mon, 16 Mar 2015 10:18:08 +0100
+Subject: ALSA: hda - Treat stereo-to-mono mix properly
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit cc261738add93947d138d2fabad9f4dbed4e5c00 upstream.
+
+The commit [ef403edb7558: ALSA: hda - Don't access stereo amps for
+mono channel widgets] fixed the handling of mono widgets in general,
+but it still misses an exceptional case: namely, a mono mixer widget
+taking a single stereo input. In this case, it has stereo volumes
+although it's a mono widget, and thus we have to take care of both
+left and right input channels, as stated in the HD-audio spec ("7.1.3
+Widget Interconnection Rules").
+
+This patch covers this missing piece by adding proper checks of stereo
+amps in both the generic parser and the proc output codes.
+
+Reported-by: Raymond Yau <superquad.vortex2@gmail.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/hda_generic.c | 21 +++++++++++++++++++--
+ sound/pci/hda/hda_proc.c | 38 ++++++++++++++++++++++++++++++--------
+ 2 files changed, 49 insertions(+), 10 deletions(-)
+
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -687,13 +687,30 @@ static int get_amp_val_to_activate(struc
+ return val;
+ }
+
++/* is this a stereo widget or a stereo-to-mono mix? */
++static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid, int dir)
++{
++ unsigned int wcaps = get_wcaps(codec, nid);
++ hda_nid_t conn;
++
++ if (wcaps & AC_WCAP_STEREO)
++ return true;
++ if (dir != HDA_INPUT || get_wcaps_type(wcaps) != AC_WID_AUD_MIX)
++ return false;
++ if (snd_hda_get_num_conns(codec, nid) != 1)
++ return false;
++ if (snd_hda_get_connections(codec, nid, &conn, 1) < 0)
++ return false;
++ return !!(get_wcaps(codec, conn) & AC_WCAP_STEREO);
++}
++
+ /* initialize the amp value (only at the first time) */
+ static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx)
+ {
+ unsigned int caps = query_amp_caps(codec, nid, dir);
+ int val = get_amp_val_to_activate(codec, nid, dir, caps, false);
+
+- if (get_wcaps(codec, nid) & AC_WCAP_STEREO)
++ if (is_stereo_amps(codec, nid, dir))
+ snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
+ else
+ snd_hda_codec_amp_init(codec, nid, 0, dir, idx, 0xff, val);
+@@ -703,7 +720,7 @@ static void init_amp(struct hda_codec *c
+ static int update_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx,
+ unsigned int mask, unsigned int val)
+ {
+- if (get_wcaps(codec, nid) & AC_WCAP_STEREO)
++ if (is_stereo_amps(codec, nid, dir))
+ return snd_hda_codec_amp_stereo(codec, nid, dir, idx,
+ mask, val);
+ else
+--- a/sound/pci/hda/hda_proc.c
++++ b/sound/pci/hda/hda_proc.c
+@@ -134,13 +134,38 @@ static void print_amp_caps(struct snd_in
+ (caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT);
+ }
+
++/* is this a stereo widget or a stereo-to-mono mix? */
++static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid,
++ int dir, unsigned int wcaps, int indices)
++{
++ hda_nid_t conn;
++
++ if (wcaps & AC_WCAP_STEREO)
++ return true;
++ /* check for a stereo-to-mono mix; it must be:
++ * only a single connection, only for input, and only a mixer widget
++ */
++ if (indices != 1 || dir != HDA_INPUT ||
++ get_wcaps_type(wcaps) != AC_WID_AUD_MIX)
++ return false;
++
++ if (snd_hda_get_raw_connections(codec, nid, &conn, 1) < 0)
++ return false;
++ /* the connection source is a stereo? */
++ wcaps = snd_hda_param_read(codec, conn, AC_PAR_AUDIO_WIDGET_CAP);
++ return !!(wcaps & AC_WCAP_STEREO);
++}
++
+ static void print_amp_vals(struct snd_info_buffer *buffer,
+ struct hda_codec *codec, hda_nid_t nid,
+- int dir, int stereo, int indices)
++ int dir, unsigned int wcaps, int indices)
+ {
+ unsigned int val;
++ bool stereo;
+ int i;
+
++ stereo = is_stereo_amps(codec, nid, dir, wcaps, indices);
++
+ dir = dir == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT;
+ for (i = 0; i < indices; i++) {
+ snd_iprintf(buffer, " [");
+@@ -757,12 +782,10 @@ static void print_codec_info(struct snd_
+ (codec->single_adc_amp &&
+ wid_type == AC_WID_AUD_IN))
+ print_amp_vals(buffer, codec, nid, HDA_INPUT,
+- wid_caps & AC_WCAP_STEREO,
+- 1);
++ wid_caps, 1);
+ else
+ print_amp_vals(buffer, codec, nid, HDA_INPUT,
+- wid_caps & AC_WCAP_STEREO,
+- conn_len);
++ wid_caps, conn_len);
+ }
+ if (wid_caps & AC_WCAP_OUT_AMP) {
+ snd_iprintf(buffer, " Amp-Out caps: ");
+@@ -771,11 +794,10 @@ static void print_codec_info(struct snd_
+ if (wid_type == AC_WID_PIN &&
+ codec->pin_amp_workaround)
+ print_amp_vals(buffer, codec, nid, HDA_OUTPUT,
+- wid_caps & AC_WCAP_STEREO,
+- conn_len);
++ wid_caps, conn_len);
+ else
+ print_amp_vals(buffer, codec, nid, HDA_OUTPUT,
+- wid_caps & AC_WCAP_STEREO, 1);
++ wid_caps, 1);
+ }
+
+ switch (wid_type) {
--- /dev/null
+From da293700568ed3d96fcf062ac15d7d7c41377f11 Mon Sep 17 00:00:00 2001
+From: Brian King <brking@linux.vnet.ibm.com>
+Date: Wed, 4 Mar 2015 08:09:44 -0600
+Subject: bnx2x: Force fundamental reset for EEH recovery
+
+From: Brian King <brking@linux.vnet.ibm.com>
+
+commit da293700568ed3d96fcf062ac15d7d7c41377f11 upstream.
+
+EEH recovery for bnx2x based adapters is not reliable on all Power
+systems using the default hot reset, which can result in an
+unrecoverable EEH error. Forcing the use of fundamental reset
+during EEH recovery fixes this.
+
+Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -12722,6 +12722,9 @@ static int bnx2x_init_dev(struct bnx2x *
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+ PCICFG_VENDOR_ID_OFFSET);
+
++ /* Set PCIe reset type to fundamental for EEH recovery */
++ pdev->needs_freset = 1;
++
+ /* AER (Advanced Error reporting) configuration */
+ rc = pci_enable_pcie_error_reporting(pdev);
+ if (!rc)
--- /dev/null
+From 9a6f5130143c17b91e0a3cbf5cc2d8c1e5a80a63 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Wed, 25 Feb 2015 13:45:26 +0000
+Subject: drm: Don't assign fbs for universal cursor support to files
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 9a6f5130143c17b91e0a3cbf5cc2d8c1e5a80a63 upstream.
+
+The internal framebuffers we create to remap legacy cursor ioctls to
+plane operations for the universal plane support shouldn't be linked to
+the file like normal userspace framebuffers. This bug goes back to the
+original universal cursor plane support introduced in
+
+commit 161d0dc1dccb17ff7a38f462c7c0d4ef8bcc5662
+Author: Matt Roper <matthew.d.roper@intel.com>
+Date: Tue Jun 10 08:28:10 2014 -0700
+
+ drm: Support legacy cursor ioctls via universal planes when possible (v4)
+
+This isn't too disastrous since fbs are small; we only create one when the
+cursor bo gets changed and ultimately they'll be reaped when the window
+server restarts.
+
+Conceptually we'd want to just pass NULL for file_priv when creating it,
+but the driver needs the file to lookup the underlying buffer object for
+cursor id. Instead let's move the file_priv linking out of
+add_framebuffer_internal() into the addfb ioctl implementation, which is
+the only place it is needed. Also rename the function to a more accurate
+name, since it only creates the fb but doesn't add it anywhere.
+
+Signed-off-by: Daniel Vetter <daniel.vetter@intel.com> (fix & commit msg)
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> (provider of lipstick)
+Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Cc: Matt Roper <matthew.d.roper@intel.com>
+Cc: Rob Clark <robdclark@gmail.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_crtc.c | 35 +++++++++++++++++++----------------
+ 1 file changed, 19 insertions(+), 16 deletions(-)
+
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -42,9 +42,10 @@
+ #include "drm_crtc_internal.h"
+ #include "drm_internal.h"
+
+-static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
+- struct drm_mode_fb_cmd2 *r,
+- struct drm_file *file_priv);
++static struct drm_framebuffer *
++internal_framebuffer_create(struct drm_device *dev,
++ struct drm_mode_fb_cmd2 *r,
++ struct drm_file *file_priv);
+
+ /* Avoid boilerplate. I'm tired of typing. */
+ #define DRM_ENUM_NAME_FN(fnname, list) \
+@@ -2817,13 +2818,11 @@ static int drm_mode_cursor_universal(str
+ */
+ if (req->flags & DRM_MODE_CURSOR_BO) {
+ if (req->handle) {
+- fb = add_framebuffer_internal(dev, &fbreq, file_priv);
++ fb = internal_framebuffer_create(dev, &fbreq, file_priv);
+ if (IS_ERR(fb)) {
+ DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
+ return PTR_ERR(fb);
+ }
+-
+- drm_framebuffer_reference(fb);
+ } else {
+ fb = NULL;
+ }
+@@ -3175,9 +3174,10 @@ static int framebuffer_check(const struc
+ return 0;
+ }
+
+-static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
+- struct drm_mode_fb_cmd2 *r,
+- struct drm_file *file_priv)
++static struct drm_framebuffer *
++internal_framebuffer_create(struct drm_device *dev,
++ struct drm_mode_fb_cmd2 *r,
++ struct drm_file *file_priv)
+ {
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_framebuffer *fb;
+@@ -3209,12 +3209,6 @@ static struct drm_framebuffer *add_frame
+ return fb;
+ }
+
+- mutex_lock(&file_priv->fbs_lock);
+- r->fb_id = fb->base.id;
+- list_add(&fb->filp_head, &file_priv->fbs);
+- DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+- mutex_unlock(&file_priv->fbs_lock);
+-
+ return fb;
+ }
+
+@@ -3236,15 +3230,24 @@ static struct drm_framebuffer *add_frame
+ int drm_mode_addfb2(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+ {
++ struct drm_mode_fb_cmd2 *r = data;
+ struct drm_framebuffer *fb;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+- fb = add_framebuffer_internal(dev, data, file_priv);
++ fb = internal_framebuffer_create(dev, r, file_priv);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
+
++ /* Transfer ownership to the filp for reaping on close */
++
++ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
++ mutex_lock(&file_priv->fbs_lock);
++ r->fb_id = fb->base.id;
++ list_add(&fb->filp_head, &file_priv->fbs);
++ mutex_unlock(&file_priv->fbs_lock);
++
+ return 0;
+ }
+
--- /dev/null
+From 888d0d421663313739a8bf93459c6ba61fd4b121 Mon Sep 17 00:00:00 2001
+From: Imre Deak <imre.deak@intel.com>
+Date: Thu, 8 Jan 2015 17:54:13 +0200
+Subject: drm/i915: add dev_to_i915 helper
+
+From: Imre Deak <imre.deak@intel.com>
+
+commit 888d0d421663313739a8bf93459c6ba61fd4b121 upstream.
+
+This will be needed by later patches, so factor it out.
+
+No functional change.
+
+v2:
+- s/dev_to_i915_priv/dev_to_i915/ (Jani)
+- don't use the helper in i915_pm_suspend (Chris)
+- simplify the helper (Chris)
+v3:
+- remove redundant upcasting in the helper (Daniel)
+
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Reviewed-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_drv.c | 9 +++------
+ drivers/gpu/drm/i915/i915_drv.h | 5 +++++
+ 2 files changed, 8 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -934,8 +934,7 @@ static int i915_pm_suspend(struct device
+
+ static int i915_pm_suspend_late(struct device *dev)
+ {
+- struct pci_dev *pdev = to_pci_dev(dev);
+- struct drm_device *drm_dev = pci_get_drvdata(pdev);
++ struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+
+ /*
+ * We have a suspedn ordering issue with the snd-hda driver also
+@@ -954,8 +953,7 @@ static int i915_pm_suspend_late(struct d
+
+ static int i915_pm_resume_early(struct device *dev)
+ {
+- struct pci_dev *pdev = to_pci_dev(dev);
+- struct drm_device *drm_dev = pci_get_drvdata(pdev);
++ struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+@@ -965,8 +963,7 @@ static int i915_pm_resume_early(struct d
+
+ static int i915_pm_resume(struct device *dev)
+ {
+- struct pci_dev *pdev = to_pci_dev(dev);
+- struct drm_device *drm_dev = pci_get_drvdata(pdev);
++ struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1781,6 +1781,11 @@ static inline struct drm_i915_private *t
+ return dev->dev_private;
+ }
+
++static inline struct drm_i915_private *dev_to_i915(struct device *dev)
++{
++ return to_i915(dev_get_drvdata(dev));
++}
++
+ /* Iterate over initialised rings */
+ #define for_each_ring(ring__, dev_priv__, i__) \
+ for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
--- /dev/null
+From ab3be73fa7b43f4c3648ce29b5fd649ea54d3adb Mon Sep 17 00:00:00 2001
+From: Imre Deak <imre.deak@intel.com>
+Date: Mon, 2 Mar 2015 13:04:41 +0200
+Subject: drm/i915: gen4: work around hang during hibernation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Imre Deak <imre.deak@intel.com>
+
+commit ab3be73fa7b43f4c3648ce29b5fd649ea54d3adb upstream.
+
+Bjørn reported that his machine hung during hibernation and eventually
+bisected the problem to the following commit:
+
+commit da2bc1b9db3351addd293e5b82757efe1f77ed1d
+Author: Imre Deak <imre.deak@intel.com>
+Date: Thu Oct 23 19:23:26 2014 +0300
+
+ drm/i915: add poweroff_late handler
+
+The problem seems to be that after the kernel puts the device into D3
+the BIOS still tries to access it, or otherwise assumes that it's in D0.
+This is clearly bogus, since ACPI mandates that devices are put into D3
+by the OSPM if they are not wake-up sources. In the future we want to
+unify more of the driver's runtime and system suspend paths, for example
+by skipping all the system suspend/hibernation hooks if the device is
+runtime suspended already. Accordingly for all other platforms the goal
+is still to properly power down the device during hibernation.
+
+v2:
+- Another GEN4 Lenovo laptop had the same issue, while platforms from
+ other vendors (including mobile and desktop, GEN4 and non-GEN4) seem
+ to work fine. Based on this apply the workaround on all GEN4 Lenovo
+ platforms.
+- add code comment about failing platforms (Ville)
+
+Reference: http://lists.freedesktop.org/archives/intel-gfx/2015-February/060633.html
+Reported-and-bisected-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Acked-by: Daniel Vetter <daniel.vetter@intel.com>
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_drv.c | 30 +++++++++++++++++++++++++-----
+ 1 file changed, 25 insertions(+), 5 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -622,7 +622,7 @@ static int i915_drm_suspend(struct drm_d
+ return 0;
+ }
+
+-static int i915_drm_suspend_late(struct drm_device *drm_dev)
++static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
+ {
+ struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ int ret;
+@@ -636,7 +636,17 @@ static int i915_drm_suspend_late(struct
+ }
+
+ pci_disable_device(drm_dev->pdev);
+- pci_set_power_state(drm_dev->pdev, PCI_D3hot);
++ /*
++ * During hibernation on some GEN4 platforms the BIOS may try to access
++ * the device even though it's already in D3 and hang the machine. So
++ * leave the device in D0 on those platforms and hope the BIOS will
++ * power down the device properly. Platforms where this was seen:
++ * Lenovo Thinkpad X301, X61s
++ */
++ if (!(hibernation &&
++ drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
++ INTEL_INFO(dev_priv)->gen == 4))
++ pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+
+ return 0;
+ }
+@@ -662,7 +672,7 @@ int i915_suspend_legacy(struct drm_devic
+ if (error)
+ return error;
+
+- return i915_drm_suspend_late(dev);
++ return i915_drm_suspend_late(dev, false);
+ }
+
+ static int i915_drm_resume(struct drm_device *dev)
+@@ -948,7 +958,17 @@ static int i915_pm_suspend_late(struct d
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+- return i915_drm_suspend_late(drm_dev);
++ return i915_drm_suspend_late(drm_dev, false);
++}
++
++static int i915_pm_poweroff_late(struct device *dev)
++{
++ struct drm_device *drm_dev = dev_to_i915(dev)->dev;
++
++ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
++ return 0;
++
++ return i915_drm_suspend_late(drm_dev, true);
+ }
+
+ static int i915_pm_resume_early(struct device *dev)
+@@ -1514,7 +1534,7 @@ static const struct dev_pm_ops i915_pm_o
+ .thaw_early = i915_pm_resume_early,
+ .thaw = i915_pm_resume,
+ .poweroff = i915_pm_suspend,
+- .poweroff_late = i915_pm_suspend_late,
++ .poweroff_late = i915_pm_poweroff_late,
+ .restore_early = i915_pm_resume_early,
+ .restore = i915_pm_resume,
+
--- /dev/null
+From 5151adb37a5918957f4c33a8d8e7629c0fb00563 Mon Sep 17 00:00:00 2001
+From: Thomas Hellstrom <thellstrom@vmware.com>
+Date: Mon, 9 Mar 2015 01:56:21 -0700
+Subject: drm/vmwgfx: Fix a couple of lock dependency violations
+
+From: Thomas Hellstrom <thellstrom@vmware.com>
+
+commit 5151adb37a5918957f4c33a8d8e7629c0fb00563 upstream.
+
+Experimental lockdep annotation added to the TTM lock has unveiled a
+couple of lock dependency violations in the vmwgfx driver. In both
+cases it turns out that the device_private::reservation_sem is not
+needed so the offending code is moved out of that lock.
+
+Acked-by: Sinclair Yeh <syeh@vmware.com>
+Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 8 +++-----
+ drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 14 +++-----------
+ 2 files changed, 6 insertions(+), 16 deletions(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -2780,13 +2780,11 @@ int vmw_execbuf_ioctl(struct drm_device
+ NULL, arg->command_size, arg->throttle_us,
+ (void __user *)(unsigned long)arg->fence_rep,
+ NULL);
+-
++ ttm_read_unlock(&dev_priv->reservation_sem);
+ if (unlikely(ret != 0))
+- goto out_unlock;
++ return ret;
+
+ vmw_kms_cursor_post_execbuf(dev_priv);
+
+-out_unlock:
+- ttm_read_unlock(&dev_priv->reservation_sem);
+- return ret;
++ return 0;
+ }
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -2033,23 +2033,17 @@ int vmw_kms_update_layout_ioctl(struct d
+ int i;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+
+- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+- if (unlikely(ret != 0))
+- return ret;
+-
+ if (!arg->num_outputs) {
+ struct drm_vmw_rect def_rect = {0, 0, 800, 600};
+ vmw_du_update_layout(dev_priv, 1, &def_rect);
+- goto out_unlock;
++ return 0;
+ }
+
+ rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
+ rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
+ GFP_KERNEL);
+- if (unlikely(!rects)) {
+- ret = -ENOMEM;
+- goto out_unlock;
+- }
++ if (unlikely(!rects))
++ return -ENOMEM;
+
+ user_rects = (void __user *)(unsigned long)arg->rects;
+ ret = copy_from_user(rects, user_rects, rects_size);
+@@ -2074,7 +2068,5 @@ int vmw_kms_update_layout_ioctl(struct d
+
+ out_free:
+ kfree(rects);
+-out_unlock:
+- ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+ }
--- /dev/null
+From 3458390b9f0ba784481d23134798faee27b5f16f Mon Sep 17 00:00:00 2001
+From: Thomas Hellstrom <thellstrom@vmware.com>
+Date: Thu, 5 Mar 2015 02:33:24 -0800
+Subject: drm/vmwgfx: Reorder device takedown somewhat
+
+From: Thomas Hellstrom <thellstrom@vmware.com>
+
+commit 3458390b9f0ba784481d23134798faee27b5f16f upstream.
+
+To take down the MOB and GMR memory types, the driver may have to issue
+fence objects and thus make sure that the fence manager is taken down
+after those memory types.
+Reorder device init accordingly.
+
+Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
+Reviewed-by: Sinclair Yeh <syeh@vmware.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 77 ++++++++++++++++++------------------
+ 1 file changed, 40 insertions(+), 37 deletions(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -725,32 +725,6 @@ static int vmw_driver_load(struct drm_de
+ goto out_err1;
+ }
+
+- ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
+- (dev_priv->vram_size >> PAGE_SHIFT));
+- if (unlikely(ret != 0)) {
+- DRM_ERROR("Failed initializing memory manager for VRAM.\n");
+- goto out_err2;
+- }
+-
+- dev_priv->has_gmr = true;
+- if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
+- refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+- VMW_PL_GMR) != 0) {
+- DRM_INFO("No GMR memory available. "
+- "Graphics memory resources are very limited.\n");
+- dev_priv->has_gmr = false;
+- }
+-
+- if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+- dev_priv->has_mob = true;
+- if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
+- VMW_PL_MOB) != 0) {
+- DRM_INFO("No MOB memory available. "
+- "3D will be disabled.\n");
+- dev_priv->has_mob = false;
+- }
+- }
+-
+ dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
+ dev_priv->mmio_size);
+
+@@ -813,6 +787,33 @@ static int vmw_driver_load(struct drm_de
+ goto out_no_fman;
+ }
+
++
++ ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
++ (dev_priv->vram_size >> PAGE_SHIFT));
++ if (unlikely(ret != 0)) {
++ DRM_ERROR("Failed initializing memory manager for VRAM.\n");
++ goto out_no_vram;
++ }
++
++ dev_priv->has_gmr = true;
++ if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
++ refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
++ VMW_PL_GMR) != 0) {
++ DRM_INFO("No GMR memory available. "
++ "Graphics memory resources are very limited.\n");
++ dev_priv->has_gmr = false;
++ }
++
++ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
++ dev_priv->has_mob = true;
++ if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
++ VMW_PL_MOB) != 0) {
++ DRM_INFO("No MOB memory available. "
++ "3D will be disabled.\n");
++ dev_priv->has_mob = false;
++ }
++ }
++
+ vmw_kms_save_vga(dev_priv);
+
+ /* Start kms and overlay systems, needs fifo. */
+@@ -838,6 +839,12 @@ out_no_fifo:
+ vmw_kms_close(dev_priv);
+ out_no_kms:
+ vmw_kms_restore_vga(dev_priv);
++ if (dev_priv->has_mob)
++ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
++ if (dev_priv->has_gmr)
++ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
++ (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
++out_no_vram:
+ vmw_fence_manager_takedown(dev_priv->fman);
+ out_no_fman:
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+@@ -853,12 +860,6 @@ out_err4:
+ iounmap(dev_priv->mmio_virt);
+ out_err3:
+ arch_phys_wc_del(dev_priv->mmio_mtrr);
+- if (dev_priv->has_mob)
+- (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+- if (dev_priv->has_gmr)
+- (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+- (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+-out_err2:
+ (void)ttm_bo_device_release(&dev_priv->bdev);
+ out_err1:
+ vmw_ttm_global_release(dev_priv);
+@@ -887,6 +888,13 @@ static int vmw_driver_unload(struct drm_
+ }
+ vmw_kms_close(dev_priv);
+ vmw_overlay_close(dev_priv);
++
++ if (dev_priv->has_mob)
++ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
++ if (dev_priv->has_gmr)
++ (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
++ (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
++
+ vmw_fence_manager_takedown(dev_priv->fman);
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+ drm_irq_uninstall(dev_priv->dev);
+@@ -898,11 +906,6 @@ static int vmw_driver_unload(struct drm_
+ ttm_object_device_release(&dev_priv->tdev);
+ iounmap(dev_priv->mmio_virt);
+ arch_phys_wc_del(dev_priv->mmio_mtrr);
+- if (dev_priv->has_mob)
+- (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+- if (dev_priv->has_gmr)
+- (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+- (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+ (void)ttm_bo_device_release(&dev_priv->bdev);
+ vmw_ttm_global_release(dev_priv);
+
--- /dev/null
+From 850fc430f47aad52092deaaeb32b99f97f0e6aca Mon Sep 17 00:00:00 2001
+From: Danesh Petigara <dpetigara@broadcom.com>
+Date: Thu, 12 Mar 2015 16:25:57 -0700
+Subject: mm: cma: fix CMA aligned offset calculation
+
+From: Danesh Petigara <dpetigara@broadcom.com>
+
+commit 850fc430f47aad52092deaaeb32b99f97f0e6aca upstream.
+
+The CMA aligned offset calculation is incorrect for non-zero order_per_bit
+values.
+
+For example, if cma->order_per_bit=1, cma->base_pfn= 0x2f800000 and
+align_order=12, the function returns a value of 0x17c00 instead of 0x400.
+
+This patch fixes the CMA aligned offset calculation.
+
+The previous calculation was wrong and would return too-large values for
+the offset, so that when cma_alloc looks for free pages in the bitmap with
+the requested alignment > order_per_bit, it starts too far into the bitmap
+and so CMA allocations will fail despite there actually being plenty of
+free pages remaining. It will also probably have the wrong alignment.
+With this change, we will get the correct offset into the bitmap.
+
+One affected user is powerpc KVM, which has kvm_cma->order_per_bit set to
+KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, or 18 - 12 = 6.
+
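+As a rough illustration (not part of the upstream change), the effect of the
+two formulas can be checked with a small standalone C program. Note that the
+base_pfn below is an assumed value: it is the one that makes the 0x17c00 and
+0x400 figures quoted above come out exactly.
+
+	#include <stdio.h>
+
+	/* power-of-two round-up, as the kernel's ALIGN() does */
+	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
+
+	int main(void)
+	{
+		unsigned long base_pfn = 0x2f800;	/* assumed example value */
+		int order_per_bit = 1, align_order = 12;
+
+		/* old: mixes an aligned PFN with a bitmap offset */
+		unsigned long alignment = 1UL << (align_order - order_per_bit);
+		unsigned long old_off = ALIGN(base_pfn, alignment) -
+					(base_pfn >> order_per_bit);
+
+		/* new: align the PFN first, then convert the delta to bits */
+		unsigned long new_off = (ALIGN(base_pfn, 1UL << align_order) -
+					 base_pfn) >> order_per_bit;
+
+		/* prints old=0x17c00 new=0x400 */
+		printf("old=0x%lx new=0x%lx\n", old_off, new_off);
+		return 0;
+	}
+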
+[gregory.0xf0@gmail.com: changelog additions]
+Signed-off-by: Danesh Petigara <dpetigara@broadcom.com>
+Reviewed-by: Gregory Fong <gregory.0xf0@gmail.com>
+Acked-by: Michal Nazarewicz <mina86@mina86.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/cma.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -64,15 +64,17 @@ static unsigned long cma_bitmap_aligned_
+ return (1UL << (align_order - cma->order_per_bit)) - 1;
+ }
+
++/*
++ * Find a PFN aligned to the specified order and return an offset represented in
++ * order_per_bits.
++ */
+ static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
+ {
+- unsigned int alignment;
+-
+ if (align_order <= cma->order_per_bit)
+ return 0;
+- alignment = 1UL << (align_order - cma->order_per_bit);
+- return ALIGN(cma->base_pfn, alignment) -
+- (cma->base_pfn >> cma->order_per_bit);
++
++ return (ALIGN(cma->base_pfn, (1UL << align_order))
++ - cma->base_pfn) >> cma->order_per_bit;
+ }
+
+ static unsigned long cma_bitmap_maxno(struct cma *cma)
--- /dev/null
+From 8dad0386b97c4bd6edd56752ca7f2e735fe5beb4 Mon Sep 17 00:00:00 2001
+From: Maxime Ripard <maxime.ripard@free-electrons.com>
+Date: Wed, 18 Feb 2015 11:32:07 +0100
+Subject: mtd: nand: pxa3xx: Fix PIO FIFO draining
+
+From: Maxime Ripard <maxime.ripard@free-electrons.com>
+
+commit 8dad0386b97c4bd6edd56752ca7f2e735fe5beb4 upstream.
+
+The NDDB register holds the data that are needed by the read and write
+commands.
+
+However, during a read PIO access, the datasheet specifies that after each 32
+bytes read in that register, when BCH is enabled, we have to make sure that the
+RDDREQ bit is set in the NDSR register.
+
+This fixes an issue that was seen on the Armada 385, and presumably other mvebu
+SoCs, when a read on a newly erased page would end up in the driver reporting a
+timeout from the NAND.
+
+Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com>
+Reviewed-by: Boris Brezillon <boris.brezillon@free-electrons.com>
+Acked-by: Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
+Signed-off-by: Brian Norris <computersforpeace@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/nand/pxa3xx_nand.c | 48 +++++++++++++++++++++++++++++++++++------
+ 1 file changed, 42 insertions(+), 6 deletions(-)
+
+--- a/drivers/mtd/nand/pxa3xx_nand.c
++++ b/drivers/mtd/nand/pxa3xx_nand.c
+@@ -480,6 +480,42 @@ static void disable_int(struct pxa3xx_na
+ nand_writel(info, NDCR, ndcr | int_mask);
+ }
+
++static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
++{
++ if (info->ecc_bch) {
++ int timeout;
++
++ /*
++ * According to the datasheet, when reading from NDDB
++ * with BCH enabled, after each 32 bytes reads, we
++ * have to make sure that the NDSR.RDDREQ bit is set.
++ *
++ * Drain the FIFO 8 32 bits reads at a time, and skip
++ * the polling on the last read.
++ */
++ while (len > 8) {
++ __raw_readsl(info->mmio_base + NDDB, data, 8);
++
++ for (timeout = 0;
++ !(nand_readl(info, NDSR) & NDSR_RDDREQ);
++ timeout++) {
++ if (timeout >= 5) {
++ dev_err(&info->pdev->dev,
++ "Timeout on RDDREQ while draining the FIFO\n");
++ return;
++ }
++
++ mdelay(1);
++ }
++
++ data += 32;
++ len -= 8;
++ }
++ }
++
++ __raw_readsl(info->mmio_base + NDDB, data, len);
++}
++
+ static void handle_data_pio(struct pxa3xx_nand_info *info)
+ {
+ unsigned int do_bytes = min(info->data_size, info->chunk_size);
+@@ -496,14 +532,14 @@ static void handle_data_pio(struct pxa3x
+ DIV_ROUND_UP(info->oob_size, 4));
+ break;
+ case STATE_PIO_READING:
+- __raw_readsl(info->mmio_base + NDDB,
+- info->data_buff + info->data_buff_pos,
+- DIV_ROUND_UP(do_bytes, 4));
++ drain_fifo(info,
++ info->data_buff + info->data_buff_pos,
++ DIV_ROUND_UP(do_bytes, 4));
+
+ if (info->oob_size > 0)
+- __raw_readsl(info->mmio_base + NDDB,
+- info->oob_buff + info->oob_buff_pos,
+- DIV_ROUND_UP(info->oob_size, 4));
++ drain_fifo(info,
++ info->oob_buff + info->oob_buff_pos,
++ DIV_ROUND_UP(info->oob_size, 4));
+ break;
+ default:
+ dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
--- /dev/null
+From 61615cd27e2fdcf698261ba77c7d93f7a7739c65 Mon Sep 17 00:00:00 2001
+From: Fugang Duan <b38611@freescale.com>
+Date: Wed, 4 Mar 2015 07:52:11 +0800
+Subject: net: fec: fix rcv is not last issue when do suspend/resume test
+
+From: Fugang Duan <b38611@freescale.com>
+
+commit 61615cd27e2fdcf698261ba77c7d93f7a7739c65 upstream.
+
+When doing suspend/resume stress tests, the log sometimes shows "rcv is
+not +last". The issue is that enet suspend disables the phy clock and the
+phy link goes down; after resume, the enet MAC is re-initialized and ready
+to tx/rx packets, but the phy is still doing auto-negotiation and is not
+ready yet. When the phy link is not up, don't schedule the napi soft irq.
+
+[Peter]
+It has fixed kernel panic after long time suspend/resume test
+with nfs rootfs.
+
+[ 8864.429458] fec 2188000.ethernet eth0: rcv is not +last
+[ 8864.434799] fec 2188000.ethernet eth0: rcv is not +last
+[ 8864.440088] fec 2188000.ethernet eth0: rcv is not +last
+[ 8864.445424] fec 2188000.ethernet eth0: rcv is not +last
+[ 8864.450782] fec 2188000.ethernet eth0: rcv is not +last
+[ 8864.456111] Unable to handle kernel NULL pointer dereference at virtual address 00000000
+[ 8864.464225] pgd = 80004000
+[ 8864.466997] [00000000] *pgd=00000000
+[ 8864.470627] Internal error: Oops: 17 [#1] SMP ARM
+[ 8864.475353] Modules linked in: evbug
+[ 8864.479006] CPU: 0 PID: 3 Comm: ksoftirqd/0 Not tainted 4.0.0-rc1-00044-g7a2a1d2 #234
+[ 8864.486854] Hardware name: Freescale i.MX6 SoloX (Device Tree)
+[ 8864.492709] task: be069380 ti: be07a000 task.ti: be07a000
+[ 8864.498137] PC is at memcpy+0x80/0x330
+[ 8864.501919] LR is at gro_pull_from_frag0+0x34/0xa8
+[ 8864.506735] pc : [<802bb080>] lr : [<8057c204>] psr: 00000113
+[ 8864.506735] sp : be07bbd4 ip : 00000010 fp : be07bc0c
+[ 8864.518235] r10: 0000000e r9 : 00000000 r8 : 809c7754
+[ 8864.523479] r7 : 809c7754 r6 : bb43c040 r5 : bd280cc0 r4 : 00000012
+[ 8864.530025] r3 : 00000804 r2 : fffffff2 r1 : 00000000 r0 : bb43b83c
+[ 8864.536575] Flags: nzcv IRQs on FIQs on Mode SVC_32 ISA ARM Segment kernel
+[ 8864.543904] Control: 10c5387d Table: bd14c04a DAC: 00000015
+[ 8864.549669] Process ksoftirqd/0 (pid: 3, stack limit = 0xbe07a210)
+[ 8864.555869] Stack: (0xbe07bbd4 to 0xbe07c000)
+[ 8864.560250] bbc0: bd280cc0 bb43c040 809c7754
+[ 8864.568455] bbe0: 809c7754 bb43b83c 00000012 8057c204 00000000 bd280cc0 bd8a0718 00000003
+[ 8864.576658] bc00: be07bc5c be07bc10 8057ebf0 8057c1dc 00000000 00000000 8057ecc4 bef59760
+[ 8864.584863] bc20: 00000002 bd8a0000 be07bc64 809c7754 00000000 bd8a0718 bd280cc0 bd8a0000
+[ 8864.593066] bc40: 00000000 0000001c 00000000 bd8a0000 be07bc74 be07bc60 8057f148 8057eb90
+[ 8864.601268] bc60: bf0810a0 00000000 be07bcf4 be07bc78 8044e7b4 8057f12c 00000000 8007df6c
+[ 8864.609470] bc80: bd8a0718 00000040 00000000 bd280a80 00000002 00000019 bd8a0600 bd8a1214
+[ 8864.617672] bca0: bd8a0690 bf0810a0 00000000 00000000 bd8a1000 00000000 00000027 bd280cc0
+[ 8864.625874] bcc0: 80062708 800625cc 000943db bd8a0718 00000001 000d1166 00000040 be7c1ec0
+[ 8864.634077] bce0: 0000012c be07bd00 be07bd3c be07bcf8 8057fc98 8044e3ac 809c2ec0 3ddff000
+[ 8864.642280] bd00: be07bd00 be07bd00 be07bd08 be07bd08 00000000 00000020 809c608c 00000003
+[ 8864.650481] bd20: 809c6080 40000001 809c6088 00200100 be07bd84 be07bd40 8002e690 8057fac8
+[ 8864.658684] bd40: be07bd64 be07bd50 00000001 04208040 000d1165 0000000a be07bd84 809c0d7c
+[ 8864.666885] bd60: 00000000 809c6af8 00000000 00000001 be008000 00000000 be07bd9c be07bd88
+[ 8864.675087] bd80: 8002eb64 8002e564 00000125 809c0d7c be07bdc4 be07bda0 8006f100 8002eaac
+[ 8864.683291] bda0: c080e10c be07bde8 809c6c6c c080e100 00000002 00000000 be07bde4 be07bdc8
+[ 8864.691492] bdc0: 800087a0 8006f098 806f2934 20000013 ffffffff be07be1c be07be44 be07bde8
+[ 8864.699695] bde0: 800133a4 80008784 00000001 00000001 00000000 00000000 be7c1680 00000000
+[ 8864.707896] be00: be0cfe00 bd93eb40 00000002 00000000 00000000 be07be44 be07be00 be07be30
+[ 8864.716098] be20: 8006278c 806f2934 20000013 ffffffff be069380 be7c1680 be07be7c be07be48
+[ 8864.724300] be40: 80049cfc 806f2910 00000001 00000000 80049cb4 00000000 be07be7c be7c1680
+[ 8864.732502] be60: be3289c0 be069380 bd23b600 be0cfe00 be07bebc be07be80 806ed614 80049c68
+[ 8864.740706] be80: be07a000 0000020a 809c608c 00000003 00000001 8002e858 be07a000 be035740
+[ 8864.748907] bea0: 00000000 00000001 809d4598 00000000 be07bed4 be07bec0 806edd0c 806ed440
+[ 8864.757110] bec0: be07a000 be07a000 be07bee4 be07bed8 806edd68 806edcf0 be07bef4 be07bee8
+[ 8864.765311] bee0: 8002e860 806edd34 be07bf24 be07bef8 800494b0 8002e828 be069380 00000000
+[ 8864.773512] bf00: be035780 be035740 8004938c 00000000 00000000 00000000 be07bfac be07bf28
+[ 8864.781715] bf20: 80045928 80049398 be07bf44 00000001 00000000 be035740 00000000 00030003
+[ 8864.789917] bf40: dead4ead ffffffff ffffffff 80a2716c 80b59b00 00000000 8088c954 be07bf5c
+[ 8864.798120] bf60: be07bf5c 00000000 00000000 dead4ead ffffffff ffffffff 80a2716c 00000000
+[ 8864.806320] bf80: 00000000 8088c954 be07bf88 be07bf88 be035780 8004584c 00000000 00000000
+[ 8864.814523] bfa0: 00000000 be07bfb0 8000ed10 80045858 00000000 00000000 00000000 00000000
+[ 8864.822723] bfc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+[ 8864.830925] bfe0: 00000000 00000000 00000000 00000000 00000013 00000000 5ffbb5f7 f9fcf5e7
+[ 8864.839115] Backtrace:
+[ 8864.841631] [<8057c1d0>] (gro_pull_from_frag0) from [<8057ebf0>] (dev_gro_receive+0x6c/0x3f8)
+[ 8864.850173] r6:00000003 r5:bd8a0718 r4:bd280cc0 r3:00000000
+[ 8864.855958] [<8057eb84>] (dev_gro_receive) from [<8057f148>] (napi_gro_receive+0x28/0xac)
+[ 8864.864152] r10:bd8a0000 r9:00000000 r8:0000001c r7:00000000 r6:bd8a0000 r5:bd280cc0
+[ 8864.872115] r4:bd8a0718
+[ 8864.874713] [<8057f120>] (napi_gro_receive) from [<8044e7b4>] (fec_enet_rx_napi+0x414/0xc74)
+[ 8864.883167] r5:00000000 r4:bf0810a0
+[ 8864.886823] [<8044e3a0>] (fec_enet_rx_napi) from [<8057fc98>] (net_rx_action+0x1dc/0x2ec)
+[ 8864.895016] r10:be07bd00 r9:0000012c r8:be7c1ec0 r7:00000040 r6:000d1166 r5:00000001
+[ 8864.902982] r4:bd8a0718
+[ 8864.905570] [<8057fabc>] (net_rx_action) from [<8002e690>] (__do_softirq+0x138/0x2c4)
+[ 8864.913417] r10:00200100 r9:809c6088 r8:40000001 r7:809c6080 r6:00000003 r5:809c608c
+[ 8864.921382] r4:00000020
+[ 8864.923966] [<8002e558>] (__do_softirq) from [<8002eb64>] (irq_exit+0xc4/0x138)
+[ 8864.931289] r10:00000000 r9:be008000 r8:00000001 r7:00000000 r6:809c6af8 r5:00000000
+[ 8864.939252] r4:809c0d7c
+[ 8864.941841] [<8002eaa0>] (irq_exit) from [<8006f100>] (__handle_domain_irq+0x74/0xe8)
+[ 8864.949688] r4:809c0d7c r3:00000125
+[ 8864.953342] [<8006f08c>] (__handle_domain_irq) from [<800087a0>] (gic_handle_irq+0x28/0x68)
+[ 8864.961707] r9:00000000 r8:00000002 r7:c080e100 r6:809c6c6c r5:be07bde8 r4:c080e10c
+[ 8864.969597] [<80008778>] (gic_handle_irq) from [<800133a4>] (__irq_svc+0x44/0x5c)
+[ 8864.977097] Exception stack(0xbe07bde8 to 0xbe07be30)
+[ 8864.982173] bde0: 00000001 00000001 00000000 00000000 be7c1680 00000000
+[ 8864.990377] be00: be0cfe00 bd93eb40 00000002 00000000 00000000 be07be44 be07be00 be07be30
+[ 8864.998573] be20: 8006278c 806f2934 20000013 ffffffff
+[ 8865.003638] r7:be07be1c r6:ffffffff r5:20000013 r4:806f2934
+[ 8865.009447] [<806f2904>] (_raw_spin_unlock_irq) from [<80049cfc>] (finish_task_switch+0xa0/0x160)
+[ 8865.018334] r4:be7c1680 r3:be069380
+[ 8865.021993] [<80049c5c>] (finish_task_switch) from [<806ed614>] (__schedule+0x1e0/0x5dc)
+[ 8865.030098] r8:be0cfe00 r7:bd23b600 r6:be069380 r5:be3289c0 r4:be7c1680
+[ 8865.036942] [<806ed434>] (__schedule) from [<806edd0c>] (preempt_schedule_common+0x28/0x44)
+[ 8865.045307] r9:00000000 r8:809d4598 r7:00000001 r6:00000000 r5:be035740 r4:be07a000
+[ 8865.053197] [<806edce4>] (preempt_schedule_common) from [<806edd68>] (_cond_resched+0x40/0x48)
+[ 8865.061822] r4:be07a000 r3:be07a000
+[ 8865.065472] [<806edd28>] (_cond_resched) from [<8002e860>] (run_ksoftirqd+0x44/0x64)
+[ 8865.073252] [<8002e81c>] (run_ksoftirqd) from [<800494b0>] (smpboot_thread_fn+0x124/0x190)
+[ 8865.081550] [<8004938c>] (smpboot_thread_fn) from [<80045928>] (kthread+0xdc/0xf8)
+[ 8865.089133] r10:00000000 r9:00000000 r8:00000000 r7:8004938c r6:be035740 r5:be035780
+[ 8865.097097] r4:00000000 r3:be069380
+[ 8865.100752] [<8004584c>] (kthread) from [<8000ed10>] (ret_from_fork+0x14/0x24)
+[ 8865.107990] r7:00000000 r6:00000000 r5:8004584c r4:be035780
+[ 8865.113767] Code: e320f000 e4913004 e4914004 e4915004 (e4916004)
+[ 8865.120006] ---[ end trace b0a4c6bd499288ca ]---
+[ 8865.124697] Kernel panic - not syncing: Fatal exception in interrupt
+[ 8865.131084] ---[ end Kernel panic - not syncing: Fatal exception in interrupt
+
+Tested-by: Peter Chen <peter.chen@freescale.com>
+Signed-off-by: Peter Chen <peter.chen@freescale.com>
+Signed-off-by: Fugang Duan <B38611@freescale.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/freescale/fec_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1565,7 +1565,7 @@ fec_enet_interrupt(int irq, void *dev_id
+ writel(int_events, fep->hwp + FEC_IEVENT);
+ fec_enet_collect_events(fep, int_events);
+
+- if (fep->work_tx || fep->work_rx) {
++ if ((fep->work_tx || fep->work_rx) && fep->link) {
+ ret = IRQ_HANDLED;
+
+ if (napi_schedule_prep(&fep->napi)) {
--- /dev/null
+From 283ee1482f349d6c0c09dfb725db5880afc56813 Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
+Date: Thu, 12 Mar 2015 16:26:00 -0700
+Subject: nilfs2: fix deadlock of segment constructor during recovery
+
+From: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
+
+commit 283ee1482f349d6c0c09dfb725db5880afc56813 upstream.
+
+According to a report from Yuxuan Shui, nilfs2 in kernel 3.19 got stuck
+during recovery at mount time. The code path that caused the deadlock was
+as follows:
+
+ nilfs_fill_super()
+ load_nilfs()
+ nilfs_salvage_orphan_logs()
+ * Do roll-forwarding, attach segment constructor for recovery,
+ and kick it.
+
+ nilfs_segctor_thread()
+ nilfs_segctor_thread_construct()
+ * A lock is held with nilfs_transaction_lock()
+ nilfs_segctor_do_construct()
+ nilfs_segctor_drop_written_files()
+ iput()
+ iput_final()
+ write_inode_now()
+ writeback_single_inode()
+ __writeback_single_inode()
+ do_writepages()
+ nilfs_writepage()
+ nilfs_construct_dsync_segment()
+ nilfs_transaction_lock() --> deadlock
+
+This can happen if commit 7ef3ff2fea8b ("nilfs2: fix deadlock of segment
+constructor over I_SYNC flag") is applied and roll-forward recovery was
+performed at mount time. The roll-forward recovery can happen if datasync
+write is done and the file system crashes immediately after that. For
+instance, we can reproduce the issue with the following steps:
+
+ < nilfs2 is mounted on /nilfs (device: /dev/sdb1) >
+ # dd if=/dev/zero of=/nilfs/test bs=4k count=1 && sync
+ # dd if=/dev/zero of=/nilfs/test conv=notrunc oflag=dsync bs=4k
+ count=1 && reboot -nfh
+ < the system will immediately reboot >
+ # mount -t nilfs2 /dev/sdb1 /nilfs
+
+The deadlock occurs because iput() can run segment constructor through
+writeback_single_inode() if MS_ACTIVE flag is not set on sb->s_flags. The
+above commit changed segment constructor so that it calls iput()
+asynchronously for inodes with i_nlink == 0, but that change was
+imperfect.
+
+This fixes another deadlock by deferring iput() in the segment constructor
+even for the case where the mount is not yet finished, that is, where the
+MS_ACTIVE flag is not set.
+
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
+Reported-by: Yuxuan Shui <yshuiv7@gmail.com>
+Tested-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nilfs2/segment.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -1907,6 +1907,7 @@ static void nilfs_segctor_drop_written_f
+ struct the_nilfs *nilfs)
+ {
+ struct nilfs_inode_info *ii, *n;
++ int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
+ int defer_iput = false;
+
+ spin_lock(&nilfs->ns_inode_lock);
+@@ -1919,10 +1920,10 @@ static void nilfs_segctor_drop_written_f
+ brelse(ii->i_bh);
+ ii->i_bh = NULL;
+ list_del_init(&ii->i_dirty);
+- if (!ii->vfs_inode.i_nlink) {
++ if (!ii->vfs_inode.i_nlink || during_mount) {
+ /*
+- * Defer calling iput() to avoid a deadlock
+- * over I_SYNC flag for inodes with i_nlink == 0
++ * Defer calling iput() to avoid deadlocks if
++ * i_nlink == 0 or mount is not yet finished.
+ */
+ list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
+ defer_iput = true;
--- /dev/null
+From 29d62ec5f87fbeec8413e2215ddad12e7f972e4c Mon Sep 17 00:00:00 2001
+From: Doug Anderson <dianders@chromium.org>
+Date: Tue, 3 Mar 2015 15:20:47 -0800
+Subject: regulator: core: Fix enable GPIO reference counting
+
+From: Doug Anderson <dianders@chromium.org>
+
+commit 29d62ec5f87fbeec8413e2215ddad12e7f972e4c upstream.
+
+Normally _regulator_do_enable() isn't called on an already-enabled
+rdev. That's because the main caller, _regulator_enable() always
+calls _regulator_is_enabled() and only calls _regulator_do_enable() if
+the rdev was not already enabled.
+
+However, there is one caller of _regulator_do_enable() that doesn't
+check: regulator_suspend_finish(). While we might want to make
+regulator_suspend_finish() behave more like _regulator_enable(), it's
+probably also a good idea to make _regulator_do_enable() robust if it
+is called on an already enabled rdev.
+
+At the moment, _regulator_do_enable() is _not_ robust for already
+enabled rdevs if we're using an ena_pin. Each time
+_regulator_do_enable() is called for an rdev using an ena_pin the
+reference count of the ena_pin is incremented even if the rdev was
+already enabled. This is not as intended because the ena_pin is for
+something else: for keeping track of how many active rdevs there are
+sharing the same ena_pin.
+
+Here's how the reference counting works here:
+
+* Each time _regulator_enable() is called we increment
+ rdev->use_count, so _regulator_enable() calls need to be balanced
+ with _regulator_disable() calls.
+
+* There is no explicit reference counting in _regulator_do_enable()
+  which is normally just a wrapper around rdev->desc->ops->enable()
+ with code for supporting delays. It's not expected that the
+ "ops->enable()" call do reference counting.
+
+* Since regulator_ena_gpio_ctrl() does have reference counting
+ (handling the sharing of the pin amongst multiple rdevs), we
+ shouldn't call it if the current rdev is already enabled.
+
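+As a toy model (not driver code; names are illustrative only), the effect of
+guarding the shared-pin refcount with ena_gpio_state can be sketched like
+this:
+
+	#include <stdbool.h>
+	#include <stdio.h>
+
+	struct ena_pin { int enable_count; };	/* one GPIO shared by several rdevs */
+	struct toy_rdev { struct ena_pin *pin; bool ena_gpio_state; };
+
+	/* stands in for regulator_ena_gpio_ctrl(): counts active users of the pin */
+	static void pin_ctrl(struct ena_pin *pin, bool enable)
+	{
+		pin->enable_count += enable ? 1 : -1;
+	}
+
+	/* before the fix: every call bumps the shared count */
+	static void do_enable_old(struct toy_rdev *r)
+	{
+		pin_ctrl(r->pin, true);
+	}
+
+	/* after the fix: only a real off -> on transition touches the pin */
+	static void do_enable_new(struct toy_rdev *r)
+	{
+		if (!r->ena_gpio_state) {
+			pin_ctrl(r->pin, true);
+			r->ena_gpio_state = true;
+		}
+	}
+
+	int main(void)
+	{
+		struct ena_pin pin = { 0 };
+		struct toy_rdev r = { &pin, false };
+
+		/* regulator_suspend_finish() may enable an already-enabled rdev */
+		do_enable_old(&r);
+		do_enable_old(&r);
+		printf("old: count=%d (a single disable now leaves the pin on)\n",
+		       pin.enable_count);
+
+		pin.enable_count = 0;
+		r.ena_gpio_state = false;
+		do_enable_new(&r);
+		do_enable_new(&r);
+		printf("new: count=%d\n", pin.enable_count);
+		return 0;
+	}
+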
+Note that as part of this we clean up (remove) the initialization of
+ena_gpio_state in regulator_register(). In _regulator_do_enable(),
+_regulator_do_disable() and _regulator_is_enabled() it is clear that
+ena_gpio_state should reflect whether this particular rdev has
+requested the GPIO be enabled. regulator_register() was initializing it
+to the actual state of the pin.
+
+Fixes: 967cfb18c0e3 ("regulator: core: manage enable GPIO list")
+Signed-off-by: Doug Anderson <dianders@chromium.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/regulator/core.c | 26 ++++++++++++--------------
+ 1 file changed, 12 insertions(+), 14 deletions(-)
+
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1843,10 +1843,12 @@ static int _regulator_do_enable(struct r
+ }
+
+ if (rdev->ena_pin) {
+- ret = regulator_ena_gpio_ctrl(rdev, true);
+- if (ret < 0)
+- return ret;
+- rdev->ena_gpio_state = 1;
++ if (!rdev->ena_gpio_state) {
++ ret = regulator_ena_gpio_ctrl(rdev, true);
++ if (ret < 0)
++ return ret;
++ rdev->ena_gpio_state = 1;
++ }
+ } else if (rdev->desc->ops->enable) {
+ ret = rdev->desc->ops->enable(rdev);
+ if (ret < 0)
+@@ -1943,10 +1945,12 @@ static int _regulator_do_disable(struct
+ trace_regulator_disable(rdev_get_name(rdev));
+
+ if (rdev->ena_pin) {
+- ret = regulator_ena_gpio_ctrl(rdev, false);
+- if (ret < 0)
+- return ret;
+- rdev->ena_gpio_state = 0;
++ if (rdev->ena_gpio_state) {
++ ret = regulator_ena_gpio_ctrl(rdev, false);
++ if (ret < 0)
++ return ret;
++ rdev->ena_gpio_state = 0;
++ }
+
+ } else if (rdev->desc->ops->disable) {
+ ret = rdev->desc->ops->disable(rdev);
+@@ -3678,12 +3682,6 @@ regulator_register(const struct regulato
+ config->ena_gpio, ret);
+ goto wash;
+ }
+-
+- if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH)
+- rdev->ena_gpio_state = 1;
+-
+- if (config->ena_gpio_invert)
+- rdev->ena_gpio_state = !rdev->ena_gpio_state;
+ }
+
+ /* set regulator constraints */
--- /dev/null
+From 0548bf4f5ad6fc3bd93c4940fa48078b34609682 Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javier.martinez@collabora.co.uk>
+Date: Mon, 2 Mar 2015 21:40:39 +0100
+Subject: regulator: Only enable disabled regulators on resume
+
+From: Javier Martinez Canillas <javier.martinez@collabora.co.uk>
+
+commit 0548bf4f5ad6fc3bd93c4940fa48078b34609682 upstream.
+
+The _regulator_do_enable() call ought to be a no-op when called on an
+already-enabled regulator. However, as an optimization
+_regulator_enable() doesn't call _regulator_do_enable() on an already
+enabled regulator. That means we never test the case of calling
+_regulator_do_enable() during normal usage and there may be hidden
+bugs or warnings. We have seen warnings issued by the tps65090 driver
+and bugs when using the GPIO enable pin.
+
+Let's apply the same optimization that _regulator_enable() uses to
+regulator_suspend_finish(). That may speed up suspend/resume and also
+avoid exposing hidden bugs.
+
+[Use much clearer commit message from Doug Anderson]
+
+Signed-off-by: Javier Martinez Canillas <javier.martinez@collabora.co.uk>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/regulator/core.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -3856,9 +3856,11 @@ int regulator_suspend_finish(void)
+ list_for_each_entry(rdev, ®ulator_list, list) {
+ mutex_lock(&rdev->mutex);
+ if (rdev->use_count > 0 || rdev->constraints->always_on) {
+- error = _regulator_do_enable(rdev);
+- if (error)
+- ret = error;
++ if (!_regulator_is_enabled(rdev)) {
++ error = _regulator_do_enable(rdev);
++ if (error)
++ ret = error;
++ }
+ } else {
+ if (!have_full_constraints())
+ goto unlock;
--- /dev/null
+From 28249b0c2fa361cdac450a6f40242ed45408a24f Mon Sep 17 00:00:00 2001
+From: Doug Anderson <dianders@chromium.org>
+Date: Fri, 20 Feb 2015 16:53:38 -0800
+Subject: regulator: rk808: Set the enable time for LDOs
+
+From: Doug Anderson <dianders@chromium.org>
+
+commit 28249b0c2fa361cdac450a6f40242ed45408a24f upstream.
+
+The LDOs are documented in the rk808 datasheet to have a soft start
+time of 400us. Add that to the driver. If this time is longer on
+a certain board, the device tree can override it with
+"regulator-enable-ramp-delay".
+
+This fixes some dw_mmc probing problems (together with other patches
+posted to the mmc mailing lists) on rk3288.
+
+Signed-off-by: Doug Anderson <dianders@chromium.org>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/regulator/rk808-regulator.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/regulator/rk808-regulator.c
++++ b/drivers/regulator/rk808-regulator.c
+@@ -235,6 +235,7 @@ static const struct regulator_desc rk808
+ .vsel_mask = RK808_LDO_VSEL_MASK,
+ .enable_reg = RK808_LDO_EN_REG,
+ .enable_mask = BIT(0),
++ .enable_time = 400,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO_REG2",
+@@ -249,6 +250,7 @@ static const struct regulator_desc rk808
+ .vsel_mask = RK808_LDO_VSEL_MASK,
+ .enable_reg = RK808_LDO_EN_REG,
+ .enable_mask = BIT(1),
++ .enable_time = 400,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO_REG3",
+@@ -263,6 +265,7 @@ static const struct regulator_desc rk808
+ .vsel_mask = RK808_BUCK4_VSEL_MASK,
+ .enable_reg = RK808_LDO_EN_REG,
+ .enable_mask = BIT(2),
++ .enable_time = 400,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO_REG4",
+@@ -277,6 +280,7 @@ static const struct regulator_desc rk808
+ .vsel_mask = RK808_LDO_VSEL_MASK,
+ .enable_reg = RK808_LDO_EN_REG,
+ .enable_mask = BIT(3),
++ .enable_time = 400,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO_REG5",
+@@ -291,6 +295,7 @@ static const struct regulator_desc rk808
+ .vsel_mask = RK808_LDO_VSEL_MASK,
+ .enable_reg = RK808_LDO_EN_REG,
+ .enable_mask = BIT(4),
++ .enable_time = 400,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO_REG6",
+@@ -305,6 +310,7 @@ static const struct regulator_desc rk808
+ .vsel_mask = RK808_LDO_VSEL_MASK,
+ .enable_reg = RK808_LDO_EN_REG,
+ .enable_mask = BIT(5),
++ .enable_time = 400,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO_REG7",
+@@ -319,6 +325,7 @@ static const struct regulator_desc rk808
+ .vsel_mask = RK808_LDO_VSEL_MASK,
+ .enable_reg = RK808_LDO_EN_REG,
+ .enable_mask = BIT(6),
++ .enable_time = 400,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "LDO_REG8",
+@@ -333,6 +340,7 @@ static const struct regulator_desc rk808
+ .vsel_mask = RK808_LDO_VSEL_MASK,
+ .enable_reg = RK808_LDO_EN_REG,
+ .enable_mask = BIT(7),
++ .enable_time = 400,
+ .owner = THIS_MODULE,
+ }, {
+ .name = "SWITCH_REG1",
--- /dev/null
+From a49445727014216703a3c28ccee4cef36d41571e Mon Sep 17 00:00:00 2001
+From: Jakub Kicinski <kubakici@wp.pl>
+Date: Wed, 11 Mar 2015 18:35:36 +0100
+Subject: Revert "i2c: core: Dispose OF IRQ mapping at client removal time"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jakub Kicinski <kubakici@wp.pl>
+
+commit a49445727014216703a3c28ccee4cef36d41571e upstream.
+
+This reverts commit e4df3a0b6228
+("i2c: core: Dispose OF IRQ mapping at client removal time")
+
+Calling irq_dispose_mapping() will destroy the mapping and disassociate
+the IRQ from the IRQ chip to which it belongs. Keeping it is OK, because
+existing mappings are reused properly.
+
+Also, this commit breaks drivers using devm* for IRQ management on
+OF-based systems because devm* cleanup happens in device code, after
+bus's remove() method returns.
+
+Signed-off-by: Jakub Kicinski <kubakici@wp.pl>
+Reported-by: Sébastien Szymanski <sebastien.szymanski@armadeus.com>
+Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Acked-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+[wsa: updated the commit message with findings from the other bug report]
+Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
+Fixes: e4df3a0b6228
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/i2c/i2c-core.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/drivers/i2c/i2c-core.c
++++ b/drivers/i2c/i2c-core.c
+@@ -679,9 +679,6 @@ static int i2c_device_remove(struct devi
+ status = driver->remove(client);
+ }
+
+- if (dev->of_node)
+- irq_dispose_mapping(client->irq);
+-
+ dev_pm_domain_detach(&client->dev, true);
+ return status;
+ }
alsa-hda-set-single_adc_amp-flag-for-cs420x-codecs.patch
alsa-hda-add-workaround-for-macbook-air-5-2-built-in-mic.patch
alsa-hda-fix-regression-of-hd-audio-controller-fallback-modes.patch
+alsa-hda-treat-stereo-to-mono-mix-properly.patch
+mtd-nand-pxa3xx-fix-pio-fifo-draining.patch
+bnx2x-force-fundamental-reset-for-eeh-recovery.patch
+net-fec-fix-rcv-is-not-last-issue-when-do-suspend-resume-test.patch
+regulator-rk808-set-the-enable-time-for-ldos.patch
+regulator-only-enable-disabled-regulators-on-resume.patch
+regulator-core-fix-enable-gpio-reference-counting.patch
+nilfs2-fix-deadlock-of-segment-constructor-during-recovery.patch
+mm-cma-fix-cma-aligned-offset-calculation.patch
+revert-i2c-core-dispose-of-irq-mapping-at-client-removal-time.patch
+drm-vmwgfx-reorder-device-takedown-somewhat.patch
+drm-vmwgfx-fix-a-couple-of-lock-dependency-violations.patch
+drm-don-t-assign-fbs-for-universal-cursor-support-to-files.patch
+drm-i915-add-dev_to_i915-helper.patch
+drm-i915-gen4-work-around-hang-during-hibernation.patch