--- /dev/null
+From 0ca9488193e61ec5f31a631d8147f74525629e8a Mon Sep 17 00:00:00 2001
+From: Clint Taylor <clinton.a.taylor@intel.com>
+Date: Tue, 10 Jul 2018 13:02:05 -0700
+Subject: drm/i915/glk: Add Quirk for GLK NUC HDMI port issues.
+
+From: Clint Taylor <clinton.a.taylor@intel.com>
+
+commit 0ca9488193e61ec5f31a631d8147f74525629e8a upstream.
+
+On GLK NUC platforms the HDMI retiming buffer needs additional disabled
+time to correctly sync to a faster incoming signal.
+
+When measured on a scope the highspeed lines of the HDMI clock turn off
+ for ~400uS during a normal resolution change. The HDMI retimer on the
+ GLK NUC appears to require at least a full frame of quiet time before a
+new faster clock can be correctly sync'd. Wait 100ms due to msleep
+inaccuracies while waiting for a completed frame. Add a quirk to the
+driver for GLK boards that use ITE66317 HDMI retimers.
+
+V2: Add more devices to the quirk list
+V3: Delay increased to 100ms, check to confirm crtc type is HDMI.
+V4: crtc type check extended to include _DDI and whitespace fixes
+v5: Fix white spaces, remove the macro for delay. Revert the crtc type
+ check introduced in v4.
+
+Cc: Imre Deak <imre.deak@intel.com>
+Cc: <stable@vger.kernel.org> # v4.14+
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105887
+Signed-off-by: Clint Taylor <clinton.a.taylor@intel.com>
+Tested-by: Daniel Scheller <d.scheller.oss@gmail.com>
+Signed-off-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Reviewed-by: Imre Deak <imre.deak@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20180710200205.1478-1-radhakrishna.sripada@intel.com
+(cherry picked from commit 90c3e2198777aaa355b6994a31a79c636c8d4306)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_drv.h | 1 +
+ drivers/gpu/drm/i915/intel_ddi.c | 13 +++++++++++--
+ drivers/gpu/drm/i915/intel_display.c | 21 ++++++++++++++++++++-
+ drivers/gpu/drm/i915/intel_drv.h | 3 +--
+ 4 files changed, 33 insertions(+), 5 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1183,6 +1183,7 @@ enum intel_sbi_destination {
+ #define QUIRK_BACKLIGHT_PRESENT (1<<3)
+ #define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
+ #define QUIRK_INCREASE_T12_DELAY (1<<6)
++#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
+
+ struct intel_fbdev;
+ struct intel_fbc_work;
+--- a/drivers/gpu/drm/i915/intel_ddi.c
++++ b/drivers/gpu/drm/i915/intel_ddi.c
+@@ -1526,15 +1526,24 @@ void intel_ddi_enable_transcoder_func(co
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+ }
+
+-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+- enum transcoder cpu_transcoder)
++void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
+ {
++ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
++ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
++ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
+ uint32_t val = I915_READ(reg);
+
+ val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
+ val |= TRANS_DDI_PORT_NONE;
+ I915_WRITE(reg, val);
++
++ if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
++ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
++ DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
++ /* Quirk time at 100ms for reliable operation */
++ msleep(100);
++ }
+ }
+
+ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -5653,7 +5653,7 @@ static void haswell_crtc_disable(struct
+ intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
+
+ if (!transcoder_is_dsi(cpu_transcoder))
+- intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
++ intel_ddi_disable_transcoder_func(old_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skylake_scaler_disable(intel_crtc);
+@@ -14286,6 +14286,18 @@ static void quirk_increase_t12_delay(str
+ DRM_INFO("Applying T12 delay quirk\n");
+ }
+
++/*
++ * GeminiLake NUC HDMI outputs require additional off time
++ * this allows the onboard retimer to correctly sync to signal
++ */
++static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = to_i915(dev);
++
++ dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
++ DRM_INFO("Applying Increase DDI Disabled quirk\n");
++}
++
+ struct intel_quirk {
+ int device;
+ int subsystem_vendor;
+@@ -14372,6 +14384,13 @@ static struct intel_quirk intel_quirks[]
+
+ /* Toshiba Satellite P50-C-18C */
+ { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
++
++ /* GeminiLake NUC */
++ { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
++ { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
++ /* ASRock ITX*/
++ { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
++ { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+ };
+
+ static void intel_init_quirks(struct drm_device *dev)
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -1254,8 +1254,7 @@ void intel_ddi_init(struct drm_i915_priv
+ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
+ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
+ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
+-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+- enum transcoder cpu_transcoder);
++void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
+ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
+ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
+ struct intel_encoder *
--- /dev/null
+From 05c72e77ccda89ff624108b1b59a0fc43843f343 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Tue, 17 Jul 2018 20:42:14 +0300
+Subject: drm/i915: Nuke the LVDS lid notifier
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit 05c72e77ccda89ff624108b1b59a0fc43843f343 upstream.
+
+We broke the LVDS notifier resume thing in (presumably) commit
+e2c8b8701e2d ("drm/i915: Use atomic helpers for suspend, v2.") as
+we no longer duplicate the current state in the LVDS notifier and
+thus we never resume it properly either.
+
+Instead of trying to fix it again let's just kill off the lid
+notifier entirely. None of the machines tested thus far have
+apparently needed it. Originally the lid notifier was added to
+work around cases where the VBIOS was clobbering some of the
+hardware state behind the driver's back, mostly on Thinkpads.
+We now have a few report of Thinkpads working just fine without
+the notifier. So maybe it was misdiagnosed originally, or
+something else has changed (ACPI video stuff perhaps?).
+
+If we do end up finding a machine where the VBIOS is still causing
+problems I would suggest that we first try setting various bits in
+the VBIOS scratch registers. There are several to choose from that
+may instruct the VBIOS to steer clear.
+
+With the notifier gone we'll also stop looking at the panel status
+in ->detect().
+
+v2: Nuke enum modeset_restore (Rodrigo)
+
+Cc: stable@vger.kernel.org
+Cc: Wolfgang Draxinger <wdraxinger.maillist@draxit.de>
+Cc: Vito Caputo <vcaputo@pengaru.com>
+Cc: kitsunyan <kitsunyan@airmail.cc>
+Cc: Joonas Saarinen <jza@saunalahti.fi>
+Tested-by: Vito Caputo <vcaputo@pengaru.com> # Thinkpad X61s
+Tested-by: kitsunyan <kitsunyan@airmail.cc> # ThinkPad X200
+Tested-by: Joonas Saarinen <jza@saunalahti.fi> # Fujitsu Siemens U9210
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105902
+References: https://lists.freedesktop.org/archives/intel-gfx/2018-June/169315.html
+References: https://bugs.freedesktop.org/show_bug.cgi?id=21230
+Fixes: e2c8b8701e2d ("drm/i915: Use atomic helpers for suspend, v2.")
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20180717174216.22252-1-ville.syrjala@linux.intel.com
+Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_drv.c | 10 --
+ drivers/gpu/drm/i915/i915_drv.h | 8 --
+ drivers/gpu/drm/i915/intel_lvds.c | 136 --------------------------------------
+ 3 files changed, 2 insertions(+), 152 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -878,7 +878,6 @@ static int i915_driver_init_early(struct
+
+ spin_lock_init(&dev_priv->mm.object_stat_lock);
+ mutex_init(&dev_priv->sb_lock);
+- mutex_init(&dev_priv->modeset_restore_lock);
+ mutex_init(&dev_priv->av_mutex);
+ mutex_init(&dev_priv->wm.wm_mutex);
+ mutex_init(&dev_priv->pps_mutex);
+@@ -1505,11 +1504,6 @@ static int i915_drm_suspend(struct drm_d
+ pci_power_t opregion_target_state;
+ int error;
+
+- /* ignore lid events during suspend */
+- mutex_lock(&dev_priv->modeset_restore_lock);
+- dev_priv->modeset_restore = MODESET_SUSPENDED;
+- mutex_unlock(&dev_priv->modeset_restore_lock);
+-
+ disable_rpm_wakeref_asserts(dev_priv);
+
+ /* We do a lot of poking in a lot of registers, make sure they work
+@@ -1718,10 +1712,6 @@ static int i915_drm_resume(struct drm_de
+
+ intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
+
+- mutex_lock(&dev_priv->modeset_restore_lock);
+- dev_priv->modeset_restore = MODESET_DONE;
+- mutex_unlock(&dev_priv->modeset_restore_lock);
+-
+ intel_opregion_notify_adapter(dev_priv, PCI_D0);
+
+ intel_autoenable_gt_powersave(dev_priv);
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1614,12 +1614,6 @@ struct i915_gpu_error {
+ unsigned long test_irq_rings;
+ };
+
+-enum modeset_restore {
+- MODESET_ON_LID_OPEN,
+- MODESET_DONE,
+- MODESET_SUSPENDED,
+-};
+-
+ #define DP_AUX_A 0x40
+ #define DP_AUX_B 0x10
+ #define DP_AUX_C 0x20
+@@ -2296,8 +2290,6 @@ struct drm_i915_private {
+
+ unsigned long quirks;
+
+- enum modeset_restore modeset_restore;
+- struct mutex modeset_restore_lock;
+ struct drm_atomic_state *modeset_restore_state;
+ struct drm_modeset_acquire_ctx reset_ctx;
+
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -44,8 +44,6 @@
+ /* Private structure for the integrated LVDS support */
+ struct intel_lvds_connector {
+ struct intel_connector base;
+-
+- struct notifier_block lid_notifier;
+ };
+
+ struct intel_lvds_pps {
+@@ -440,26 +438,9 @@ static bool intel_lvds_compute_config(st
+ return true;
+ }
+
+-/**
+- * Detect the LVDS connection.
+- *
+- * Since LVDS doesn't have hotlug, we use the lid as a proxy. Open means
+- * connected and closed means disconnected. We also send hotplug events as
+- * needed, using lid status notification from the input layer.
+- */
+ static enum drm_connector_status
+ intel_lvds_detect(struct drm_connector *connector, bool force)
+ {
+- struct drm_i915_private *dev_priv = to_i915(connector->dev);
+- enum drm_connector_status status;
+-
+- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+- connector->base.id, connector->name);
+-
+- status = intel_panel_detect(dev_priv);
+- if (status != connector_status_unknown)
+- return status;
+-
+ return connector_status_connected;
+ }
+
+@@ -484,117 +465,6 @@ static int intel_lvds_get_modes(struct d
+ return 1;
+ }
+
+-static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
+-{
+- DRM_INFO("Skipping forced modeset for %s\n", id->ident);
+- return 1;
+-}
+-
+-/* The GPU hangs up on these systems if modeset is performed on LID open */
+-static const struct dmi_system_id intel_no_modeset_on_lid[] = {
+- {
+- .callback = intel_no_modeset_on_lid_dmi_callback,
+- .ident = "Toshiba Tecra A11",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
+- },
+- },
+-
+- { } /* terminating entry */
+-};
+-
+-/*
+- * Lid events. Note the use of 'modeset':
+- * - we set it to MODESET_ON_LID_OPEN on lid close,
+- * and set it to MODESET_DONE on open
+- * - we use it as a "only once" bit (ie we ignore
+- * duplicate events where it was already properly set)
+- * - the suspend/resume paths will set it to
+- * MODESET_SUSPENDED and ignore the lid open event,
+- * because they restore the mode ("lid open").
+- */
+-static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
+- void *unused)
+-{
+- struct intel_lvds_connector *lvds_connector =
+- container_of(nb, struct intel_lvds_connector, lid_notifier);
+- struct drm_connector *connector = &lvds_connector->base.base;
+- struct drm_device *dev = connector->dev;
+- struct drm_i915_private *dev_priv = to_i915(dev);
+-
+- if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+- return NOTIFY_OK;
+-
+- mutex_lock(&dev_priv->modeset_restore_lock);
+- if (dev_priv->modeset_restore == MODESET_SUSPENDED)
+- goto exit;
+- /*
+- * check and update the status of LVDS connector after receiving
+- * the LID nofication event.
+- */
+- connector->status = connector->funcs->detect(connector, false);
+-
+- /* Don't force modeset on machines where it causes a GPU lockup */
+- if (dmi_check_system(intel_no_modeset_on_lid))
+- goto exit;
+- if (!acpi_lid_open()) {
+- /* do modeset on next lid open event */
+- dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
+- goto exit;
+- }
+-
+- if (dev_priv->modeset_restore == MODESET_DONE)
+- goto exit;
+-
+- /*
+- * Some old platform's BIOS love to wreak havoc while the lid is closed.
+- * We try to detect this here and undo any damage. The split for PCH
+- * platforms is rather conservative and a bit arbitrary expect that on
+- * those platforms VGA disabling requires actual legacy VGA I/O access,
+- * and as part of the cleanup in the hw state restore we also redisable
+- * the vga plane.
+- */
+- if (!HAS_PCH_SPLIT(dev_priv))
+- intel_display_resume(dev);
+-
+- dev_priv->modeset_restore = MODESET_DONE;
+-
+-exit:
+- mutex_unlock(&dev_priv->modeset_restore_lock);
+- return NOTIFY_OK;
+-}
+-
+-static int
+-intel_lvds_connector_register(struct drm_connector *connector)
+-{
+- struct intel_lvds_connector *lvds = to_lvds_connector(connector);
+- int ret;
+-
+- ret = intel_connector_register(connector);
+- if (ret)
+- return ret;
+-
+- lvds->lid_notifier.notifier_call = intel_lid_notify;
+- if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
+- DRM_DEBUG_KMS("lid notifier registration failed\n");
+- lvds->lid_notifier.notifier_call = NULL;
+- }
+-
+- return 0;
+-}
+-
+-static void
+-intel_lvds_connector_unregister(struct drm_connector *connector)
+-{
+- struct intel_lvds_connector *lvds = to_lvds_connector(connector);
+-
+- if (lvds->lid_notifier.notifier_call)
+- acpi_lid_notifier_unregister(&lvds->lid_notifier);
+-
+- intel_connector_unregister(connector);
+-}
+-
+ /**
+ * intel_lvds_destroy - unregister and free LVDS structures
+ * @connector: connector to free
+@@ -627,8 +497,8 @@ static const struct drm_connector_funcs
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_digital_connector_atomic_get_property,
+ .atomic_set_property = intel_digital_connector_atomic_set_property,
+- .late_register = intel_lvds_connector_register,
+- .early_unregister = intel_lvds_connector_unregister,
++ .late_register = intel_connector_register,
++ .early_unregister = intel_connector_unregister,
+ .destroy = intel_lvds_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = intel_digital_connector_duplicate_state,
+@@ -1091,8 +961,6 @@ void intel_lvds_init(struct drm_i915_pri
+ * 2) check for VBT data
+ * 3) check to see if LVDS is already on
+ * if none of the above, no panel
+- * 4) make sure lid is open
+- * if closed, act like it's not there for now
+ */
+
+ /*
--- /dev/null
+From 2b16fd63059ab9a46d473620749672dc342e1d21 Mon Sep 17 00:00:00 2001
+From: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Date: Thu, 28 Jun 2018 22:45:38 +0200
+Subject: i2c: rcar: handle RXDMA HW behaviour on Gen3
+
+From: Wolfram Sang <wsa+renesas@sang-engineering.com>
+
+commit 2b16fd63059ab9a46d473620749672dc342e1d21 upstream.
+
+On Gen3, we can only do RXDMA once per transfer reliably. For that, we
+must reset the device, then we can have RXDMA once. This patch
+implements this. When there is no reset controller or the reset fails,
+RXDMA will be blocked completely. Otherwise, it will be disabled after
+the first RXDMA transfer. Based on a commit from the BSP by Hiromitsu
+Yamasaki, yet completely refactored to handle multiple read messages
+within one transfer.
+
+Signed-off-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
+Cc: stable@kernel.org
+Signed-off-by: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-rcar.c | 54 +++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 51 insertions(+), 3 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-rcar.c
++++ b/drivers/i2c/busses/i2c-rcar.c
+@@ -32,6 +32,7 @@
+ #include <linux/of_device.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
++#include <linux/reset.h>
+ #include <linux/slab.h>
+
+ /* register offsets */
+@@ -111,8 +112,9 @@
+ #define ID_ARBLOST (1 << 3)
+ #define ID_NACK (1 << 4)
+ /* persistent flags */
++#define ID_P_NO_RXDMA (1 << 30) /* HW forbids RXDMA sometimes */
+ #define ID_P_PM_BLOCKED (1 << 31)
+-#define ID_P_MASK ID_P_PM_BLOCKED
++#define ID_P_MASK (ID_P_PM_BLOCKED | ID_P_NO_RXDMA)
+
+ enum rcar_i2c_type {
+ I2C_RCAR_GEN1,
+@@ -140,6 +142,8 @@ struct rcar_i2c_priv {
+ struct dma_chan *dma_rx;
+ struct scatterlist sg;
+ enum dma_data_direction dma_direction;
++
++ struct reset_control *rstc;
+ };
+
+ #define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
+@@ -321,6 +325,11 @@ static void rcar_i2c_dma_unmap(struct rc
+ dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
+ sg_dma_len(&priv->sg), priv->dma_direction);
+
++ /* Gen3 can only do one RXDMA per transfer and we just completed it */
++ if (priv->devtype == I2C_RCAR_GEN3 &&
++ priv->dma_direction == DMA_FROM_DEVICE)
++ priv->flags |= ID_P_NO_RXDMA;
++
+ priv->dma_direction = DMA_NONE;
+ }
+
+@@ -358,8 +367,9 @@ static void rcar_i2c_dma(struct rcar_i2c
+ unsigned char *buf;
+ int len;
+
+- /* Do not use DMA if it's not available or for messages < 8 bytes */
+- if (IS_ERR(chan) || msg->len < 8)
++ /* Do various checks to see if DMA is feasible at all */
++ if (IS_ERR(chan) || msg->len < 8 ||
++ (read && priv->flags & ID_P_NO_RXDMA))
+ return;
+
+ if (read) {
+@@ -688,6 +698,25 @@ static void rcar_i2c_release_dma(struct
+ }
+ }
+
++/* I2C is a special case, we need to poll the status of a reset */
++static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
++{
++ int i, ret;
++
++ ret = reset_control_reset(priv->rstc);
++ if (ret)
++ return ret;
++
++ for (i = 0; i < LOOP_TIMEOUT; i++) {
++ ret = reset_control_status(priv->rstc);
++ if (ret == 0)
++ return 0;
++ udelay(1);
++ }
++
++ return -ETIMEDOUT;
++}
++
+ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs,
+ int num)
+@@ -699,6 +728,16 @@ static int rcar_i2c_master_xfer(struct i
+
+ pm_runtime_get_sync(dev);
+
++ /* Gen3 needs a reset before allowing RXDMA once */
++ if (priv->devtype == I2C_RCAR_GEN3) {
++ priv->flags |= ID_P_NO_RXDMA;
++ if (!IS_ERR(priv->rstc)) {
++ ret = rcar_i2c_do_reset(priv);
++ if (ret == 0)
++ priv->flags &= ~ID_P_NO_RXDMA;
++ }
++ }
++
+ rcar_i2c_init(priv);
+
+ ret = rcar_i2c_bus_barrier(priv);
+@@ -868,6 +907,15 @@ static int rcar_i2c_probe(struct platfor
+ if (ret < 0)
+ goto out_pm_put;
+
++ if (priv->devtype == I2C_RCAR_GEN3) {
++ priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
++ if (!IS_ERR(priv->rstc)) {
++ ret = reset_control_status(priv->rstc);
++ if (ret < 0)
++ priv->rstc = ERR_PTR(-ENOTSUPP);
++ }
++ }
++
+ /* Stay always active when multi-master to keep arbitration working */
+ if (of_property_read_bool(dev->of_node, "multi-master"))
+ priv->flags |= ID_P_PM_BLOCKED;
--- /dev/null
+From b4a4957d3d1c328b733fce783b7264996f866ad2 Mon Sep 17 00:00:00 2001
+From: "Michael J. Ruhl" <michael.j.ruhl@intel.com>
+Date: Thu, 20 Sep 2018 12:59:14 -0700
+Subject: IB/hfi1: Fix destroy_qp hang after a link down
+
+From: Michael J. Ruhl <michael.j.ruhl@intel.com>
+
+commit b4a4957d3d1c328b733fce783b7264996f866ad2 upstream.
+
+rvt_destroy_qp() cannot complete until all in process packets have
+been released from the underlying hardware. If a link down event
+occurs, an application can hang with a kernel stack similar to:
+
+cat /proc/<app PID>/stack
+ quiesce_qp+0x178/0x250 [hfi1]
+ rvt_reset_qp+0x23d/0x400 [rdmavt]
+ rvt_destroy_qp+0x69/0x210 [rdmavt]
+ ib_destroy_qp+0xba/0x1c0 [ib_core]
+ nvme_rdma_destroy_queue_ib+0x46/0x80 [nvme_rdma]
+ nvme_rdma_free_queue+0x3c/0xd0 [nvme_rdma]
+ nvme_rdma_destroy_io_queues+0x88/0xd0 [nvme_rdma]
+ nvme_rdma_error_recovery_work+0x52/0xf0 [nvme_rdma]
+ process_one_work+0x17a/0x440
+ worker_thread+0x126/0x3c0
+ kthread+0xcf/0xe0
+ ret_from_fork+0x58/0x90
+ 0xffffffffffffffff
+
+quiesce_qp() waits until all outstanding packets have been freed.
+This wait should be momentary. During a link down event, the cleanup
+handling does not ensure that all packets caught by the link down are
+flushed properly.
+
+This is caused by the fact that the freeze path and the link down
+event is handled the same. This is not correct. The freeze path
+waits until the HFI is unfrozen and then restarts PIO. A link down
+is not a freeze event. The link down path cannot restart the PIO
+until link is restored. If the PIO path is restarted before the link
+comes up, the application (QP) using the PIO path will hang (until
+link is restored).
+
+Fix by separating the linkdown path from the freeze path and use the
+link down path for link down events.
+
+Close a race condition sc_disable() by acquiring both the progress
+and release locks.
+
+Close a race condition in sc_stop() by moving the setting of the flag
+bits under the alloc lock.
+
+Cc: <stable@vger.kernel.org> # 4.9.x+
+Fixes: 7724105686e7 ("IB/hfi1: add driver files")
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/chip.c | 7 +++++-
+ drivers/infiniband/hw/hfi1/pio.c | 42 ++++++++++++++++++++++++++++++--------
+ drivers/infiniband/hw/hfi1/pio.h | 2 +
+ 3 files changed, 42 insertions(+), 9 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -6722,6 +6722,7 @@ void start_freeze_handling(struct hfi1_p
+ struct hfi1_devdata *dd = ppd->dd;
+ struct send_context *sc;
+ int i;
++ int sc_flags;
+
+ if (flags & FREEZE_SELF)
+ write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
+@@ -6732,11 +6733,13 @@ void start_freeze_handling(struct hfi1_p
+ /* notify all SDMA engines that they are going into a freeze */
+ sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
+
++ sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
++ SCF_LINK_DOWN : 0);
+ /* do halt pre-handling on all enabled send contexts */
+ for (i = 0; i < dd->num_send_contexts; i++) {
+ sc = dd->send_contexts[i].sc;
+ if (sc && (sc->flags & SCF_ENABLED))
+- sc_stop(sc, SCF_FROZEN | SCF_HALTED);
++ sc_stop(sc, sc_flags);
+ }
+
+ /* Send context are frozen. Notify user space */
+@@ -10646,6 +10649,8 @@ int set_link_state(struct hfi1_pportdata
+ add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
+
+ handle_linkup_change(dd, 1);
++ pio_kernel_linkup(dd);
++
+ ppd->host_link_state = HLS_UP_INIT;
+ break;
+ case HLS_UP_ARMED:
+--- a/drivers/infiniband/hw/hfi1/pio.c
++++ b/drivers/infiniband/hw/hfi1/pio.c
+@@ -942,20 +942,18 @@ void sc_free(struct send_context *sc)
+ void sc_disable(struct send_context *sc)
+ {
+ u64 reg;
+- unsigned long flags;
+ struct pio_buf *pbuf;
+
+ if (!sc)
+ return;
+
+ /* do all steps, even if already disabled */
+- spin_lock_irqsave(&sc->alloc_lock, flags);
++ spin_lock_irq(&sc->alloc_lock);
+ reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
+ reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
+ sc->flags &= ~SCF_ENABLED;
+ sc_wait_for_packet_egress(sc, 1);
+ write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
+- spin_unlock_irqrestore(&sc->alloc_lock, flags);
+
+ /*
+ * Flush any waiters. Once the context is disabled,
+@@ -965,7 +963,7 @@ void sc_disable(struct send_context *sc)
+ * proceed with the flush.
+ */
+ udelay(1);
+- spin_lock_irqsave(&sc->release_lock, flags);
++ spin_lock(&sc->release_lock);
+ if (sc->sr) { /* this context has a shadow ring */
+ while (sc->sr_tail != sc->sr_head) {
+ pbuf = &sc->sr[sc->sr_tail].pbuf;
+@@ -976,7 +974,8 @@ void sc_disable(struct send_context *sc)
+ sc->sr_tail = 0;
+ }
+ }
+- spin_unlock_irqrestore(&sc->release_lock, flags);
++ spin_unlock(&sc->release_lock);
++ spin_unlock_irq(&sc->alloc_lock);
+ }
+
+ /* return SendEgressCtxtStatus.PacketOccupancy */
+@@ -1199,11 +1198,39 @@ void pio_kernel_unfreeze(struct hfi1_dev
+ sc = dd->send_contexts[i].sc;
+ if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
+ continue;
++ if (sc->flags & SCF_LINK_DOWN)
++ continue;
+
+ sc_enable(sc); /* will clear the sc frozen flag */
+ }
+ }
+
++/**
++ * pio_kernel_linkup() - Re-enable send contexts after linkup event
++ * @dd: valid device data
++ *
++ * When the link goes down, the freeze path is taken. However, a link down
++ * event is different from a freeze because if the send context is re-enabled
++ * whoever is sending data will start sending data again, which will hang
++ * any QP that is sending data.
++ *
++ * The freeze path now looks at the type of event that occurs and takes this
++ * path for link down event.
++ */
++void pio_kernel_linkup(struct hfi1_devdata *dd)
++{
++ struct send_context *sc;
++ int i;
++
++ for (i = 0; i < dd->num_send_contexts; i++) {
++ sc = dd->send_contexts[i].sc;
++ if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER)
++ continue;
++
++ sc_enable(sc); /* will clear the sc link down flag */
++ }
++}
++
+ /*
+ * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
+ * Returns:
+@@ -1403,11 +1430,10 @@ void sc_stop(struct send_context *sc, in
+ {
+ unsigned long flags;
+
+- /* mark the context */
+- sc->flags |= flag;
+-
+ /* stop buffer allocations */
+ spin_lock_irqsave(&sc->alloc_lock, flags);
++ /* mark the context */
++ sc->flags |= flag;
+ sc->flags &= ~SCF_ENABLED;
+ spin_unlock_irqrestore(&sc->alloc_lock, flags);
+ wake_up(&sc->halt_wait);
+--- a/drivers/infiniband/hw/hfi1/pio.h
++++ b/drivers/infiniband/hw/hfi1/pio.h
+@@ -145,6 +145,7 @@ struct send_context {
+ #define SCF_IN_FREE 0x02
+ #define SCF_HALTED 0x04
+ #define SCF_FROZEN 0x08
++#define SCF_LINK_DOWN 0x10
+
+ struct send_context_info {
+ struct send_context *sc; /* allocated working context */
+@@ -312,6 +313,7 @@ void set_pio_integrity(struct send_conte
+ void pio_reset_all(struct hfi1_devdata *dd);
+ void pio_freeze(struct hfi1_devdata *dd);
+ void pio_kernel_unfreeze(struct hfi1_devdata *dd);
++void pio_kernel_linkup(struct hfi1_devdata *dd);
+
+ /* global PIO send control operations */
+ #define PSC_GLOBAL_ENABLE 0
--- /dev/null
+From 15d36fecd0bdc7510b70a0e5ec6671140b3fce0c Mon Sep 17 00:00:00 2001
+From: Dave Jiang <dave.jiang@intel.com>
+Date: Thu, 26 Jul 2018 16:37:15 -0700
+Subject: mm: disallow mappings that conflict for devm_memremap_pages()
+
+From: Dave Jiang <dave.jiang@intel.com>
+
+commit 15d36fecd0bdc7510b70a0e5ec6671140b3fce0c upstream.
+
+When pmem namespaces created are smaller than section size, this can
+cause an issue during removal and gpf was observed:
+
+ general protection fault: 0000 1 SMP PTI
+ CPU: 36 PID: 3941 Comm: ndctl Tainted: G W 4.14.28-1.el7uek.x86_64 #2
+ task: ffff88acda150000 task.stack: ffffc900233a4000
+ RIP: 0010:__put_page+0x56/0x79
+ Call Trace:
+ devm_memremap_pages_release+0x155/0x23a
+ release_nodes+0x21e/0x260
+ devres_release_all+0x3c/0x48
+ device_release_driver_internal+0x15c/0x207
+ device_release_driver+0x12/0x14
+ unbind_store+0xba/0xd8
+ drv_attr_store+0x27/0x31
+ sysfs_kf_write+0x3f/0x46
+ kernfs_fop_write+0x10f/0x18b
+ __vfs_write+0x3a/0x16d
+ vfs_write+0xb2/0x1a1
+ SyS_write+0x55/0xb9
+ do_syscall_64+0x79/0x1ae
+ entry_SYSCALL_64_after_hwframe+0x3d/0x0
+
+Add code to check whether we have a mapping already in the same section
+and prevent additional mappings from being created if that is the case.
+
+Link: http://lkml.kernel.org/r/152909478401.50143.312364396244072931.stgit@djiang5-desk3.ch.intel.com
+Signed-off-by: Dave Jiang <dave.jiang@intel.com>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Robert Elliott <elliott@hpe.com>
+Cc: Jeff Moyer <jmoyer@redhat.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/memremap.c | 18 +++++++++++++++++-
+ 1 file changed, 17 insertions(+), 1 deletion(-)
+
+--- a/kernel/memremap.c
++++ b/kernel/memremap.c
+@@ -355,10 +355,27 @@ void *devm_memremap_pages(struct device
+ struct dev_pagemap *pgmap;
+ struct page_map *page_map;
+ int error, nid, is_ram, i = 0;
++ struct dev_pagemap *conflict_pgmap;
+
+ align_start = res->start & ~(SECTION_SIZE - 1);
+ align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
+ - align_start;
++ align_end = align_start + align_size - 1;
++
++ conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
++ if (conflict_pgmap) {
++ dev_WARN(dev, "Conflicting mapping in same section\n");
++ put_dev_pagemap(conflict_pgmap);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
++ if (conflict_pgmap) {
++ dev_WARN(dev, "Conflicting mapping in same section\n");
++ put_dev_pagemap(conflict_pgmap);
++ return ERR_PTR(-ENOMEM);
++ }
++
+ is_ram = region_intersects(align_start, align_size,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
+
+@@ -396,7 +413,6 @@ void *devm_memremap_pages(struct device
+
+ mutex_lock(&pgmap_lock);
+ error = 0;
+- align_end = align_start + align_size - 1;
+
+ foreach_order_pgoff(res, order, pgoff) {
+ struct dev_pagemap *dup;
arc-build-get-rid-of-toolchain-check.patch
arc-build-don-t-set-cross_compile-in-arch-s-makefile.patch
hid-quirks-fix-support-for-apple-magic-keyboards.patch
+drm-i915-nuke-the-lvds-lid-notifier.patch
+staging-ccree-check-dma-pool-buf-null-before-free.patch
+mm-disallow-mappings-that-conflict-for-devm_memremap_pages.patch
+drm-i915-glk-add-quirk-for-glk-nuc-hdmi-port-issues.patch
+i2c-rcar-handle-rxdma-hw-behaviour-on-gen3.patch
+ib-hfi1-fix-destroy_qp-hang-after-a-link-down.patch
--- /dev/null
+From 2f7caf6b02145bd9cd9d0b56204f51a5fefe7790 Mon Sep 17 00:00:00 2001
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+Date: Sun, 7 Jan 2018 12:14:24 +0000
+Subject: staging: ccree: check DMA pool buf !NULL before free
+
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+
+commit 2f7caf6b02145bd9cd9d0b56204f51a5fefe7790 upstream.
+
+If we ran out of DMA pool buffers, we get into the unmap
+code path with a NULL before. Deal with this by checking
+the virtual mapping is not NULL.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
+Signed-off-by: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/staging/ccree/ssi_buffer_mgr.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/staging/ccree/ssi_buffer_mgr.c
++++ b/drivers/staging/ccree/ssi_buffer_mgr.c
+@@ -492,7 +492,8 @@ void ssi_buffer_mgr_unmap_blkcipher_requ
+ DMA_TO_DEVICE);
+ }
+ /* Release pool */
+- if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
++ if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI &&
++ req_ctx->mlli_params.mlli_virt_addr) {
+ dma_pool_free(req_ctx->mlli_params.curr_pool,
+ req_ctx->mlli_params.mlli_virt_addr,
+ req_ctx->mlli_params.mlli_dma_addr);