--- /dev/null
+From d6e57f19e251d7b48273899fbe606b5b839cadd8 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 28 Jun 2019 19:59:56 +0800
+Subject: bcache: add comments for mutex_lock(&b->write_lock)
+
+When accessing or modifying the BTREE_NODE_dirty bit, it is not always
+necessary to acquire b->write_lock. In mca_reap() acquiring b->write_lock
+is necessary, while in bch_btree_cache_free() it is not. This patch adds
+comments to explain why mutex_lock(&b->write_lock) is or is not needed
+for checking or clearing the BTREE_NODE_dirty bit there.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+---
+ drivers/md/bcache/btree.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 3fbadf2058a65..9788b2ee6638f 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -655,6 +655,11 @@ static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
+ up(&b->io_mutex);
+ }
+
++ /*
++ * BTREE_NODE_dirty might be cleared in btree_flush_btree() by
++ * __bch_btree_node_write(). To avoid an extra flush, acquire
++ * b->write_lock before checking BTREE_NODE_dirty bit.
++ */
+ mutex_lock(&b->write_lock);
+ if (btree_node_dirty(b))
+ __bch_btree_node_write(b, &cl);
+@@ -778,6 +783,11 @@ void bch_btree_cache_free(struct cache_set *c)
+ while (!list_empty(&c->btree_cache)) {
+ b = list_first_entry(&c->btree_cache, struct btree, list);
+
++ /*
++ * This function is called by cache_set_free(), no I/O
++ * request on cache now, it is unnecessary to acquire
++ * b->write_lock before clearing BTREE_NODE_dirty anymore.
++ */
+ if (btree_node_dirty(b)) {
+ btree_complete_write(b, btree_current_write(b));
+ clear_bit(BTREE_NODE_dirty, &b->flags);
+--
+2.20.1
+
--- /dev/null
+From 548fba48b31fd61b777f52f9b4f9e1e921b7fab3 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 28 Jun 2019 19:59:58 +0800
+Subject: bcache: fix race in btree_flush_write()
+
+There is a race between mca_reap(), btree_node_free() and the journal code
+in btree_flush_write(), which results in very rare and strange deadlocks or
+panics that are very hard to reproduce.
+
+Let me explain how the race happens. In btree_flush_write() the btree node
+with the oldest journal pin is selected, then it is flushed to the cache
+device; the select-and-flush is a two-step operation. Between these two
+steps, the following may happen inside the race window,
+- The selected btree node is reaped by mca_reap() and its memory is
+  allocated to another requester for another btree node.
+- The selected btree node is selected, flushed and released by the mca
+  shrink callback bch_mca_scan().
+When btree_flush_write() tries to flush the selected btree node, it first
+takes b->write_lock with mutex_lock(). If the race happens and the memory
+of the selected btree node has been allocated to another btree node whose
+write_lock is already held, a deadlock very probably happens here. A worse
+case is that the memory of the selected btree node has been released; then
+any reference to this btree node (e.g. b->write_lock) will trigger a NULL
+pointer dereference panic.
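+
+Roughly, the problematic interleaving inside that window looks like this
+(a simplified sketch, not an exact call trace):
+
+  btree_flush_write()               mca_reap() / bch_mca_scan()
+  --------------------------------  --------------------------------
+  select btree node b
+                                    reap or free the selected node b
+  mutex_lock(&b->write_lock)        b now belongs to another btree
+                                    node or its memory is freed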
+
+This race was introduced in commit cafe56359144 ("bcache: A block layer
+cache"), and enlarged by commit c4dc2497d50d ("bcache: fix high CPU
+occupancy during journal"), which selected 128 btree nodes and flushed
+them one-by-one in a quite long time period.
+
+Such a race was not easy to reproduce before. On a Lenovo SR650 server
+with 48 Xeon cores, with one NVMe SSD configured as the cache device and
+an MD raid0 device assembled from 3 NVMe SSDs as the backing device, this
+race can be observed roughly once every 10,000 calls to
+btree_flush_write(). Both the deadlock and the kernel panic happened as an
+aftermath of the race.
+
+The idea of the fix is to add a btree flag BTREE_NODE_journal_flush. It
+is set when btree nodes are selected, and cleared after the btree nodes
+are flushed. Then when mca_reap() encounters a btree node with this bit
+set, the node is skipped. Since mca_reap() only reaps btree nodes without
+the BTREE_NODE_journal_flush flag, such a race is avoided.
+
+One corner case should be noticed, and that is btree_node_free(). It might
+be called in some error handling code paths. For example, consider the
+following code piece from btree_split(),
+ 2149 err_free2:
+ 2150 bkey_put(b->c, &n2->key);
+ 2151 btree_node_free(n2);
+ 2152 rw_unlock(true, n2);
+ 2153 err_free1:
+ 2154 bkey_put(b->c, &n1->key);
+ 2155 btree_node_free(n1);
+ 2156 rw_unlock(true, n1);
+At lines 2151 and 2155, the btree nodes n2 and n1 are released without
+going through mca_reap(), so BTREE_NODE_journal_flush also needs to be
+checked here. If btree_node_free() is called directly in such an error
+handling path and the selected btree node has the BTREE_NODE_journal_flush
+bit set, just delay for 1 us and retry. In this case the btree node won't
+be skipped; it just retries until the BTREE_NODE_journal_flush bit is
+cleared, and then frees the btree node memory.
+
+Fixes: cafe56359144 ("bcache: A block layer cache")
+Signed-off-by: Coly Li <colyli@suse.de>
+Reported-and-tested-by: kbuild test robot <lkp@intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+---
+ drivers/md/bcache/btree.c | 28 +++++++++++++++++++++++++++-
+ drivers/md/bcache/btree.h | 2 ++
+ drivers/md/bcache/journal.c | 7 +++++++
+ 3 files changed, 36 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 9788b2ee6638f..5cf3247e8afb2 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -35,7 +35,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/sched/clock.h>
+ #include <linux/rculist.h>
+-
++#include <linux/delay.h>
+ #include <trace/events/bcache.h>
+
+ /*
+@@ -655,12 +655,25 @@ static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
+ up(&b->io_mutex);
+ }
+
++retry:
+ /*
+ * BTREE_NODE_dirty might be cleared in btree_flush_btree() by
+ * __bch_btree_node_write(). To avoid an extra flush, acquire
+ * b->write_lock before checking BTREE_NODE_dirty bit.
+ */
+ mutex_lock(&b->write_lock);
++ /*
++ * If this btree node is selected in btree_flush_write() by journal
++ * code, delay and retry until the node is flushed by journal code
++ * and BTREE_NODE_journal_flush bit cleared by btree_flush_write().
++ */
++ if (btree_node_journal_flush(b)) {
++ pr_debug("bnode %p is flushing by journal, retry", b);
++ mutex_unlock(&b->write_lock);
++ udelay(1);
++ goto retry;
++ }
++
+ if (btree_node_dirty(b))
+ __bch_btree_node_write(b, &cl);
+ mutex_unlock(&b->write_lock);
+@@ -1077,7 +1090,20 @@ static void btree_node_free(struct btree *b)
+
+ BUG_ON(b == b->c->root);
+
++retry:
+ mutex_lock(&b->write_lock);
++ /*
++ * If the btree node is selected and flushing in btree_flush_write(),
++ * delay and retry until the BTREE_NODE_journal_flush bit cleared,
++ * then it is safe to free the btree node here. Otherwise this btree
++ * node will be in race condition.
++ */
++ if (btree_node_journal_flush(b)) {
++ mutex_unlock(&b->write_lock);
++ pr_debug("bnode %p journal_flush set, retry", b);
++ udelay(1);
++ goto retry;
++ }
+
+ if (btree_node_dirty(b)) {
+ btree_complete_write(b, btree_current_write(b));
+diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
+index d1c72ef64edf5..76cfd121a4861 100644
+--- a/drivers/md/bcache/btree.h
++++ b/drivers/md/bcache/btree.h
+@@ -158,11 +158,13 @@ enum btree_flags {
+ BTREE_NODE_io_error,
+ BTREE_NODE_dirty,
+ BTREE_NODE_write_idx,
++ BTREE_NODE_journal_flush,
+ };
+
+ BTREE_FLAG(io_error);
+ BTREE_FLAG(dirty);
+ BTREE_FLAG(write_idx);
++BTREE_FLAG(journal_flush);
+
+ static inline struct btree_write *btree_current_write(struct btree *b)
+ {
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index cae2aff5e27ae..33556acdcf9cd 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -405,6 +405,7 @@ static void btree_flush_write(struct cache_set *c)
+ retry:
+ best = NULL;
+
++ mutex_lock(&c->bucket_lock);
+ for_each_cached_btree(b, c, i)
+ if (btree_current_write(b)->journal) {
+ if (!best)
+@@ -417,9 +418,14 @@ retry:
+ }
+
+ b = best;
++ if (b)
++ set_btree_node_journal_flush(b);
++ mutex_unlock(&c->bucket_lock);
++
+ if (b) {
+ mutex_lock(&b->write_lock);
+ if (!btree_current_write(b)->journal) {
++ clear_bit(BTREE_NODE_journal_flush, &b->flags);
+ mutex_unlock(&b->write_lock);
+ /* We raced */
+ atomic_long_inc(&c->retry_flush_write);
+@@ -427,6 +433,7 @@ retry:
+ }
+
+ __bch_btree_node_write(b, NULL);
++ clear_bit(BTREE_NODE_journal_flush, &b->flags);
+ mutex_unlock(&b->write_lock);
+ }
+ }
+--
+2.20.1
+
--- /dev/null
+From 99c3e0db0ff67a4f8f24248d9f54db2e965b8a04 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Fri, 28 Jun 2019 19:59:55 +0800
+Subject: bcache: only clear BTREE_NODE_dirty bit when it is set
+
+In bch_btree_cache_free() and btree_node_free(), the BTREE_NODE_dirty bit
+is always cleared no matter whether the btree node is dirty or not. The
+code looks like this,
+ if (btree_node_dirty(b))
+ btree_complete_write(b, btree_current_write(b));
+ clear_bit(BTREE_NODE_dirty, &b->flags);
+
+Indeed, if btree_node_dirty(b) returns false, the BTREE_NODE_dirty bit is
+already cleared, so it is unnecessary to clear the bit again.
+
+This patch only clears BTREE_NODE_dirty when btree_node_dirty(b) is
+true (the bit is set), to save a few CPU cycles.
+
+Signed-off-by: Coly Li <colyli@suse.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+---
+ drivers/md/bcache/btree.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 773f5fdad25fb..3fbadf2058a65 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -778,10 +778,10 @@ void bch_btree_cache_free(struct cache_set *c)
+ while (!list_empty(&c->btree_cache)) {
+ b = list_first_entry(&c->btree_cache, struct btree, list);
+
+- if (btree_node_dirty(b))
++ if (btree_node_dirty(b)) {
+ btree_complete_write(b, btree_current_write(b));
+- clear_bit(BTREE_NODE_dirty, &b->flags);
+-
++ clear_bit(BTREE_NODE_dirty, &b->flags);
++ }
+ mca_data_free(b);
+ }
+
+@@ -1069,9 +1069,10 @@ static void btree_node_free(struct btree *b)
+
+ mutex_lock(&b->write_lock);
+
+- if (btree_node_dirty(b))
++ if (btree_node_dirty(b)) {
+ btree_complete_write(b, btree_current_write(b));
+- clear_bit(BTREE_NODE_dirty, &b->flags);
++ clear_bit(BTREE_NODE_dirty, &b->flags);
++ }
+
+ mutex_unlock(&b->write_lock);
+
+--
+2.20.1
+
--- /dev/null
+From fa2b5cfb0e93d67d0a3d56b6af2b663967e4f34e Mon Sep 17 00:00:00 2001
+From: John Harrison <John.C.Harrison@Intel.com>
+Date: Mon, 17 Jun 2019 18:01:07 -0700
+Subject: drm/i915: Add whitelist workarounds for ICL
+
+[ Upstream commit 7b3d406310983a89ed7a1ecdd115efbe12b0ded5 ]
+
+Updated whitelist table for ICL.
+
+v2: Reduce changes to just those required for media driver until
+the selftest can be updated to support the new features of the
+other entries.
+
+Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
+Signed-off-by: Robert M. Fosha <robert.m.fosha@intel.com>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Cc: Chris Wilson <chris@chris-wilson.co.uk>
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190618010108.27499-4-John.C.Harrison@Intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/intel_workarounds.c | 38 +++++++++++++++++-------
+ 1 file changed, 27 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
+index be3688908f0ce..efea5a18fa6db 100644
+--- a/drivers/gpu/drm/i915/intel_workarounds.c
++++ b/drivers/gpu/drm/i915/intel_workarounds.c
+@@ -1097,17 +1097,33 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
+ {
+ struct i915_wa_list *w = &engine->whitelist;
+
+- if (engine->class != RENDER_CLASS)
+- return;
+-
+- /* WaAllowUMDToModifyHalfSliceChicken7:icl */
+- whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
+-
+- /* WaAllowUMDToModifySamplerMode:icl */
+- whitelist_reg(w, GEN10_SAMPLER_MODE);
+-
+- /* WaEnableStateCacheRedirectToCS:icl */
+- whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
++ switch (engine->class) {
++ case RENDER_CLASS:
++ /* WaAllowUMDToModifyHalfSliceChicken7:icl */
++ whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
++
++ /* WaAllowUMDToModifySamplerMode:icl */
++ whitelist_reg(w, GEN10_SAMPLER_MODE);
++
++ /* WaEnableStateCacheRedirectToCS:icl */
++ whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
++ break;
++
++ case VIDEO_DECODE_CLASS:
++ /* hucStatusRegOffset */
++ whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
++ RING_FORCE_TO_NONPRIV_RD);
++ /* hucUKernelHdrInfoRegOffset */
++ whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
++ RING_FORCE_TO_NONPRIV_RD);
++ /* hucStatus2RegOffset */
++ whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
++ RING_FORCE_TO_NONPRIV_RD);
++ break;
++
++ default:
++ break;
++ }
+ }
+
+ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
+--
+2.20.1
+
--- /dev/null
+From bc0e23c9fbfff295ff8cf8de6550cbbb6edc10f6 Mon Sep 17 00:00:00 2001
+From: Kenneth Graunke <kenneth@whitecape.org>
+Date: Tue, 25 Jun 2019 10:06:55 +0100
+Subject: drm/i915: Disable SAMPLER_STATE prefetching on all Gen11 steppings.
+
+[ Upstream commit 248f883db61283b4f5a1c92a5e27277377b09f16 ]
+
+The Demand Prefetch workaround (binding table prefetching) only applies
+to Icelake A0/B0. But the Sampler Prefetch workaround needs to be
+applied to all Gen11 steppings, according to a programming note in the
+SARCHKMD documentation.
+
+Using the Intel Gallium driver, I have seen intermittent failures in
+the dEQP-GLES31.functional.copy_image.non_compressed.* tests. After
+applying this workaround, the tests reliably pass.
+
+v2: Remove the overlap with a pre-production w/a
+
+BSpec: 9663
+Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: stable@vger.kernel.org
+Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190625090655.19220-1-chris@chris-wilson.co.uk
+(cherry picked from commit f9a393875d3af13cc3267477746608dadb7f17c1)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/intel_workarounds.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
+index 841b8e515f4d6..2fb70fab2d1c6 100644
+--- a/drivers/gpu/drm/i915/intel_workarounds.c
++++ b/drivers/gpu/drm/i915/intel_workarounds.c
+@@ -1167,8 +1167,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+ wa_write_or(wal,
+ GEN7_SARCHKMD,
+- GEN7_DISABLE_DEMAND_PREFETCH |
+- GEN7_DISABLE_SAMPLER_PREFETCH);
++ GEN7_DISABLE_DEMAND_PREFETCH);
++
++ /* Wa_1606682166:icl */
++ wa_write_or(wal,
++ GEN7_SARCHKMD,
++ GEN7_DISABLE_SAMPLER_PREFETCH);
+ }
+
+ if (IS_GEN_RANGE(i915, 9, 11)) {
+--
+2.20.1
+
--- /dev/null
+From 35eca562038173aa73eafaf8918eb055c511b998 Mon Sep 17 00:00:00 2001
+From: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
+Date: Fri, 28 Jun 2019 15:07:20 +0300
+Subject: drm/i915/icl: whitelist PS_(DEPTH|INVOCATION)_COUNT
+
+[ Upstream commit cf8f9aa1eda7d916bd23f6b8c226404deb11690c ]
+
+The same tests failing on CFL+ platforms are also failing on ICL.
+Documentation doesn't list the
+WaAllowPMDepthAndInvocationCountAccessFromUMD workaround for ICL but
+applying it fixes the same tests as CFL.
+
+v2: Use only one whitelist entry (Lionel)
+
+Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
+Tested-by: Anuj Phogat <anuj.phogat@gmail.com>
+Cc: stable@vger.kernel.org # 6883eab27481: drm/i915: Support flags in whitlist WAs
+Cc: stable@vger.kernel.org
+Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190628120720.21682-4-lionel.g.landwerlin@intel.com
+(cherry picked from commit 3fe0107e45ab396342497e06b8924cdd485cde3b)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/intel_workarounds.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
+index efea5a18fa6db..edd57a5e0495f 100644
+--- a/drivers/gpu/drm/i915/intel_workarounds.c
++++ b/drivers/gpu/drm/i915/intel_workarounds.c
+@@ -1107,6 +1107,19 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
+
+ /* WaEnableStateCacheRedirectToCS:icl */
+ whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
++
++ /*
++ * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
++ *
++ * This covers 4 register which are next to one another :
++ * - PS_INVOCATION_COUNT
++ * - PS_INVOCATION_COUNT_UDW
++ * - PS_DEPTH_COUNT
++ * - PS_DEPTH_COUNT_UDW
++ */
++ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
++ RING_FORCE_TO_NONPRIV_RD |
++ RING_FORCE_TO_NONPRIV_RANGE_4);
+ break;
+
+ case VIDEO_DECODE_CLASS:
+--
+2.20.1
+
--- /dev/null
+From bfa4288ccacc6c5b4f9e809001226f69bc513ee0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Wed, 17 Jul 2019 14:45:36 +0300
+Subject: drm/i915: Make sure cdclk is high enough for DP audio on VLV/CHV
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit a8f196a0fa6391a436f63f360a1fb57031fdf26c ]
+
+On VLV/CHV there is some kind of linkage between the cdclk frequency
+and the DP link frequency. The spec says:
+"For DP audio configuration, cdclk frequency shall be set to
+ meet the following requirements:
+ DP Link Frequency(MHz) | Cdclk frequency(MHz)
+ 270 | 320 or higher
+ 162 | 200 or higher"
+
+I suspect that would more accurately be expressed as
+"cdclk >= DP link clock", and in any case we can express it like
+that in the code because of the limited set of cdclk (200, 266,
+320, 400 MHz) and link frequencies (162 and 270 MHz) we support.
+
+Without this we can end up in a situation where the cdclk is too low
+and enabling DP audio will kill the pipe. This happens e.g. with
+2560x1440 modes where the 266 MHz cdclk is sufficient to pump the
+pixels (241.5 MHz dotclock) but is too low for DP audio due to the
+link frequency being 270 MHz.
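+
+With the clamp below, min_cdclk = max(port_clock, min_cdclk), so a
+270 MHz link clock raises the minimum above the 266 MHz step and the
+next supported cdclk (320 MHz) gets picked, matching the table above.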
+
+v2: Spell out the cdclk and link frequencies we actually support
+
+Cc: stable@vger.kernel.org
+Tested-by: Stefan Gottwald <gottwald@igel.com>
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111149
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190717114536.22937-1-ville.syrjala@linux.intel.com
+Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
+(cherry picked from commit bffb31f73b29a60ef693842d8744950c2819851d)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/intel_cdclk.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
+index ae40a8679314e..fd5236da039fb 100644
+--- a/drivers/gpu/drm/i915/intel_cdclk.c
++++ b/drivers/gpu/drm/i915/intel_cdclk.c
+@@ -2269,6 +2269,17 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
+ if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
+ min_cdclk = max(2 * 96000, min_cdclk);
+
++ /*
++ * "For DP audio configuration, cdclk frequency shall be set to
++ * meet the following requirements:
++ * DP Link Frequency(MHz) | Cdclk frequency(MHz)
++ * 270 | 320 or higher
++ * 162 | 200 or higher"
++ */
++ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
++ intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio)
++ min_cdclk = max(crtc_state->port_clock, min_cdclk);
++
+ /*
+ * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
+ * than 320000KHz.
+--
+2.20.1
+
--- /dev/null
+From d8a7f089233e781b0567495e7cee75a5489c18ee Mon Sep 17 00:00:00 2001
+From: John Harrison <John.C.Harrison@Intel.com>
+Date: Mon, 17 Jun 2019 18:01:05 -0700
+Subject: drm/i915: Support flags in whitlist WAs
+
+[ Upstream commit 6883eab274813d158bfcfb499aa225ece61c0f29 ]
+
+Newer hardware adds flags to the whitelist work-around register. These
+allow per access direction privileges and ranges.
+
+Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
+Signed-off-by: Robert M. Fosha <robert.m.fosha@intel.com>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Cc: Chris Wilson <chris@chris-wilson.co.uk>
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190618010108.27499-2-John.C.Harrison@Intel.com
+(cherry picked from commit 5380d0b781c491d94b4f4690ecf9762c1946c4ec)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/i915_reg.h | 7 +++++++
+ drivers/gpu/drm/i915/intel_workarounds.c | 9 ++++++++-
+ 2 files changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 13d6bd4e17b20..cf748b80e6401 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -2510,6 +2510,13 @@ enum i915_power_well_id {
+ #define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
+
+ #define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
++#define RING_FORCE_TO_NONPRIV_RW (0 << 28) /* CFL+ & Gen11+ */
++#define RING_FORCE_TO_NONPRIV_RD (1 << 28)
++#define RING_FORCE_TO_NONPRIV_WR (2 << 28)
++#define RING_FORCE_TO_NONPRIV_RANGE_1 (0 << 0) /* CFL+ & Gen11+ */
++#define RING_FORCE_TO_NONPRIV_RANGE_4 (1 << 0)
++#define RING_FORCE_TO_NONPRIV_RANGE_16 (2 << 0)
++#define RING_FORCE_TO_NONPRIV_RANGE_64 (3 << 0)
+ #define RING_MAX_NONPRIV_SLOTS 12
+
+ #define GEN7_TLB_RD_ADDR _MMIO(0x4700)
+diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
+index 2fb70fab2d1c6..1db826b12774e 100644
+--- a/drivers/gpu/drm/i915/intel_workarounds.c
++++ b/drivers/gpu/drm/i915/intel_workarounds.c
+@@ -981,7 +981,7 @@ bool intel_gt_verify_workarounds(struct drm_i915_private *i915,
+ }
+
+ static void
+-whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
++whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
+ {
+ struct i915_wa wa = {
+ .reg = reg
+@@ -990,9 +990,16 @@ whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
+ if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
+ return;
+
++ wa.reg.reg |= flags;
+ _wa_add(wal, &wa);
+ }
+
++static void
++whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
++{
++ whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_RW);
++}
++
+ static void gen9_whitelist_build(struct i915_wa_list *w)
+ {
+ /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
+--
+2.20.1
+
--- /dev/null
+From 204276537b61e1bfcbc484ab59c8b65bffd0c669 Mon Sep 17 00:00:00 2001
+From: John Harrison <John.C.Harrison@Intel.com>
+Date: Mon, 17 Jun 2019 18:01:06 -0700
+Subject: drm/i915: Support whitelist workarounds on all engines
+
+[ Upstream commit ebd2de47a19f1c17ae47f8331aae3cd436766663 ]
+
+Newer hardware requires setting up whitelists on engines other than
+render. So, extend the whitelist code to support all engines.
+
+Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
+Signed-off-by: Robert M. Fosha <robert.m.fosha@intel.com>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Cc: Chris Wilson <chris@chris-wilson.co.uk>
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190618010108.27499-3-John.C.Harrison@Intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/intel_workarounds.c | 65 +++++++++++++++++-------
+ 1 file changed, 47 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
+index 1db826b12774e..0b80fde927899 100644
+--- a/drivers/gpu/drm/i915/intel_workarounds.c
++++ b/drivers/gpu/drm/i915/intel_workarounds.c
+@@ -1012,48 +1012,79 @@ static void gen9_whitelist_build(struct i915_wa_list *w)
+ whitelist_reg(w, GEN8_HDC_CHICKEN1);
+ }
+
+-static void skl_whitelist_build(struct i915_wa_list *w)
++static void skl_whitelist_build(struct intel_engine_cs *engine)
+ {
++ struct i915_wa_list *w = &engine->whitelist;
++
++ if (engine->class != RENDER_CLASS)
++ return;
++
+ gen9_whitelist_build(w);
+
+ /* WaDisableLSQCROPERFforOCL:skl */
+ whitelist_reg(w, GEN8_L3SQCREG4);
+ }
+
+-static void bxt_whitelist_build(struct i915_wa_list *w)
++static void bxt_whitelist_build(struct intel_engine_cs *engine)
+ {
+- gen9_whitelist_build(w);
++ if (engine->class != RENDER_CLASS)
++ return;
++
++ gen9_whitelist_build(&engine->whitelist);
+ }
+
+-static void kbl_whitelist_build(struct i915_wa_list *w)
++static void kbl_whitelist_build(struct intel_engine_cs *engine)
+ {
++ struct i915_wa_list *w = &engine->whitelist;
++
++ if (engine->class != RENDER_CLASS)
++ return;
++
+ gen9_whitelist_build(w);
+
+ /* WaDisableLSQCROPERFforOCL:kbl */
+ whitelist_reg(w, GEN8_L3SQCREG4);
+ }
+
+-static void glk_whitelist_build(struct i915_wa_list *w)
++static void glk_whitelist_build(struct intel_engine_cs *engine)
+ {
++ struct i915_wa_list *w = &engine->whitelist;
++
++ if (engine->class != RENDER_CLASS)
++ return;
++
+ gen9_whitelist_build(w);
+
+ /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
+ whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+ }
+
+-static void cfl_whitelist_build(struct i915_wa_list *w)
++static void cfl_whitelist_build(struct intel_engine_cs *engine)
+ {
+- gen9_whitelist_build(w);
++ if (engine->class != RENDER_CLASS)
++ return;
++
++ gen9_whitelist_build(&engine->whitelist);
+ }
+
+-static void cnl_whitelist_build(struct i915_wa_list *w)
++static void cnl_whitelist_build(struct intel_engine_cs *engine)
+ {
++ struct i915_wa_list *w = &engine->whitelist;
++
++ if (engine->class != RENDER_CLASS)
++ return;
++
+ /* WaEnablePreemptionGranularityControlByUMD:cnl */
+ whitelist_reg(w, GEN8_CS_CHICKEN1);
+ }
+
+-static void icl_whitelist_build(struct i915_wa_list *w)
++static void icl_whitelist_build(struct intel_engine_cs *engine)
+ {
++ struct i915_wa_list *w = &engine->whitelist;
++
++ if (engine->class != RENDER_CLASS)
++ return;
++
+ /* WaAllowUMDToModifyHalfSliceChicken7:icl */
+ whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
+
+@@ -1069,24 +1100,22 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *w = &engine->whitelist;
+
+- GEM_BUG_ON(engine->id != RCS0);
+-
+ wa_init_start(w, "whitelist");
+
+ if (IS_GEN(i915, 11))
+- icl_whitelist_build(w);
++ icl_whitelist_build(engine);
+ else if (IS_CANNONLAKE(i915))
+- cnl_whitelist_build(w);
++ cnl_whitelist_build(engine);
+ else if (IS_COFFEELAKE(i915))
+- cfl_whitelist_build(w);
++ cfl_whitelist_build(engine);
+ else if (IS_GEMINILAKE(i915))
+- glk_whitelist_build(w);
++ glk_whitelist_build(engine);
+ else if (IS_KABYLAKE(i915))
+- kbl_whitelist_build(w);
++ kbl_whitelist_build(engine);
+ else if (IS_BROXTON(i915))
+- bxt_whitelist_build(w);
++ bxt_whitelist_build(engine);
+ else if (IS_SKYLAKE(i915))
+- skl_whitelist_build(w);
++ skl_whitelist_build(engine);
+ else if (INTEL_GEN(i915) <= 8)
+ return;
+ else
+--
+2.20.1
+
--- /dev/null
+From b41ed2c47cbb5ae1172d46eff4f97133cfef02dd Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Mon, 8 Jul 2019 15:03:27 +0100
+Subject: drm/i915/userptr: Acquire the page lock around set_page_dirty()
+
+[ Upstream commit aa56a292ce623734ddd30f52d73f527d1f3529b5 ]
+
+set_page_dirty says:
+
+ For pages with a mapping this should be done under the page lock
+ for the benefit of asynchronous memory errors who prefer a
+ consistent dirty state. This rule can be broken in some special
+ cases, but should be better not to.
+
+Under those rules, it is only safe for us to use the plain set_page_dirty
+calls for shmemfs/anonymous memory. Userptr may be used with real
+mappings and so needs to use the locked version (set_page_dirty_lock).
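+
+For reference, set_page_dirty_lock() is roughly the locked form of the
+plain call, i.e.:
+
+	lock_page(page);
+	set_page_dirty(page);
+	unlock_page(page);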
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=203317
+Fixes: 5cc9ed4b9a7a ("drm/i915: Introduce mapping of user pages into video memory (userptr) ioctl")
+References: 6dcc693bc57f ("ext4: warn when page is dirtied without buffers")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190708140327.26825-1-chris@chris-wilson.co.uk
+(cherry picked from commit cb6d7c7dc7ff8cace666ddec66334117a6068ce2)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/i915_gem_userptr.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
+index 8079ea3af1039..b1fc15c7f5997 100644
+--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
++++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
+@@ -678,7 +678,15 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
+
+ for_each_sgt_page(page, sgt_iter, pages) {
+ if (obj->mm.dirty)
+- set_page_dirty(page);
++ /*
++ * As this may not be anonymous memory (e.g. shmem)
++ * but exist on a real mapping, we have to lock
++ * the page in order to dirty it -- holding
++ * the page reference is not sufficient to
++ * prevent the inode from being truncated.
++ * Play safe and take the lock.
++ */
++ set_page_dirty_lock(page);
+
+ mark_page_accessed(page);
+ put_page(page);
+--
+2.20.1
+
--- /dev/null
+From e61d374e92c9f7c8eb88eb68140016807ebce148 Mon Sep 17 00:00:00 2001
+From: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
+Date: Fri, 28 Jun 2019 15:07:19 +0300
+Subject: drm/i915: whitelist PS_(DEPTH|INVOCATION)_COUNT
+
+[ Upstream commit 6ce5bfe936ac31d5c52c4b1328d0bfda5f97e7ca ]
+
+CFL:C0+ changed the status of those registers which are now
+blacklisted by default.
+
+This is breaking a number of CTS tests on GL & Vulkan:
+
+ KHR-GL45.pipeline_statistics_query_tests_ARB.functional_fragment_shader_invocations (GL)
+
+ dEQP-VK.query_pool.statistics_query.fragment_shader_invocations.* (Vulkan)
+
+v2: Only use one whitelist entry (Lionel)
+
+Bspec: 14091
+Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
+Cc: stable@vger.kernel.org # 6883eab27481: drm/i915: Support flags in whitlist WAs
+Cc: stable@vger.kernel.org
+Acked-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190628120720.21682-3-lionel.g.landwerlin@intel.com
+(cherry picked from commit 2c903da50f5a9522b134e488bd0f92646c46f3c0)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/intel_workarounds.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
+index 0b80fde927899..be3688908f0ce 100644
+--- a/drivers/gpu/drm/i915/intel_workarounds.c
++++ b/drivers/gpu/drm/i915/intel_workarounds.c
+@@ -1061,10 +1061,25 @@ static void glk_whitelist_build(struct intel_engine_cs *engine)
+
+ static void cfl_whitelist_build(struct intel_engine_cs *engine)
+ {
++ struct i915_wa_list *w = &engine->whitelist;
++
+ if (engine->class != RENDER_CLASS)
+ return;
+
+- gen9_whitelist_build(&engine->whitelist);
++ gen9_whitelist_build(w);
++
++ /*
++ * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
++ *
++ * This covers 4 register which are next to one another :
++ * - PS_INVOCATION_COUNT
++ * - PS_INVOCATION_COUNT_UDW
++ * - PS_DEPTH_COUNT
++ * - PS_DEPTH_COUNT_UDW
++ */
++ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
++ RING_FORCE_TO_NONPRIV_RD |
++ RING_FORCE_TO_NONPRIV_RANGE_4);
+ }
+
+ static void cnl_whitelist_build(struct intel_engine_cs *engine)
+--
+2.20.1
+
--- /dev/null
+From c433a0e680746c2f330c0778a5db89578b692bb9 Mon Sep 17 00:00:00 2001
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Mon, 15 Jul 2019 12:45:28 -0400
+Subject: IB/hfi1: Unreserve a flushed OPFN request
+
+When an OPFN request is flushed, the request is completed without
+unreserving itself from the send queue. Subsequently, when a new
+request is posted, the following warning will be triggered:
+
+WARNING: CPU: 4 PID: 8130 at rdmavt/qp.c:1761 rvt_post_send+0x72a/0x880 [rdmavt]
+Call Trace:
+[<ffffffffbbb61e41>] dump_stack+0x19/0x1b
+[<ffffffffbb497688>] __warn+0xd8/0x100
+[<ffffffffbb4977cd>] warn_slowpath_null+0x1d/0x20
+[<ffffffffc01c941a>] rvt_post_send+0x72a/0x880 [rdmavt]
+[<ffffffffbb4dcabe>] ? account_entity_dequeue+0xae/0xd0
+[<ffffffffbb61d645>] ? __kmalloc+0x55/0x230
+[<ffffffffc04e1a4c>] ib_uverbs_post_send+0x37c/0x5d0 [ib_uverbs]
+[<ffffffffc04e5e36>] ? rdma_lookup_put_uobject+0x26/0x60 [ib_uverbs]
+[<ffffffffc04dbce6>] ib_uverbs_write+0x286/0x460 [ib_uverbs]
+[<ffffffffbb6f9457>] ? security_file_permission+0x27/0xa0
+[<ffffffffbb641650>] vfs_write+0xc0/0x1f0
+[<ffffffffbb64246f>] SyS_write+0x7f/0xf0
+[<ffffffffbbb74ddb>] system_call_fastpath+0x22/0x27
+
+This patch fixes the problem by moving rvt_qp_wqe_unreserve() into
+rvt_qp_complete_swqe() to simplify the code and make it less
+error-prone.
+
+Fixes: ca95f802ef51 ("IB/hfi1: Unreserve a reserved request when it is completed")
+Link: https://lore.kernel.org/r/20190715164528.74174.31364.stgit@awfm-01.aw.intel.com
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+---
+ drivers/infiniband/hw/hfi1/rc.c | 2 --
+ include/rdma/rdmavt_qp.h | 9 ++++-----
+ 2 files changed, 4 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
+index 235bdbc706acc..5c0d90418e8c4 100644
+--- a/drivers/infiniband/hw/hfi1/rc.c
++++ b/drivers/infiniband/hw/hfi1/rc.c
+@@ -1835,7 +1835,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
+ cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
+ break;
+ trdma_clean_swqe(qp, wqe);
+- rvt_qp_wqe_unreserve(qp, wqe);
+ trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
+ rvt_qp_complete_swqe(qp,
+ wqe,
+@@ -1882,7 +1881,6 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
+ if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
+ cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+ trdma_clean_swqe(qp, wqe);
+- rvt_qp_wqe_unreserve(qp, wqe);
+ trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
+ rvt_qp_complete_swqe(qp,
+ wqe,
+diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
+index 84d0f36afc2f7..85544777587db 100644
+--- a/include/rdma/rdmavt_qp.h
++++ b/include/rdma/rdmavt_qp.h
+@@ -540,7 +540,7 @@ static inline void rvt_qp_wqe_reserve(
+ /**
+ * rvt_qp_wqe_unreserve - clean reserved operation
+ * @qp - the rvt qp
+- * @wqe - the send wqe
++ * @flags - send wqe flags
+ *
+ * This decrements the reserve use count.
+ *
+@@ -552,11 +552,9 @@ static inline void rvt_qp_wqe_reserve(
+ * the compiler does not juggle the order of the s_last
+ * ring index and the decrementing of s_reserved_used.
+ */
+-static inline void rvt_qp_wqe_unreserve(
+- struct rvt_qp *qp,
+- struct rvt_swqe *wqe)
++static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
+ {
+- if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
++ if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
+ atomic_dec(&qp->s_reserved_used);
+ /* insure no compiler re-order up to s_last change */
+ smp_mb__after_atomic();
+@@ -743,6 +741,7 @@ rvt_qp_complete_swqe(struct rvt_qp *qp,
+ u32 byte_len, last;
+ int flags = wqe->wr.send_flags;
+
++ rvt_qp_wqe_unreserve(qp, flags);
+ rvt_put_qp_swqe(qp, wqe);
+
+ need_completion =
+--
+2.20.1
+
--- /dev/null
+From 7ce945111c9855716f07333131e9d0b9afcfbf2a Mon Sep 17 00:00:00 2001
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Thu, 13 Jun 2019 08:30:44 -0400
+Subject: IB/rdmavt: Add new completion inline
+
+There is open-coded send completion logic all over the drivers.
+
+We need to convert them to this routine to enforce consistent ordering
+for completions. This routine fixes an ordering issue where the read of
+the SWQE fields necessary for creating the completion can race with a
+post send if the post send catches a send queue at the edge of being
+full. It is possible in that situation to read SWQE fields that are
+being written.
+
+This new routine ensures that SWQE fields are read prior to advancing
+the index that post send uses to determine queue fullness.
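+
+The required ordering, in rough outline (field names as in the
+rvt_qp_complete_swqe() inline added below):
+
+	wr_id = wqe->wr.wr_id;            /* read SWQE fields first */
+	byte_len = wqe->length;
+	last = rvt_qp_swqe_incr(qp, qp->s_last);
+	smp_store_release(&qp->s_last, last); /* then publish the index */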
+
+Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ include/rdma/rdmavt_qp.h | 72 ++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 72 insertions(+)
+
+diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
+index 68e38c20afc04..6014f17669071 100644
+--- a/include/rdma/rdmavt_qp.h
++++ b/include/rdma/rdmavt_qp.h
+@@ -737,6 +737,78 @@ static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
+ atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
+ }
+
++/**
++ * rvt_qp_sqwe_incr - increment ring index
++ * @qp: the qp
++ * @val: the starting value
++ *
++ * Return: the new value wrapping as appropriate
++ */
++static inline u32
++rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
++{
++ if (++val >= qp->s_size)
++ val = 0;
++ return val;
++}
++
++/**
++ * rvt_qp_complete_swqe - insert send completion
++ * @qp - the qp
++ * @wqe - the send wqe
++ * @opcode - wc operation (driver dependent)
++ * @status - completion status
++ *
++ * Update the s_last information, and then insert a send
++ * completion into the completion
++ * queue if the qp indicates it should be done.
++ *
++ * See IBTA 10.7.3.1 for info on completion
++ * control.
++ *
++ * Return: new last
++ */
++static inline u32
++rvt_qp_complete_swqe(struct rvt_qp *qp,
++ struct rvt_swqe *wqe,
++ enum ib_wc_opcode opcode,
++ enum ib_wc_status status)
++{
++ bool need_completion;
++ u64 wr_id;
++ u32 byte_len, last;
++ int flags = wqe->wr.send_flags;
++
++ rvt_put_qp_swqe(qp, wqe);
++
++ need_completion =
++ !(flags & RVT_SEND_RESERVE_USED) &&
++ (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
++ (flags & IB_SEND_SIGNALED) ||
++ status != IB_WC_SUCCESS);
++ if (need_completion) {
++ wr_id = wqe->wr.wr_id;
++ byte_len = wqe->length;
++ /* above fields required before writing s_last */
++ }
++ last = rvt_qp_swqe_incr(qp, qp->s_last);
++ /* see rvt_qp_is_avail() */
++ smp_store_release(&qp->s_last, last);
++ if (need_completion) {
++ struct ib_wc w = {
++ .wr_id = wr_id,
++ .status = status,
++ .opcode = opcode,
++ .qp = &qp->ibqp,
++ .byte_len = byte_len,
++ };
++
++ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &w,
++ status != IB_WC_SUCCESS);
++ }
++ return last;
++}
++
+ extern const int ib_rvt_state_ops[];
+
+ struct rvt_dev_info;
+--
+2.20.1
+
--- /dev/null
+From 59b66cad032a025b857b5f82fdebf3d1d75fc6a8 Mon Sep 17 00:00:00 2001
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Thu, 13 Jun 2019 08:30:52 -0400
+Subject: IB/{rdmavt, qib, hfi1}: Convert to new completion API
+
+Convert all completions to use the new completion routine that
+fixes a race between post send and completion where fields from
+an SWQE can be read after the SWQE has been freed.
+
+This patch also addresses issues reported in
+https://marc.info/?l=linux-kernel&m=155656897409107&w=2.
+
+The reserved operation path has no need for any barrier.
+
+The barrier for the other path is addressed by the
+smp_load_acquire() barrier.
+
+Cc: Andrea Parri <andrea.parri@amarulasolutions.com>
+Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+---
+ drivers/infiniband/hw/hfi1/rc.c | 26 ++++-----------------
+ drivers/infiniband/hw/qib/qib_rc.c | 26 ++++-----------------
+ drivers/infiniband/sw/rdmavt/qp.c | 31 ++++++++-----------------
+ include/rdma/rdmavt_qp.h | 36 ------------------------------
+ 4 files changed, 17 insertions(+), 102 deletions(-)
+
+diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
+index 7c8cfb149da09..235bdbc706acc 100644
+--- a/drivers/infiniband/hw/hfi1/rc.c
++++ b/drivers/infiniband/hw/hfi1/rc.c
+@@ -1830,23 +1830,14 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
+ }
+
+ while (qp->s_last != qp->s_acked) {
+- u32 s_last;
+-
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
+ if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
+ cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
+ break;
+ trdma_clean_swqe(qp, wqe);
+ rvt_qp_wqe_unreserve(qp, wqe);
+- s_last = qp->s_last;
+- trace_hfi1_qp_send_completion(qp, wqe, s_last);
+- if (++s_last >= qp->s_size)
+- s_last = 0;
+- qp->s_last = s_last;
+- /* see post_send() */
+- barrier();
+- rvt_put_qp_swqe(qp, wqe);
+- rvt_qp_swqe_complete(qp,
++ trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
++ rvt_qp_complete_swqe(qp,
+ wqe,
+ ib_hfi1_wc_opcode[wqe->wr.opcode],
+ IB_WC_SUCCESS);
+@@ -1890,19 +1881,10 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
+ trace_hfi1_rc_completion(qp, wqe->lpsn);
+ if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
+ cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+- u32 s_last;
+-
+ trdma_clean_swqe(qp, wqe);
+- rvt_put_qp_swqe(qp, wqe);
+ rvt_qp_wqe_unreserve(qp, wqe);
+- s_last = qp->s_last;
+- trace_hfi1_qp_send_completion(qp, wqe, s_last);
+- if (++s_last >= qp->s_size)
+- s_last = 0;
+- qp->s_last = s_last;
+- /* see post_send() */
+- barrier();
+- rvt_qp_swqe_complete(qp,
++ trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
++ rvt_qp_complete_swqe(qp,
+ wqe,
+ ib_hfi1_wc_opcode[wqe->wr.opcode],
+ IB_WC_SUCCESS);
+diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
+index 2ac4c67f5ba1a..8d9a94d6f6856 100644
+--- a/drivers/infiniband/hw/qib/qib_rc.c
++++ b/drivers/infiniband/hw/qib/qib_rc.c
+@@ -921,20 +921,11 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
+ rvt_add_retry_timer(qp);
+
+ while (qp->s_last != qp->s_acked) {
+- u32 s_last;
+-
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
+ if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
+ qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
+ break;
+- s_last = qp->s_last;
+- if (++s_last >= qp->s_size)
+- s_last = 0;
+- qp->s_last = s_last;
+- /* see post_send() */
+- barrier();
+- rvt_put_qp_swqe(qp, wqe);
+- rvt_qp_swqe_complete(qp,
++ rvt_qp_complete_swqe(qp,
+ wqe,
+ ib_qib_wc_opcode[wqe->wr.opcode],
+ IB_WC_SUCCESS);
+@@ -972,21 +963,12 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
+ * is finished.
+ */
+ if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
+- qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+- u32 s_last;
+-
+- rvt_put_qp_swqe(qp, wqe);
+- s_last = qp->s_last;
+- if (++s_last >= qp->s_size)
+- s_last = 0;
+- qp->s_last = s_last;
+- /* see post_send() */
+- barrier();
+- rvt_qp_swqe_complete(qp,
++ qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0)
++ rvt_qp_complete_swqe(qp,
+ wqe,
+ ib_qib_wc_opcode[wqe->wr.opcode],
+ IB_WC_SUCCESS);
+- } else
++ else
+ this_cpu_inc(*ibp->rvp.rc_delayed_comp);
+
+ qp->s_retry = qp->s_retry_cnt;
+diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
+index c5a50614a6c63..cb9e171d7e7bf 100644
+--- a/drivers/infiniband/sw/rdmavt/qp.c
++++ b/drivers/infiniband/sw/rdmavt/qp.c
+@@ -1856,10 +1856,9 @@ static inline int rvt_qp_is_avail(
+
+ /* see rvt_qp_wqe_unreserve() */
+ smp_mb__before_atomic();
+- reserved_used = atomic_read(&qp->s_reserved_used);
+ if (unlikely(reserved_op)) {
+ /* see rvt_qp_wqe_unreserve() */
+- smp_mb__before_atomic();
++ reserved_used = atomic_read(&qp->s_reserved_used);
+ if (reserved_used >= rdi->dparms.reserved_operations)
+ return -ENOMEM;
+ return 0;
+@@ -1867,14 +1866,13 @@ static inline int rvt_qp_is_avail(
+ /* non-reserved operations */
+ if (likely(qp->s_avail))
+ return 0;
+- slast = READ_ONCE(qp->s_last);
++ /* See rvt_qp_complete_swqe() */
++ slast = smp_load_acquire(&qp->s_last);
+ if (qp->s_head >= slast)
+ avail = qp->s_size - (qp->s_head - slast);
+ else
+ avail = slast - qp->s_head;
+
+- /* see rvt_qp_wqe_unreserve() */
+- smp_mb__before_atomic();
+ reserved_used = atomic_read(&qp->s_reserved_used);
+ avail = avail - 1 -
+ (rdi->dparms.reserved_operations - reserved_used);
+@@ -2667,27 +2665,16 @@ void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ enum ib_wc_status status)
+ {
+ u32 old_last, last;
+- struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
++ struct rvt_dev_info *rdi;
+
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
+ return;
++ rdi = ib_to_rvt(qp->ibqp.device);
+
+- last = qp->s_last;
+- old_last = last;
+- trace_rvt_qp_send_completion(qp, wqe, last);
+- if (++last >= qp->s_size)
+- last = 0;
+- trace_rvt_qp_send_completion(qp, wqe, last);
+- qp->s_last = last;
+- /* See post_send() */
+- barrier();
+- rvt_put_qp_swqe(qp, wqe);
+-
+- rvt_qp_swqe_complete(qp,
+- wqe,
+- rdi->wc_opcode[wqe->wr.opcode],
+- status);
+-
++ old_last = qp->s_last;
++ trace_rvt_qp_send_completion(qp, wqe, old_last);
++ last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode],
++ status);
+ if (qp->s_acked == old_last)
+ qp->s_acked = last;
+ if (qp->s_cur == old_last)
+diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
+index 6014f17669071..84d0f36afc2f7 100644
+--- a/include/rdma/rdmavt_qp.h
++++ b/include/rdma/rdmavt_qp.h
+@@ -565,42 +565,6 @@ static inline void rvt_qp_wqe_unreserve(
+
+ extern const enum ib_wc_opcode ib_rvt_wc_opcode[];
+
+-/**
+- * rvt_qp_swqe_complete() - insert send completion
+- * @qp - the qp
+- * @wqe - the send wqe
+- * @status - completion status
+- *
+- * Insert a send completion into the completion
+- * queue if the qp indicates it should be done.
+- *
+- * See IBTA 10.7.3.1 for info on completion
+- * control.
+- */
+-static inline void rvt_qp_swqe_complete(
+- struct rvt_qp *qp,
+- struct rvt_swqe *wqe,
+- enum ib_wc_opcode opcode,
+- enum ib_wc_status status)
+-{
+- if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
+- return;
+- if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
+- (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+- status != IB_WC_SUCCESS) {
+- struct ib_wc wc;
+-
+- memset(&wc, 0, sizeof(wc));
+- wc.wr_id = wqe->wr.wr_id;
+- wc.status = status;
+- wc.opcode = opcode;
+- wc.qp = &qp->ibqp;
+- wc.byte_len = wqe->length;
+- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
+- status != IB_WC_SUCCESS);
+- }
+-}
+-
+ /*
+ * Compare the lower 24 bits of the msn values.
+ * Returns an integer <, ==, or > than zero.
+--
+2.20.1
+
--- /dev/null
+From cff71a4b701723017888d57bcb42e091e68afde2 Mon Sep 17 00:00:00 2001
+From: Baolin Wang <baolin.wang@linaro.org>
+Date: Thu, 25 Jul 2019 11:14:22 +0800
+Subject: mmc: sdhci-sprd: Fix the incorrect soft reset operation when runtime
+ resuming
+
+[ Upstream commit c6303c5d52d5ec3e5bce2e6a5480fa2a1baa45e6 ]
+
+The SD host controller specification defines 3 types of software reset:
+software reset for the data line, software reset for the command line and
+software reset for all. "Software reset for all" means the reset affects
+the entire host controller except for the card detection circuit.
+
+In sdhci_runtime_resume_host() we always do a software "reset for all",
+which causes the Spreadtrum variant controller to work abnormally after
+resuming. To fix the problem, let's do a software reset for the data and
+the command part, rather than "for all".
+
+However, as sdhci_runtime_resume_host() is a common sdhci function and we
+don't want to change the behaviour for other variants, let's introduce a
+new in-parameter for it. This enables the caller to decide if a "reset for
+all" shall be done or not.
+
+Signed-off-by: Baolin Wang <baolin.wang@linaro.org>
+Fixes: fb8bd90f83c4 ("mmc: sdhci-sprd: Add Spreadtrum's initial host controller")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/host/sdhci-acpi.c | 2 +-
+ drivers/mmc/host/sdhci-esdhc-imx.c | 2 +-
+ drivers/mmc/host/sdhci-of-at91.c | 2 +-
+ drivers/mmc/host/sdhci-pci-core.c | 4 ++--
+ drivers/mmc/host/sdhci-pxav3.c | 2 +-
+ drivers/mmc/host/sdhci-s3c.c | 2 +-
+ drivers/mmc/host/sdhci-sprd.c | 2 +-
+ drivers/mmc/host/sdhci-xenon.c | 2 +-
+ drivers/mmc/host/sdhci.c | 4 ++--
+ drivers/mmc/host/sdhci.h | 2 +-
+ 10 files changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+index b3a130a9ee233..1604f512c7bd1 100644
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -883,7 +883,7 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
+
+ sdhci_acpi_byt_setting(&c->pdev->dev);
+
+- return sdhci_runtime_resume_host(c->host);
++ return sdhci_runtime_resume_host(c->host, 0);
+ }
+
+ #endif
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index c391510e9ef40..776a942162488 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -1705,7 +1705,7 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
+ esdhc_pltfm_set_clock(host, imx_data->actual_clock);
+ }
+
+- err = sdhci_runtime_resume_host(host);
++ err = sdhci_runtime_resume_host(host, 0);
+ if (err)
+ goto disable_ipg_clk;
+
+diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
+index d4993582f0f63..e7d1920729fbc 100644
+--- a/drivers/mmc/host/sdhci-of-at91.c
++++ b/drivers/mmc/host/sdhci-of-at91.c
+@@ -289,7 +289,7 @@ static int sdhci_at91_runtime_resume(struct device *dev)
+ }
+
+ out:
+- return sdhci_runtime_resume_host(host);
++ return sdhci_runtime_resume_host(host, 0);
+ }
+ #endif /* CONFIG_PM */
+
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index 4154ee11b47dc..267b90374fa48 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -167,7 +167,7 @@ static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip)
+
+ err_pci_runtime_suspend:
+ while (--i >= 0)
+- sdhci_runtime_resume_host(chip->slots[i]->host);
++ sdhci_runtime_resume_host(chip->slots[i]->host, 0);
+ return ret;
+ }
+
+@@ -181,7 +181,7 @@ static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
+ if (!slot)
+ continue;
+
+- ret = sdhci_runtime_resume_host(slot->host);
++ ret = sdhci_runtime_resume_host(slot->host, 0);
+ if (ret)
+ return ret;
+ }
+diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
+index 3ddecf4792958..e55037ceda734 100644
+--- a/drivers/mmc/host/sdhci-pxav3.c
++++ b/drivers/mmc/host/sdhci-pxav3.c
+@@ -554,7 +554,7 @@ static int sdhci_pxav3_runtime_resume(struct device *dev)
+ if (!IS_ERR(pxa->clk_core))
+ clk_prepare_enable(pxa->clk_core);
+
+- return sdhci_runtime_resume_host(host);
++ return sdhci_runtime_resume_host(host, 0);
+ }
+ #endif
+
+diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
+index 8e4a8ba33f050..f5753aef71511 100644
+--- a/drivers/mmc/host/sdhci-s3c.c
++++ b/drivers/mmc/host/sdhci-s3c.c
+@@ -745,7 +745,7 @@ static int sdhci_s3c_runtime_resume(struct device *dev)
+ clk_prepare_enable(busclk);
+ if (ourhost->cur_clk >= 0)
+ clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]);
+- ret = sdhci_runtime_resume_host(host);
++ ret = sdhci_runtime_resume_host(host, 0);
+ return ret;
+ }
+ #endif
+diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
+index fc892a8d882fd..53f3af53b3fba 100644
+--- a/drivers/mmc/host/sdhci-sprd.c
++++ b/drivers/mmc/host/sdhci-sprd.c
+@@ -497,7 +497,7 @@ static int sdhci_sprd_runtime_resume(struct device *dev)
+ return ret;
+ }
+
+- sdhci_runtime_resume_host(host);
++ sdhci_runtime_resume_host(host, 1);
+
+ return 0;
+ }
+diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
+index 8a18f14cf842d..1dea1ba66f7b4 100644
+--- a/drivers/mmc/host/sdhci-xenon.c
++++ b/drivers/mmc/host/sdhci-xenon.c
+@@ -638,7 +638,7 @@ static int xenon_runtime_resume(struct device *dev)
+ priv->restore_needed = false;
+ }
+
+- ret = sdhci_runtime_resume_host(host);
++ ret = sdhci_runtime_resume_host(host, 0);
+ if (ret)
+ goto out;
+ return 0;
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 59acf8e3331ee..a5dc5aae973e6 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -3320,7 +3320,7 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host)
+ }
+ EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
+
+-int sdhci_runtime_resume_host(struct sdhci_host *host)
++int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
+ {
+ struct mmc_host *mmc = host->mmc;
+ unsigned long flags;
+@@ -3331,7 +3331,7 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
+ host->ops->enable_dma(host);
+ }
+
+- sdhci_init(host, 0);
++ sdhci_init(host, soft_reset);
+
+ if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
+ mmc->ios.power_mode != MMC_POWER_OFF) {
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 199712e7adbb3..d2c7c9c436c97 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -781,7 +781,7 @@ void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
+ int sdhci_suspend_host(struct sdhci_host *host);
+ int sdhci_resume_host(struct sdhci_host *host);
+ int sdhci_runtime_suspend_host(struct sdhci_host *host);
+-int sdhci_runtime_resume_host(struct sdhci_host *host);
++int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset);
+ #endif
+
+ void sdhci_cqe_enable(struct mmc_host *mmc);
+--
+2.20.1
+
powerpc-tm-fix-restoring-fp-vmx-facility-incorrectly-on-interrupts.patch
batman-adv-fix-uninit-value-in-batadv_netlink_get_ifindex.patch
batman-adv-only-read-ogm-tvlv_len-after-buffer-len-check.patch
+bcache-only-clear-btree_node_dirty-bit-when-it-is-se.patch
+bcache-add-comments-for-mutex_lock-b-write_lock.patch
+bcache-fix-race-in-btree_flush_write.patch
+ib-rdmavt-add-new-completion-inline.patch
+ib-rdmavt-qib-hfi1-convert-to-new-completion-api.patch
+ib-hfi1-unreserve-a-flushed-opfn-request.patch
+drm-i915-disable-sampler_state-prefetching-on-all-ge.patch
+drm-i915-userptr-acquire-the-page-lock-around-set_pa.patch
+drm-i915-make-sure-cdclk-is-high-enough-for-dp-audio.patch
+mmc-sdhci-sprd-fix-the-incorrect-soft-reset-operatio.patch
+usb-chipidea-imx-add-imx7ulp-support.patch
+usb-chipidea-imx-fix-eprobe_defer-support-during-dri.patch
+virtio-s390-fix-race-on-airq_areas.patch
+drm-i915-support-flags-in-whitlist-was.patch
+drm-i915-support-whitelist-workarounds-on-all-engine.patch
+drm-i915-whitelist-ps_-depth-invocation-_count.patch
+drm-i915-add-whitelist-workarounds-for-icl.patch
+drm-i915-icl-whitelist-ps_-depth-invocation-_count.patch
--- /dev/null
+From e2fb9dd273f221d926cf3a679f721269ff8e66e4 Mon Sep 17 00:00:00 2001
+From: Peter Chen <peter.chen@nxp.com>
+Date: Sun, 28 Apr 2019 10:35:31 +0800
+Subject: usb: chipidea: imx: add imx7ulp support
+
+In this commit, we add CI_HDRC_PMQOS to prevent the system from entering
+idle on imx7ulp: if the system enters idle, the DMA will stop, so USB
+transfers cannot work in that case.
+
+Signed-off-by: Peter Chen <peter.chen@nxp.com>
+---
+ drivers/usb/chipidea/ci_hdrc_imx.c | 28 +++++++++++++++++++++++++++-
+ drivers/usb/chipidea/usbmisc_imx.c | 4 ++++
+ include/linux/usb/chipidea.h | 1 +
+ 3 files changed, 32 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
+index ceec8d5985d46..a76708501236d 100644
+--- a/drivers/usb/chipidea/ci_hdrc_imx.c
++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
+@@ -13,6 +13,7 @@
+ #include <linux/usb/of.h>
+ #include <linux/clk.h>
+ #include <linux/pinctrl/consumer.h>
++#include <linux/pm_qos.h>
+
+ #include "ci.h"
+ #include "ci_hdrc_imx.h"
+@@ -63,6 +64,11 @@ static const struct ci_hdrc_imx_platform_flag imx7d_usb_data = {
+ .flags = CI_HDRC_SUPPORTS_RUNTIME_PM,
+ };
+
++static const struct ci_hdrc_imx_platform_flag imx7ulp_usb_data = {
++ .flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
++ CI_HDRC_PMQOS,
++};
++
+ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+ { .compatible = "fsl,imx23-usb", .data = &imx23_usb_data},
+ { .compatible = "fsl,imx28-usb", .data = &imx28_usb_data},
+@@ -72,6 +78,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+ { .compatible = "fsl,imx6sx-usb", .data = &imx6sx_usb_data},
+ { .compatible = "fsl,imx6ul-usb", .data = &imx6ul_usb_data},
+ { .compatible = "fsl,imx7d-usb", .data = &imx7d_usb_data},
++ { .compatible = "fsl,imx7ulp-usb", .data = &imx7ulp_usb_data},
+ { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
+@@ -93,6 +100,8 @@ struct ci_hdrc_imx_data {
+ struct clk *clk_ahb;
+ struct clk *clk_per;
+ /* --------------------------------- */
++ struct pm_qos_request pm_qos_req;
++ const struct ci_hdrc_imx_platform_flag *plat_data;
+ };
+
+ /* Common functions shared by usbmisc drivers */
+@@ -309,6 +318,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ if (!data)
+ return -ENOMEM;
+
++ data->plat_data = imx_platform_flag;
++ pdata.flags |= imx_platform_flag->flags;
+ platform_set_drvdata(pdev, data);
+ data->usbmisc_data = usbmisc_get_init_data(dev);
+ if (IS_ERR(data->usbmisc_data))
+@@ -369,6 +380,11 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ }
+ }
+ }
++
++ if (pdata.flags & CI_HDRC_PMQOS)
++ pm_qos_add_request(&data->pm_qos_req,
++ PM_QOS_CPU_DMA_LATENCY, 0);
++
+ ret = imx_get_clks(dev);
+ if (ret)
+ goto disable_hsic_regulator;
+@@ -396,7 +412,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ usb_phy_init(pdata.usb_phy);
+ }
+
+- pdata.flags |= imx_platform_flag->flags;
+ if (pdata.flags & CI_HDRC_SUPPORTS_RUNTIME_PM)
+ data->supports_runtime_pm = true;
+
+@@ -439,6 +454,8 @@ err_clk:
+ disable_hsic_regulator:
+ if (data->hsic_pad_regulator)
+ ret = regulator_disable(data->hsic_pad_regulator);
++ if (pdata.flags & CI_HDRC_PMQOS)
++ pm_qos_remove_request(&data->pm_qos_req);
+ return ret;
+ }
+
+@@ -455,6 +472,8 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
+ if (data->override_phy_control)
+ usb_phy_shutdown(data->phy);
+ imx_disable_unprepare_clks(&pdev->dev);
++ if (data->plat_data->flags & CI_HDRC_PMQOS)
++ pm_qos_remove_request(&data->pm_qos_req);
+ if (data->hsic_pad_regulator)
+ regulator_disable(data->hsic_pad_regulator);
+
+@@ -480,6 +499,9 @@ static int __maybe_unused imx_controller_suspend(struct device *dev)
+ }
+
+ imx_disable_unprepare_clks(dev);
++ if (data->plat_data->flags & CI_HDRC_PMQOS)
++ pm_qos_remove_request(&data->pm_qos_req);
++
+ data->in_lpm = true;
+
+ return 0;
+@@ -497,6 +519,10 @@ static int __maybe_unused imx_controller_resume(struct device *dev)
+ return 0;
+ }
+
++ if (data->plat_data->flags & CI_HDRC_PMQOS)
++ pm_qos_add_request(&data->pm_qos_req,
++ PM_QOS_CPU_DMA_LATENCY, 0);
++
+ ret = imx_prepare_enable_clks(dev);
+ if (ret)
+ return ret;
+diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
+index d8b67e150b129..b7a5727d0c8a8 100644
+--- a/drivers/usb/chipidea/usbmisc_imx.c
++++ b/drivers/usb/chipidea/usbmisc_imx.c
+@@ -763,6 +763,10 @@ static const struct of_device_id usbmisc_imx_dt_ids[] = {
+ .compatible = "fsl,imx7d-usbmisc",
+ .data = &imx7d_usbmisc_ops,
+ },
++ {
++ .compatible = "fsl,imx7ulp-usbmisc",
++ .data = &imx7d_usbmisc_ops,
++ },
+ { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, usbmisc_imx_dt_ids);
+diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
+index 911e05af671ea..edd89b7c8f184 100644
+--- a/include/linux/usb/chipidea.h
++++ b/include/linux/usb/chipidea.h
+@@ -61,6 +61,7 @@ struct ci_hdrc_platform_data {
+ #define CI_HDRC_OVERRIDE_PHY_CONTROL BIT(12) /* Glue layer manages phy */
+ #define CI_HDRC_REQUIRES_ALIGNED_DMA BIT(13)
+ #define CI_HDRC_IMX_IS_HSIC BIT(14)
++#define CI_HDRC_PMQOS BIT(15)
+ enum usb_dr_mode dr_mode;
+ #define CI_HDRC_CONTROLLER_RESET_EVENT 0
+ #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
+--
+2.20.1
+
--- /dev/null
+From 36f54886c2ecb2c8260c1c473be26b2bcf0020a9 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Andr=C3=A9=20Draszik?= <git@andred.net>
+Date: Sat, 10 Aug 2019 16:07:58 +0100
+Subject: usb: chipidea: imx: fix EPROBE_DEFER support during driver probe
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+If driver probe needs to be deferred, e.g. because ci_hdrc_add_device()
+isn't ready yet, this driver currently misbehaves badly:
+ a) success is still reported to the driver core (meaning a 2nd
+ probe attempt will never be done), leaving the driver in
+ a dysfunctional state and the hardware unusable
+
+ b) driver remove / shutdown OOPSes:
+ [ 206.786916] Unable to handle kernel paging request at virtual address fffffdff
+ [ 206.794148] pgd = 880b9f82
+ [ 206.796890] [fffffdff] *pgd=abf5e861, *pte=00000000, *ppte=00000000
+ [ 206.803179] Internal error: Oops: 37 [#1] PREEMPT SMP ARM
+ [ 206.808581] Modules linked in: wl18xx evbug
+ [ 206.813308] CPU: 1 PID: 1 Comm: systemd-shutdow Not tainted 4.19.35+gf345c93b4195 #1
+ [ 206.821053] Hardware name: Freescale i.MX7 Dual (Device Tree)
+ [ 206.826813] PC is at ci_hdrc_remove_device+0x4/0x20
+ [ 206.831699] LR is at ci_hdrc_imx_remove+0x20/0xe8
+ [ 206.836407] pc : [<805cd4b0>] lr : [<805d62cc>] psr: 20000013
+ [ 206.842678] sp : a806be40 ip : 00000001 fp : 80adbd3c
+ [ 206.847906] r10: 80b1b794 r9 : 80d5dfe0 r8 : a8192c44
+ [ 206.853136] r7 : 80db93a0 r6 : a8192c10 r5 : a8192c00 r4 : a93a4a00
+ [ 206.859668] r3 : 00000000 r2 : a8192ce4 r1 : ffffffff r0 : fffffdfb
+ [ 206.866201] Flags: nzCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment none
+ [ 206.873341] Control: 10c5387d Table: a9e0c06a DAC: 00000051
+ [ 206.879092] Process systemd-shutdow (pid: 1, stack limit = 0xb271353c)
+ [ 206.885624] Stack: (0xa806be40 to 0xa806c000)
+ [ 206.889992] be40: a93a4a00 805d62cc a8192c1c a8170e10 a8192c10 8049a490 80d04d08 00000000
+ [ 206.898179] be60: 00000000 80d0da2c fee1dead 00000000 a806a000 00000058 00000000 80148b08
+ [ 206.906366] be80: 01234567 80148d8c a9858600 00000000 00000000 00000000 00000000 80d04d08
+ [ 206.914553] bea0: 00000000 00000000 a82741e0 a9858600 00000024 00000002 a9858608 00000005
+ [ 206.922740] bec0: 0000001e 8022c058 00000000 00000000 a806bf14 a9858600 00000000 a806befc
+ [ 206.930927] bee0: a806bf78 00000000 7ee12c30 8022c18c a806bef8 a806befc 00000000 00000001
+ [ 206.939115] bf00: 00000000 00000024 a806bf14 00000005 7ee13b34 7ee12c68 00000004 7ee13f20
+ [ 206.947302] bf20: 00000010 7ee12c7c 00000005 7ee12d04 0000000a 76e7dc00 00000001 80d0f140
+ [ 206.955490] bf40: ab637880 a974de40 60000013 80d0f140 ab6378a0 80d04d08 a8080470 a9858600
+ [ 206.963677] bf60: a9858600 00000000 00000000 8022c24c 00000000 80144310 00000000 00000000
+ [ 206.971864] bf80: 80101204 80d04d08 00000000 80d04d08 00000000 00000000 00000003 00000058
+ [ 206.980051] bfa0: 80101204 80101000 00000000 00000000 fee1dead 28121969 01234567 00000000
+ [ 206.988237] bfc0: 00000000 00000000 00000003 00000058 00000000 00000000 00000000 00000000
+ [ 206.996425] bfe0: 0049ffb0 7ee13d58 0048a84b 76f245a6 60000030 fee1dead 00000000 00000000
+ [ 207.004622] [<805cd4b0>] (ci_hdrc_remove_device) from [<805d62cc>] (ci_hdrc_imx_remove+0x20/0xe8)
+ [ 207.013509] [<805d62cc>] (ci_hdrc_imx_remove) from [<8049a490>] (device_shutdown+0x16c/0x218)
+ [ 207.022050] [<8049a490>] (device_shutdown) from [<80148b08>] (kernel_restart+0xc/0x50)
+ [ 207.029980] [<80148b08>] (kernel_restart) from [<80148d8c>] (sys_reboot+0xf4/0x1f0)
+ [ 207.037648] [<80148d8c>] (sys_reboot) from [<80101000>] (ret_fast_syscall+0x0/0x54)
+ [ 207.045308] Exception stack(0xa806bfa8 to 0xa806bff0)
+ [ 207.050368] bfa0: 00000000 00000000 fee1dead 28121969 01234567 00000000
+ [ 207.058554] bfc0: 00000000 00000000 00000003 00000058 00000000 00000000 00000000 00000000
+ [ 207.066737] bfe0: 0049ffb0 7ee13d58 0048a84b 76f245a6
+ [ 207.071799] Code: ebffffa8 e3a00000 e8bd8010 e92d4010 (e5904004)
+ [ 207.078021] ---[ end trace be47424e3fd46e9f ]---
+ [ 207.082647] Kernel panic - not syncing: Fatal exception
+ [ 207.087894] ---[ end Kernel panic - not syncing: Fatal exception ]---
+
+ c) the error path in combination with driver removal causes
+ imbalanced calls to the clk_*() and pm_*() APIs
+
+a) happens because the original intended return value is
+ overwritten (with 0) by the return code of
+ regulator_disable() in ci_hdrc_imx_probe()'s error path
+b) happens because ci_pdev is -EPROBE_DEFER, which causes
+ ci_hdrc_remove_device() to OOPS
+
+Fix a) by being more careful in ci_hdrc_imx_probe()'s error
+path and not overwriting the real error code
+
+Fix b) by calling the respective cleanup functions during
+remove only when needed (when ci_pdev != NULL, i.e. when
+everything was initialised correctly). This also has the
+side effect of not causing imbalanced clk_*() and pm_*()
+API calls as part of the error code path.
+
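+Reduced to a minimal sketch, the two rules the fix follows are "never
+clobber the first error" and "only undo what probe actually completed".
+The example_* names below are hypothetical and only illustrate the
+pattern, not the driver's full probe/remove paths:
+
+  #include <linux/err.h>
+  #include <linux/platform_device.h>
+  #include <linux/regulator/consumer.h>
+  #include <linux/slab.h>
+
+  struct example_data {
+          struct platform_device *child;
+          struct regulator *pad_regulator;   /* obtained earlier in a real driver */
+  };
+
+  static int example_probe(struct platform_device *pdev)
+  {
+          struct example_data *data;
+          int ret;
+
+          data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+          if (!data)
+                  return -ENOMEM;
+          platform_set_drvdata(pdev, data);
+
+          data->child = example_add_child(pdev);       /* hypothetical helper */
+          if (IS_ERR(data->child)) {
+                  ret = PTR_ERR(data->child);          /* may be -EPROBE_DEFER */
+                  goto err_cleanup;
+          }
+          return 0;
+
+  err_cleanup:
+          if (data->pad_regulator)
+                  /* return value ignored on purpose so ret survives */
+                  regulator_disable(data->pad_regulator);
+          data->child = NULL;                          /* remove() then skips teardown */
+          return ret;
+  }
+
+  static int example_remove(struct platform_device *pdev)
+  {
+          struct example_data *data = platform_get_drvdata(pdev);
+
+          if (data->child) {                           /* probe fully succeeded */
+                  example_remove_child(data->child);   /* hypothetical helper */
+                  if (data->pad_regulator)
+                          regulator_disable(data->pad_regulator);
+          }
+          return 0;
+  }
+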
+Fixes: 7c8e8909417e ("usb: chipidea: imx: add HSIC support")
+Signed-off-by: André Draszik <git@andred.net>
+Cc: stable <stable@vger.kernel.org>
+CC: Peter Chen <Peter.Chen@nxp.com>
+CC: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+CC: Shawn Guo <shawnguo@kernel.org>
+CC: Sascha Hauer <s.hauer@pengutronix.de>
+CC: Pengutronix Kernel Team <kernel@pengutronix.de>
+CC: Fabio Estevam <festevam@gmail.com>
+CC: NXP Linux Team <linux-imx@nxp.com>
+CC: linux-usb@vger.kernel.org
+CC: linux-arm-kernel@lists.infradead.org
+CC: linux-kernel@vger.kernel.org
+Link: https://lore.kernel.org/r/20190810150758.17694-1-git@andred.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/chipidea/ci_hdrc_imx.c | 19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
+index a76708501236d..5faae96735e62 100644
+--- a/drivers/usb/chipidea/ci_hdrc_imx.c
++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
+@@ -453,9 +453,11 @@ err_clk:
+ imx_disable_unprepare_clks(dev);
+ disable_hsic_regulator:
+ if (data->hsic_pad_regulator)
+- ret = regulator_disable(data->hsic_pad_regulator);
++ /* don't overwrite original ret (cf. EPROBE_DEFER) */
++ regulator_disable(data->hsic_pad_regulator);
+ if (pdata.flags & CI_HDRC_PMQOS)
+ pm_qos_remove_request(&data->pm_qos_req);
++ data->ci_pdev = NULL;
+ return ret;
+ }
+
+@@ -468,14 +470,17 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ }
+- ci_hdrc_remove_device(data->ci_pdev);
++ if (data->ci_pdev)
++ ci_hdrc_remove_device(data->ci_pdev);
+ if (data->override_phy_control)
+ usb_phy_shutdown(data->phy);
+- imx_disable_unprepare_clks(&pdev->dev);
+- if (data->plat_data->flags & CI_HDRC_PMQOS)
+- pm_qos_remove_request(&data->pm_qos_req);
+- if (data->hsic_pad_regulator)
+- regulator_disable(data->hsic_pad_regulator);
++ if (data->ci_pdev) {
++ imx_disable_unprepare_clks(&pdev->dev);
++ if (data->plat_data->flags & CI_HDRC_PMQOS)
++ pm_qos_remove_request(&data->pm_qos_req);
++ if (data->hsic_pad_regulator)
++ regulator_disable(data->hsic_pad_regulator);
++ }
+
+ return 0;
+ }
+--
+2.20.1
+
--- /dev/null
+From 7333b42cde0b8c89cce37cfe33f967a822919176 Mon Sep 17 00:00:00 2001
+From: Halil Pasic <pasic@linux.ibm.com>
+Date: Tue, 23 Jul 2019 17:11:01 +0200
+Subject: virtio/s390: fix race on airq_areas[]
+
+[ Upstream commit 4f419eb14272e0698e8c55bb5f3f266cc2a21c81 ]
+
+The access to airq_areas was racy ever since the adapter interrupts got
+introduced to virtio-ccw, but since commit 39c7dcb15892 ("virtio/s390:
+make airq summary indicators DMA") this became an issue in practice as
+well. Namely, before that commit the airq_info that got overwritten was
+still functional. After that commit, however, the two infos share a
+summary_indicator, which aggravates the situation. As a result, the
+auto-online mechanism occasionally hangs the boot with virtio_blk.
+
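+The underlying bug is an unsynchronised "check, then allocate" on a
+shared slot: two CPUs can both see the slot as empty, both allocate,
+and one allocation silently overwrites the other. A minimal sketch of
+the fix applied here (slots/get_slot are generic names, not the
+driver's; new_airq_info() is the driver's allocator), serialising the
+lazy allocation with a mutex:
+
+  #include <linux/mutex.h>
+
+  static struct airq_info *slots[MAX_AIRQ_AREAS];
+  static DEFINE_MUTEX(slots_lock);
+
+  static struct airq_info *get_slot(int i)
+  {
+          struct airq_info *info;
+
+          mutex_lock(&slots_lock);
+          if (!slots[i])                  /* first caller allocates the slot */
+                  slots[i] = new_airq_info();
+          info = slots[i];                /* later callers reuse the same info */
+          mutex_unlock(&slots_lock);
+
+          return info;                    /* NULL if the allocation failed */
+  }
+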
+Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
+Reported-by: Marc Hartmayer <mhartmay@linux.ibm.com>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Cc: stable@vger.kernel.org
+Fixes: 96b14536d935 ("virtio-ccw: virtio-ccw adapter interrupt support.")
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/s390/virtio/virtio_ccw.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
+index 6a30768813219..8d47ad61bac3d 100644
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -132,6 +132,7 @@ struct airq_info {
+ struct airq_iv *aiv;
+ };
+ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
++static DEFINE_MUTEX(airq_areas_lock);
+
+ #define CCW_CMD_SET_VQ 0x13
+ #define CCW_CMD_VDEV_RESET 0x33
+@@ -244,9 +245,11 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
+ unsigned long bit, flags;
+
+ for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
++ mutex_lock(&airq_areas_lock);
+ if (!airq_areas[i])
+ airq_areas[i] = new_airq_info();
+ info = airq_areas[i];
++ mutex_unlock(&airq_areas_lock);
+ if (!info)
+ return 0;
+ write_lock_irqsave(&info->lock, flags);
+--
+2.20.1
+