--- /dev/null
+From 1358c13a48c43f5e4de0c1835291837a27b9720c Mon Sep 17 00:00:00 2001
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+Date: Thu, 7 Feb 2019 15:36:11 +0200
+Subject: crypto: ccree - fix resume race condition on init
+
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+
+commit 1358c13a48c43f5e4de0c1835291837a27b9720c upstream.
+
+We were enabling autosuspend, which is using data set by the
+hash module, prior to the hash module being inited, causing
+a crash on resume as part of the startup sequence if the race
+was lost.
+
+This was never a real problem because the PM infra was using low
+res timers so we were always winning the race, until commit 8234f6734c5d
+("PM-runtime: Switch autosuspend over to using hrtimers") changed that :-)
+
+Fix this by separating the PM setup and enablement and doing the
+latter only at the end of the init sequence.
+
+Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
+Cc: Vincent Guittot <vincent.guittot@linaro.org>
+Cc: stable@kernel.org # v4.20
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccree/cc_driver.c | 7 ++++---
+ drivers/crypto/ccree/cc_pm.c | 13 ++++++-------
+ drivers/crypto/ccree/cc_pm.h | 3 +++
+ 3 files changed, 13 insertions(+), 10 deletions(-)
+
+--- a/drivers/crypto/ccree/cc_driver.c
++++ b/drivers/crypto/ccree/cc_driver.c
+@@ -364,7 +364,7 @@ static int init_cc_resources(struct plat
+ rc = cc_ivgen_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_ivgen_init failed\n");
+- goto post_power_mgr_err;
++ goto post_buf_mgr_err;
+ }
+
+ /* Allocate crypto algs */
+@@ -387,6 +387,9 @@ static int init_cc_resources(struct plat
+ goto post_hash_err;
+ }
+
++ /* All set, we can allow autosuspend */
++ cc_pm_go(new_drvdata);
++
+ /* If we got here and FIPS mode is enabled
+ * it means all FIPS test passed, so let TEE
+ * know we're good.
+@@ -401,8 +404,6 @@ post_cipher_err:
+ cc_cipher_free(new_drvdata);
+ post_ivgen_err:
+ cc_ivgen_fini(new_drvdata);
+-post_power_mgr_err:
+- cc_pm_fini(new_drvdata);
+ post_buf_mgr_err:
+ cc_buffer_mgr_fini(new_drvdata);
+ post_req_mgr_err:
+--- a/drivers/crypto/ccree/cc_pm.c
++++ b/drivers/crypto/ccree/cc_pm.c
+@@ -100,20 +100,19 @@ int cc_pm_put_suspend(struct device *dev
+
+ int cc_pm_init(struct cc_drvdata *drvdata)
+ {
+- int rc = 0;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ /* must be before the enabling to avoid resdundent suspending */
+ pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+ /* activate the PM module */
+- rc = pm_runtime_set_active(dev);
+- if (rc)
+- return rc;
+- /* enable the PM module*/
+- pm_runtime_enable(dev);
++ return pm_runtime_set_active(dev);
++}
+
+- return rc;
++/* enable the PM module*/
++void cc_pm_go(struct cc_drvdata *drvdata)
++{
++ pm_runtime_enable(drvdata_to_dev(drvdata));
+ }
+
+ void cc_pm_fini(struct cc_drvdata *drvdata)
+--- a/drivers/crypto/ccree/cc_pm.h
++++ b/drivers/crypto/ccree/cc_pm.h
+@@ -16,6 +16,7 @@
+ extern const struct dev_pm_ops ccree_pm;
+
+ int cc_pm_init(struct cc_drvdata *drvdata);
++void cc_pm_go(struct cc_drvdata *drvdata);
+ void cc_pm_fini(struct cc_drvdata *drvdata);
+ int cc_pm_suspend(struct device *dev);
+ int cc_pm_resume(struct device *dev);
+@@ -29,6 +30,8 @@ static inline int cc_pm_init(struct cc_d
+ return 0;
+ }
+
++static void cc_pm_go(struct cc_drvdata *drvdata) {}
++
+ static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
+
+ static inline int cc_pm_suspend(struct device *dev)
--- /dev/null
+From ff0c129d3b5ecb3df7c8f5e2236582bf745b6c5f Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Fri, 8 Feb 2019 10:52:07 -0500
+Subject: dm crypt: don't overallocate the integrity tag space
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit ff0c129d3b5ecb3df7c8f5e2236582bf745b6c5f upstream.
+
+bio_sectors() returns the value in the units of 512-byte sectors (no
+matter what the real sector size of the device). dm-crypt multiplies
+bio_sectors() by on_disk_tag_size to calculate the space allocated for
+integrity tags. If dm-crypt is running with sector size larger than
+512b, it allocates more data than is needed.
+
+Device Mapper trims the extra space when passing the bio to
+dm-integrity, so this bug didn't result in any visible misbehavior.
+But it must be fixed to avoid wasteful memory allocation for the block
+integrity payload.
+
+Fixes: ef43aa38063a6 ("dm crypt: add cryptographic data integrity protection (authenticated encryption)")
+Cc: stable@vger.kernel.org # 4.12+
+Reported-by: Milan Broz <mbroz@redhat.com>
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-crypt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -932,7 +932,7 @@ static int dm_crypt_integrity_io_alloc(s
+ if (IS_ERR(bip))
+ return PTR_ERR(bip);
+
+- tag_len = io->cc->on_disk_tag_size * bio_sectors(bio);
++ tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
+
+ bip->bip_iter.bi_size = tag_len;
+ bip->bip_iter.bi_sector = io->cc->start + io->sector;
--- /dev/null
+From 4ae280b4ee3463fa57bbe6eede26b97daff8a0f1 Mon Sep 17 00:00:00 2001
+From: Nikos Tsironis <ntsironis@arrikto.com>
+Date: Thu, 14 Feb 2019 20:38:47 +0200
+Subject: dm thin: fix bug where bio that overwrites thin block ignores FUA
+
+From: Nikos Tsironis <ntsironis@arrikto.com>
+
+commit 4ae280b4ee3463fa57bbe6eede26b97daff8a0f1 upstream.
+
+When provisioning a new data block for a virtual block, either because
+the block was previously unallocated or because we are breaking sharing,
+if the whole block of data is being overwritten the bio that triggered
+the provisioning is issued immediately, skipping copying or zeroing of
+the data block.
+
+When this bio completes the new mapping is inserted in to the pool's
+metadata by process_prepared_mapping(), where the bio completion is
+signaled to the upper layers.
+
+This completion is signaled without first committing the metadata. If
+the bio in question has the REQ_FUA flag set and the system crashes
+right after its completion and before the next metadata commit, then the
+write is lost despite the REQ_FUA flag requiring that I/O completion for
+this request must only be signaled after the data has been committed to
+non-volatile storage.
+
+Fix this by deferring the completion of overwrite bios, with the REQ_FUA
+flag set, until after the metadata has been committed.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Nikos Tsironis <ntsironis@arrikto.com>
+Acked-by: Joe Thornber <ejt@redhat.com>
+Acked-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-thin.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 50 insertions(+), 5 deletions(-)
+
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -257,6 +257,7 @@ struct pool {
+
+ spinlock_t lock;
+ struct bio_list deferred_flush_bios;
++ struct bio_list deferred_flush_completions;
+ struct list_head prepared_mappings;
+ struct list_head prepared_discards;
+ struct list_head prepared_discards_pt2;
+@@ -956,6 +957,39 @@ static void process_prepared_mapping_fai
+ mempool_free(m, &m->tc->pool->mapping_pool);
+ }
+
++static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
++{
++ struct pool *pool = tc->pool;
++ unsigned long flags;
++
++ /*
++ * If the bio has the REQ_FUA flag set we must commit the metadata
++ * before signaling its completion.
++ */
++ if (!bio_triggers_commit(tc, bio)) {
++ bio_endio(bio);
++ return;
++ }
++
++ /*
++ * Complete bio with an error if earlier I/O caused changes to the
++ * metadata that can't be committed, e.g, due to I/O errors on the
++ * metadata device.
++ */
++ if (dm_thin_aborted_changes(tc->td)) {
++ bio_io_error(bio);
++ return;
++ }
++
++ /*
++ * Batch together any bios that trigger commits and then issue a
++ * single commit for them in process_deferred_bios().
++ */
++ spin_lock_irqsave(&pool->lock, flags);
++ bio_list_add(&pool->deferred_flush_completions, bio);
++ spin_unlock_irqrestore(&pool->lock, flags);
++}
++
+ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
+ {
+ struct thin_c *tc = m->tc;
+@@ -988,7 +1022,7 @@ static void process_prepared_mapping(str
+ */
+ if (bio) {
+ inc_remap_and_issue_cell(tc, m->cell, m->data_block);
+- bio_endio(bio);
++ complete_overwrite_bio(tc, bio);
+ } else {
+ inc_all_io_entry(tc->pool, m->cell->holder);
+ remap_and_issue(tc, m->cell->holder, m->data_block);
+@@ -2317,7 +2351,7 @@ static void process_deferred_bios(struct
+ {
+ unsigned long flags;
+ struct bio *bio;
+- struct bio_list bios;
++ struct bio_list bios, bio_completions;
+ struct thin_c *tc;
+
+ tc = get_first_thin(pool);
+@@ -2328,26 +2362,36 @@ static void process_deferred_bios(struct
+ }
+
+ /*
+- * If there are any deferred flush bios, we must commit
+- * the metadata before issuing them.
++ * If there are any deferred flush bios, we must commit the metadata
++ * before issuing them or signaling their completion.
+ */
+ bio_list_init(&bios);
++ bio_list_init(&bio_completions);
++
+ spin_lock_irqsave(&pool->lock, flags);
+ bio_list_merge(&bios, &pool->deferred_flush_bios);
+ bio_list_init(&pool->deferred_flush_bios);
++
++ bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
++ bio_list_init(&pool->deferred_flush_completions);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+- if (bio_list_empty(&bios) &&
++ if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
+ !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
+ return;
+
+ if (commit(pool)) {
++ bio_list_merge(&bios, &bio_completions);
++
+ while ((bio = bio_list_pop(&bios)))
+ bio_io_error(bio);
+ return;
+ }
+ pool->last_commit_jiffies = jiffies;
+
++ while ((bio = bio_list_pop(&bio_completions)))
++ bio_endio(bio);
++
+ while ((bio = bio_list_pop(&bios)))
+ generic_make_request(bio);
+ }
+@@ -2954,6 +2998,7 @@ static struct pool *pool_create(struct m
+ INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
+ spin_lock_init(&pool->lock);
+ bio_list_init(&pool->deferred_flush_bios);
++ bio_list_init(&pool->deferred_flush_completions);
+ INIT_LIST_HEAD(&pool->prepared_mappings);
+ INIT_LIST_HEAD(&pool->prepared_discards);
+ INIT_LIST_HEAD(&pool->prepared_discards_pt2);
--- /dev/null
+From e8a8fedd57fdcebf0e4f24ef0fc7e29323df8e66 Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Tue, 29 Jan 2019 14:09:59 -0500
+Subject: drm/i915: Block fbdev HPD processing during suspend
+
+From: Lyude Paul <lyude@redhat.com>
+
+commit e8a8fedd57fdcebf0e4f24ef0fc7e29323df8e66 upstream.
+
+When resuming, we check whether or not any previously connected
+MST topologies are still present and if so, attempt to resume them. If
+this fails, we disable said MST topologies and fire off a hotplug event
+so that userspace knows to reprobe.
+
+However, sending a hotplug event involves calling
+drm_fb_helper_hotplug_event(), which in turn results in fbcon doing a
+connector reprobe in the caller's thread - something we can't do at the
+point in which i915 calls drm_dp_mst_topology_mgr_resume() since
+hotplugging hasn't been fully initialized yet.
+
+This currently causes some rather subtle but fatal issues. For example,
+on my T480s the laptop dock connected to it usually disappears during a
+suspend cycle, and comes back up a short while after the system has been
+resumed. This guarantees pretty much every suspend and resume cycle,
+drm_dp_mst_topology_mgr_set_mst(mgr, false); will be caused and in turn,
+a connector hotplug will occur. Now it's Rube Goldberg time: when the
+connector hotplug occurs, i915 reprobes /all/ of the connectors,
+including eDP. However, eDP probing requires that we power on the panel
+VDD which in turn, grabs a wakeref to the appropriate power domain on
+the GPU (on my T480s, this is the PORT_DDI_A_IO domain). This is where
+things start breaking, since this all happens before
+intel_power_domains_enable() is called we end up leaking the wakeref
+that was acquired and never releasing it later. Come next suspend/resume
+cycle, this causes us to fail to shut down the GPU properly, which
+causes it not to resume properly and die a horrible complicated death.
+
+(as a note: this only happens when there's both an eDP panel and MST
+topology connected which is removed mid-suspend. One or the other seems
+to always be OK).
+
+We could try to fix the VDD wakeref leak, but this doesn't seem like
+it's worth it at all since we aren't able to handle hotplug detection
+while resuming anyway. So, let's go with a more robust solution inspired
+by nouveau: block fbdev from handling hotplug events until we resume
+fbdev. This allows us to still send sysfs hotplug events to be handled
+later by user space while we're resuming, while also preventing us from
+actually processing any hotplug events we receive until it's safe.
+
+This fixes the wakeref leak observed on the T480s and as such, also
+fixes suspend/resume with MST topologies connected on this machine.
+
+Changes since v2:
+* Don't call drm_fb_helper_hotplug_event() under lock, do it after lock
+ (Chris Wilson)
+* Don't call drm_fb_helper_hotplug_event() in
+ intel_fbdev_output_poll_changed() under lock (Chris Wilson)
+* Always set ifbdev->hpd_waiting (Chris Wilson)
+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Fixes: 0e32b39ceed6 ("drm/i915: add DP 1.2 MST support (v0.7)")
+Cc: Todd Previte <tprevite@gmail.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Jani Nikula <jani.nikula@linux.intel.com>
+Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Cc: Imre Deak <imre.deak@intel.com>
+Cc: intel-gfx@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v3.17+
+Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190129191001.442-2-lyude@redhat.com
+(cherry picked from commit fe5ec65668cdaa4348631d8ce1766eed43b33c10)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/intel_drv.h | 10 ++++++++++
+ drivers/gpu/drm/i915/intel_fbdev.c | 33 ++++++++++++++++++++++++++++++++-
+ 2 files changed, 42 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -209,6 +209,16 @@ struct intel_fbdev {
+ unsigned long vma_flags;
+ async_cookie_t cookie;
+ int preferred_bpp;
++
++ /* Whether or not fbdev hpd processing is temporarily suspended */
++ bool hpd_suspended : 1;
++ /* Set when a hotplug was received while HPD processing was
++ * suspended
++ */
++ bool hpd_waiting : 1;
++
++ /* Protects hpd_suspended */
++ struct mutex hpd_lock;
+ };
+
+ struct intel_encoder {
+--- a/drivers/gpu/drm/i915/intel_fbdev.c
++++ b/drivers/gpu/drm/i915/intel_fbdev.c
+@@ -679,6 +679,7 @@ int intel_fbdev_init(struct drm_device *
+ if (ifbdev == NULL)
+ return -ENOMEM;
+
++ mutex_init(&ifbdev->hpd_lock);
+ drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
+
+ if (!intel_fbdev_init_bios(dev, ifbdev))
+@@ -752,6 +753,26 @@ void intel_fbdev_fini(struct drm_i915_pr
+ intel_fbdev_destroy(ifbdev);
+ }
+
++/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
++ * processing, fbdev will perform a full connector reprobe if a hotplug event
++ * was received while HPD was suspended.
++ */
++static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
++{
++ bool send_hpd = false;
++
++ mutex_lock(&ifbdev->hpd_lock);
++ ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
++ send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
++ ifbdev->hpd_waiting = false;
++ mutex_unlock(&ifbdev->hpd_lock);
++
++ if (send_hpd) {
++ DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
++ drm_fb_helper_hotplug_event(&ifbdev->helper);
++ }
++}
++
+ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
+ {
+ struct drm_i915_private *dev_priv = to_i915(dev);
+@@ -773,6 +794,7 @@ void intel_fbdev_set_suspend(struct drm_
+ */
+ if (state != FBINFO_STATE_RUNNING)
+ flush_work(&dev_priv->fbdev_suspend_work);
++
+ console_lock();
+ } else {
+ /*
+@@ -800,17 +822,26 @@ void intel_fbdev_set_suspend(struct drm_
+
+ drm_fb_helper_set_suspend(&ifbdev->helper, state);
+ console_unlock();
++
++ intel_fbdev_hpd_set_suspend(ifbdev, state);
+ }
+
+ void intel_fbdev_output_poll_changed(struct drm_device *dev)
+ {
+ struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
++ bool send_hpd;
+
+ if (!ifbdev)
+ return;
+
+ intel_fbdev_sync(ifbdev);
+- if (ifbdev->vma || ifbdev->helper.deferred_setup)
++
++ mutex_lock(&ifbdev->hpd_lock);
++ send_hpd = !ifbdev->hpd_suspended;
++ ifbdev->hpd_waiting = true;
++ mutex_unlock(&ifbdev->hpd_lock);
++
++ if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
+ drm_fb_helper_hotplug_event(&ifbdev->helper);
+ }
+
--- /dev/null
+From 2e7bd10e05afb866b5fb13eda25095c35d7a27cc Mon Sep 17 00:00:00 2001
+From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Date: Thu, 7 Feb 2019 10:54:53 +0200
+Subject: drm/i915: Prevent a race during I915_GEM_MMAP ioctl with WC set
+
+From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+
+commit 2e7bd10e05afb866b5fb13eda25095c35d7a27cc upstream.
+
+Make sure the underlying VMA in the process address space is the
+same as it was during vm_mmap to avoid applying WC to wrong VMA.
+
+A more long-term solution would be to have vm_mmap_locked variant
+in linux/mmap.h for when caller wants to hold mmap_sem for an
+extended duration.
+
+v2:
+- Refactor the compare function
+
+Fixes: 1816f9236303 ("drm/i915: Support creation of unbound wc user mappings for objects")
+Reported-by: Adam Zabrocki <adamza@microsoft.com>
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Cc: <stable@vger.kernel.org> # v4.0+
+Cc: Akash Goel <akash.goel@intel.com>
+Cc: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
+Cc: Adam Zabrocki <adamza@microsoft.com>
+Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> #v1
+Link: https://patchwork.freedesktop.org/patch/msgid/20190207085454.10598-1-joonas.lahtinen@linux.intel.com
+(cherry picked from commit 5c4604e757ba9b193b09768d75a7d2105a5b883f)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_gem.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1826,6 +1826,16 @@ i915_gem_sw_finish_ioctl(struct drm_devi
+ return 0;
+ }
+
++static inline bool
++__vma_matches(struct vm_area_struct *vma, struct file *filp,
++ unsigned long addr, unsigned long size)
++{
++ if (vma->vm_file != filp)
++ return false;
++
++ return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
++}
++
+ /**
+ * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
+ * it is mapped to.
+@@ -1884,7 +1894,7 @@ i915_gem_mmap_ioctl(struct drm_device *d
+ return -EINTR;
+ }
+ vma = find_vma(mm, addr);
+- if (vma)
++ if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ else
--- /dev/null
+From 82abf33766712d8446ea137a3400165e31bd12c7 Mon Sep 17 00:00:00 2001
+From: Eric Anholt <eric@anholt.net>
+Date: Fri, 7 Dec 2018 11:16:53 -0800
+Subject: drm/sched: Always trace the dependencies we wait on, to fix a race.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Eric Anholt <eric@anholt.net>
+
+commit 82abf33766712d8446ea137a3400165e31bd12c7 upstream.
+
+The entity->dependency can go away completely once we've called
+drm_sched_entity_add_dependency_cb() (if the cb is called before we
+get around to tracing). The tracepoint is more useful if we trace
+every dependency instead of just ones that get callbacks installed,
+anyway, so just do that.
+
+Fixes any easy-to-produce OOPS when tracing the scheduler on V3D with
+"perf record -a -e gpu_scheduler:.\* glxgears" and DEBUG_SLAB enabled.
+
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/scheduler/sched_entity.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/drivers/gpu/drm/scheduler/sched_entity.c
++++ b/drivers/gpu/drm/scheduler/sched_entity.c
+@@ -434,13 +434,10 @@ struct drm_sched_job *drm_sched_entity_p
+
+ while ((entity->dependency =
+ sched->ops->dependency(sched_job, entity))) {
++ trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
+
+- if (drm_sched_entity_add_dependency_cb(entity)) {
+-
+- trace_drm_sched_job_wait_dep(sched_job,
+- entity->dependency);
++ if (drm_sched_entity_add_dependency_cb(entity))
+ return NULL;
+- }
+ }
+
+ /* skip jobs from entity that marked guilty */
--- /dev/null
+From 69ef943dbc14b21987c79f8399ffea08f9a1b446 Mon Sep 17 00:00:00 2001
+From: Matthew Wilcox <willy@infradead.org>
+Date: Thu, 14 Feb 2019 11:03:48 -0800
+Subject: drm: Use array_size() when creating lease
+
+From: Matthew Wilcox <willy@infradead.org>
+
+commit 69ef943dbc14b21987c79f8399ffea08f9a1b446 upstream.
+
+Passing an object_count of sufficient size will make
+object_count * 4 wrap around to be very small, then a later function
+will happily iterate off the end of the object_ids array. Using
+array_size() will saturate at SIZE_MAX, the kmalloc() will fail and
+we'll return an -ENOMEM to the norty userspace.
+
+Fixes: 62884cd386b8 ("drm: Add four ioctls for managing drm mode object leases [v7]")
+Signed-off-by: Matthew Wilcox <willy@infradead.org>
+Acked-by: Kees Cook <keescook@chromium.org>
+Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: <stable@vger.kernel.org> # v4.15+
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_lease.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/drm_lease.c
++++ b/drivers/gpu/drm/drm_lease.c
+@@ -521,7 +521,8 @@ int drm_mode_create_lease_ioctl(struct d
+
+ object_count = cl->object_count;
+
+- object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_count * sizeof(__u32));
++ object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
++ array_size(object_count, sizeof(__u32)));
+ if (IS_ERR(object_ids))
+ return PTR_ERR(object_ids);
+
--- /dev/null
+From 7fd56e0260a22c0cfaf9adb94a2427b76e239dd0 Mon Sep 17 00:00:00 2001
+From: Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
+Date: Wed, 6 Feb 2019 12:01:16 -0200
+Subject: drm/vkms: Fix license inconsistent
+
+From: Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
+
+commit 7fd56e0260a22c0cfaf9adb94a2427b76e239dd0 upstream.
+
+Fixes license inconsistent related to the VKMS driver and remove the
+redundant boilerplate comment.
+
+Fixes: 854502fa0a38 ("drm/vkms: Add basic CRTC initialization")
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
+Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190206140116.7qvy2lpwbcd7wds6@smtp.gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/vkms/vkms_crc.c | 3 ++-
+ drivers/gpu/drm/vkms/vkms_crtc.c | 8 +-------
+ drivers/gpu/drm/vkms/vkms_drv.c | 7 +------
+ drivers/gpu/drm/vkms/vkms_drv.h | 2 ++
+ drivers/gpu/drm/vkms/vkms_gem.c | 8 +-------
+ drivers/gpu/drm/vkms/vkms_output.c | 8 +-------
+ drivers/gpu/drm/vkms/vkms_plane.c | 8 +-------
+ 7 files changed, 9 insertions(+), 35 deletions(-)
+
+--- a/drivers/gpu/drm/vkms/vkms_crc.c
++++ b/drivers/gpu/drm/vkms/vkms_crc.c
+@@ -1,4 +1,5 @@
+-// SPDX-License-Identifier: GPL-2.0
++// SPDX-License-Identifier: GPL-2.0+
++
+ #include "vkms_drv.h"
+ #include <linux/crc32.h>
+ #include <drm/drm_atomic.h>
+--- a/drivers/gpu/drm/vkms/vkms_crtc.c
++++ b/drivers/gpu/drm/vkms/vkms_crtc.c
+@@ -1,10 +1,4 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- */
++// SPDX-License-Identifier: GPL-2.0+
+
+ #include "vkms_drv.h"
+ #include <drm/drm_atomic_helper.h>
+--- a/drivers/gpu/drm/vkms/vkms_drv.c
++++ b/drivers/gpu/drm/vkms/vkms_drv.c
+@@ -1,9 +1,4 @@
+-/*
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- */
++// SPDX-License-Identifier: GPL-2.0+
+
+ /**
+ * DOC: vkms (Virtual Kernel Modesetting)
+--- a/drivers/gpu/drm/vkms/vkms_drv.h
++++ b/drivers/gpu/drm/vkms/vkms_drv.h
+@@ -1,3 +1,5 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++
+ #ifndef _VKMS_DRV_H_
+ #define _VKMS_DRV_H_
+
+--- a/drivers/gpu/drm/vkms/vkms_gem.c
++++ b/drivers/gpu/drm/vkms/vkms_gem.c
+@@ -1,10 +1,4 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- */
++// SPDX-License-Identifier: GPL-2.0+
+
+ #include <linux/shmem_fs.h>
+
+--- a/drivers/gpu/drm/vkms/vkms_output.c
++++ b/drivers/gpu/drm/vkms/vkms_output.c
+@@ -1,10 +1,4 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- */
++// SPDX-License-Identifier: GPL-2.0+
+
+ #include "vkms_drv.h"
+ #include <drm/drm_crtc_helper.h>
+--- a/drivers/gpu/drm/vkms/vkms_plane.c
++++ b/drivers/gpu/drm/vkms/vkms_plane.c
+@@ -1,10 +1,4 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- */
++// SPDX-License-Identifier: GPL-2.0+
+
+ #include "vkms_drv.h"
+ #include <drm/drm_plane_helper.h>
--- /dev/null
+From dfcc34c99f3ebc16b787b118763bf9cb6b1efc7a Mon Sep 17 00:00:00 2001
+From: Nate Dailey <nate.dailey@stratus.com>
+Date: Thu, 7 Feb 2019 14:19:01 -0500
+Subject: md/raid1: don't clear bitmap bits on interrupted recovery.
+
+From: Nate Dailey <nate.dailey@stratus.com>
+
+commit dfcc34c99f3ebc16b787b118763bf9cb6b1efc7a upstream.
+
+sync_request_write no longer submits writes to a Faulty device. This has
+the unfortunate side effect that bitmap bits can be incorrectly cleared
+if a recovery is interrupted (previously, end_sync_write would have
+prevented this). This means the next recovery may not copy everything
+it should, potentially corrupting data.
+
+Add a function for doing the proper md_bitmap_end_sync, called from
+end_sync_write and the Faulty case in sync_request_write.
+
+backport note to 4.14: s/md_bitmap_end_sync/bitmap_end_sync
+Cc: stable@vger.kernel.org 4.14+
+Fixes: 0c9d5b127f69 ("md/raid1: avoid reusing a resync bio after error handling.")
+Reviewed-by: Jack Wang <jinpu.wang@cloud.ionos.com>
+Tested-by: Jack Wang <jinpu.wang@cloud.ionos.com>
+Signed-off-by: Nate Dailey <nate.dailey@stratus.com>
+Signed-off-by: Song Liu <songliubraving@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid1.c | 28 ++++++++++++++++++----------
+ 1 file changed, 18 insertions(+), 10 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1863,6 +1863,20 @@ static void end_sync_read(struct bio *bi
+ reschedule_retry(r1_bio);
+ }
+
++static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
++{
++ sector_t sync_blocks = 0;
++ sector_t s = r1_bio->sector;
++ long sectors_to_go = r1_bio->sectors;
++
++ /* make sure these bits don't get cleared. */
++ do {
++ md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
++ s += sync_blocks;
++ sectors_to_go -= sync_blocks;
++ } while (sectors_to_go > 0);
++}
++
+ static void end_sync_write(struct bio *bio)
+ {
+ int uptodate = !bio->bi_status;
+@@ -1874,15 +1888,7 @@ static void end_sync_write(struct bio *b
+ struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
+
+ if (!uptodate) {
+- sector_t sync_blocks = 0;
+- sector_t s = r1_bio->sector;
+- long sectors_to_go = r1_bio->sectors;
+- /* make sure these bits doesn't get cleared. */
+- do {
+- md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
+- s += sync_blocks;
+- sectors_to_go -= sync_blocks;
+- } while (sectors_to_go > 0);
++ abort_sync_write(mddev, r1_bio);
+ set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement, &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED, &
+@@ -2172,8 +2178,10 @@ static void sync_request_write(struct md
+ (i == r1_bio->read_disk ||
+ !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
+ continue;
+- if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
++ if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
++ abort_sync_write(mddev, r1_bio);
+ continue;
++ }
+
+ bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
+ if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
--- /dev/null
+From a58007621be33e9f7c7bed5d5ff8ecb914e1044a Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 14 Feb 2019 15:00:36 +1100
+Subject: powerpc/64s: Fix possible corruption on big endian due to pgd/pud_present()
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit a58007621be33e9f7c7bed5d5ff8ecb914e1044a upstream.
+
+In v4.20 we changed our pgd/pud_present() to check for _PAGE_PRESENT
+rather than just checking that the value is non-zero, e.g.:
+
+ static inline int pgd_present(pgd_t pgd)
+ {
+ - return !pgd_none(pgd);
+ + return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
+ }
+
+Unfortunately this is broken on big endian, as the result of the
+bitwise & is truncated to int, which is always zero because
+_PAGE_PRESENT is 0x8000000000000000ul. This means pgd_present() and
+pud_present() are always false at compile time, and the compiler
+elides the subsequent code.
+
+Remarkably with that bug present we are still able to boot and run
+with few noticeable effects. However under some work loads we are able
+to trigger a warning in the ext4 code:
+
+ WARNING: CPU: 11 PID: 29593 at fs/ext4/inode.c:3927 .ext4_set_page_dirty+0x70/0xb0
+ CPU: 11 PID: 29593 Comm: debugedit Not tainted 4.20.0-rc1 #1
+ ...
+ NIP .ext4_set_page_dirty+0x70/0xb0
+ LR .set_page_dirty+0xa0/0x150
+ Call Trace:
+ .set_page_dirty+0xa0/0x150
+ .unmap_page_range+0xbf0/0xe10
+ .unmap_vmas+0x84/0x130
+ .unmap_region+0xe8/0x190
+ .__do_munmap+0x2f0/0x510
+ .__vm_munmap+0x80/0x110
+ .__se_sys_munmap+0x14/0x30
+ system_call+0x5c/0x70
+
+The fix is simple, we need to convert the result of the bitwise & to
+an int before returning it.
+
+Thanks to Erhard, Jan Kara and Aneesh for help with debugging.
+
+Fixes: da7ad366b497 ("powerpc/mm/book3s: Update pmd_present to look at _PAGE_PRESENT bit")
+Cc: stable@vger.kernel.org # v4.20+
+Reported-by: Erhard F. <erhard_f@mailbox.org>
+Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/book3s/64/pgtable.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
++++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
+@@ -904,7 +904,7 @@ static inline int pud_none(pud_t pud)
+
+ static inline int pud_present(pud_t pud)
+ {
+- return (pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
++ return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
+ }
+
+ extern struct page *pud_page(pud_t pud);
+@@ -951,7 +951,7 @@ static inline int pgd_none(pgd_t pgd)
+
+ static inline int pgd_present(pgd_t pgd)
+ {
+- return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
++ return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
+ }
+
+ static inline pte_t pgd_pte(pgd_t pgd)
--- /dev/null
+From e4a056987c86f402f1286e050b1dee3f4ce7c7eb Mon Sep 17 00:00:00 2001
+From: James Bottomley <James.Bottomley@HansenPartnership.com>
+Date: Tue, 12 Feb 2019 08:05:25 -0800
+Subject: scsi: sd: fix entropy gathering for most rotational disks
+
+From: James Bottomley <James.Bottomley@HansenPartnership.com>
+
+commit e4a056987c86f402f1286e050b1dee3f4ce7c7eb upstream.
+
+The problem is that the default for MQ is not to gather entropy, whereas
+the default for the legacy queue was always to gather it. The original
+attempt to fix entropy gathering for rotational disks under MQ added an
+else branch in sd_read_block_characteristics(). Unfortunately, the entire
+check isn't reached if the device has no characteristics VPD page. Since
+this page was only introduced in SBC-3 and it's optional anyway, most less
+expensive rotational disks don't have one, meaning they all stopped
+gathering entropy when we made MQ the default. In a wholly unrelated
+change, openssl and openssh won't function until the random number
+generator is initialised, meaning lots of people have been seeing large
+delays before they could log into systems with default MQ kernels due to
+this lack of entropy, because it now can take tens of minutes to initialise
+the kernel random number generator.
+
+The fix is to set the non-rotational and add-randomness flags
+unconditionally early on in the disk initialization path, so they can be
+reset only if the device actually reports being non-rotational via the VPD
+page.
+
+Reported-by: Mikael Pettersson <mikpelinux@gmail.com>
+Fixes: 83e32a591077 ("scsi: sd: Contribute to randomness when running rotational device")
+Cc: stable@vger.kernel.org
+Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
+Reviewed-by: Jens Axboe <axboe@kernel.dk>
+Reviewed-by: Xuewei Zhang <xueweiz@google.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/sd.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2960,9 +2960,6 @@ static void sd_read_block_characteristic
+ if (rot == 1) {
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+ blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
+- } else {
+- blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+- blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
+ }
+
+ if (sdkp->device->type == TYPE_ZBC) {
+@@ -3099,6 +3096,15 @@ static int sd_revalidate_disk(struct gen
+ if (sdkp->media_present) {
+ sd_read_capacity(sdkp, buffer);
+
++ /*
++ * set the default to rotational. All non-rotational devices
++ * support the block characteristics VPD page, which will
++ * cause this to be updated correctly and any device which
++ * doesn't support it should be treated as rotational.
++ */
++ blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
++ blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
++
+ if (scsi_device_supports_vpd(sdp)) {
+ sd_read_block_provisioning(sdkp);
+ sd_read_block_limits(sdkp);
alpha-fix-eiger-nr_irqs-to-128.patch
s390-suspend-fix-stack-setup-in-swsusp_arch_suspend.patch
s390-zcrypt-fix-specification-exception-on-z196-during-ap-probe.patch
+tracing-probeevent-correctly-update-remaining-space-in-dynamic-area.patch
+x86-platform-uv-use-efi_runtime_lock-to-serialise-bios-calls.patch
+powerpc-64s-fix-possible-corruption-on-big-endian-due-to-pgd-pud_present.patch
+scsi-sd-fix-entropy-gathering-for-most-rotational-disks.patch
+signal-restore-the-stop-ptrace_event_exit.patch
+crypto-ccree-fix-resume-race-condition-on-init.patch
+md-raid1-don-t-clear-bitmap-bits-on-interrupted-recovery.patch
+x86-a.out-clear-the-dump-structure-initially.patch
+sunrpc-fix-4-more-call-sites-that-were-using-stack-memory-with-a-scatterlist.patch
+dm-crypt-don-t-overallocate-the-integrity-tag-space.patch
+dm-thin-fix-bug-where-bio-that-overwrites-thin-block-ignores-fua.patch
+drm-use-array_size-when-creating-lease.patch
+drm-vkms-fix-license-inconsistent.patch
+drm-sched-always-trace-the-dependencies-we-wait-on-to-fix-a-race.patch
+drm-i915-block-fbdev-hpd-processing-during-suspend.patch
+drm-i915-prevent-a-race-during-i915_gem_mmap-ioctl-with-wc-set.patch
--- /dev/null
+From cf43a757fd49442bc38f76088b70c2299eed2c2f Mon Sep 17 00:00:00 2001
+From: "Eric W. Biederman" <ebiederm@xmission.com>
+Date: Mon, 11 Feb 2019 23:27:42 -0600
+Subject: signal: Restore the stop PTRACE_EVENT_EXIT
+
+From: Eric W. Biederman <ebiederm@xmission.com>
+
+commit cf43a757fd49442bc38f76088b70c2299eed2c2f upstream.
+
+In the middle of do_exit() there is a call
+"ptrace_event(PTRACE_EVENT_EXIT, code);" That call places the process
+in TASK_TRACED aka "(TASK_WAKEKILL | __TASK_TRACED)" and waits
+for the debugger to release the task or SIGKILL to be delivered.
+
+Skipping past dequeue_signal when we know a fatal signal has already
+been delivered resulted in SIGKILL remaining pending and
+TIF_SIGPENDING remaining set. This in turn caused the
+scheduler to not sleep in PTRACE_EVENT_EXIT as it figured
+a fatal signal was pending. This also caused ptrace_freeze_traced
+in ptrace_check_attach to fail because it left a per thread
+SIGKILL pending which is what fatal_signal_pending tests for.
+
+This difference in signal state caused strace to report
+strace: Exit of unknown pid NNNNN ignored
+
+Therefore update the signal handling state like dequeue_signal
+would when removing a per thread SIGKILL, by removing SIGKILL
+from the per thread signal mask and clearing TIF_SIGPENDING.
+
+Acked-by: Oleg Nesterov <oleg@redhat.com>
+Reported-by: Oleg Nesterov <oleg@redhat.com>
+Reported-by: Ivan Delalande <colona@arista.com>
+Cc: stable@vger.kernel.org
+Fixes: 35634ffa1751 ("signal: Always notice exiting tasks")
+Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/signal.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2436,9 +2436,12 @@ relock:
+ }
+
+ /* Has this task already been marked for death? */
+- ksig->info.si_signo = signr = SIGKILL;
+- if (signal_group_exit(signal))
++ if (signal_group_exit(signal)) {
++ ksig->info.si_signo = signr = SIGKILL;
++ sigdelset(&current->pending.signal, SIGKILL);
++ recalc_sigpending();
+ goto fatal;
++ }
+
+ for (;;) {
+ struct k_sigaction *ka;
--- /dev/null
+From e7afe6c1d486b516ed586dcc10b3e7e3e85a9c2b Mon Sep 17 00:00:00 2001
+From: Scott Mayhew <smayhew@redhat.com>
+Date: Fri, 15 Feb 2019 13:42:02 -0500
+Subject: sunrpc: fix 4 more call sites that were using stack memory with a scatterlist
+
+From: Scott Mayhew <smayhew@redhat.com>
+
+commit e7afe6c1d486b516ed586dcc10b3e7e3e85a9c2b upstream.
+
+While trying to reproduce a reported kernel panic on arm64, I discovered
+that AUTH_GSS basically doesn't work at all with older enctypes on arm64
+systems with CONFIG_VMAP_STACK enabled. It turns out there still a few
+places using stack memory with scatterlists, causing krb5_encrypt() and
+krb5_decrypt() to produce incorrect results (or a BUG if CONFIG_DEBUG_SG
+is enabled).
+
+Tested with cthon on v4.0/v4.1/v4.2 with krb5/krb5i/krb5p using
+des3-cbc-sha1 and arcfour-hmac-md5.
+
+Signed-off-by: Scott Mayhew <smayhew@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/auth_gss/gss_krb5_seqnum.c | 49 ++++++++++++++++++++++++++--------
+ 1 file changed, 38 insertions(+), 11 deletions(-)
+
+--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
++++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+@@ -44,7 +44,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *k
+ unsigned char *cksum, unsigned char *buf)
+ {
+ struct crypto_sync_skcipher *cipher;
+- unsigned char plain[8];
++ unsigned char *plain;
+ s32 code;
+
+ dprintk("RPC: %s:\n", __func__);
+@@ -52,6 +52,10 @@ krb5_make_rc4_seq_num(struct krb5_ctx *k
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
++ plain = kmalloc(8, GFP_NOFS);
++ if (!plain)
++ return -ENOMEM;
++
+ plain[0] = (unsigned char) ((seqnum >> 24) & 0xff);
+ plain[1] = (unsigned char) ((seqnum >> 16) & 0xff);
+ plain[2] = (unsigned char) ((seqnum >> 8) & 0xff);
+@@ -67,6 +71,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *k
+
+ code = krb5_encrypt(cipher, cksum, plain, buf, 8);
+ out:
++ kfree(plain);
+ crypto_free_sync_skcipher(cipher);
+ return code;
+ }
+@@ -77,12 +82,17 @@ krb5_make_seq_num(struct krb5_ctx *kctx,
+ u32 seqnum,
+ unsigned char *cksum, unsigned char *buf)
+ {
+- unsigned char plain[8];
++ unsigned char *plain;
++ s32 code;
+
+ if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
+ return krb5_make_rc4_seq_num(kctx, direction, seqnum,
+ cksum, buf);
+
++ plain = kmalloc(8, GFP_NOFS);
++ if (!plain)
++ return -ENOMEM;
++
+ plain[0] = (unsigned char) (seqnum & 0xff);
+ plain[1] = (unsigned char) ((seqnum >> 8) & 0xff);
+ plain[2] = (unsigned char) ((seqnum >> 16) & 0xff);
+@@ -93,7 +103,9 @@ krb5_make_seq_num(struct krb5_ctx *kctx,
+ plain[6] = direction;
+ plain[7] = direction;
+
+- return krb5_encrypt(key, cksum, plain, buf, 8);
++ code = krb5_encrypt(key, cksum, plain, buf, 8);
++ kfree(plain);
++ return code;
+ }
+
+ static s32
+@@ -101,7 +113,7 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kc
+ unsigned char *buf, int *direction, s32 *seqnum)
+ {
+ struct crypto_sync_skcipher *cipher;
+- unsigned char plain[8];
++ unsigned char *plain;
+ s32 code;
+
+ dprintk("RPC: %s:\n", __func__);
+@@ -113,20 +125,28 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kc
+ if (code)
+ goto out;
+
++ plain = kmalloc(8, GFP_NOFS);
++ if (!plain) {
++ code = -ENOMEM;
++ goto out;
++ }
++
+ code = krb5_decrypt(cipher, cksum, buf, plain, 8);
+ if (code)
+- goto out;
++ goto out_plain;
+
+ if ((plain[4] != plain[5]) || (plain[4] != plain[6])
+ || (plain[4] != plain[7])) {
+ code = (s32)KG_BAD_SEQ;
+- goto out;
++ goto out_plain;
+ }
+
+ *direction = plain[4];
+
+ *seqnum = ((plain[0] << 24) | (plain[1] << 16) |
+ (plain[2] << 8) | (plain[3]));
++out_plain:
++ kfree(plain);
+ out:
+ crypto_free_sync_skcipher(cipher);
+ return code;
+@@ -139,7 +159,7 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
+ int *direction, u32 *seqnum)
+ {
+ s32 code;
+- unsigned char plain[8];
++ unsigned char *plain;
+ struct crypto_sync_skcipher *key = kctx->seq;
+
+ dprintk("RPC: krb5_get_seq_num:\n");
+@@ -147,18 +167,25 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
+ if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
+ return krb5_get_rc4_seq_num(kctx, cksum, buf,
+ direction, seqnum);
++ plain = kmalloc(8, GFP_NOFS);
++ if (!plain)
++ return -ENOMEM;
+
+ if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
+- return code;
++ goto out;
+
+ if ((plain[4] != plain[5]) || (plain[4] != plain[6]) ||
+- (plain[4] != plain[7]))
+- return (s32)KG_BAD_SEQ;
++ (plain[4] != plain[7])) {
++ code = (s32)KG_BAD_SEQ;
++ goto out;
++ }
+
+ *direction = plain[4];
+
+ *seqnum = ((plain[0]) |
+ (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24));
+
+- return 0;
++out:
++ kfree(plain);
++ return code;
+ }
--- /dev/null
+From f6675872db57305fa957021efc788f9983ed3b67 Mon Sep 17 00:00:00 2001
+From: Andreas Ziegler <andreas.ziegler@fau.de>
+Date: Wed, 6 Feb 2019 20:00:13 +0100
+Subject: tracing: probeevent: Correctly update remaining space in dynamic area
+
+From: Andreas Ziegler <andreas.ziegler@fau.de>
+
+commit f6675872db57305fa957021efc788f9983ed3b67 upstream.
+
+Commit 9178412ddf5a ("tracing: probeevent: Return consumed
+bytes of dynamic area") improved the string fetching
+mechanism by returning the number of required bytes after
+copying the argument to the dynamic area. However, this
+return value is now only used to increment the pointer
+inside the dynamic area but misses updating the 'maxlen'
+variable which indicates the remaining space in the dynamic
+area.
+
+This means that fetch_store_string() always reads the *total*
+size of the dynamic area from the data_loc pointer instead of
+the *remaining* size (and passes it along to
+strncpy_from_{user,unsafe}) even if we're already about to
+copy data into the middle of the dynamic area.
+
+Link: http://lkml.kernel.org/r/20190206190013.16405-1-andreas.ziegler@fau.de
+
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: stable@vger.kernel.org
+Fixes: 9178412ddf5a ("tracing: probeevent: Return consumed bytes of dynamic area")
+Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Andreas Ziegler <andreas.ziegler@fau.de>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_probe_tmpl.h | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/kernel/trace/trace_probe_tmpl.h
++++ b/kernel/trace/trace_probe_tmpl.h
+@@ -180,10 +180,12 @@ store_trace_args(void *data, struct trac
+ if (unlikely(arg->dynamic))
+ *dl = make_data_loc(maxlen, dyndata - base);
+ ret = process_fetch_insn(arg->code, regs, dl, base);
+- if (unlikely(ret < 0 && arg->dynamic))
++ if (unlikely(ret < 0 && arg->dynamic)) {
+ *dl = make_data_loc(0, dyndata - base);
+- else
++ } else {
+ dyndata += ret;
++ maxlen -= ret;
++ }
+ }
+ }
+
--- /dev/null
+From 10970e1b4be9c74fce8ab6e3c34a7d718f063f2c Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Tue, 12 Feb 2019 14:28:03 +0100
+Subject: x86/a.out: Clear the dump structure initially
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 10970e1b4be9c74fce8ab6e3c34a7d718f063f2c upstream.
+
+dump_thread32() in aout_core_dump() does not clear the user32 structure
+allocated on the stack as the first thing on function entry.
+
+As a result, the dump.u_comm, dump.u_ar0 and dump.signal which get
+assigned before the clearing, get overwritten.
+
+Rename that function to fill_dump() to make it clear what it does and
+call it first thing.
+
+This was caught while staring at a patch by Derek Robson
+<robsonde@gmail.com>.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Derek Robson <robsonde@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Michael Matz <matz@suse.de>
+Cc: x86@kernel.org
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/20190202005512.3144-1-robsonde@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/ia32/ia32_aout.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/ia32/ia32_aout.c
++++ b/arch/x86/ia32/ia32_aout.c
+@@ -51,7 +51,7 @@ static unsigned long get_dr(int n)
+ /*
+ * fill in the user structure for a core dump..
+ */
+-static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
++static void fill_dump(struct pt_regs *regs, struct user32 *dump)
+ {
+ u32 fs, gs;
+ memset(dump, 0, sizeof(*dump));
+@@ -157,10 +157,12 @@ static int aout_core_dump(struct coredum
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ has_dumped = 1;
++
++ fill_dump(cprm->regs, &dump);
++
+ strncpy(dump.u_comm, current->comm, sizeof(current->comm));
+ dump.u_ar0 = offsetof(struct user32, regs);
+ dump.signal = cprm->siginfo->si_signo;
+- dump_thread32(cprm->regs, &dump);
+
+ /*
+ * If the size of the dump file exceeds the rlimit, then see
--- /dev/null
+From f331e766c4be33f4338574f3c9f7f77e98ab4571 Mon Sep 17 00:00:00 2001
+From: Hedi Berriche <hedi.berriche@hpe.com>
+Date: Wed, 13 Feb 2019 19:34:13 +0000
+Subject: x86/platform/UV: Use efi_runtime_lock to serialise BIOS calls
+
+From: Hedi Berriche <hedi.berriche@hpe.com>
+
+commit f331e766c4be33f4338574f3c9f7f77e98ab4571 upstream.
+
+Calls into UV firmware must be protected against concurrency, expose the
+efi_runtime_lock to the UV platform, and use it to serialise UV BIOS
+calls.
+
+Signed-off-by: Hedi Berriche <hedi.berriche@hpe.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Russ Anderson <rja@hpe.com>
+Reviewed-by: Dimitri Sivanich <sivanich@hpe.com>
+Reviewed-by: Mike Travis <mike.travis@hpe.com>
+Cc: Andy Shevchenko <andy@infradead.org>
+Cc: Bhupesh Sharma <bhsharma@redhat.com>
+Cc: Darren Hart <dvhart@infradead.org>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: linux-efi <linux-efi@vger.kernel.org>
+Cc: platform-driver-x86@vger.kernel.org
+Cc: stable@vger.kernel.org # v4.9+
+Cc: Steve Wahl <steve.wahl@hpe.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: x86-ml <x86@kernel.org>
+Link: https://lkml.kernel.org/r/20190213193413.25560-5-hedi.berriche@hpe.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/uv/bios.h | 8 +++++++-
+ arch/x86/platform/uv/bios_uv.c | 23 +++++++++++++++++++++--
+ drivers/firmware/efi/runtime-wrappers.c | 7 +++++++
+ 3 files changed, 35 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/uv/bios.h
++++ b/arch/x86/include/asm/uv/bios.h
+@@ -48,7 +48,8 @@ enum {
+ BIOS_STATUS_SUCCESS = 0,
+ BIOS_STATUS_UNIMPLEMENTED = -ENOSYS,
+ BIOS_STATUS_EINVAL = -EINVAL,
+- BIOS_STATUS_UNAVAIL = -EBUSY
++ BIOS_STATUS_UNAVAIL = -EBUSY,
++ BIOS_STATUS_ABORT = -EINTR,
+ };
+
+ /* Address map parameters */
+@@ -167,4 +168,9 @@ extern long system_serial_number;
+
+ extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
+
++/*
++ * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details
++ */
++extern struct semaphore __efi_uv_runtime_lock;
++
+ #endif /* _ASM_X86_UV_BIOS_H */
+--- a/arch/x86/platform/uv/bios_uv.c
++++ b/arch/x86/platform/uv/bios_uv.c
+@@ -29,7 +29,8 @@
+
+ struct uv_systab *uv_systab;
+
+-s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
++static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
++ u64 a4, u64 a5)
+ {
+ struct uv_systab *tab = uv_systab;
+ s64 ret;
+@@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which,
+
+ return ret;
+ }
++
++s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
++{
++ s64 ret;
++
++ if (down_interruptible(&__efi_uv_runtime_lock))
++ return BIOS_STATUS_ABORT;
++
++ ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
++ up(&__efi_uv_runtime_lock);
++
++ return ret;
++}
+ EXPORT_SYMBOL_GPL(uv_bios_call);
+
+ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+@@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cm
+ unsigned long bios_flags;
+ s64 ret;
+
++ if (down_interruptible(&__efi_uv_runtime_lock))
++ return BIOS_STATUS_ABORT;
++
+ local_irq_save(bios_flags);
+- ret = uv_bios_call(which, a1, a2, a3, a4, a5);
++ ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
+ local_irq_restore(bios_flags);
+
++ up(&__efi_uv_runtime_lock);
++
+ return ret;
+ }
+
+--- a/drivers/firmware/efi/runtime-wrappers.c
++++ b/drivers/firmware/efi/runtime-wrappers.c
+@@ -147,6 +147,13 @@ void efi_call_virt_check_flags(unsigned
+ static DEFINE_SEMAPHORE(efi_runtime_lock);
+
+ /*
++ * Expose the EFI runtime lock to the UV platform
++ */
++#ifdef CONFIG_X86_UV
++extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
++#endif
++
++/*
+ * Calls the appropriate efi_runtime_service() with the appropriate
+ * arguments.
+ *