--- /dev/null
+From 587562d0c7cd6861f4f90a2eb811cccb1a376f5f Mon Sep 17 00:00:00 2001
+From: Ilya Dryomov <idryomov@gmail.com>
+Date: Wed, 26 Sep 2018 14:35:50 +0200
+Subject: blk-mq: I/O and timer unplugs are inverted in blktrace
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+commit 587562d0c7cd6861f4f90a2eb811cccb1a376f5f upstream.
+
+trace_block_unplug() takes true for explicit unplugs and false for
+implicit unplugs. schedule() unplugs are implicit and should be
+reported as timer unplugs. While correct in the legacy code, this has
+been inverted in blk-mq since 4.11.
+
+Cc: stable@vger.kernel.org
+Fixes: bd166ef183c2 ("blk-mq-sched: add framework for MQ capable IO schedulers")
+Reviewed-by: Omar Sandoval <osandov@fb.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-mq.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1512,7 +1512,7 @@ void blk_mq_flush_plug_list(struct blk_p
+ BUG_ON(!rq->q);
+ if (rq->mq_ctx != this_ctx) {
+ if (this_ctx) {
+- trace_block_unplug(this_q, depth, from_schedule);
++ trace_block_unplug(this_q, depth, !from_schedule);
+ blk_mq_sched_insert_requests(this_q, this_ctx,
+ &ctx_list,
+ from_schedule);
+@@ -1532,7 +1532,7 @@ void blk_mq_flush_plug_list(struct blk_p
+ * on 'ctx_list'. Do those.
+ */
+ if (this_ctx) {
+- trace_block_unplug(this_q, depth, from_schedule);
++ trace_block_unplug(this_q, depth, !from_schedule);
+ blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
+ from_schedule);
+ }
--- /dev/null
+From 52bf4a900d9cede3eb14982d0f2c5e6db6d97cc3 Mon Sep 17 00:00:00 2001
+From: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Date: Wed, 25 Apr 2018 12:14:39 +0200
+Subject: clocksource/drivers/timer-atmel-pit: Properly handle error cases
+
+From: Alexandre Belloni <alexandre.belloni@bootlin.com>
+
+commit 52bf4a900d9cede3eb14982d0f2c5e6db6d97cc3 upstream.
+
+The smatch utility reports a possible leak:
+
+smatch warnings:
+drivers/clocksource/timer-atmel-pit.c:183 at91sam926x_pit_dt_init() warn: possible memory leak of 'data'
+
+Ensure data is freed before exiting with an error.
+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/clocksource/timer-atmel-pit.c | 20 ++++++++++++++------
+ 1 file changed, 14 insertions(+), 6 deletions(-)
+
+--- a/drivers/clocksource/timer-atmel-pit.c
++++ b/drivers/clocksource/timer-atmel-pit.c
+@@ -180,26 +180,29 @@ static int __init at91sam926x_pit_dt_ini
+ data->base = of_iomap(node, 0);
+ if (!data->base) {
+ pr_err("Could not map PIT address\n");
+- return -ENXIO;
++ ret = -ENXIO;
++ goto exit;
+ }
+
+ data->mck = of_clk_get(node, 0);
+ if (IS_ERR(data->mck)) {
+ pr_err("Unable to get mck clk\n");
+- return PTR_ERR(data->mck);
++ ret = PTR_ERR(data->mck);
++ goto exit;
+ }
+
+ ret = clk_prepare_enable(data->mck);
+ if (ret) {
+ pr_err("Unable to enable mck\n");
+- return ret;
++ goto exit;
+ }
+
+ /* Get the interrupts property */
+ data->irq = irq_of_parse_and_map(node, 0);
+ if (!data->irq) {
+ pr_err("Unable to get IRQ from DT\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto exit;
+ }
+
+ /*
+@@ -227,7 +230,7 @@ static int __init at91sam926x_pit_dt_ini
+ ret = clocksource_register_hz(&data->clksrc, pit_rate);
+ if (ret) {
+ pr_err("Failed to register clocksource\n");
+- return ret;
++ goto exit;
+ }
+
+ /* Set up irq handler */
+@@ -236,7 +239,8 @@ static int __init at91sam926x_pit_dt_ini
+ "at91_tick", data);
+ if (ret) {
+ pr_err("Unable to setup IRQ\n");
+- return ret;
++ clocksource_unregister(&data->clksrc);
++ goto exit;
+ }
+
+ /* Set up and register clockevents */
+@@ -254,6 +258,10 @@ static int __init at91sam926x_pit_dt_ini
+ clockevents_register_device(&data->clkevt);
+
+ return 0;
++
++exit:
++ kfree(data);
++ return ret;
+ }
+ TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
+ at91sam926x_pit_dt_init);
--- /dev/null
+From 5d07384a666d4b2f781dc056bfeec2c27fbdf383 Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Tue, 25 Sep 2018 20:56:02 -0400
+Subject: dm cache: fix resize crash if user doesn't reload cache table
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit 5d07384a666d4b2f781dc056bfeec2c27fbdf383 upstream.
+
+A reload of the cache's DM table is needed during resize because
+otherwise a crash will occur when attempting to access smq policy
+entries associated with the portion of the cache that was recently
+extended.
+
+The reason is cache-size based data structures in the policy will not be
+resized, the only way to safely extend the cache is to allow for a
+proper cache policy initialization that occurs when the cache table is
+loaded. For example the smq policy's space_init(), init_allocator(),
+calc_hotspot_params() must be sized based on the extended cache size.
+
+The fix for this is to disallow cache resizes of this pattern:
+1) suspend "cache" target's device
+2) resize the fast device used for the cache
+3) resume "cache" target's device
+
+Instead, the last step must be a full reload of the cache's DM table.
+
+Fixes: 66a636356 ("dm cache: add stochastic-multi-queue (smq) policy")
+Cc: stable@vger.kernel.org
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-target.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -3097,8 +3097,13 @@ static dm_cblock_t get_cache_dev_size(st
+
+ static bool can_resize(struct cache *cache, dm_cblock_t new_size)
+ {
+- if (from_cblock(new_size) > from_cblock(cache->cache_size))
+- return true;
++ if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
++ if (cache->sized) {
++ DMERR("%s: unable to extend cache due to missing cache table reload",
++ cache_device_name(cache));
++ return false;
++ }
++ }
+
+ /*
+ * We can't drop a dirty block when shrinking the cache.
--- /dev/null
+From 4561ffca88c546f96367f94b8f1e4715a9c62314 Mon Sep 17 00:00:00 2001
+From: Joe Thornber <ejt@redhat.com>
+Date: Mon, 24 Sep 2018 16:19:30 -0400
+Subject: dm cache metadata: ignore hints array being too small during resize
+
+From: Joe Thornber <ejt@redhat.com>
+
+commit 4561ffca88c546f96367f94b8f1e4715a9c62314 upstream.
+
+Commit fd2fa9541 ("dm cache metadata: save in-core policy_hint_size to
+on-disk superblock") enabled previously written policy hints to be
+used after a cache is reactivated. But in doing so the cache
+metadata's hint array was left exposed to out of bounds access because
+on resize the metadata's on-disk hint array wasn't ever extended.
+
+Fix this by ignoring that there are no on-disk hints associated with the
+newly added cache blocks. An expanded on-disk hint array is later
+rewritten upon the next clean shutdown of the cache.
+
+Fixes: fd2fa9541 ("dm cache metadata: save in-core policy_hint_size to on-disk superblock")
+Cc: stable@vger.kernel.org
+Signed-off-by: Joe Thornber <ejt@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-cache-metadata.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -1454,8 +1454,8 @@ static int __load_mappings(struct dm_cac
+ if (hints_valid) {
+ r = dm_array_cursor_next(&cmd->hint_cursor);
+ if (r) {
+- DMERR("dm_array_cursor_next for hint failed");
+- goto out;
++ dm_array_cursor_end(&cmd->hint_cursor);
++ hints_valid = false;
+ }
+ }
+
--- /dev/null
+From 61ea6f5831974ebd1a57baffd7cc30600a2e26fc Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Thu, 27 Sep 2018 20:48:39 +0800
+Subject: drm/amdgpu: Fix vce work queue was not cancelled when suspend
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Rex Zhu <Rex.Zhu@amd.com>
+
+commit 61ea6f5831974ebd1a57baffd7cc30600a2e26fc upstream.
+
+The vce cancel_delayed_work_sync() was never called, because the
+driver only called the function in an error path.
+
+This caused an A+A suspend hang when runtime pm was enabled:
+the idle work queue would access the SMU after the dGPU had been
+suspended, which hung the SMU and also woke the dGPU back up.
+With the SMU hung, the subsequent dGPU resume would then fail.
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 3 ++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 4 ++--
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -231,6 +231,8 @@ int amdgpu_vce_suspend(struct amdgpu_dev
+ {
+ int i;
+
++ cancel_delayed_work_sync(&adev->vce.idle_work);
++
+ if (adev->vce.vcpu_bo == NULL)
+ return 0;
+
+@@ -241,7 +243,6 @@ int amdgpu_vce_suspend(struct amdgpu_dev
+ if (i == AMDGPU_MAX_VCE_HANDLES)
+ return 0;
+
+- cancel_delayed_work_sync(&adev->vce.idle_work);
+ /* TODO: suspending running encoding sessions isn't supported */
+ return -EINVAL;
+ }
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -155,11 +155,11 @@ int amdgpu_vcn_suspend(struct amdgpu_dev
+ unsigned size;
+ void *ptr;
+
++ cancel_delayed_work_sync(&adev->vcn.idle_work);
++
+ if (adev->vcn.vcpu_bo == NULL)
+ return 0;
+
+- cancel_delayed_work_sync(&adev->vcn.idle_work);
+-
+ size = amdgpu_bo_size(adev->vcn.vcpu_bo);
+ ptr = adev->vcn.cpu_addr;
+
--- /dev/null
+From 337fe9f5c1e7de1f391c6a692531379d2aa2ee11 Mon Sep 17 00:00:00 2001
+From: Jason Ekstrand <jason@jlekstrand.net>
+Date: Wed, 26 Sep 2018 02:17:03 -0500
+Subject: drm/syncobj: Don't leak fences when WAIT_FOR_SUBMIT is set
+
+From: Jason Ekstrand <jason@jlekstrand.net>
+
+commit 337fe9f5c1e7de1f391c6a692531379d2aa2ee11 upstream.
+
+We attempt to get fences earlier in the hopes that everything will
+already have fences and no callbacks will be needed. If we do succeed
+in getting a fence, getting one a second time will result in a duplicate
+ref with no unref. This is causing memory leaks in Vulkan applications
+that create a lot of fences; playing for a few hours can, apparently,
+bring down the system.
+
+Cc: stable@vger.kernel.org
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107899
+Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
+Signed-off-by: Sean Paul <seanpaul@chromium.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20180926071703.15257-1-jason.ekstrand@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_syncobj.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/gpu/drm/drm_syncobj.c
++++ b/drivers/gpu/drm/drm_syncobj.c
+@@ -96,6 +96,8 @@ static int drm_syncobj_fence_get_or_add_
+ {
+ int ret;
+
++ WARN_ON(*fence);
++
+ *fence = drm_syncobj_fence_get(syncobj);
+ if (*fence)
+ return 1;
+@@ -656,6 +658,9 @@ static signed long drm_syncobj_array_wai
+
+ if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ for (i = 0; i < count; ++i) {
++ if (entries[i].fence)
++ continue;
++
+ drm_syncobj_fence_get_or_add_callback(syncobjs[i],
+ &entries[i].fence,
+ &entries[i].syncobj_cb,
--- /dev/null
+From 1bafcbf59fed92af58955024452f45430d3898c5 Mon Sep 17 00:00:00 2001
+From: Tomi Valkeinen <tomi.valkeinen@ti.com>
+Date: Wed, 26 Sep 2018 18:11:22 +0200
+Subject: fbdev/omapfb: fix omapfb_memory_read infoleak
+
+From: Tomi Valkeinen <tomi.valkeinen@ti.com>
+
+commit 1bafcbf59fed92af58955024452f45430d3898c5 upstream.
+
+OMAPFB_MEMORY_READ ioctl reads pixels from the LCD's memory and copies
+them to a userspace buffer. The code has two issues:
+
+- The user provided width and height could be large enough to overflow
+ the calculations
+- The copy_to_user() can copy uninitialized memory to the userspace,
+ which might contain sensitive kernel information.
+
+Fix these by limiting the width & height parameters, and only copying
+the amount of data that we actually received from the LCD.
+
+Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
+Reported-by: Jann Horn <jannh@google.com>
+Cc: stable@vger.kernel.org
+Cc: security@kernel.org
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Jann Horn <jannh@google.com>
+Cc: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
++++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+@@ -496,6 +496,9 @@ static int omapfb_memory_read(struct fb_
+ if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
+ return -EFAULT;
+
++ if (mr->w > 4096 || mr->h > 4096)
++ return -EINVAL;
++
+ if (mr->w * mr->h * 3 > mr->buffer_size)
+ return -EINVAL;
+
+@@ -509,7 +512,7 @@ static int omapfb_memory_read(struct fb_
+ mr->x, mr->y, mr->w, mr->h);
+
+ if (r > 0) {
+- if (copy_to_user(mr->buffer, buf, mr->buffer_size))
++ if (copy_to_user(mr->buffer, buf, r))
+ r = -EFAULT;
+ }
+
--- /dev/null
+From daa07cbc9ae3da2d61b7ce900c0b9107d134f2c1 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Tue, 25 Sep 2018 13:20:00 -0700
+Subject: KVM: x86: fix L1TF's MMIO GFN calculation
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit daa07cbc9ae3da2d61b7ce900c0b9107d134f2c1 upstream.
+
+One defense against L1TF in KVM is to always set the upper five bits
+of the *legal* physical address in the SPTEs for non-present and
+reserved SPTEs, e.g. MMIO SPTEs. In the MMIO case, the GFN of the
+MMIO SPTE may overlap with the upper five bits that are being usurped
+to defend against L1TF. To preserve the GFN, the bits of the GFN that
+overlap with the repurposed bits are shifted left into the reserved
+bits, i.e. the GFN in the SPTE will be split into high and low parts.
+When retrieving the GFN from the MMIO SPTE, e.g. to check for an MMIO
+access, get_mmio_spte_gfn() unshifts the affected bits and restores
+the original GFN for comparison. Unfortunately, get_mmio_spte_gfn()
+neglects to mask off the reserved bits in the SPTE that were used to
+store the upper chunk of the GFN. As a result, KVM fails to detect
+MMIO accesses whose GPA overlaps the repurposed bits, which in turn
+causes guest panics and hangs.
+
+Fix the bug by generating a mask that covers the lower chunk of the
+GFN, i.e. the bits that aren't shifted by the L1TF mitigation. The
+alternative approach would be to explicitly zero the five reserved
+bits that are used to store the upper chunk of the GFN, but that
+requires additional run-time computation and makes an already-ugly
+bit of code even more inscrutable.
+
+I considered adding a WARN_ON_ONCE(low_phys_bits-1 <= PAGE_SHIFT) to
+warn if GENMASK_ULL() generated a nonsensical value, but that seemed
+silly since that would mean a system that supports VMX has less than
+18 bits of physical address space...
+
+Reported-by: Sakari Ailus <sakari.ailus@iki.fi>
+Fixes: d9b47449c1a1 ("kvm: x86: Set highest physical address bits in non-present/reserved SPTEs")
+Cc: Junaid Shahid <junaids@google.com>
+Cc: Jim Mattson <jmattson@google.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Junaid Shahid <junaids@google.com>
+Tested-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/mmu.c | 24 ++++++++++++++++++++----
+ 1 file changed, 20 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -231,6 +231,17 @@ static u64 __read_mostly shadow_nonprese
+ */
+ static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
+
++/*
++ * In some cases, we need to preserve the GFN of a non-present or reserved
++ * SPTE when we usurp the upper five bits of the physical address space to
++ * defend against L1TF, e.g. for MMIO SPTEs. To preserve the GFN, we'll
++ * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
++ * left into the reserved bits, i.e. the GFN in the SPTE will be split into
++ * high and low parts. This mask covers the lower bits of the GFN.
++ */
++static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
++
++
+ static void mmu_spte_set(u64 *sptep, u64 spte);
+ static void mmu_free_roots(struct kvm_vcpu *vcpu);
+
+@@ -338,9 +349,7 @@ static bool is_mmio_spte(u64 spte)
+
+ static gfn_t get_mmio_spte_gfn(u64 spte)
+ {
+- u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
+- shadow_nonpresent_or_rsvd_mask;
+- u64 gpa = spte & ~mask;
++ u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
+
+ gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
+ & shadow_nonpresent_or_rsvd_mask;
+@@ -404,6 +413,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes)
+
+ static void kvm_mmu_reset_all_pte_masks(void)
+ {
++ u8 low_phys_bits;
++
+ shadow_user_mask = 0;
+ shadow_accessed_mask = 0;
+ shadow_dirty_mask = 0;
+@@ -418,12 +429,17 @@ static void kvm_mmu_reset_all_pte_masks(
+ * appropriate mask to guard against L1TF attacks. Otherwise, it is
+ * assumed that the CPU is not vulnerable to L1TF.
+ */
++ low_phys_bits = boot_cpu_data.x86_phys_bits;
+ if (boot_cpu_data.x86_phys_bits <
+- 52 - shadow_nonpresent_or_rsvd_mask_len)
++ 52 - shadow_nonpresent_or_rsvd_mask_len) {
+ shadow_nonpresent_or_rsvd_mask =
+ rsvd_bits(boot_cpu_data.x86_phys_bits -
+ shadow_nonpresent_or_rsvd_mask_len,
+ boot_cpu_data.x86_phys_bits - 1);
++ low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
++ }
++ shadow_nonpresent_or_rsvd_lower_gfn_mask =
++ GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
+ }
+
+ static int is_cpuid_PSE36(void)
--- /dev/null
+From 211710ca74adf790b46ab3867fcce8047b573cd1 Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sat, 29 Sep 2018 16:01:58 +0200
+Subject: mac80211: fix setting IEEE80211_KEY_FLAG_RX_MGMT for AP mode keys
+
+From: Felix Fietkau <nbd@nbd.name>
+
+commit 211710ca74adf790b46ab3867fcce8047b573cd1 upstream.
+
+key->sta is only valid after ieee80211_key_link, which is called later
+in this function. Because of that, the IEEE80211_KEY_FLAG_RX_MGMT is
+never set when management frame protection is enabled.
+
+Fixes: e548c49e6dc6b ("mac80211: add key flag for management keys")
+Cc: stable@vger.kernel.org
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/mac80211/cfg.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -426,7 +426,7 @@ static int ieee80211_add_key(struct wiph
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ /* Keys without a station are used for TX only */
+- if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
++ if (sta && test_sta_flag(sta, WLAN_STA_MFP))
+ key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
+ break;
+ case NL80211_IFTYPE_ADHOC:
--- /dev/null
+From 017b1660df89f5fb4bfe66c34e35f7d2031100c7 Mon Sep 17 00:00:00 2001
+From: Mike Kravetz <mike.kravetz@oracle.com>
+Date: Fri, 5 Oct 2018 15:51:29 -0700
+Subject: mm: migration: fix migration of huge PMD shared pages
+
+From: Mike Kravetz <mike.kravetz@oracle.com>
+
+commit 017b1660df89f5fb4bfe66c34e35f7d2031100c7 upstream.
+
+The page migration code employs try_to_unmap() to try and unmap the source
+page. This is accomplished by using rmap_walk to find all vmas where the
+page is mapped. This search stops when page mapcount is zero. For shared
+PMD huge pages, the page map count is always 1 no matter the number of
+mappings. Shared mappings are tracked via the reference count of the PMD
+page. Therefore, try_to_unmap stops prematurely and does not completely
+unmap all mappings of the source page.
+
+This problem can result in data corruption as writes to the original
+source page can happen after contents of the page are copied to the target
+page. Hence, data is lost.
+
+This problem was originally seen as DB corruption of shared global areas
+after a huge page was soft offlined due to ECC memory errors. DB
+developers noticed they could reproduce the issue by (hotplug) offlining
+memory used to back huge pages. A simple testcase can reproduce the
+problem by creating a shared PMD mapping (note that this must be at least
+PUD_SIZE in size and PUD_SIZE aligned (1GB on x86)), and using
+migrate_pages() to migrate process pages between nodes while continually
+writing to the huge pages being migrated.
+
+To fix, have the try_to_unmap_one routine check for huge PMD sharing by
+calling huge_pmd_unshare for hugetlbfs huge pages. If it is a shared
+mapping it will be 'unshared' which removes the page table entry and drops
+the reference on the PMD page. After this, flush caches and TLB.
+
+mmu notifiers are called before locking page tables, but we can not be
+sure of PMD sharing until page tables are locked. Therefore, check for
+the possibility of PMD sharing before locking so that notifiers can
+prepare for the worst possible case.
+
+Link: http://lkml.kernel.org/r/20180823205917.16297-2-mike.kravetz@oracle.com
+[mike.kravetz@oracle.com: make _range_in_vma() a static inline]
+ Link: http://lkml.kernel.org/r/6063f215-a5c8-2f0c-465a-2c515ddc952d@oracle.com
+Fixes: 39dde65c9940 ("shared page table for hugetlb page")
+Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Davidlohr Bueso <dave@stgolabs.net>
+Cc: Jerome Glisse <jglisse@redhat.com>
+Cc: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/hugetlb.h | 14 ++++++++++++++
+ include/linux/mm.h | 6 ++++++
+ mm/hugetlb.c | 37 +++++++++++++++++++++++++++++++++++--
+ mm/rmap.c | 42 +++++++++++++++++++++++++++++++++++++++---
+ 4 files changed, 94 insertions(+), 5 deletions(-)
+
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -140,6 +140,8 @@ pte_t *huge_pte_alloc(struct mm_struct *
+ pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz);
+ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
++void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
++ unsigned long *start, unsigned long *end);
+ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+ int write);
+ struct page *follow_huge_pd(struct vm_area_struct *vma,
+@@ -169,6 +171,18 @@ static inline unsigned long hugetlb_tota
+ return 0;
+ }
+
++static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
++ pte_t *ptep)
++{
++ return 0;
++}
++
++static inline void adjust_range_if_pmd_sharing_possible(
++ struct vm_area_struct *vma,
++ unsigned long *start, unsigned long *end)
++{
++}
++
+ #define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
+ #define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
+ #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2322,6 +2322,12 @@ static inline struct vm_area_struct *fin
+ return vma;
+ }
+
++static inline bool range_in_vma(struct vm_area_struct *vma,
++ unsigned long start, unsigned long end)
++{
++ return (vma && vma->vm_start <= start && end <= vma->vm_end);
++}
++
+ #ifdef CONFIG_MMU
+ pgprot_t vm_get_page_prot(unsigned long vm_flags);
+ void vma_set_page_prot(struct vm_area_struct *vma);
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4517,13 +4517,41 @@ static bool vma_shareable(struct vm_area
+ /*
+ * check on proper vm_flags and page table alignment
+ */
+- if (vma->vm_flags & VM_MAYSHARE &&
+- vma->vm_start <= base && end <= vma->vm_end)
++ if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
+ return true;
+ return false;
+ }
+
+ /*
++ * Determine if start,end range within vma could be mapped by shared pmd.
++ * If yes, adjust start and end to cover range associated with possible
++ * shared pmd mappings.
++ */
++void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
++ unsigned long *start, unsigned long *end)
++{
++ unsigned long check_addr = *start;
++
++ if (!(vma->vm_flags & VM_MAYSHARE))
++ return;
++
++ for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
++ unsigned long a_start = check_addr & PUD_MASK;
++ unsigned long a_end = a_start + PUD_SIZE;
++
++ /*
++ * If sharing is possible, adjust start/end if necessary.
++ */
++ if (range_in_vma(vma, a_start, a_end)) {
++ if (a_start < *start)
++ *start = a_start;
++ if (a_end > *end)
++ *end = a_end;
++ }
++ }
++}
++
++/*
+ * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
+ * and returns the corresponding pte. While this is not necessary for the
+ * !shared pmd case because we can allocate the pmd later as well, it makes the
+@@ -4620,6 +4648,11 @@ int huge_pmd_unshare(struct mm_struct *m
+ {
+ return 0;
+ }
++
++void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
++ unsigned long *start, unsigned long *end)
++{
++}
+ #define want_pmd_share() (0)
+ #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1358,11 +1358,21 @@ static bool try_to_unmap_one(struct page
+ }
+
+ /*
+- * We have to assume the worse case ie pmd for invalidation. Note that
+- * the page can not be free in this function as call of try_to_unmap()
+- * must hold a reference on the page.
++ * For THP, we have to assume the worse case ie pmd for invalidation.
++ * For hugetlb, it could be much worse if we need to do pud
++ * invalidation in the case of pmd sharing.
++ *
++ * Note that the page can not be free in this function as call of
++ * try_to_unmap() must hold a reference on the page.
+ */
+ end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
++ if (PageHuge(page)) {
++ /*
++ * If sharing is possible, start and end will be adjusted
++ * accordingly.
++ */
++ adjust_range_if_pmd_sharing_possible(vma, &start, &end);
++ }
+ mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
+
+ while (page_vma_mapped_walk(&pvmw)) {
+@@ -1408,6 +1418,32 @@ static bool try_to_unmap_one(struct page
+ subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
+ address = pvmw.address;
+
++ if (PageHuge(page)) {
++ if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
++ /*
++ * huge_pmd_unshare unmapped an entire PMD
++ * page. There is no way of knowing exactly
++ * which PMDs may be cached for this mm, so
++ * we must flush them all. start/end were
++ * already adjusted above to cover this range.
++ */
++ flush_cache_range(vma, start, end);
++ flush_tlb_range(vma, start, end);
++ mmu_notifier_invalidate_range(mm, start, end);
++
++ /*
++ * The ref count of the PMD page was dropped
++ * which is part of the way map counting
++ * is done for shared PMDs. Return 'true'
++ * here. When there is no other sharing,
++ * huge_pmd_unshare returns false and we will
++ * unmap the actual page and drop map count
++ * to zero.
++ */
++ page_vma_mapped_walk_done(&pvmw);
++ break;
++ }
++ }
+
+ if (IS_ENABLED(CONFIG_MIGRATION) &&
+ (flags & TTU_MIGRATION) &&
--- /dev/null
+From e125fe405abedc1dc8a5b2229b80ee91c1434015 Mon Sep 17 00:00:00 2001
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Fri, 5 Oct 2018 15:51:41 -0700
+Subject: mm, thp: fix mlocking THP page with migration enabled
+
+From: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+
+commit e125fe405abedc1dc8a5b2229b80ee91c1434015 upstream.
+
+A transparent huge page is represented by a single entry on an LRU list.
+Therefore, we can only make unevictable an entire compound page, not
+individual subpages.
+
+If a user tries to mlock() part of a huge page, we want the rest of the
+page to be reclaimable.
+
+We handle this by keeping PTE-mapped huge pages on normal LRU lists: the
+PMD on border of VM_LOCKED VMA will be split into PTE table.
+
+Introduction of THP migration breaks[1] the rules around mlocking THP
+pages. If we had a single PMD mapping of the page in mlocked VMA, the
+page will get mlocked, regardless of PTE mappings of the page.
+
+For tmpfs/shmem it's easy to fix by checking PageDoubleMap() in
+remove_migration_pmd().
+
+Anon THP pages can only be shared between processes via fork(). Mlocked
+page can only be shared if parent mlocked it before forking, otherwise CoW
+will be triggered on mlock().
+
+For Anon-THP, we can fix the issue by munlocking the page on removing PTE
+migration entry for the page. PTEs for the page will always come after
+mlocked PMD: rmap walks VMAs from oldest to newest.
+
+Test-case:
+
+ #include <unistd.h>
+ #include <sys/mman.h>
+ #include <sys/wait.h>
+ #include <linux/mempolicy.h>
+ #include <numaif.h>
+
+ int main(void)
+ {
+ unsigned long nodemask = 4;
+ void *addr;
+
+ addr = mmap((void *)0x20000000UL, 2UL << 20, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);
+
+ if (fork()) {
+ wait(NULL);
+ return 0;
+ }
+
+ mlock(addr, 4UL << 10);
+ mbind(addr, 2UL << 20, MPOL_PREFERRED | MPOL_F_RELATIVE_NODES,
+ &nodemask, 4, MPOL_MF_MOVE);
+
+ return 0;
+ }
+
+[1] https://lkml.kernel.org/r/CAOMGZ=G52R-30rZvhGxEbkTw7rLLwBGadVYeo--iizcD3upL3A@mail.gmail.com
+
+Link: http://lkml.kernel.org/r/20180917133816.43995-1-kirill.shutemov@linux.intel.com
+Fixes: 616b8371539a ("mm: thp: enable thp migration in generic path")
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Reported-by: Vegard Nossum <vegard.nossum@oracle.com>
+Reviewed-by: Zi Yan <zi.yan@cs.rutgers.edu>
+Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: <stable@vger.kernel.org> [4.14+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/huge_memory.c | 2 +-
+ mm/migrate.c | 3 +++
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2886,7 +2886,7 @@ void remove_migration_pmd(struct page_vm
+ flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
+ page_add_anon_rmap(new, vma, mmun_start, true);
+ set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
+- if (vma->vm_flags & VM_LOCKED)
++ if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
+ mlock_vma_page(new);
+ update_mmu_cache_pmd(vma, address, pvmw->pmd);
+ }
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -274,6 +274,9 @@ static bool remove_migration_pte(struct
+ if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
+ mlock_vma_page(new);
+
++ if (PageTransHuge(page) && PageMlocked(page))
++ clear_page_mlock(page);
++
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, pvmw.address, pvmw.pte);
+ }
--- /dev/null
+From 58bc4c34d249bf1bc50730a9a209139347cfacfe Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Fri, 5 Oct 2018 15:52:07 -0700
+Subject: mm/vmstat.c: skip NR_TLB_REMOTE_FLUSH* properly
+
+From: Jann Horn <jannh@google.com>
+
+commit 58bc4c34d249bf1bc50730a9a209139347cfacfe upstream.
+
+5dd0b16cdaff ("mm/vmstat: Make NR_TLB_REMOTE_FLUSH_RECEIVED available even
+on UP") made the availability of the NR_TLB_REMOTE_FLUSH* counters inside
+the kernel unconditional to reduce #ifdef soup, but (either to avoid
+showing dummy zero counters to userspace, or because that code was missed)
+didn't update the vmstat_array, meaning that all following counters would
+be shown with incorrect values.
+
+This only affects kernel builds with
+CONFIG_VM_EVENT_COUNTERS=y && CONFIG_DEBUG_TLBFLUSH=y && CONFIG_SMP=n.
+
+Link: http://lkml.kernel.org/r/20181001143138.95119-2-jannh@google.com
+Fixes: 5dd0b16cdaff ("mm/vmstat: Make NR_TLB_REMOTE_FLUSH_RECEIVED available even on UP")
+Signed-off-by: Jann Horn <jannh@google.com>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Roman Gushchin <guro@fb.com>
+Cc: Davidlohr Bueso <dave@stgolabs.net>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Christoph Lameter <clameter@sgi.com>
+Cc: Kemi Wang <kemi.wang@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/vmstat.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -1203,6 +1203,9 @@ const char * const vmstat_text[] = {
+ #ifdef CONFIG_SMP
+ "nr_tlb_remote_flush",
+ "nr_tlb_remote_flush_received",
++#else
++ "", /* nr_tlb_remote_flush */
++ "", /* nr_tlb_remote_flush_received */
+ #endif /* CONFIG_SMP */
+ "nr_tlb_local_flush_all",
+ "nr_tlb_local_flush_one",
--- /dev/null
+From 083874549fdfefa629dfa752785e20427dde1511 Mon Sep 17 00:00:00 2001
+From: Daniel Drake <drake@endlessm.com>
+Date: Thu, 27 Sep 2018 15:47:33 -0500
+Subject: PCI: Reprogram bridge prefetch registers on resume
+
+From: Daniel Drake <drake@endlessm.com>
+
+commit 083874549fdfefa629dfa752785e20427dde1511 upstream.
+
+On 38+ Intel-based ASUS products, the NVIDIA GPU becomes unusable after S3
+suspend/resume. The affected products include multiple generations of
+NVIDIA GPUs and Intel SoCs. After resume, nouveau logs many errors such
+as:
+
+ fifo: fault 00 [READ] at 0000005555555000 engine 00 [GR] client 04
+ [HUB/FE] reason 4a [] on channel -1 [007fa91000 unknown]
+ DRM: failed to idle channel 0 [DRM]
+
+Similarly, the NVIDIA proprietary driver also fails after resume (black
+screen, 100% CPU usage in Xorg process). We shipped a sample to NVIDIA for
+diagnosis, and their response indicated that it's a problem with the parent
+PCI bridge (on the Intel SoC), not the GPU.
+
+Runtime suspend/resume works fine, only S3 suspend is affected.
+
+We found a workaround: on resume, rewrite the Intel PCI bridge
+'Prefetchable Base Upper 32 Bits' register (PCI_PREF_BASE_UPPER32). In the
+cases that I checked, this register has value 0 and we just have to rewrite
+that value.
+
+Linux already saves and restores PCI config space during suspend/resume,
+but this register was being skipped because upon resume, it already has
+value 0 (the correct, pre-suspend value).
+
+Intel appear to have previously acknowledged this behaviour and the
+requirement to rewrite this register:
+https://bugzilla.kernel.org/show_bug.cgi?id=116851#c23
+
+Based on that, rewrite the prefetch register values even when that appears
+unnecessary.
+
+We have confirmed this solution on all the affected models we have in-hands
+(X542UQ, UX533FD, X530UN, V272UN).
+
+Additionally, this solves an issue where r8169 MSI-X interrupts were broken
+after S3 suspend/resume on ASUS X441UAR. This issue was recently worked
+around in commit 7bb05b85bc2d ("r8169: don't use MSI-X on RTL8106e"). It
+also fixes the same issue on RTL6186evl/8111evl on an Aimfor-tech laptop
+that we had not yet patched. I suspect it will also fix the issue that was
+worked around in commit 7c53a722459c ("r8169: don't use MSI-X on
+RTL8168g").
+
+Thomas Martitz reports that this change also solves an issue where the AMD
+Radeon Polaris 10 GPU on the HP Zbook 14u G5 is unresponsive after S3
+suspend/resume.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=201069
+Signed-off-by: Daniel Drake <drake@endlessm.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-By: Peter Wu <peter@lekensteyn.nl>
+CC: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/pci.c | 27 +++++++++++++++++++--------
+ 1 file changed, 19 insertions(+), 8 deletions(-)
+
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1112,12 +1112,12 @@ int pci_save_state(struct pci_dev *dev)
+ EXPORT_SYMBOL(pci_save_state);
+
+ static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
+- u32 saved_val, int retry)
++ u32 saved_val, int retry, bool force)
+ {
+ u32 val;
+
+ pci_read_config_dword(pdev, offset, &val);
+- if (val == saved_val)
++ if (!force && val == saved_val)
+ return;
+
+ for (;;) {
+@@ -1136,25 +1136,36 @@ static void pci_restore_config_dword(str
+ }
+
+ static void pci_restore_config_space_range(struct pci_dev *pdev,
+- int start, int end, int retry)
++ int start, int end, int retry,
++ bool force)
+ {
+ int index;
+
+ for (index = end; index >= start; index--)
+ pci_restore_config_dword(pdev, 4 * index,
+ pdev->saved_config_space[index],
+- retry);
++ retry, force);
+ }
+
+ static void pci_restore_config_space(struct pci_dev *pdev)
+ {
+ if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
+- pci_restore_config_space_range(pdev, 10, 15, 0);
++ pci_restore_config_space_range(pdev, 10, 15, 0, false);
+ /* Restore BARs before the command register. */
+- pci_restore_config_space_range(pdev, 4, 9, 10);
+- pci_restore_config_space_range(pdev, 0, 3, 0);
++ pci_restore_config_space_range(pdev, 4, 9, 10, false);
++ pci_restore_config_space_range(pdev, 0, 3, 0, false);
++ } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
++ pci_restore_config_space_range(pdev, 12, 15, 0, false);
++
++ /*
++ * Force rewriting of prefetch registers to avoid S3 resume
++ * issues on Intel PCI bridges that occur when these
++ * registers are not explicitly written.
++ */
++ pci_restore_config_space_range(pdev, 9, 11, 0, true);
++ pci_restore_config_space_range(pdev, 0, 8, 0, false);
+ } else {
+- pci_restore_config_space_range(pdev, 0, 15, 0);
++ pci_restore_config_space_range(pdev, 0, 15, 0, false);
+ }
+ }
+
--- /dev/null
+From befb1b3c2703897c5b8ffb0044dc5d0e5f27c5d7 Mon Sep 17 00:00:00 2001
+From: Reinette Chatre <reinette.chatre@intel.com>
+Date: Wed, 19 Sep 2018 10:29:06 -0700
+Subject: perf/core: Add sanity check to deal with pinned event failure
+
+From: Reinette Chatre <reinette.chatre@intel.com>
+
+commit befb1b3c2703897c5b8ffb0044dc5d0e5f27c5d7 upstream.
+
+It is possible that a failure can occur during the scheduling of a
+pinned event. The initial portion of perf_event_read_local() contains
+the various error checks an event should pass before it can be
+considered valid. Ensure that the potential scheduling failure
+of a pinned event is checked for and have a credible error.
+
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: fenghua.yu@intel.com
+Cc: tony.luck@intel.com
+Cc: acme@kernel.org
+Cc: gavin.hindman@intel.com
+Cc: jithu.joseph@intel.com
+Cc: dave.hansen@intel.com
+Cc: hpa@zytor.com
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/6486385d1f30336e9973b24c8c65f5079543d3d3.1537377064.git.reinette.chatre@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/events/core.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -3757,6 +3757,12 @@ int perf_event_read_local(struct perf_ev
+ goto out;
+ }
+
++ /* If this is a pinned event it must be running on this CPU */
++ if (event->attr.pinned && event->oncpu != smp_processor_id()) {
++ ret = -EBUSY;
++ goto out;
++ }
++
+ /*
+ * If the event is currently on this CPU, its either a per-task event,
+ * or local to this CPU. Furthermore it means its ACTIVE (otherwise
--- /dev/null
+From 69e445ab8b66a9f30519842ef18be555d3ee9b51 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Thu, 4 Oct 2018 11:08:12 +0200
+Subject: PM / core: Clear the direct_complete flag on errors
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit 69e445ab8b66a9f30519842ef18be555d3ee9b51 upstream.
+
+If __device_suspend() runs asynchronously (in which case the device
+passed to it is in dpm_suspended_list at that point) and it returns
+early on an error or pending wakeup, and the power.direct_complete
+flag has been set for the device already, the subsequent
+device_resume() will be confused by that and it will call
+pm_runtime_enable() incorrectly, as runtime PM has not been
+disabled for the device by __device_suspend().
+
+To avoid that, clear power.direct_complete if __device_suspend()
+is not going to disable runtime PM for the device before returning.
+
+Fixes: aae4518b3124 (PM / sleep: Mechanism to avoid resuming runtime-suspended devices unnecessarily)
+Reported-by: Al Cooper <alcooperx@gmail.com>
+Tested-by: Al Cooper <alcooperx@gmail.com>
+Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
+Cc: 3.16+ <stable@vger.kernel.org> # 3.16+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/base/power/main.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -1462,8 +1462,10 @@ static int __device_suspend(struct devic
+
+ dpm_wait_for_subordinate(dev, async);
+
+- if (async_error)
++ if (async_error) {
++ dev->power.direct_complete = false;
+ goto Complete;
++ }
+
+ /*
+ * If a device configured to wake up the system from sleep states
+@@ -1475,6 +1477,7 @@ static int __device_suspend(struct devic
+ pm_wakeup_event(dev, 0);
+
+ if (pm_wakeup_pending()) {
++ dev->power.direct_complete = false;
+ async_error = -EBUSY;
+ goto Complete;
+ }
--- /dev/null
+From 7c03e7035ac1cf2a6165754e4f3a49c2f1977838 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Mon, 1 Oct 2018 12:52:16 -0700
+Subject: selftests/x86: Add clock_gettime() tests to test_vdso
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit 7c03e7035ac1cf2a6165754e4f3a49c2f1977838 upstream.
+
+Now that the vDSO implementation of clock_gettime() is getting
+reworked, add a selftest for it. This tests that its output is
+consistent with the syscall version.
+
+This is marked for stable to serve as a test for commit
+
+ 715bd9d12f84 ("x86/vdso: Fix asm constraints on vDSO syscall fallbacks")
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/082399674de2619b2befd8c0dde49b260605b126.1538422295.git.luto@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/testing/selftests/x86/test_vdso.c | 99 ++++++++++++++++++++++++++++++++
+ 1 file changed, 99 insertions(+)
+
+--- a/tools/testing/selftests/x86/test_vdso.c
++++ b/tools/testing/selftests/x86/test_vdso.c
+@@ -17,6 +17,7 @@
+ #include <errno.h>
+ #include <sched.h>
+ #include <stdbool.h>
++#include <limits.h>
+
+ #ifndef SYS_getcpu
+ # ifdef __x86_64__
+@@ -31,6 +32,10 @@
+
+ int nerrs = 0;
+
++typedef int (*vgettime_t)(clockid_t, struct timespec *);
++
++vgettime_t vdso_clock_gettime;
++
+ typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
+
+ getcpu_t vgetcpu;
+@@ -95,6 +100,10 @@ static void fill_function_pointers()
+ printf("Warning: failed to find getcpu in vDSO\n");
+
+ vgetcpu = (getcpu_t) vsyscall_getcpu();
++
++ vdso_clock_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
++ if (!vdso_clock_gettime)
++ printf("Warning: failed to find clock_gettime in vDSO\n");
+ }
+
+ static long sys_getcpu(unsigned * cpu, unsigned * node,
+@@ -103,6 +112,11 @@ static long sys_getcpu(unsigned * cpu, u
+ return syscall(__NR_getcpu, cpu, node, cache);
+ }
+
++static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
++{
++ return syscall(__NR_clock_gettime, id, ts);
++}
++
+ static void test_getcpu(void)
+ {
+ printf("[RUN]\tTesting getcpu...\n");
+@@ -155,10 +169,95 @@ static void test_getcpu(void)
+ }
+ }
+
++static bool ts_leq(const struct timespec *a, const struct timespec *b)
++{
++ if (a->tv_sec != b->tv_sec)
++ return a->tv_sec < b->tv_sec;
++ else
++ return a->tv_nsec <= b->tv_nsec;
++}
++
++static char const * const clocknames[] = {
++ [0] = "CLOCK_REALTIME",
++ [1] = "CLOCK_MONOTONIC",
++ [2] = "CLOCK_PROCESS_CPUTIME_ID",
++ [3] = "CLOCK_THREAD_CPUTIME_ID",
++ [4] = "CLOCK_MONOTONIC_RAW",
++ [5] = "CLOCK_REALTIME_COARSE",
++ [6] = "CLOCK_MONOTONIC_COARSE",
++ [7] = "CLOCK_BOOTTIME",
++ [8] = "CLOCK_REALTIME_ALARM",
++ [9] = "CLOCK_BOOTTIME_ALARM",
++ [10] = "CLOCK_SGI_CYCLE",
++ [11] = "CLOCK_TAI",
++};
++
++static void test_one_clock_gettime(int clock, const char *name)
++{
++ struct timespec start, vdso, end;
++ int vdso_ret, end_ret;
++
++ printf("[RUN]\tTesting clock_gettime for clock %s (%d)...\n", name, clock);
++
++ if (sys_clock_gettime(clock, &start) < 0) {
++ if (errno == EINVAL) {
++ vdso_ret = vdso_clock_gettime(clock, &vdso);
++ if (vdso_ret == -EINVAL) {
++ printf("[OK]\tNo such clock.\n");
++ } else {
++ printf("[FAIL]\tNo such clock, but __vdso_clock_gettime returned %d\n", vdso_ret);
++ nerrs++;
++ }
++ } else {
++ printf("[WARN]\t clock_gettime(%d) syscall returned error %d\n", clock, errno);
++ }
++ return;
++ }
++
++ vdso_ret = vdso_clock_gettime(clock, &vdso);
++ end_ret = sys_clock_gettime(clock, &end);
++
++ if (vdso_ret != 0 || end_ret != 0) {
++ printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
++ vdso_ret, errno);
++ nerrs++;
++ return;
++ }
++
++ printf("\t%llu.%09ld %llu.%09ld %llu.%09ld\n",
++ (unsigned long long)start.tv_sec, start.tv_nsec,
++ (unsigned long long)vdso.tv_sec, vdso.tv_nsec,
++ (unsigned long long)end.tv_sec, end.tv_nsec);
++
++ if (!ts_leq(&start, &vdso) || !ts_leq(&vdso, &end)) {
++ printf("[FAIL]\tTimes are out of sequence\n");
++ nerrs++;
++ }
++}
++
++static void test_clock_gettime(void)
++{
++ for (int clock = 0; clock < sizeof(clocknames) / sizeof(clocknames[0]);
++ clock++) {
++ test_one_clock_gettime(clock, clocknames[clock]);
++ }
++
++ /* Also test some invalid clock ids */
++ test_one_clock_gettime(-1, "invalid");
++ test_one_clock_gettime(INT_MIN, "invalid");
++ test_one_clock_gettime(INT_MAX, "invalid");
++}
++
+ int main(int argc, char **argv)
+ {
+ fill_function_pointers();
+
++ test_clock_gettime();
++
++ /*
++ * Test getcpu() last so that, if something goes wrong setting affinity,
++ * we still run the other tests.
++ */
+ test_getcpu();
+
+ return nerrs ? 1 : 0;
--- /dev/null
+perf-core-add-sanity-check-to-deal-with-pinned-event-failure.patch
+mm-migration-fix-migration-of-huge-pmd-shared-pages.patch
+mm-thp-fix-mlocking-thp-page-with-migration-enabled.patch
+mm-vmstat.c-skip-nr_tlb_remote_flush-properly.patch
+kvm-x86-fix-l1tf-s-mmio-gfn-calculation.patch
+blk-mq-i-o-and-timer-unplugs-are-inverted-in-blktrace.patch
+clocksource-drivers-timer-atmel-pit-properly-handle-error-cases.patch
+fbdev-omapfb-fix-omapfb_memory_read-infoleak.patch
+xen-netback-fix-input-validation-in-xenvif_set_hash_mapping.patch
+drm-amdgpu-fix-vce-work-queue-was-not-cancelled-when-suspend.patch
+drm-syncobj-don-t-leak-fences-when-wait_for_submit-is-set.patch
+x86-vdso-fix-asm-constraints-on-vdso-syscall-fallbacks.patch
+selftests-x86-add-clock_gettime-tests-to-test_vdso.patch
+x86-vdso-only-enable-vdso-retpolines-when-enabled-and-supported.patch
+x86-vdso-fix-vdso-syscall-fallback-asm-constraint-regression.patch
+pci-reprogram-bridge-prefetch-registers-on-resume.patch
+mac80211-fix-setting-ieee80211_key_flag_rx_mgmt-for-ap-mode-keys.patch
+pm-core-clear-the-direct_complete-flag-on-errors.patch
+dm-cache-metadata-ignore-hints-array-being-too-small-during-resize.patch
+dm-cache-fix-resize-crash-if-user-doesn-t-reload-cache-table.patch
+xhci-add-missing-cas-workaround-for-intel-sunrise-point-xhci.patch
+usb-xhci-mtk-resume-usb3-roothub-first.patch
+usb-serial-simple-add-motorola-tetra-mtp6550-id.patch
+usb-cdc_acm-do-not-leak-urb-buffers.patch
+tty-drop-tty-count-on-tty_reopen-failure.patch
--- /dev/null
+From fe32416790093b31364c08395727de17ec96ace1 Mon Sep 17 00:00:00 2001
+From: Dmitry Safonov <dima@arista.com>
+Date: Tue, 18 Sep 2018 00:52:52 +0100
+Subject: tty: Drop tty->count on tty_reopen() failure
+
+From: Dmitry Safonov <dima@arista.com>
+
+commit fe32416790093b31364c08395727de17ec96ace1 upstream.
+
+In case of tty_ldisc_reinit() failure, tty->count should be decremented
+back, otherwise we will never release_tty().
+Tetsuo reported that it fixes noisy warnings on tty release like:
+ pts pts4033: tty_release: tty->count(10529) != (#fd's(7) + #kopen's(0))
+
+Fixes: commit 892d1fa7eaae ("tty: Destroy ldisc instance on hangup")
+
+Cc: stable@vger.kernel.org # v4.6+
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Jiri Slaby <jslaby@suse.com>
+Reviewed-by: Jiri Slaby <jslaby@suse.cz>
+Tested-by: Jiri Slaby <jslaby@suse.com>
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
+Signed-off-by: Dmitry Safonov <dima@arista.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/tty_io.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -1254,6 +1254,7 @@ static void tty_driver_remove_tty(struct
+ static int tty_reopen(struct tty_struct *tty)
+ {
+ struct tty_driver *driver = tty->driver;
++ int retval;
+
+ if (driver->type == TTY_DRIVER_TYPE_PTY &&
+ driver->subtype == PTY_TYPE_MASTER)
+@@ -1267,10 +1268,14 @@ static int tty_reopen(struct tty_struct
+
+ tty->count++;
+
+- if (!tty->ldisc)
+- return tty_ldisc_reinit(tty, tty->termios.c_line);
++ if (tty->ldisc)
++ return 0;
+
+- return 0;
++ retval = tty_ldisc_reinit(tty, tty->termios.c_line);
++ if (retval)
++ tty->count--;
++
++ return retval;
+ }
+
+ /**
--- /dev/null
+From f2924d4b16ae138c2de6a0e73f526fb638330858 Mon Sep 17 00:00:00 2001
+From: Romain Izard <romain.izard.pro@gmail.com>
+Date: Thu, 20 Sep 2018 16:49:04 +0200
+Subject: usb: cdc_acm: Do not leak URB buffers
+
+From: Romain Izard <romain.izard.pro@gmail.com>
+
+commit f2924d4b16ae138c2de6a0e73f526fb638330858 upstream.
+
+When the ACM TTY port is disconnected, the URBs it uses must be killed, and
+then the buffers must be freed. Unfortunately a previous refactor removed
+the code freeing the buffers because it looked extremely similar to the
+code killing the URBs.
+
+As a result, there were many new leaks for each plug/unplug cycle of a
+CDC-ACM device, that were detected by kmemleak.
+
+Restore the missing code, and the memory leak is removed.
+
+Fixes: ba8c931ded8d ("cdc-acm: refactor killing urbs")
+Signed-off-by: Romain Izard <romain.izard.pro@gmail.com>
+Acked-by: Oliver Neukum <oneukum@suse.com>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/class/cdc-acm.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1527,6 +1527,7 @@ static void acm_disconnect(struct usb_in
+ {
+ struct acm *acm = usb_get_intfdata(intf);
+ struct tty_struct *tty;
++ int i;
+
+ /* sibling interface is already cleaning up */
+ if (!acm)
+@@ -1557,6 +1558,11 @@ static void acm_disconnect(struct usb_in
+
+ tty_unregister_device(acm_tty_driver, acm->minor);
+
++ usb_free_urb(acm->ctrlurb);
++ for (i = 0; i < ACM_NW; i++)
++ usb_free_urb(acm->wb[i].urb);
++ for (i = 0; i < acm->rx_buflimit; i++)
++ usb_free_urb(acm->read_urbs[i]);
+ acm_write_buffers_free(acm);
+ usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
+ acm_read_buffers_free(acm);
--- /dev/null
+From f5fad711c06e652f90f581fc7c2caee327c33d31 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Mon, 24 Sep 2018 15:28:10 +0200
+Subject: USB: serial: simple: add Motorola Tetra MTP6550 id
+
+From: Johan Hovold <johan@kernel.org>
+
+commit f5fad711c06e652f90f581fc7c2caee327c33d31 upstream.
+
+Add device-id for the Motorola Tetra radio MTP6550.
+
+Bus 001 Device 004: ID 0cad:9012 Motorola CGISS
+Device Descriptor:
+ bLength 18
+ bDescriptorType 1
+ bcdUSB 2.00
+ bDeviceClass 0 (Defined at Interface level)
+ bDeviceSubClass 0
+ bDeviceProtocol 0
+ bMaxPacketSize0 64
+ idVendor 0x0cad Motorola CGISS
+ idProduct 0x9012
+ bcdDevice 24.16
+ iManufacturer 1 Motorola Solutions, Inc.
+ iProduct 2 TETRA PEI interface
+ iSerial 0
+ bNumConfigurations 1
+ Configuration Descriptor:
+ bLength 9
+ bDescriptorType 2
+ wTotalLength 55
+ bNumInterfaces 2
+ bConfigurationValue 1
+ iConfiguration 3 Generic Serial config
+ bmAttributes 0x80
+ (Bus Powered)
+ MaxPower 500mA
+ Interface Descriptor:
+ bLength 9
+ bDescriptorType 4
+ bInterfaceNumber 0
+ bAlternateSetting 0
+ bNumEndpoints 2
+ bInterfaceClass 255 Vendor Specific Class
+ bInterfaceSubClass 0
+ bInterfaceProtocol 0
+ iInterface 0
+ Endpoint Descriptor:
+ bLength 7
+ bDescriptorType 5
+ bEndpointAddress 0x81 EP 1 IN
+ bmAttributes 2
+ Transfer Type Bulk
+ Synch Type None
+ Usage Type Data
+ wMaxPacketSize 0x0200 1x 512 bytes
+ bInterval 0
+ Endpoint Descriptor:
+ bLength 7
+ bDescriptorType 5
+ bEndpointAddress 0x01 EP 1 OUT
+ bmAttributes 2
+ Transfer Type Bulk
+ Synch Type None
+ Usage Type Data
+ wMaxPacketSize 0x0200 1x 512 bytes
+ Interface Descriptor:
+ bLength 9
+ bDescriptorType 4
+ bInterfaceNumber 1
+ bAlternateSetting 0
+ bNumEndpoints 2
+ bInterfaceClass 255 Vendor Specific Class
+ bInterfaceSubClass 0
+ bInterfaceProtocol 0
+ iInterface 0
+ Endpoint Descriptor:
+ bLength 7
+ bDescriptorType 5
+ bEndpointAddress 0x82 EP 2 IN
+ bmAttributes 2
+ Transfer Type Bulk
+ Synch Type None
+ Usage Type Data
+ wMaxPacketSize 0x0200 1x 512 bytes
+ bInterval 0
+ Endpoint Descriptor:
+ bLength 7
+ bDescriptorType 5
+ bEndpointAddress 0x02 EP 2 OUT
+ bmAttributes 2
+ Transfer Type Bulk
+ Synch Type None
+ Usage Type Data
+ wMaxPacketSize 0x0200 1x 512 bytes
+ bInterval 0
+Device Qualifier (for other device speed):
+ bLength 10
+ bDescriptorType 6
+ bcdUSB 2.00
+ bDeviceClass 0 (Defined at Interface level)
+ bDeviceSubClass 0
+ bDeviceProtocol 0
+ bMaxPacketSize0 64
+ bNumConfigurations 1
+Device Status: 0x0000
+ (Bus Powered)
+
+Reported-by: Hans Hult <hanshult35@gmail.com>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/serial/usb-serial-simple.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/usb/serial/usb-serial-simple.c
++++ b/drivers/usb/serial/usb-serial-simple.c
+@@ -87,7 +87,8 @@ DEVICE(moto_modem, MOTO_IDS);
+
+ /* Motorola Tetra driver */
+ #define MOTOROLA_TETRA_IDS() \
+- { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */
++ { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
++ { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
+ DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
+
+ /* Novatel Wireless GPS driver */
--- /dev/null
+From 555df5820e733cded7eb8d0bf78b2a791be51d75 Mon Sep 17 00:00:00 2001
+From: Chunfeng Yun <chunfeng.yun@mediatek.com>
+Date: Mon, 1 Oct 2018 18:36:08 +0300
+Subject: usb: xhci-mtk: resume USB3 roothub first
+
+From: Chunfeng Yun <chunfeng.yun@mediatek.com>
+
+commit 555df5820e733cded7eb8d0bf78b2a791be51d75 upstream.
+
+Give USB3 devices a better chance to enumerate at USB3 speeds if
+they are connected to a suspended host.
+Porting from "671ffdff5b13 xhci: resume USB 3 roothub first"
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Chunfeng Yun <chunfeng.yun@mediatek.com>
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/host/xhci-mtk.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/host/xhci-mtk.c
++++ b/drivers/usb/host/xhci-mtk.c
+@@ -780,10 +780,10 @@ static int __maybe_unused xhci_mtk_resum
+ xhci_mtk_host_enable(mtk);
+
+ xhci_dbg(xhci, "%s: restart port polling\n", __func__);
+- set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+- usb_hcd_poll_rh_status(hcd);
+ set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+ usb_hcd_poll_rh_status(xhci->shared_hcd);
++ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
++ usb_hcd_poll_rh_status(hcd);
+ return 0;
+ }
+
--- /dev/null
+From 715bd9d12f84d8f5cc8ad21d888f9bc304a8eb0b Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Mon, 1 Oct 2018 12:52:15 -0700
+Subject: x86/vdso: Fix asm constraints on vDSO syscall fallbacks
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit 715bd9d12f84d8f5cc8ad21d888f9bc304a8eb0b upstream.
+
+The syscall fallbacks in the vDSO have incorrect asm constraints.
+They are not marked as writing to their outputs -- instead, they are
+marked as clobbering "memory", which is useless. In particular, gcc
+is smart enough to know that the timespec parameter hasn't escaped,
+so a memory clobber doesn't clobber it. And passing a pointer as an
+asm *input* does not tell gcc that the pointed-to value is changed.
+
+Add in the fact that the asm instructions weren't volatile, and gcc
+was free to omit them entirely unless their sole output (the return
+value) is used. Which it is (phew!), but that stops happening with
+some upcoming patches.
+
+As a trivial example, the following code:
+
+void test_fallback(struct timespec *ts)
+{
+ vdso_fallback_gettime(CLOCK_MONOTONIC, ts);
+}
+
+compiles to:
+
+00000000000000c0 <test_fallback>:
+ c0: c3 retq
+
+To add insult to injury, the RCX and R11 clobbers on 64-bit
+builds were missing.
+
+The "memory" clobber is also unnecessary -- no ordering with respect to
+other memory operations is needed, but that's going to be fixed in a
+separate not-for-stable patch.
+
+Fixes: 2aae950b21e4 ("x86_64: Add vDSO for x86-64 with gettimeofday/clock_gettime/getcpu")
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/2c0231690551989d2fafa60ed0e7b5cc8b403908.1538422295.git.luto@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/entry/vdso/vclock_gettime.c | 18 ++++++++++--------
+ 1 file changed, 10 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/entry/vdso/vclock_gettime.c
++++ b/arch/x86/entry/vdso/vclock_gettime.c
+@@ -43,8 +43,9 @@ extern u8 hvclock_page
+ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
+ {
+ long ret;
+- asm("syscall" : "=a" (ret) :
+- "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
++ asm ("syscall" : "=a" (ret), "=m" (*ts) :
++ "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
++ "memory", "rcx", "r11");
+ return ret;
+ }
+
+@@ -52,8 +53,9 @@ notrace static long vdso_fallback_gtod(s
+ {
+ long ret;
+
+- asm("syscall" : "=a" (ret) :
+- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
++ asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
++ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
++ "memory", "rcx", "r11");
+ return ret;
+ }
+
+@@ -64,12 +66,12 @@ notrace static long vdso_fallback_gettim
+ {
+ long ret;
+
+- asm(
++ asm (
+ "mov %%ebx, %%edx \n"
+ "mov %2, %%ebx \n"
+ "call __kernel_vsyscall \n"
+ "mov %%edx, %%ebx \n"
+- : "=a" (ret)
++ : "=a" (ret), "=m" (*ts)
+ : "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
+ : "memory", "edx");
+ return ret;
+@@ -79,12 +81,12 @@ notrace static long vdso_fallback_gtod(s
+ {
+ long ret;
+
+- asm(
++ asm (
+ "mov %%ebx, %%edx \n"
+ "mov %2, %%ebx \n"
+ "call __kernel_vsyscall \n"
+ "mov %%edx, %%ebx \n"
+- : "=a" (ret)
++ : "=a" (ret), "=m" (*tv), "=m" (*tz)
+ : "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
+ : "memory", "edx");
+ return ret;
--- /dev/null
+From 02e425668f5c9deb42787d10001a3b605993ad15 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Wed, 3 Oct 2018 16:23:49 -0700
+Subject: x86/vdso: Fix vDSO syscall fallback asm constraint regression
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit 02e425668f5c9deb42787d10001a3b605993ad15 upstream.
+
+When I added the missing memory outputs, I failed to update the
+index of the first argument (ebx) on 32-bit builds, which broke the
+fallbacks. Somehow I must have screwed up my testing or gotten
+lucky.
+
+Add another test to cover gettimeofday() as well.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Fixes: 715bd9d12f84 ("x86/vdso: Fix asm constraints on vDSO syscall fallbacks")
+Link: http://lkml.kernel.org/r/21bd45ab04b6d838278fa5bebfa9163eceffa13c.1538608971.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/entry/vdso/vclock_gettime.c | 8 +--
+ tools/testing/selftests/x86/test_vdso.c | 73 ++++++++++++++++++++++++++++++++
+ 2 files changed, 77 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/entry/vdso/vclock_gettime.c
++++ b/arch/x86/entry/vdso/vclock_gettime.c
+@@ -68,11 +68,11 @@ notrace static long vdso_fallback_gettim
+
+ asm (
+ "mov %%ebx, %%edx \n"
+- "mov %2, %%ebx \n"
++ "mov %[clock], %%ebx \n"
+ "call __kernel_vsyscall \n"
+ "mov %%edx, %%ebx \n"
+ : "=a" (ret), "=m" (*ts)
+- : "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
++ : "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
+ : "memory", "edx");
+ return ret;
+ }
+@@ -83,11 +83,11 @@ notrace static long vdso_fallback_gtod(s
+
+ asm (
+ "mov %%ebx, %%edx \n"
+- "mov %2, %%ebx \n"
++ "mov %[tv], %%ebx \n"
+ "call __kernel_vsyscall \n"
+ "mov %%edx, %%ebx \n"
+ : "=a" (ret), "=m" (*tv), "=m" (*tz)
+- : "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
++ : "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
+ : "memory", "edx");
+ return ret;
+ }
+--- a/tools/testing/selftests/x86/test_vdso.c
++++ b/tools/testing/selftests/x86/test_vdso.c
+@@ -36,6 +36,10 @@ typedef int (*vgettime_t)(clockid_t, str
+
+ vgettime_t vdso_clock_gettime;
+
++typedef long (*vgtod_t)(struct timeval *tv, struct timezone *tz);
++
++vgtod_t vdso_gettimeofday;
++
+ typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
+
+ getcpu_t vgetcpu;
+@@ -104,6 +108,11 @@ static void fill_function_pointers()
+ vdso_clock_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
+ if (!vdso_clock_gettime)
+ printf("Warning: failed to find clock_gettime in vDSO\n");
++
++ vdso_gettimeofday = (vgtod_t)dlsym(vdso, "__vdso_gettimeofday");
++ if (!vdso_gettimeofday)
++ printf("Warning: failed to find gettimeofday in vDSO\n");
++
+ }
+
+ static long sys_getcpu(unsigned * cpu, unsigned * node,
+@@ -117,6 +126,11 @@ static inline int sys_clock_gettime(cloc
+ return syscall(__NR_clock_gettime, id, ts);
+ }
+
++static inline int sys_gettimeofday(struct timeval *tv, struct timezone *tz)
++{
++ return syscall(__NR_gettimeofday, tv, tz);
++}
++
+ static void test_getcpu(void)
+ {
+ printf("[RUN]\tTesting getcpu...\n");
+@@ -177,6 +191,14 @@ static bool ts_leq(const struct timespec
+ return a->tv_nsec <= b->tv_nsec;
+ }
+
++static bool tv_leq(const struct timeval *a, const struct timeval *b)
++{
++ if (a->tv_sec != b->tv_sec)
++ return a->tv_sec < b->tv_sec;
++ else
++ return a->tv_usec <= b->tv_usec;
++}
++
+ static char const * const clocknames[] = {
+ [0] = "CLOCK_REALTIME",
+ [1] = "CLOCK_MONOTONIC",
+@@ -248,11 +270,62 @@ static void test_clock_gettime(void)
+ test_one_clock_gettime(INT_MAX, "invalid");
+ }
+
++static void test_gettimeofday(void)
++{
++ struct timeval start, vdso, end;
++ struct timezone sys_tz, vdso_tz;
++ int vdso_ret, end_ret;
++
++ if (!vdso_gettimeofday)
++ return;
++
++ printf("[RUN]\tTesting gettimeofday...\n");
++
++ if (sys_gettimeofday(&start, &sys_tz) < 0) {
++ printf("[FAIL]\tsys_gettimeofday failed (%d)\n", errno);
++ nerrs++;
++ return;
++ }
++
++ vdso_ret = vdso_gettimeofday(&vdso, &vdso_tz);
++ end_ret = sys_gettimeofday(&end, NULL);
++
++ if (vdso_ret != 0 || end_ret != 0) {
++ printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
++ vdso_ret, errno);
++ nerrs++;
++ return;
++ }
++
++ printf("\t%llu.%06ld %llu.%06ld %llu.%06ld\n",
++ (unsigned long long)start.tv_sec, start.tv_usec,
++ (unsigned long long)vdso.tv_sec, vdso.tv_usec,
++ (unsigned long long)end.tv_sec, end.tv_usec);
++
++ if (!tv_leq(&start, &vdso) || !tv_leq(&vdso, &end)) {
++ printf("[FAIL]\tTimes are out of sequence\n");
++ nerrs++;
++ }
++
++ if (sys_tz.tz_minuteswest == vdso_tz.tz_minuteswest &&
++ sys_tz.tz_dsttime == vdso_tz.tz_dsttime) {
++ printf("[OK]\ttimezones match: minuteswest=%d, dsttime=%d\n",
++ sys_tz.tz_minuteswest, sys_tz.tz_dsttime);
++ } else {
++ printf("[FAIL]\ttimezones do not match\n");
++ nerrs++;
++ }
++
++ /* And make sure that passing NULL for tz doesn't crash. */
++ vdso_gettimeofday(&vdso, NULL);
++}
++
+ int main(int argc, char **argv)
+ {
+ fill_function_pointers();
+
+ test_clock_gettime();
++ test_gettimeofday();
+
+ /*
+ * Test getcpu() last so that, if something goes wrong setting affinity,
--- /dev/null
+From 4f166564014aba65ad6f15b612f6711fd0f117ee Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Tue, 2 Oct 2018 21:26:50 -0700
+Subject: x86/vdso: Only enable vDSO retpolines when enabled and supported
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit 4f166564014aba65ad6f15b612f6711fd0f117ee upstream.
+
+When I fixed the vDSO build to use inline retpolines, I messed up
+the Makefile logic and made it unconditional. It should have
+depended on CONFIG_RETPOLINE and on the availability of compiler
+support. This broke the build on some older compilers.
+
+Reported-by: nikola.ciprich@linuxbox.cz
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Matt Rickard <matt@softrans.com.au>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: jason.vas.dias@gmail.com
+Cc: stable@vger.kernel.org
+Fixes: 2e549b2ee0e3 ("x86/vdso: Fix vDSO build if a retpoline is emitted")
+Link: http://lkml.kernel.org/r/08a1f29f2c238dd1f493945e702a521f8a5aa3ae.1538540801.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/entry/vdso/Makefile | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/entry/vdso/Makefile
++++ b/arch/x86/entry/vdso/Makefile
+@@ -74,7 +74,13 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.d
+ CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
+ $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
+ -fno-omit-frame-pointer -foptimize-sibling-calls \
+- -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
++ -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
++
++ifdef CONFIG_RETPOLINE
++ifneq ($(RETPOLINE_VDSO_CFLAGS),)
++ CFL += $(RETPOLINE_VDSO_CFLAGS)
++endif
++endif
+
+ $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
+
+@@ -153,7 +159,13 @@ KBUILD_CFLAGS_32 += $(call cc-option, -f
+ KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
+ KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
+ KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
+-KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
++
++ifdef CONFIG_RETPOLINE
++ifneq ($(RETPOLINE_VDSO_CFLAGS),)
++ KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
++endif
++endif
++
+ $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
+
+ $(obj)/vdso32.so.dbg: FORCE \
--- /dev/null
+From 780e83c259fc33e8959fed8dfdad17e378d72b62 Mon Sep 17 00:00:00 2001
+From: Jan Beulich <JBeulich@suse.com>
+Date: Tue, 25 Sep 2018 02:12:30 -0600
+Subject: xen-netback: fix input validation in xenvif_set_hash_mapping()
+
+From: Jan Beulich <JBeulich@suse.com>
+
+commit 780e83c259fc33e8959fed8dfdad17e378d72b62 upstream.
+
+Both len and off are frontend specified values, so we need to make
+sure there's no overflow when adding the two for the bounds check. We
+also want to avoid undefined behavior and hence use off to index into
+->hash.mapping[] only after bounds checking. This at the same time
+allows to take care of not applying off twice for the bounds checking
+against vif->num_queues.
+
+It is also insufficient to bounds check copy_op.len, as this is len
+truncated to 16 bits.
+
+This is XSA-270 / CVE-2018-15471.
+
+Reported-by: Felix Wilhelm <fwilhelm@google.com>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
+Tested-by: Paul Durrant <paul.durrant@citrix.com>
+Cc: stable@vger.kernel.org [4.7 onwards]
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/xen-netback/hash.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/xen-netback/hash.c
++++ b/drivers/net/xen-netback/hash.c
+@@ -332,20 +332,22 @@ u32 xenvif_set_hash_mapping_size(struct
+ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
+ u32 off)
+ {
+- u32 *mapping = &vif->hash.mapping[off];
++ u32 *mapping = vif->hash.mapping;
+ struct gnttab_copy copy_op = {
+ .source.u.ref = gref,
+ .source.domid = vif->domid,
+- .dest.u.gmfn = virt_to_gfn(mapping),
+ .dest.domid = DOMID_SELF,
+- .dest.offset = xen_offset_in_page(mapping),
+- .len = len * sizeof(u32),
++ .len = len * sizeof(*mapping),
+ .flags = GNTCOPY_source_gref
+ };
+
+- if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
++ if ((off + len < off) || (off + len > vif->hash.size) ||
++ len > XEN_PAGE_SIZE / sizeof(*mapping))
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
++ copy_op.dest.u.gmfn = virt_to_gfn(mapping + off);
++ copy_op.dest.offset = xen_offset_in_page(mapping + off);
++
+ while (len-- != 0)
+ if (mapping[off++] >= vif->num_queues)
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
--- /dev/null
+From ffe84e01bb1b38c7eb9c6b6da127a6c136d251df Mon Sep 17 00:00:00 2001
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+Date: Mon, 1 Oct 2018 18:36:07 +0300
+Subject: xhci: Add missing CAS workaround for Intel Sunrise Point xHCI
+
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+
+commit ffe84e01bb1b38c7eb9c6b6da127a6c136d251df upstream.
+
+The workaround for missing CAS bit is also needed for xHC on Intel
+sunrisepoint PCH. For more details see:
+
+Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/host/xhci-pci.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -196,6 +196,8 @@ static void xhci_pci_quirks(struct devic
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
+ xhci->quirks |= XHCI_MISSING_CAS;
--- /dev/null
+perf-core-add-sanity-check-to-deal-with-pinned-event-failure.patch
+mm-migration-fix-migration-of-huge-pmd-shared-pages.patch
+mm-thp-fix-mlocking-thp-page-with-migration-enabled.patch
+mm-vmstat.c-skip-nr_tlb_remote_flush-properly.patch
+kvm-x86-fix-l1tf-s-mmio-gfn-calculation.patch
+kvm-vmx-check-for-existence-of-secondary-exec-controls-before-accessing.patch
+blk-mq-i-o-and-timer-unplugs-are-inverted-in-blktrace.patch
+pstore-ram-fix-failure-path-memory-leak-in-ramoops_init.patch
+clocksource-drivers-timer-atmel-pit-properly-handle-error-cases.patch
+fbdev-omapfb-fix-omapfb_memory_read-infoleak.patch
+xen-netback-fix-input-validation-in-xenvif_set_hash_mapping.patch
+mmc-core-fix-debounce-time-to-use-microseconds.patch
+mmc-slot-gpio-fix-debounce-time-to-use-miliseconds-again.patch
+mac80211-allocate-txqs-for-active-monitor-interfaces.patch
+drm-amdgpu-fix-vce-work-queue-was-not-cancelled-when-suspend.patch
+drm-syncobj-don-t-leak-fences-when-wait_for_submit-is-set.patch
+drm-fix-use-after-free-read-in-drm_mode_create_lease_ioctl.patch
+x86-vdso-fix-asm-constraints-on-vdso-syscall-fallbacks.patch
+selftests-x86-add-clock_gettime-tests-to-test_vdso.patch
+x86-vdso-only-enable-vdso-retpolines-when-enabled-and-supported.patch
+x86-vdso-fix-vdso-syscall-fallback-asm-constraint-regression.patch
+pci-reprogram-bridge-prefetch-registers-on-resume.patch
+mac80211-fix-setting-ieee80211_key_flag_rx_mgmt-for-ap-mode-keys.patch
+pm-core-clear-the-direct_complete-flag-on-errors.patch
+dm-mpath-fix-attached_handler_name-leak-and-dangling-hw_handler_name-pointer.patch
+dm-cache-metadata-ignore-hints-array-being-too-small-during-resize.patch
+dm-cache-fix-resize-crash-if-user-doesn-t-reload-cache-table.patch
+xhci-add-missing-cas-workaround-for-intel-sunrise-point-xhci.patch
+usb-xhci-mtk-resume-usb3-roothub-first.patch
+usb-serial-simple-add-motorola-tetra-mtp6550-id.patch
+usb-serial-option-improve-quectel-ep06-detection.patch
+usb-serial-option-add-two-endpoints-device-id-flag.patch
+usb-cdc_acm-do-not-leak-urb-buffers.patch
+tty-drop-tty-count-on-tty_reopen-failure.patch
--- /dev/null
+mm-vmstat.c-skip-nr_tlb_remote_flush-properly.patch
+fbdev-omapfb-fix-omapfb_memory_read-infoleak.patch
+x86-vdso-fix-asm-constraints-on-vdso-syscall-fallbacks.patch
+x86-vdso-fix-vdso-syscall-fallback-asm-constraint-regression.patch
+pci-reprogram-bridge-prefetch-registers-on-resume.patch
+mac80211-fix-setting-ieee80211_key_flag_rx_mgmt-for-ap-mode-keys.patch
+pm-core-clear-the-direct_complete-flag-on-errors.patch
+dm-cache-fix-resize-crash-if-user-doesn-t-reload-cache-table.patch
+xhci-add-missing-cas-workaround-for-intel-sunrise-point-xhci.patch
+usb-serial-simple-add-motorola-tetra-mtp6550-id.patch
--- /dev/null
+mm-vmstat.c-skip-nr_tlb_remote_flush-properly.patch
+fbdev-omapfb-fix-omapfb_memory_read-infoleak.patch
+xen-netback-fix-input-validation-in-xenvif_set_hash_mapping.patch
+x86-vdso-fix-asm-constraints-on-vdso-syscall-fallbacks.patch
+x86-vdso-fix-vdso-syscall-fallback-asm-constraint-regression.patch
+pci-reprogram-bridge-prefetch-registers-on-resume.patch
+mac80211-fix-setting-ieee80211_key_flag_rx_mgmt-for-ap-mode-keys.patch
+pm-core-clear-the-direct_complete-flag-on-errors.patch
+dm-cache-metadata-ignore-hints-array-being-too-small-during-resize.patch
+dm-cache-fix-resize-crash-if-user-doesn-t-reload-cache-table.patch
+xhci-add-missing-cas-workaround-for-intel-sunrise-point-xhci.patch
+usb-xhci-mtk-resume-usb3-roothub-first.patch
+usb-serial-simple-add-motorola-tetra-mtp6550-id.patch
+tty-drop-tty-count-on-tty_reopen-failure.patch