--- /dev/null
+From f183464684190bacbfb14623bd3e4e51b7575b4c Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Wed, 23 May 2018 10:56:32 -0700
+Subject: bdi: Move cgroup bdi_writeback to a dedicated low concurrency workqueue
+
+From: Tejun Heo <tj@kernel.org>
+
+commit f183464684190bacbfb14623bd3e4e51b7575b4c upstream.
+
+From 0aa2e9b921d6db71150633ff290199554f0842a8 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Wed, 23 May 2018 10:29:00 -0700
+
+cgwb_release() punts the actual release to cgwb_release_workfn() on
+system_wq. Depending on the number of cgroups or block devices, there
+can be a lot of cgwb_release_workfn() in flight at the same time.
+
+We're periodically seeing close to 256 kworkers getting stuck with the
+following stack trace and overtime the entire system gets stuck.
+
+ [<ffffffff810ee40c>] _synchronize_rcu_expedited.constprop.72+0x2fc/0x330
+ [<ffffffff810ee634>] synchronize_rcu_expedited+0x24/0x30
+ [<ffffffff811ccf23>] bdi_unregister+0x53/0x290
+ [<ffffffff811cd1e9>] release_bdi+0x89/0xc0
+ [<ffffffff811cd645>] wb_exit+0x85/0xa0
+ [<ffffffff811cdc84>] cgwb_release_workfn+0x54/0xb0
+ [<ffffffff810a68d0>] process_one_work+0x150/0x410
+ [<ffffffff810a71fd>] worker_thread+0x6d/0x520
+ [<ffffffff810ad3dc>] kthread+0x12c/0x160
+ [<ffffffff81969019>] ret_from_fork+0x29/0x40
+ [<ffffffffffffffff>] 0xffffffffffffffff
+
+The events leading to the lockup are...
+
+1. A lot of cgwb_release_workfn() is queued at the same time and all
+ system_wq kworkers are assigned to execute them.
+
+2. They all end up calling synchronize_rcu_expedited(). One of them
+ wins and tries to perform the expedited synchronization.
+
+3. However, that involves queueing rcu_exp_work to system_wq and
+ waiting for it. Because #1 is holding all available kworkers on
+ system_wq, rcu_exp_work can't be executed. cgwb_release_workfn()
+ is waiting for synchronize_rcu_expedited() which in turn is waiting
+ for cgwb_release_workfn() to free up some of the kworkers.
+
+We shouldn't be scheduling hundreds of cgwb_release_workfn() at the
+same time. There's nothing to be gained from that. This patch
+updates cgwb release path to use a dedicated percpu workqueue with
+@max_active of 1.
+
+While this resolves the problem at hand, it might be a good idea to
+isolate rcu_exp_work to its own workqueue too as it can be used from
+various paths and is prone to this sort of indirect A-A deadlocks.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/backing-dev.c | 18 +++++++++++++++++-
+ 1 file changed, 17 insertions(+), 1 deletion(-)
+
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -409,6 +409,7 @@ static void wb_exit(struct bdi_writeback
+ * protected.
+ */
+ static DEFINE_SPINLOCK(cgwb_lock);
++static struct workqueue_struct *cgwb_release_wq;
+
+ /**
+ * wb_congested_get_create - get or create a wb_congested
+@@ -519,7 +520,7 @@ static void cgwb_release(struct percpu_r
+ {
+ struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
+ refcnt);
+- schedule_work(&wb->release_work);
++ queue_work(cgwb_release_wq, &wb->release_work);
+ }
+
+ static void cgwb_kill(struct bdi_writeback *wb)
+@@ -783,6 +784,21 @@ static void cgwb_bdi_register(struct bac
+ spin_unlock_irq(&cgwb_lock);
+ }
+
++static int __init cgwb_init(void)
++{
++ /*
++ * There can be many concurrent release work items overwhelming
++ * system_wq. Put them in a separate wq and limit concurrency.
++ * There's no point in executing many of these in parallel.
++ */
++ cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
++ if (!cgwb_release_wq)
++ return -ENOMEM;
++
++ return 0;
++}
++subsys_initcall(cgwb_init);
++
+ #else /* CONFIG_CGROUP_WRITEBACK */
+
+ static int cgwb_bdi_init(struct backing_dev_info *bdi)
--- /dev/null
+From a347c7ad8edf4c5685154f3fdc3c12fc1db800ba Mon Sep 17 00:00:00 2001
+From: Roman Pen <roman.penyaev@profitbricks.com>
+Date: Sun, 10 Jun 2018 22:38:24 +0200
+Subject: blk-mq: reinit q->tag_set_list entry only after grace period
+
+From: Roman Pen <roman.penyaev@profitbricks.com>
+
+commit a347c7ad8edf4c5685154f3fdc3c12fc1db800ba upstream.
+
+It is not allowed to reinit q->tag_set_list list entry while RCU grace
+period has not completed yet, otherwise the following soft lockup in
+blk_mq_sched_restart() happens:
+
+[ 1064.252652] watchdog: BUG: soft lockup - CPU#12 stuck for 23s! [fio:9270]
+[ 1064.254445] task: ffff99b912e8b900 task.stack: ffffa6d54c758000
+[ 1064.254613] RIP: 0010:blk_mq_sched_restart+0x96/0x150
+[ 1064.256510] Call Trace:
+[ 1064.256664] <IRQ>
+[ 1064.256824] blk_mq_free_request+0xea/0x100
+[ 1064.256987] msg_io_conf+0x59/0xd0 [ibnbd_client]
+[ 1064.257175] complete_rdma_req+0xf2/0x230 [ibtrs_client]
+[ 1064.257340] ? ibtrs_post_recv_empty+0x4d/0x70 [ibtrs_core]
+[ 1064.257502] ibtrs_clt_rdma_done+0xd1/0x1e0 [ibtrs_client]
+[ 1064.257669] ib_create_qp+0x321/0x380 [ib_core]
+[ 1064.257841] ib_process_cq_direct+0xbd/0x120 [ib_core]
+[ 1064.258007] irq_poll_softirq+0xb7/0xe0
+[ 1064.258165] __do_softirq+0x106/0x2a2
+[ 1064.258328] irq_exit+0x92/0xa0
+[ 1064.258509] do_IRQ+0x4a/0xd0
+[ 1064.258660] common_interrupt+0x7a/0x7a
+[ 1064.258818] </IRQ>
+
+Meanwhile another context frees other queue but with the same set of
+shared tags:
+
+[ 1288.201183] INFO: task bash:5910 blocked for more than 180 seconds.
+[ 1288.201833] bash D 0 5910 5820 0x00000000
+[ 1288.202016] Call Trace:
+[ 1288.202315] schedule+0x32/0x80
+[ 1288.202462] schedule_timeout+0x1e5/0x380
+[ 1288.203838] wait_for_completion+0xb0/0x120
+[ 1288.204137] __wait_rcu_gp+0x125/0x160
+[ 1288.204287] synchronize_sched+0x6e/0x80
+[ 1288.204770] blk_mq_free_queue+0x74/0xe0
+[ 1288.204922] blk_cleanup_queue+0xc7/0x110
+[ 1288.205073] ibnbd_clt_unmap_device+0x1bc/0x280 [ibnbd_client]
+[ 1288.205389] ibnbd_clt_unmap_dev_store+0x169/0x1f0 [ibnbd_client]
+[ 1288.205548] kernfs_fop_write+0x109/0x180
+[ 1288.206328] vfs_write+0xb3/0x1a0
+[ 1288.206476] SyS_write+0x52/0xc0
+[ 1288.206624] do_syscall_64+0x68/0x1d0
+[ 1288.206774] entry_SYSCALL_64_after_hwframe+0x3d/0xa2
+
+What happened is the following:
+
+1. There are several MQ queues with shared tags.
+2. One queue is about to be freed and now task is in
+ blk_mq_del_queue_tag_set().
+3. Other CPU is in blk_mq_sched_restart() and loops over all queues in
+ tag list in order to find hctx to restart.
+
+Because linked list entry was modified in blk_mq_del_queue_tag_set()
+without proper waiting for a grace period, blk_mq_sched_restart()
+never ends, spinning in list_for_each_entry_rcu_rr(), thus soft lockup.
+
+Fix is simple: reinit list entry after an RCU grace period elapsed.
+
+Fixes: 705cda97ee3a ("blk-mq: Make it safe to use RCU to iterate over blk_mq_tag_set.tag_list")
+Cc: stable@vger.kernel.org
+Cc: Sagi Grimberg <sagi@grimberg.me>
+Cc: linux-block@vger.kernel.org
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Bart Van Assche <bart.vanassche@wdc.com>
+Signed-off-by: Roman Pen <roman.penyaev@profitbricks.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-mq.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2252,7 +2252,6 @@ static void blk_mq_del_queue_tag_set(str
+
+ mutex_lock(&set->tag_list_lock);
+ list_del_rcu(&q->tag_set_list);
+- INIT_LIST_HEAD(&q->tag_set_list);
+ if (list_is_singular(&set->tag_list)) {
+ /* just transitioned to unshared */
+ set->flags &= ~BLK_MQ_F_TAG_SHARED;
+@@ -2260,8 +2259,8 @@ static void blk_mq_del_queue_tag_set(str
+ blk_mq_update_tag_set_depth(set, false);
+ }
+ mutex_unlock(&set->tag_list_lock);
+-
+ synchronize_rcu();
++ INIT_LIST_HEAD(&q->tag_set_list);
+ }
+
+ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
--- /dev/null
+From d81243c697ffc71f983736e7da2db31a8be0001f Mon Sep 17 00:00:00 2001
+From: Mark Syms <mark.syms@citrix.com>
+Date: Thu, 24 May 2018 09:47:31 +0100
+Subject: CIFS: 511c54a2f69195b28afb9dd119f03787b1625bb4 adds a check for session expiry
+
+From: Mark Syms <mark.syms@citrix.com>
+
+commit d81243c697ffc71f983736e7da2db31a8be0001f upstream.
+
+Handle this additional status in the same way as SESSION_EXPIRED.
+
+Signed-off-by: Mark Syms <mark.syms@citrix.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+CC: Stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2ops.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -1256,10 +1256,11 @@ smb2_is_session_expired(char *buf)
+ {
+ struct smb2_sync_hdr *shdr = get_sync_hdr(buf);
+
+- if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED)
++ if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
++ shdr->Status != STATUS_USER_SESSION_DELETED)
+ return false;
+
+- cifs_dbg(FYI, "Session expired\n");
++ cifs_dbg(FYI, "Session expired or deleted\n");
+ return true;
+ }
+
--- /dev/null
+From ee25c6dd7b05113783ce1f4fab6b30fc00d29b8d Mon Sep 17 00:00:00 2001
+From: Shirish Pargaonkar <shirishpargaonkar@gmail.com>
+Date: Mon, 4 Jun 2018 06:46:22 -0500
+Subject: cifs: For SMB2 security informaion query, check for minimum sized security descriptor instead of sizeof FileAllInformation class
+
+From: Shirish Pargaonkar <shirishpargaonkar@gmail.com>
+
+commit ee25c6dd7b05113783ce1f4fab6b30fc00d29b8d upstream.
+
+Validate_buf () function checks for an expected minimum sized response
+passed to query_info() function.
+For security information, the size of a security descriptor can be
+smaller (one subauthority, no ACEs) than the size of the structure
+that defines FileInfoClass of FileAllInformation.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=199725
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Shirish Pargaonkar <shirishpargaonkar@gmail.com>
+Reviewed-by: Noah Morrison <noah.morrison@rubrik.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/cifsacl.h | 14 ++++++++++++++
+ fs/cifs/smb2pdu.c | 3 +--
+ 2 files changed, 15 insertions(+), 2 deletions(-)
+
+--- a/fs/cifs/cifsacl.h
++++ b/fs/cifs/cifsacl.h
+@@ -98,4 +98,18 @@ struct cifs_ace {
+ struct cifs_sid sid; /* ie UUID of user or group who gets these perms */
+ } __attribute__((packed));
+
++/*
++ * Minimum security identifier can be one for system defined Users
++ * and Groups such as NULL SID and World or Built-in accounts such
++ * as Administrator and Guest and consists of
++ * Revision + Num (Sub)Auths + Authority + Domain (one Subauthority)
++ */
++#define MIN_SID_LEN (1 + 1 + 6 + 4) /* in bytes */
++
++/*
++ * Minimum security descriptor can be one without any SACL and DACL and can
++ * consist of revision, type, and two sids of minimum size for owner and group
++ */
++#define MIN_SEC_DESC_LEN (sizeof(struct cifs_ntsd) + (2 * MIN_SID_LEN))
++
+ #endif /* _CIFSACL_H */
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2279,8 +2279,7 @@ SMB2_query_acl(const unsigned int xid, s
+
+ return query_info(xid, tcon, persistent_fid, volatile_fid,
+ 0, SMB2_O_INFO_SECURITY, additional_info,
+- SMB2_MAX_BUFFER_SIZE,
+- sizeof(struct smb2_file_all_info), data, plen);
++ SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
+ }
+
+ int
--- /dev/null
+From c7d1f119c48f64bebf0fa1e326af577c6152fe30 Mon Sep 17 00:00:00 2001
+From: Tao Wang <kevin.wangtao@hisilicon.com>
+Date: Sat, 26 May 2018 15:16:48 +0800
+Subject: cpufreq: Fix new policy initialization during limits updates via sysfs
+
+From: Tao Wang <kevin.wangtao@hisilicon.com>
+
+commit c7d1f119c48f64bebf0fa1e326af577c6152fe30 upstream.
+
+If the policy limits are updated via cpufreq_update_policy() and
+subsequently via sysfs, the limits stored in user_policy may be
+set incorrectly.
+
+For example, if both min and max are set via sysfs to the maximum
+available frequency, user_policy.min and user_policy.max will also
+be the maximum. If a policy notifier triggered by
+cpufreq_update_policy() lowers both the min and the max at this
+point, that change is not reflected by the user_policy limits, so
+if the max is updated again via sysfs to the same lower value,
+then user_policy.max will be lower than user_policy.min which
+shouldn't happen. In particular, if one of the policy CPUs is
+then taken offline and back online, cpufreq_set_policy() will
+fail for it due to a failing limits check.
+
+To prevent that from happening, initialize the min and max fields
+of the new_policy object to the ones stored in user_policy that
+were previously set via sysfs.
+
+Signed-off-by: Kevin Wangtao <kevin.wangtao@hisilicon.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+[ rjw: Subject & changelog ]
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/cpufreq.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -693,6 +693,8 @@ static ssize_t store_##file_name \
+ struct cpufreq_policy new_policy; \
+ \
+ memcpy(&new_policy, policy, sizeof(*policy)); \
++ new_policy.min = policy->user_policy.min; \
++ new_policy.max = policy->user_policy.max; \
+ \
+ ret = sscanf(buf, "%u", &new_policy.object); \
+ if (ret != 1) \
--- /dev/null
+From 7592019634f8473f0b0973ce79297183077bdbc2 Mon Sep 17 00:00:00 2001
+From: Chen Yu <yu.c.chen@intel.com>
+Date: Fri, 8 Jun 2018 09:07:33 +0800
+Subject: cpufreq: governors: Fix long idle detection logic in load calculation
+
+From: Chen Yu <yu.c.chen@intel.com>
+
+commit 7592019634f8473f0b0973ce79297183077bdbc2 upstream.
+
+According to current code implementation, detecting the long
+idle period is done by checking if the interval between two
+adjacent utilization update handlers is long enough. Although
+this mechanism can detect if the idle period is long enough
+(no utilization hooks invoked during idle period), it might
+not cover a corner case: if the task has occupied the CPU
+for too long which causes no context switches during that
+period, then no utilization handler will be launched until this
+high prio task is scheduled out. As a result, the idle_periods
+field might be calculated incorrectly because it regards the
+100% load as 0% and makes the conservative governor who uses
+this field confusing.
+
+Change the detection to compare the idle_time with sampling_rate
+directly.
+
+Reported-by: Artem S. Tashkinov <t.artem@mailcity.com>
+Signed-off-by: Chen Yu <yu.c.chen@intel.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/cpufreq_governor.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+--- a/drivers/cpufreq/cpufreq_governor.c
++++ b/drivers/cpufreq/cpufreq_governor.c
+@@ -165,7 +165,7 @@ unsigned int dbs_update(struct cpufreq_p
+ * calls, so the previous load value can be used then.
+ */
+ load = j_cdbs->prev_load;
+- } else if (unlikely(time_elapsed > 2 * sampling_rate &&
++ } else if (unlikely((int)idle_time > 2 * sampling_rate &&
+ j_cdbs->prev_load)) {
+ /*
+ * If the CPU had gone completely idle and a task has
+@@ -185,10 +185,8 @@ unsigned int dbs_update(struct cpufreq_p
+ * clear prev_load to guarantee that the load will be
+ * computed again next time.
+ *
+- * Detecting this situation is easy: the governor's
+- * utilization update handler would not have run during
+- * CPU-idle periods. Hence, an unusually large
+- * 'time_elapsed' (as compared to the sampling rate)
++ * Detecting this situation is easy: an unusually large
++ * 'idle_time' (as compared to the sampling rate)
+ * indicates this scenario.
+ */
+ load = j_cdbs->prev_load;
+@@ -217,8 +215,8 @@ unsigned int dbs_update(struct cpufreq_p
+ j_cdbs->prev_load = load;
+ }
+
+- if (time_elapsed > 2 * sampling_rate) {
+- unsigned int periods = time_elapsed / sampling_rate;
++ if (unlikely((int)idle_time > 2 * sampling_rate)) {
++ unsigned int periods = idle_time / sampling_rate;
+
+ if (periods < idle_periods)
+ idle_periods = periods;
--- /dev/null
+From 2cfce3a86b64b53f0a70e92a6a659c720c319b45 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Thu, 31 May 2018 13:21:07 +0200
+Subject: libata: Drop SanDisk SD7UB3Q*G1001 NOLPM quirk
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 2cfce3a86b64b53f0a70e92a6a659c720c319b45 upstream.
+
+Commit 184add2ca23c ("libata: Apply NOLPM quirk for SanDisk
+SD7UB3Q*G1001 SSDs") disabled LPM for SanDisk SD7UB3Q*G1001 SSDs.
+
+This has lead to several reports of users of that SSD where LPM
+was working fine and who now have a significantly increased idle
+power consumption on their laptops.
+
+Likely there is another problem on the T450s from the original
+reporter which gets exposed by the uncore reaching deeper sleep
+states (higher PC-states) due to LPM being enabled. The problem as
+reported, a hardfreeze about once a day, already did not sound like
+it would be caused by LPM and the reports of the SSD working fine
+confirm this. The original reporter is ok with dropping the quirk.
+
+A X250 user has reported the same hard freeze problem and for him
+the problem went away after unrelated updates, I suspect some GPU
+driver stack changes fixed things.
+
+TL;DR: The original reporters problem were triggered by LPM but not
+an LPM issue, so drop the quirk for the SSD in question.
+
+BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1583207
+Cc: stable@vger.kernel.org
+Cc: Richard W.M. Jones <rjones@redhat.com>
+Cc: Lorenzo Dalrio <lorenzo.dalrio@gmail.com>
+Reported-by: Lorenzo Dalrio <lorenzo.dalrio@gmail.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Acked-by: "Richard W.M. Jones" <rjones@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/ata/libata-core.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4543,9 +4543,6 @@ static const struct ata_blacklist_entry
+ ATA_HORKAGE_ZERO_AFTER_TRIM |
+ ATA_HORKAGE_NOLPM, },
+
+- /* Sandisk devices which are known to not handle LPM well */
+- { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
+-
+ /* devices that don't properly handle queued TRIM commands */
+ { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
--- /dev/null
+From 18c9a99bce2a57dfd7e881658703b5d7469cc7b9 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Tue, 29 May 2018 12:13:24 +0300
+Subject: libata: zpodd: small read overflow in eject_tray()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 18c9a99bce2a57dfd7e881658703b5d7469cc7b9 upstream.
+
+We read from the cdb[] buffer in ata_exec_internal_sg(). It has to be
+ATAPI_CDB_LEN (16) bytes long, but this buffer is only 12 bytes.
+
+Fixes: 213342053db5 ("libata: handle power transition of ODD")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/ata/libata-zpodd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/ata/libata-zpodd.c
++++ b/drivers/ata/libata-zpodd.c
+@@ -35,7 +35,7 @@ struct zpodd {
+ static int eject_tray(struct ata_device *dev)
+ {
+ struct ata_taskfile tf;
+- static const char cdb[] = { GPCMD_START_STOP_UNIT,
++ static const char cdb[ATAPI_CDB_LEN] = { GPCMD_START_STOP_UNIT,
+ 0, 0, 0,
+ 0x02, /* LoEj */
+ 0, 0, 0, 0, 0, 0, 0,
--- /dev/null
+From 8364da4751cf22201d74933d5e634176f44ed407 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <jbacik@fb.com>
+Date: Wed, 16 May 2018 14:51:17 -0400
+Subject: nbd: fix nbd device deletion
+
+From: Josef Bacik <jbacik@fb.com>
+
+commit 8364da4751cf22201d74933d5e634176f44ed407 upstream.
+
+This fixes a use after free bug, we shouldn't be doing disk->queue right
+after we do del_gendisk(disk). Save the queue and do the cleanup after
+the del_gendisk.
+
+Fixes: c6a4759ea0c9 ("nbd: add device refcounting")
+cc: stable@vger.kernel.org
+Signed-off-by: Josef Bacik <jbacik@fb.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/nbd.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -173,9 +173,12 @@ static const struct device_attribute pid
+ static void nbd_dev_remove(struct nbd_device *nbd)
+ {
+ struct gendisk *disk = nbd->disk;
++ struct request_queue *q;
++
+ if (disk) {
++ q = disk->queue;
+ del_gendisk(disk);
+- blk_cleanup_queue(disk->queue);
++ blk_cleanup_queue(q);
+ blk_mq_free_tag_set(&nbd->tag_set);
+ disk->private_data = NULL;
+ put_disk(disk);
--- /dev/null
+From c3f7c9397609705ef848cc98a5fb429b3e90c3c4 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <jbacik@fb.com>
+Date: Wed, 16 May 2018 14:51:18 -0400
+Subject: nbd: update size when connected
+
+From: Josef Bacik <jbacik@fb.com>
+
+commit c3f7c9397609705ef848cc98a5fb429b3e90c3c4 upstream.
+
+I messed up changing the size of an NBD device while it was connected by
+not actually updating the device or doing the uevent. Fix this by
+updating everything if we're connected and we change the size.
+
+cc: stable@vger.kernel.org
+Fixes: 639812a ("nbd: don't set the device size until we're connected")
+Signed-off-by: Josef Bacik <jbacik@fb.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/nbd.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -246,6 +246,8 @@ static void nbd_size_set(struct nbd_devi
+ struct nbd_config *config = nbd->config;
+ config->blksize = blocksize;
+ config->bytesize = blocksize * nr_blocks;
++ if (nbd->task_recv != NULL)
++ nbd_size_update(nbd);
+ }
+
+ static void nbd_complete_rq(struct request *req)
--- /dev/null
+From 9e2b19675d1338d2a38e99194756f2db44a081df Mon Sep 17 00:00:00 2001
+From: Josef Bacik <jbacik@fb.com>
+Date: Wed, 16 May 2018 14:51:19 -0400
+Subject: nbd: use bd_set_size when updating disk size
+
+From: Josef Bacik <jbacik@fb.com>
+
+commit 9e2b19675d1338d2a38e99194756f2db44a081df upstream.
+
+When we stopped relying on the bdev everywhere I broke updating the
+block device size on the fly, which ceph relies on. We can't just do
+set_capacity, we also have to do bd_set_size so things like parted will
+notice the device size change.
+
+Fixes: 29eaadc ("nbd: stop using the bdev everywhere")
+cc: stable@vger.kernel.org
+Signed-off-by: Josef Bacik <jbacik@fb.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/nbd.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -234,9 +234,18 @@ static void nbd_size_clear(struct nbd_de
+ static void nbd_size_update(struct nbd_device *nbd)
+ {
+ struct nbd_config *config = nbd->config;
++ struct block_device *bdev = bdget_disk(nbd->disk, 0);
++
+ blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
+ blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
+ set_capacity(nbd->disk, config->bytesize >> 9);
++ if (bdev) {
++ if (bdev->bd_disk)
++ bd_set_size(bdev, config->bytesize);
++ else
++ bdev->bd_invalidated = 1;
++ bdput(bdev);
++ }
+ kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
+ }
+
+@@ -1114,7 +1123,6 @@ static int nbd_start_device_ioctl(struct
+ if (ret)
+ return ret;
+
+- bd_set_size(bdev, config->bytesize);
+ if (max_part)
+ bdev->bd_invalidated = 1;
+ mutex_unlock(&nbd->config_lock);
--- /dev/null
+From cc1d5e749a2e1cf59fa940b976181e631d6985e1 Mon Sep 17 00:00:00 2001
+From: Keith Busch <keith.busch@intel.com>
+Date: Thu, 10 May 2018 08:34:20 -0600
+Subject: nvme/pci: Sync controller reset for AER slot_reset
+
+From: Keith Busch <keith.busch@intel.com>
+
+commit cc1d5e749a2e1cf59fa940b976181e631d6985e1 upstream.
+
+AER handling expects a successful return from slot_reset means the
+driver made the device functional again. The nvme driver had been using
+an asynchronous reset to recover the device, so the device
+may still be initializing after control is returned to the
+AER handler. This creates problems for subsequent event handling,
+causing the initialization to fail.
+
+This patch fixes that by syncing the controller reset before returning
+to the AER driver, and reporting the true state of the reset.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=199657
+Reported-by: Alex Gagniuc <mr.nuke.me@gmail.com>
+Cc: Sinan Kaya <okaya@codeaurora.org>
+Cc: Bjorn Helgaas <bhelgaas@google.com>
+Cc: stable@vger.kernel.org
+Tested-by: Alex Gagniuc <mr.nuke.me@gmail.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Keith Busch <keith.busch@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/host/pci.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2508,8 +2508,15 @@ static pci_ers_result_t nvme_slot_reset(
+
+ dev_info(dev->ctrl.device, "restart after slot reset\n");
+ pci_restore_state(pdev);
+- nvme_reset_ctrl(&dev->ctrl);
+- return PCI_ERS_RESULT_RECOVERED;
++ nvme_reset_ctrl_sync(&dev->ctrl);
++
++ switch (dev->ctrl.state) {
++ case NVME_CTRL_LIVE:
++ case NVME_CTRL_ADMIN_ONLY:
++ return PCI_ERS_RESULT_RECOVERED;
++ default:
++ return PCI_ERS_RESULT_DISCONNECT;
++ }
+ }
+
+ static void nvme_error_resume(struct pci_dev *pdev)
alsa-hda-add-dock-and-led-support-for-hp-elitebook-830-g5.patch
alsa-hda-add-dock-and-led-support-for-hp-probook-640-g4.patch
x86-mce-fix-stack-out-of-bounds-write-in-mce-inject.c-flags_read.patch
+smb3-fix-various-xid-leaks.patch
+smb3-on-reconnect-set-previoussessionid-field.patch
+cifs-511c54a2f69195b28afb9dd119f03787b1625bb4-adds-a-check-for-session-expiry.patch
+cifs-for-smb2-security-informaion-query-check-for-minimum-sized-security-descriptor-instead-of-sizeof-fileallinformation-class.patch
+nbd-fix-nbd-device-deletion.patch
+nbd-update-size-when-connected.patch
+nbd-use-bd_set_size-when-updating-disk-size.patch
+blk-mq-reinit-q-tag_set_list-entry-only-after-grace-period.patch
+bdi-move-cgroup-bdi_writeback-to-a-dedicated-low-concurrency-workqueue.patch
+cpufreq-fix-new-policy-initialization-during-limits-updates-via-sysfs.patch
+cpufreq-governors-fix-long-idle-detection-logic-in-load-calculation.patch
+libata-zpodd-small-read-overflow-in-eject_tray.patch
+libata-drop-sandisk-sd7ub3q-g1001-nolpm-quirk.patch
+nvme-pci-sync-controller-reset-for-aer-slot_reset.patch
+w1-mxc_w1-enable-clock-before-calling-clk_get_rate-on-it.patch
+x86-intel_rdt-enable-cmt-and-mbm-on-new-skylake-stepping.patch
--- /dev/null
+From cfe89091644c441a1ade6dae6d2e47b715648615 Mon Sep 17 00:00:00 2001
+From: Steve French <stfrench@microsoft.com>
+Date: Sat, 19 May 2018 02:04:55 -0500
+Subject: smb3: fix various xid leaks
+
+From: Steve French <stfrench@microsoft.com>
+
+commit cfe89091644c441a1ade6dae6d2e47b715648615 upstream.
+
+Fix a few cases where we were not freeing the xid which led to
+active requests being non-zero at unmount time.
+
+Signed-off-by: Steve French <smfrench@gmail.com>
+CC: Stable <stable@vger.kernel.org>
+Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2ops.c | 63 +++++++++++++++++++++++++++++++++++++-----------------
+ 1 file changed, 44 insertions(+), 19 deletions(-)
+
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -1571,8 +1571,11 @@ get_smb2_acl_by_path(struct cifs_sb_info
+ oparms.create_options = 0;
+
+ utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+- if (!utf16_path)
+- return ERR_PTR(-ENOMEM);
++ if (!utf16_path) {
++ rc = -ENOMEM;
++ free_xid(xid);
++ return ERR_PTR(rc);
++ }
+
+ oparms.tcon = tcon;
+ oparms.desired_access = READ_CONTROL;
+@@ -1630,8 +1633,11 @@ set_smb2_acl(struct cifs_ntsd *pnntsd, _
+ access_flags = WRITE_DAC;
+
+ utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+- if (!utf16_path)
+- return -ENOMEM;
++ if (!utf16_path) {
++ rc = -ENOMEM;
++ free_xid(xid);
++ return rc;
++ }
+
+ oparms.tcon = tcon;
+ oparms.desired_access = access_flags;
+@@ -1691,15 +1697,21 @@ static long smb3_zero_range(struct file
+
+ /* if file not oplocked can't be sure whether asking to extend size */
+ if (!CIFS_CACHE_READ(cifsi))
+- if (keep_size == false)
+- return -EOPNOTSUPP;
++ if (keep_size == false) {
++ rc = -EOPNOTSUPP;
++ free_xid(xid);
++ return rc;
++ }
+
+ /*
+ * Must check if file sparse since fallocate -z (zero range) assumes
+ * non-sparse allocation
+ */
+- if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE))
+- return -EOPNOTSUPP;
++ if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
++ rc = -EOPNOTSUPP;
++ free_xid(xid);
++ return rc;
++ }
+
+ /*
+ * need to make sure we are not asked to extend the file since the SMB3
+@@ -1708,8 +1720,11 @@ static long smb3_zero_range(struct file
+ * which for a non sparse file would zero the newly extended range
+ */
+ if (keep_size == false)
+- if (i_size_read(inode) < offset + len)
+- return -EOPNOTSUPP;
++ if (i_size_read(inode) < offset + len) {
++ rc = -EOPNOTSUPP;
++ free_xid(xid);
++ return rc;
++ }
+
+ cifs_dbg(FYI, "offset %lld len %lld", offset, len);
+
+@@ -1743,8 +1758,11 @@ static long smb3_punch_hole(struct file
+
+ /* Need to make file sparse, if not already, before freeing range. */
+ /* Consider adding equivalent for compressed since it could also work */
+- if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse))
+- return -EOPNOTSUPP;
++ if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
++ rc = -EOPNOTSUPP;
++ free_xid(xid);
++ return rc;
++ }
+
+ cifs_dbg(FYI, "offset %lld len %lld", offset, len);
+
+@@ -1776,8 +1794,10 @@ static long smb3_simple_falloc(struct fi
+
+ /* if file not oplocked can't be sure whether asking to extend size */
+ if (!CIFS_CACHE_READ(cifsi))
+- if (keep_size == false)
+- return -EOPNOTSUPP;
++ if (keep_size == false) {
++ free_xid(xid);
++ return rc;
++ }
+
+ /*
+ * Files are non-sparse by default so falloc may be a no-op
+@@ -1786,14 +1806,16 @@ static long smb3_simple_falloc(struct fi
+ */
+ if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
+ if (keep_size == true)
+- return 0;
++ rc = 0;
+ /* check if extending file */
+ else if (i_size_read(inode) >= off + len)
+ /* not extending file and already not sparse */
+- return 0;
++ rc = 0;
+ /* BB: in future add else clause to extend file */
+ else
+- return -EOPNOTSUPP;
++ rc = -EOPNOTSUPP;
++ free_xid(xid);
++ return rc;
+ }
+
+ if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
+@@ -1805,8 +1827,11 @@ static long smb3_simple_falloc(struct fi
+ * ie potentially making a few extra pages at the beginning
+ * or end of the file non-sparse via set_sparse is harmless.
+ */
+- if ((off > 8192) || (off + len + 8192 < i_size_read(inode)))
+- return -EOPNOTSUPP;
++ if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
++ rc = -EOPNOTSUPP;
++ free_xid(xid);
++ return rc;
++ }
+
+ rc = smb2_set_sparse(xid, tcon, cfile, inode, false);
+ }
--- /dev/null
+From b2adf22fdfba85a6701c481faccdbbb3a418ccfc Mon Sep 17 00:00:00 2001
+From: Steve French <stfrench@microsoft.com>
+Date: Thu, 31 May 2018 15:19:25 -0500
+Subject: smb3: on reconnect set PreviousSessionId field
+
+From: Steve French <stfrench@microsoft.com>
+
+commit b2adf22fdfba85a6701c481faccdbbb3a418ccfc upstream.
+
+The server detects reconnect by the (non-zero) value in PreviousSessionId
+of SMB2/SMB3 SessionSetup request, but this behavior regressed due
+to commit 166cea4dc3a4f66f020cfb9286225ecd228ab61d
+("SMB2: Separate RawNTLMSSP authentication from SMB2_sess_setup")
+
+CC: Stable <stable@vger.kernel.org>
+CC: Sachin Prabhu <sprabhu@redhat.com>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2pdu.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1182,6 +1182,7 @@ SMB2_sess_setup(const unsigned int xid,
+ sess_data->ses = ses;
+ sess_data->buf0_type = CIFS_NO_BUFFER;
+ sess_data->nls_cp = (struct nls_table *) nls_cp;
++ sess_data->previous_session = ses->Suid;
+
+ while (sess_data->func)
+ sess_data->func(sess_data);
--- /dev/null
+From 955bc61328dc0a297fb3baccd84e9d3aee501ed8 Mon Sep 17 00:00:00 2001
+From: Stefan Potyra <Stefan.Potyra@elektrobit.com>
+Date: Wed, 2 May 2018 10:55:31 +0200
+Subject: w1: mxc_w1: Enable clock before calling clk_get_rate() on it
+
+From: Stefan Potyra <Stefan.Potyra@elektrobit.com>
+
+commit 955bc61328dc0a297fb3baccd84e9d3aee501ed8 upstream.
+
+According to the API, you may only call clk_get_rate() after actually
+enabling it.
+
+Found by Linux Driver Verification project (linuxtesting.org).
+
+Fixes: a5fd9139f74c ("w1: add 1-wire master driver for i.MX27 / i.MX31")
+Signed-off-by: Stefan Potyra <Stefan.Potyra@elektrobit.com>
+Acked-by: Evgeniy Polyakov <zbr@ioremap.net>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/w1/masters/mxc_w1.c | 20 +++++++++++++-------
+ 1 file changed, 13 insertions(+), 7 deletions(-)
+
+--- a/drivers/w1/masters/mxc_w1.c
++++ b/drivers/w1/masters/mxc_w1.c
+@@ -112,6 +112,10 @@ static int mxc_w1_probe(struct platform_
+ if (IS_ERR(mdev->clk))
+ return PTR_ERR(mdev->clk);
+
++ err = clk_prepare_enable(mdev->clk);
++ if (err)
++ return err;
++
+ clkrate = clk_get_rate(mdev->clk);
+ if (clkrate < 10000000)
+ dev_warn(&pdev->dev,
+@@ -125,12 +129,10 @@ static int mxc_w1_probe(struct platform_
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mdev->regs = devm_ioremap_resource(&pdev->dev, res);
+- if (IS_ERR(mdev->regs))
+- return PTR_ERR(mdev->regs);
+-
+- err = clk_prepare_enable(mdev->clk);
+- if (err)
+- return err;
++ if (IS_ERR(mdev->regs)) {
++ err = PTR_ERR(mdev->regs);
++ goto out_disable_clk;
++ }
+
+ /* Software reset 1-Wire module */
+ writeb(MXC_W1_RESET_RST, mdev->regs + MXC_W1_RESET);
+@@ -146,8 +148,12 @@ static int mxc_w1_probe(struct platform_
+
+ err = w1_add_master_device(&mdev->bus_master);
+ if (err)
+- clk_disable_unprepare(mdev->clk);
++ goto out_disable_clk;
++
++ return 0;
+
++out_disable_clk:
++ clk_disable_unprepare(mdev->clk);
+ return err;
+ }
+
--- /dev/null
+From 1d9f3e20a56d33e55748552aeec597f58542f92d Mon Sep 17 00:00:00 2001
+From: Tony Luck <tony.luck@intel.com>
+Date: Fri, 8 Jun 2018 09:07:32 -0700
+Subject: x86/intel_rdt: Enable CMT and MBM on new Skylake stepping
+
+From: Tony Luck <tony.luck@intel.com>
+
+commit 1d9f3e20a56d33e55748552aeec597f58542f92d upstream.
+
+New stepping of Skylake has fixes for cache occupancy and memory
+bandwidth monitoring.
+
+Update the code to enable these by default on newer steppings.
+
+Signed-off-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: stable@vger.kernel.org # v4.14
+Cc: Vikas Shivappa <vikas.shivappa@linux.intel.com>
+Link: https://lkml.kernel.org/r/20180608160732.9842-1-tony.luck@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/intel_rdt.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kernel/cpu/intel_rdt.c
++++ b/arch/x86/kernel/cpu/intel_rdt.c
+@@ -773,6 +773,8 @@ static __init void rdt_quirks(void)
+ case INTEL_FAM6_SKYLAKE_X:
+ if (boot_cpu_data.x86_stepping <= 4)
+ set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
++ else
++ set_rdt_options("!l3cat");
+ }
+ }
+