--- /dev/null
+From bbb427e342495df1cda10051d0566388697499c0 Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bart.vanassche@sandisk.com>
+Date: Thu, 29 Sep 2016 08:33:30 -0700
+Subject: blkcg: Unlock blkcg_pol_mutex only once when cpd == NULL
+
+From: Bart Van Assche <bart.vanassche@sandisk.com>
+
+commit bbb427e342495df1cda10051d0566388697499c0 upstream.
+
+Unlocking a mutex twice is wrong. Hence modify blkcg_policy_register()
+such that blkcg_pol_mutex is unlocked only once when cpd == NULL. This
+keeps smatch from reporting the following error:
+
+block/blk-cgroup.c:1378: blkcg_policy_register() error: double unlock 'mutex:&blkcg_pol_mutex'
+
+Fixes: 06b285bd1125 ("blkcg: fix blkcg_policy_data allocation bug")
+Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
+Cc: Tejun Heo <tj@kernel.org>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-cgroup.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -1340,10 +1340,8 @@ int blkcg_policy_register(struct blkcg_p
+ struct blkcg_policy_data *cpd;
+
+ cpd = pol->cpd_alloc_fn(GFP_KERNEL);
+- if (!cpd) {
+- mutex_unlock(&blkcg_pol_mutex);
++ if (!cpd)
+ goto err_free_cpds;
+- }
+
+ blkcg->cpd[pol->plid] = cpd;
+ cpd->blkcg = blkcg;
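To make the error-path shape concrete, here is a minimal standalone C sketch of the "single unlock via goto" pattern the fix restores, using a pthread mutex as a stand-in for blkcg_pol_mutex. The names and the simplified flow are illustrative only, not the block layer API.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pol_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for pol->cpd_alloc_fn(); returns NULL here to force the error path. */
static void *cpd_alloc(void)
{
	return NULL;
}

static int policy_register(void)
{
	int ret = 0;
	void *cpd;

	pthread_mutex_lock(&pol_mutex);
	cpd = cpd_alloc();
	if (!cpd) {
		ret = -ENOMEM;
		goto out_unlock;	/* no unlock here; fall through to the single one below */
	}
	/* ... install cpd ... */
out_unlock:
	pthread_mutex_unlock(&pol_mutex);	/* unlocked exactly once on every path */
	return ret;
}

int main(void)
{
	printf("policy_register() = %d\n", policy_register());
	return 0;
}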
--- /dev/null
+From 7d414f396c91a3382e51cf628c1cf0709ad0188b Mon Sep 17 00:00:00 2001
+From: Ross Lagerwall <ross.lagerwall@citrix.com>
+Date: Tue, 20 Sep 2016 13:37:13 +0100
+Subject: cifs: Limit the overall credit acquired
+
+From: Ross Lagerwall <ross.lagerwall@citrix.com>
+
+commit 7d414f396c91a3382e51cf628c1cf0709ad0188b upstream.
+
+The kernel client requests 2 credits for many operations even though
+they only use 1 credit (presumably to build up a buffer of credit).
+Some servers seem to give the client as much credit as is requested. In
+this case, the amount of credit the client has continues increasing to
+the point where (server->credits * MAX_BUFFER_SIZE) overflows in
+smb2_wait_mtu_credits().
+
+Fix this by throttling the credit requests if a set limit is reached.
+For async requests where the credit charge may be > 1, request as much
+credit as is charged.
+The limit is chosen somewhat arbitrarily. The Windows client
+defaults to 128 credits, the Windows server allows clients up to
+512 credits (or 8192 for Windows 2016), and the NetApp server
+(and at least one other) does not limit clients at all.
+Choose a high enough value such that the client shouldn't limit
+performance.
+
+This behavior was seen with a NetApp filer (NetApp Release 9.0RC2).
+
+Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2glob.h | 10 ++++++++++
+ fs/cifs/smb2pdu.c | 18 +++++++++++++++++-
+ 2 files changed, 27 insertions(+), 1 deletion(-)
+
+--- a/fs/cifs/smb2glob.h
++++ b/fs/cifs/smb2glob.h
+@@ -61,4 +61,14 @@
+ /* Maximum buffer size value we can send with 1 credit */
+ #define SMB2_MAX_BUFFER_SIZE 65536
+
++/*
++ * Maximum number of credits to keep available.
++ * This value is chosen somewhat arbitrarily. The Windows client
++ * defaults to 128 credits, the Windows server allows clients up to
++ * 512 credits, and the NetApp server does not limit clients at all.
++ * Choose a high enough value such that the client shouldn't limit
++ * performance.
++ */
++#define SMB2_MAX_CREDITS_AVAILABLE 32000
++
+ #endif /* _SMB2_GLOB_H */
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -100,7 +100,21 @@ smb2_hdr_assemble(struct smb2_hdr *hdr,
+ hdr->ProtocolId = SMB2_PROTO_NUMBER;
+ hdr->StructureSize = cpu_to_le16(64);
+ hdr->Command = smb2_cmd;
+- hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
++ if (tcon && tcon->ses && tcon->ses->server) {
++ struct TCP_Server_Info *server = tcon->ses->server;
++
++ spin_lock(&server->req_lock);
++ /* Request up to 2 credits but don't go over the limit. */
++ if (server->credits >= SMB2_MAX_CREDITS_AVAILABLE)
++ hdr->CreditRequest = cpu_to_le16(0);
++ else
++ hdr->CreditRequest = cpu_to_le16(
++ min_t(int, SMB2_MAX_CREDITS_AVAILABLE -
++ server->credits, 2));
++ spin_unlock(&server->req_lock);
++ } else {
++ hdr->CreditRequest = cpu_to_le16(2);
++ }
+ hdr->ProcessId = cpu_to_le32((__u16)current->tgid);
+
+ if (!tcon)
+@@ -2057,6 +2071,7 @@ smb2_async_readv(struct cifs_readdata *r
+ if (rdata->credits) {
+ buf->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
+ SMB2_MAX_BUFFER_SIZE));
++ buf->CreditRequest = buf->CreditCharge;
+ spin_lock(&server->req_lock);
+ server->credits += rdata->credits -
+ le16_to_cpu(buf->CreditCharge);
+@@ -2243,6 +2258,7 @@ smb2_async_writev(struct cifs_writedata
+ if (wdata->credits) {
+ req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
+ SMB2_MAX_BUFFER_SIZE));
++ req->hdr.CreditRequest = req->hdr.CreditCharge;
+ spin_lock(&server->req_lock);
+ server->credits += wdata->credits -
+ le16_to_cpu(req->hdr.CreditCharge);
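Two pieces of the reasoning above can be checked with plain arithmetic: where the signed product server->credits * SMB2_MAX_BUFFER_SIZE starts to overflow, and what the min_t() clamp ends up requesting near the cap. A small standalone C sketch, reusing the two constants from the patch; everything else is illustrative and not the cifs code.

#include <limits.h>
#include <stdio.h>

#define SMB2_MAX_BUFFER_SIZE		65536
#define SMB2_MAX_CREDITS_AVAILABLE	32000

/* Simplified mirror of the CreditRequest throttling in smb2_hdr_assemble(). */
static int credit_request(int credits)
{
	int room = SMB2_MAX_CREDITS_AVAILABLE - credits;

	if (credits >= SMB2_MAX_CREDITS_AVAILABLE)
		return 0;
	return room < 2 ? room : 2;	/* min_t(int, room, 2) */
}

int main(void)
{
	/* With 32-bit int arithmetic the product overflows once credits reach 32768. */
	printf("overflow threshold : %d credits (INT_MAX = %d)\n",
	       INT_MAX / SMB2_MAX_BUFFER_SIZE + 1, INT_MAX);
	/* The 32000 cap keeps the worst-case product below INT_MAX. */
	printf("capped worst case  : %lld\n",
	       (long long)SMB2_MAX_CREDITS_AVAILABLE * SMB2_MAX_BUFFER_SIZE);

	printf("request at   100 credits: %d\n", credit_request(100));	/* 2 */
	printf("request at 31999 credits: %d\n", credit_request(31999));	/* 1 */
	printf("request at 32000 credits: %d\n", credit_request(32000));	/* 0 */
	return 0;
}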
--- /dev/null
+From 3afca265b5f53a0b15b79531c13858049505582d Mon Sep 17 00:00:00 2001
+From: Steve French <smfrench@gmail.com>
+Date: Thu, 22 Sep 2016 18:58:16 -0500
+Subject: Clarify locking of cifs file and tcon structures and make more granular
+
+From: Steve French <smfrench@gmail.com>
+
+commit 3afca265b5f53a0b15b79531c13858049505582d upstream.
+
+Remove the global file_list_lock to simplify cifs/smb3 locking and
+have spinlocks that more closely match the information they are
+protecting.
+
+Add new tcon->open_file_lock and file->file_info_lock spinlocks.
+Locks continue to follow a hierarchy,
+	cifs_socket --> cifs_ses --> cifs_tcon --> cifs_file
+where the global tcp_ses_lock still protects socket and cifs_ses, while
+the newer locks protect the lower level structure's information
+(tcon and cifs_file respectively).
+
+Signed-off-by: Steve French <steve.french@primarydata.com>
+Signed-off-by: Pavel Shilovsky <pshilov@microsoft.com>
+Reviewed-by: Aurelien Aptel <aaptel@suse.com>
+Reviewed-by: Germano Percossi <germano.percossi@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/cifsfs.c | 1
+ fs/cifs/cifsglob.h | 30 ++++++++++++------------
+ fs/cifs/cifssmb.c | 4 +--
+ fs/cifs/file.c | 66 +++++++++++++++++++++++++++++++----------------------
+ fs/cifs/misc.c | 15 ++++++------
+ fs/cifs/readdir.c | 6 ++--
+ fs/cifs/smb2misc.c | 16 ++++++------
+ 7 files changed, 75 insertions(+), 63 deletions(-)
+
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -1271,7 +1271,6 @@ init_cifs(void)
+ GlobalTotalActiveXid = 0;
+ GlobalMaxActiveXid = 0;
+ spin_lock_init(&cifs_tcp_ses_lock);
+- spin_lock_init(&cifs_file_list_lock);
+ spin_lock_init(&GlobalMid_Lock);
+
+ get_random_bytes(&cifs_lock_secret, sizeof(cifs_lock_secret));
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -833,6 +833,7 @@ struct cifs_tcon {
+ struct list_head tcon_list;
+ int tc_count;
+ struct list_head openFileList;
++ spinlock_t open_file_lock; /* protects list above */
+ struct cifs_ses *ses; /* pointer to session associated with */
+ char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
+ char *nativeFileSystem;
+@@ -889,7 +890,7 @@ struct cifs_tcon {
+ #endif /* CONFIG_CIFS_STATS2 */
+ __u64 bytes_read;
+ __u64 bytes_written;
+- spinlock_t stat_lock;
++ spinlock_t stat_lock; /* protects the two fields above */
+ #endif /* CONFIG_CIFS_STATS */
+ FILE_SYSTEM_DEVICE_INFO fsDevInfo;
+ FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
+@@ -1040,8 +1041,10 @@ struct cifs_fid_locks {
+ };
+
+ struct cifsFileInfo {
++ /* following two lists are protected by tcon->open_file_lock */
+ struct list_head tlist; /* pointer to next fid owned by tcon */
+ struct list_head flist; /* next fid (file instance) for this inode */
++ /* lock list below protected by cifsi->lock_sem */
+ struct cifs_fid_locks *llist; /* brlocks held by this fid */
+ kuid_t uid; /* allows finding which FileInfo structure */
+ __u32 pid; /* process id who opened file */
+@@ -1049,11 +1052,12 @@ struct cifsFileInfo {
+ /* BB add lock scope info here if needed */ ;
+ /* lock scope id (0 if none) */
+ struct dentry *dentry;
+- unsigned int f_flags;
+ struct tcon_link *tlink;
++ unsigned int f_flags;
+ bool invalidHandle:1; /* file closed via session abend */
+ bool oplock_break_cancelled:1;
+- int count; /* refcount protected by cifs_file_list_lock */
++ int count;
++ spinlock_t file_info_lock; /* protects four flag/count fields above */
+ struct mutex fh_mutex; /* prevents reopen race after dead ses*/
+ struct cifs_search_info srch_inf;
+ struct work_struct oplock_break; /* work for oplock breaks */
+@@ -1120,7 +1124,7 @@ struct cifs_writedata {
+
+ /*
+ * Take a reference on the file private data. Must be called with
+- * cifs_file_list_lock held.
++ * cfile->file_info_lock held.
+ */
+ static inline void
+ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
+@@ -1514,8 +1518,10 @@ require use of the stronger protocol */
+ * GlobalMid_Lock protects:
+ * list operations on pending_mid_q and oplockQ
+ * updates to XID counters, multiplex id and SMB sequence numbers
+- * cifs_file_list_lock protects:
+- * list operations on tcp and SMB session lists and tCon lists
++ * tcp_ses_lock protects:
++ * list operations on tcp and SMB session lists
++ * tcon->open_file_lock protects the list of open files hanging off the tcon
++ * cfile->file_info_lock protects counters and fields in cifs file struct
+ * f_owner.lock protects certain per file struct operations
+ * mapping->page_lock protects certain per page operations
+ *
+@@ -1547,18 +1553,12 @@ GLOBAL_EXTERN struct list_head cifs_tcp
+ * tcp session, and the list of tcon's per smb session. It also protects
+ * the reference counters for the server, smb session, and tcon. Finally,
+ * changes to the tcon->tidStatus should be done while holding this lock.
++ * generally the locks should be taken in order tcp_ses_lock before
++ * tcon->open_file_lock and that before file->file_info_lock since the
++ * structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file
+ */
+ GLOBAL_EXTERN spinlock_t cifs_tcp_ses_lock;
+
+-/*
+- * This lock protects the cifs_file->llist and cifs_file->flist
+- * list operations, and updates to some flags (cifs_file->invalidHandle)
+- * It will be moved to either use the tcon->stat_lock or equivalent later.
+- * If cifs_tcp_ses_lock and the lock below are both needed to be held, then
+- * the cifs_tcp_ses_lock must be grabbed first and released last.
+- */
+-GLOBAL_EXTERN spinlock_t cifs_file_list_lock;
+-
+ #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
+ /* Outstanding dir notify requests */
+ GLOBAL_EXTERN struct list_head GlobalDnotifyReqList;
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -98,13 +98,13 @@ cifs_mark_open_files_invalid(struct cifs
+ struct list_head *tmp1;
+
+ /* list all files open on tree connection and mark them invalid */
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tcon->open_file_lock);
+ list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
+ open_file = list_entry(tmp, struct cifsFileInfo, tlist);
+ open_file->invalidHandle = true;
+ open_file->oplock_break_cancelled = true;
+ }
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ /*
+ * BB Add call to invalidate_inodes(sb) for all superblocks mounted
+ * to this tcon.
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -305,6 +305,7 @@ cifs_new_fileinfo(struct cifs_fid *fid,
+ cfile->tlink = cifs_get_tlink(tlink);
+ INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
+ mutex_init(&cfile->fh_mutex);
++ spin_lock_init(&cfile->file_info_lock);
+
+ cifs_sb_active(inode->i_sb);
+
+@@ -317,7 +318,7 @@ cifs_new_fileinfo(struct cifs_fid *fid,
+ oplock = 0;
+ }
+
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tcon->open_file_lock);
+ if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
+ oplock = fid->pending_open->oplock;
+ list_del(&fid->pending_open->olist);
+@@ -326,12 +327,13 @@ cifs_new_fileinfo(struct cifs_fid *fid,
+ server->ops->set_fid(cfile, fid, oplock);
+
+ list_add(&cfile->tlist, &tcon->openFileList);
++
+ /* if readable file instance put first in list*/
+ if (file->f_mode & FMODE_READ)
+ list_add(&cfile->flist, &cinode->openFileList);
+ else
+ list_add_tail(&cfile->flist, &cinode->openFileList);
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+
+ if (fid->purge_cache)
+ cifs_zap_mapping(inode);
+@@ -343,16 +345,16 @@ cifs_new_fileinfo(struct cifs_fid *fid,
+ struct cifsFileInfo *
+ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
+ {
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&cifs_file->file_info_lock);
+ cifsFileInfo_get_locked(cifs_file);
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&cifs_file->file_info_lock);
+ return cifs_file;
+ }
+
+ /*
+ * Release a reference on the file private data. This may involve closing
+ * the filehandle out on the server. Must be called without holding
+- * cifs_file_list_lock.
++ * tcon->open_file_lock and cifs_file->file_info_lock.
+ */
+ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+ {
+@@ -367,11 +369,15 @@ void cifsFileInfo_put(struct cifsFileInf
+ struct cifs_pending_open open;
+ bool oplock_break_cancelled;
+
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tcon->open_file_lock);
++
++ spin_lock(&cifs_file->file_info_lock);
+ if (--cifs_file->count > 0) {
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&cifs_file->file_info_lock);
++ spin_unlock(&tcon->open_file_lock);
+ return;
+ }
++ spin_unlock(&cifs_file->file_info_lock);
+
+ if (server->ops->get_lease_key)
+ server->ops->get_lease_key(inode, &fid);
+@@ -395,7 +401,8 @@ void cifsFileInfo_put(struct cifsFileInf
+ set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
+ cifs_set_oplock_level(cifsi, 0);
+ }
+- spin_unlock(&cifs_file_list_lock);
++
++ spin_unlock(&tcon->open_file_lock);
+
+ oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
+
+@@ -772,10 +779,10 @@ int cifs_closedir(struct inode *inode, s
+ server = tcon->ses->server;
+
+ cifs_dbg(FYI, "Freeing private data in close dir\n");
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&cfile->file_info_lock);
+ if (server->ops->dir_needs_close(cfile)) {
+ cfile->invalidHandle = true;
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&cfile->file_info_lock);
+ if (server->ops->close_dir)
+ rc = server->ops->close_dir(xid, tcon, &cfile->fid);
+ else
+@@ -784,7 +791,7 @@ int cifs_closedir(struct inode *inode, s
+ /* not much we can do if it fails anyway, ignore rc */
+ rc = 0;
+ } else
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&cfile->file_info_lock);
+
+ buf = cfile->srch_inf.ntwrk_buf_start;
+ if (buf) {
+@@ -1728,12 +1735,13 @@ struct cifsFileInfo *find_readable_file(
+ {
+ struct cifsFileInfo *open_file = NULL;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
++ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+
+ /* only filter by fsuid on multiuser mounts */
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
+ fsuid_only = false;
+
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tcon->open_file_lock);
+ /* we could simply get the first_list_entry since write-only entries
+ are always at the end of the list but since the first entry might
+ have a close pending, we go through the whole list */
+@@ -1744,8 +1752,8 @@ struct cifsFileInfo *find_readable_file(
+ if (!open_file->invalidHandle) {
+ /* found a good file */
+ /* lock it so it will not be closed on us */
+- cifsFileInfo_get_locked(open_file);
+- spin_unlock(&cifs_file_list_lock);
++ cifsFileInfo_get(open_file);
++ spin_unlock(&tcon->open_file_lock);
+ return open_file;
+ } /* else might as well continue, and look for
+ another, or simply have the caller reopen it
+@@ -1753,7 +1761,7 @@ struct cifsFileInfo *find_readable_file(
+ } else /* write only file */
+ break; /* write only files are last so must be done */
+ }
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ return NULL;
+ }
+
+@@ -1762,6 +1770,7 @@ struct cifsFileInfo *find_writable_file(
+ {
+ struct cifsFileInfo *open_file, *inv_file = NULL;
+ struct cifs_sb_info *cifs_sb;
++ struct cifs_tcon *tcon;
+ bool any_available = false;
+ int rc;
+ unsigned int refind = 0;
+@@ -1777,15 +1786,16 @@ struct cifsFileInfo *find_writable_file(
+ }
+
+ cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
++ tcon = cifs_sb_master_tcon(cifs_sb);
+
+ /* only filter by fsuid on multiuser mounts */
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
+ fsuid_only = false;
+
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tcon->open_file_lock);
+ refind_writable:
+ if (refind > MAX_REOPEN_ATT) {
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ return NULL;
+ }
+ list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
+@@ -1796,8 +1806,8 @@ refind_writable:
+ if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
+ if (!open_file->invalidHandle) {
+ /* found a good writable file */
+- cifsFileInfo_get_locked(open_file);
+- spin_unlock(&cifs_file_list_lock);
++ cifsFileInfo_get(open_file);
++ spin_unlock(&tcon->open_file_lock);
+ return open_file;
+ } else {
+ if (!inv_file)
+@@ -1813,24 +1823,24 @@ refind_writable:
+
+ if (inv_file) {
+ any_available = false;
+- cifsFileInfo_get_locked(inv_file);
++ cifsFileInfo_get(inv_file);
+ }
+
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+
+ if (inv_file) {
+ rc = cifs_reopen_file(inv_file, false);
+ if (!rc)
+ return inv_file;
+ else {
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tcon->open_file_lock);
+ list_move_tail(&inv_file->flist,
+ &cifs_inode->openFileList);
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ cifsFileInfo_put(inv_file);
+- spin_lock(&cifs_file_list_lock);
+ ++refind;
+ inv_file = NULL;
++ spin_lock(&tcon->open_file_lock);
+ goto refind_writable;
+ }
+ }
+@@ -3618,15 +3628,17 @@ static int cifs_readpage(struct file *fi
+ static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
+ {
+ struct cifsFileInfo *open_file;
++ struct cifs_tcon *tcon =
++ cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
+
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tcon->open_file_lock);
+ list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
+ if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ return 1;
+ }
+ }
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ return 0;
+ }
+
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -120,6 +120,7 @@ tconInfoAlloc(void)
+ ++ret_buf->tc_count;
+ INIT_LIST_HEAD(&ret_buf->openFileList);
+ INIT_LIST_HEAD(&ret_buf->tcon_list);
++ spin_lock_init(&ret_buf->open_file_lock);
+ #ifdef CONFIG_CIFS_STATS
+ spin_lock_init(&ret_buf->stat_lock);
+ #endif
+@@ -465,7 +466,7 @@ is_valid_oplock_break(char *buffer, stru
+ continue;
+
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tcon->open_file_lock);
+ list_for_each(tmp2, &tcon->openFileList) {
+ netfile = list_entry(tmp2, struct cifsFileInfo,
+ tlist);
+@@ -495,11 +496,11 @@ is_valid_oplock_break(char *buffer, stru
+ &netfile->oplock_break);
+ netfile->oplock_break_cancelled = false;
+
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+ return true;
+ }
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+ cifs_dbg(FYI, "No matching file for oplock break\n");
+ return true;
+@@ -613,9 +614,9 @@ backup_cred(struct cifs_sb_info *cifs_sb
+ void
+ cifs_del_pending_open(struct cifs_pending_open *open)
+ {
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
+ list_del(&open->olist);
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
+ }
+
+ void
+@@ -635,7 +636,7 @@ void
+ cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
+ struct cifs_pending_open *open)
+ {
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tlink_tcon(tlink)->open_file_lock);
+ cifs_add_pending_open_locked(fid, tlink, open);
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
+ }
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -597,14 +597,14 @@ find_cifs_entry(const unsigned int xid,
+ is_dir_changed(file)) || (index_to_find < first_entry_in_buffer)) {
+ /* close and restart search */
+ cifs_dbg(FYI, "search backing up - close and restart search\n");
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&cfile->file_info_lock);
+ if (server->ops->dir_needs_close(cfile)) {
+ cfile->invalidHandle = true;
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&cfile->file_info_lock);
+ if (server->ops->close_dir)
+ server->ops->close_dir(xid, tcon, &cfile->fid);
+ } else
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&cfile->file_info_lock);
+ if (cfile->srch_inf.ntwrk_buf_start) {
+ cifs_dbg(FYI, "freeing SMB ff cache buf on search rewind\n");
+ if (cfile->srch_inf.smallBuf)
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -549,19 +549,19 @@ smb2_is_valid_lease_break(char *buffer)
+ list_for_each(tmp1, &server->smb_ses_list) {
+ ses = list_entry(tmp1, struct cifs_ses, smb_ses_list);
+
+- spin_lock(&cifs_file_list_lock);
+ list_for_each(tmp2, &ses->tcon_list) {
+ tcon = list_entry(tmp2, struct cifs_tcon,
+ tcon_list);
++ spin_lock(&tcon->open_file_lock);
+ cifs_stats_inc(
+ &tcon->stats.cifs_stats.num_oplock_brks);
+ if (smb2_tcon_has_lease(tcon, rsp, lw)) {
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+ return true;
+ }
++ spin_unlock(&tcon->open_file_lock);
+ }
+- spin_unlock(&cifs_file_list_lock);
+ }
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
+@@ -603,7 +603,7 @@ smb2_is_valid_oplock_break(char *buffer,
+ tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
+
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
+- spin_lock(&cifs_file_list_lock);
++ spin_lock(&tcon->open_file_lock);
+ list_for_each(tmp2, &tcon->openFileList) {
+ cfile = list_entry(tmp2, struct cifsFileInfo,
+ tlist);
+@@ -615,7 +615,7 @@ smb2_is_valid_oplock_break(char *buffer,
+
+ cifs_dbg(FYI, "file id match, oplock break\n");
+ cinode = CIFS_I(d_inode(cfile->dentry));
+-
++ spin_lock(&cfile->file_info_lock);
+ if (!CIFS_CACHE_WRITE(cinode) &&
+ rsp->OplockLevel == SMB2_OPLOCK_LEVEL_NONE)
+ cfile->oplock_break_cancelled = true;
+@@ -637,14 +637,14 @@ smb2_is_valid_oplock_break(char *buffer,
+ clear_bit(
+ CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+ &cinode->flags);
+-
++ spin_unlock(&cfile->file_info_lock);
+ queue_work(cifsiod_wq, &cfile->oplock_break);
+
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+ return true;
+ }
+- spin_unlock(&cifs_file_list_lock);
++ spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+ cifs_dbg(FYI, "No matching file for oplock break\n");
+ return true;
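The changelog above spells out the intended nesting: cifs_tcp_ses_lock, then tcon->open_file_lock, then cfile->file_info_lock. The standalone sketch below shows only that acquisition order with pthread spinlocks; the structures and fields are stand-ins mirroring the hierarchy, not the real cifs structures.

#include <pthread.h>
#include <stdio.h>

/* Outer-to-inner order mirrors cifs_tcp_ses_lock -> open_file_lock -> file_info_lock. */
struct tcon {
	pthread_spinlock_t open_file_lock;	/* protects the tcon's open-file list */
};

struct cfile {
	pthread_spinlock_t file_info_lock;	/* protects per-file flags and refcount */
	int count;
};

/* Drop a reference: take the tcon lock first, then the per-file lock inside it. */
static void put_file(struct tcon *tc, struct cfile *f)
{
	int last;

	pthread_spin_lock(&tc->open_file_lock);
	pthread_spin_lock(&f->file_info_lock);
	last = (--f->count == 0);
	pthread_spin_unlock(&f->file_info_lock);
	if (last) {
		/* this is where the file would be unlinked from tcon->openFileList */
	}
	pthread_spin_unlock(&tc->open_file_lock);
}

int main(void)
{
	struct tcon tc;
	struct cfile f = { .count = 1 };

	pthread_spin_init(&tc.open_file_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&f.file_info_lock, PTHREAD_PROCESS_PRIVATE);
	put_file(&tc, &f);
	printf("refcount after put: %d\n", f.count);
	return 0;
}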
--- /dev/null
+From 24df1483c272c99ed88b0cba135d0e1dfdee3930 Mon Sep 17 00:00:00 2001
+From: Steve French <smfrench@gmail.com>
+Date: Thu, 29 Sep 2016 04:20:23 -0500
+Subject: Cleanup missing frees on some ioctls
+
+From: Steve French <smfrench@gmail.com>
+
+commit 24df1483c272c99ed88b0cba135d0e1dfdee3930 upstream.
+
+Clean up some missing memory frees in some cifs ioctls, and
+clarify others to make it more obvious that no data is returned.
+
+Signed-off-by: Steve French <smfrench@gmail.com>
+Acked-by: Sachin Prabhu <sprabhu@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2ops.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -287,7 +287,7 @@ SMB3_request_interfaces(const unsigned i
+ cifs_dbg(FYI, "Link Speed %lld\n",
+ le64_to_cpu(out_buf->LinkSpeed));
+ }
+-
++ kfree(out_buf);
+ return rc;
+ }
+ #endif /* STATS2 */
+@@ -700,6 +700,7 @@ smb2_clone_range(const unsigned int xid,
+
+ cchunk_out:
+ kfree(pcchunk);
++ kfree(retbuf);
+ return rc;
+ }
+
+@@ -824,7 +825,6 @@ smb2_duplicate_extents(const unsigned in
+ {
+ int rc;
+ unsigned int ret_data_len;
+- char *retbuf = NULL;
+ struct duplicate_extents_to_file dup_ext_buf;
+ struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
+
+@@ -850,7 +850,7 @@ smb2_duplicate_extents(const unsigned in
+ FSCTL_DUPLICATE_EXTENTS_TO_FILE,
+ true /* is_fsctl */, (char *)&dup_ext_buf,
+ sizeof(struct duplicate_extents_to_file),
+- (char **)&retbuf,
++ NULL,
+ &ret_data_len);
+
+ if (ret_data_len > 0)
+@@ -873,7 +873,6 @@ smb3_set_integrity(const unsigned int xi
+ struct cifsFileInfo *cfile)
+ {
+ struct fsctl_set_integrity_information_req integr_info;
+- char *retbuf = NULL;
+ unsigned int ret_data_len;
+
+ integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
+@@ -885,7 +884,7 @@ smb3_set_integrity(const unsigned int xi
+ FSCTL_SET_INTEGRITY_INFORMATION,
+ true /* is_fsctl */, (char *)&integr_info,
+ sizeof(struct fsctl_set_integrity_information_req),
+- (char **)&retbuf,
++ NULL,
+ &ret_data_len);
+
+ }
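The pattern behind the cleanup above is general: an ioctl helper may allocate a return buffer on the caller's behalf, so the caller either frees it on every exit path or passes NULL to say that no returned data is wanted. A small standalone C sketch of both call styles; the helper and its names are invented for illustration, not the SMB2 ioctl API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Stand-in for an ioctl helper that may hand back an output buffer.
 * Passing out == NULL tells it the caller does not want any returned data.
 */
static int do_ioctl(char **out, unsigned int *out_len)
{
	*out_len = 0;
	if (out) {
		*out = malloc(16);
		if (!*out)
			return -1;
		memcpy(*out, "result", 7);
		*out_len = 7;
	}
	return 0;
}

int main(void)
{
	char *buf = NULL;
	unsigned int len;

	/* A caller that consumes the data must free it on every exit path. */
	if (do_ioctl(&buf, &len) == 0)
		printf("got %u bytes: %s\n", len, buf);
	free(buf);	/* free(NULL) is a no-op, so this is safe on error paths too */

	/* A caller interested only in the side effect passes NULL: nothing to free. */
	do_ioctl(NULL, &len);
	return 0;
}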
--- /dev/null
+From 70b565bbdb911023373e035225ab10077e4ab937 Mon Sep 17 00:00:00 2001
+From: Vaibhav Jain <vaibhav@linux.vnet.ibm.com>
+Date: Fri, 14 Oct 2016 15:08:36 +0530
+Subject: cxl: Prevent adapter reset if an active context exists
+
+From: Vaibhav Jain <vaibhav@linux.vnet.ibm.com>
+
+commit 70b565bbdb911023373e035225ab10077e4ab937 upstream.
+
+This patch prevents resetting the cxl adapter via sysfs while one or
+more active cxl_contexts exist on it. This protects against an
+unrecoverable error caused by the PSL still owning a dirty cache line
+after the reset while the host tries to touch the same cache line. In
+case a forced reset of the card is required irrespective of any active
+contexts, the value -1 can be stored in the 'reset' sysfs attribute of
+the card.
+
+The patch introduces a new atomic_t member named contexts_num inside
+struct cxl that holds the number of active contexts attached to the
+card, which is checked against '0' before proceeding with the reset. To
+prevent a race condition where a context is activated just after the
+reset check is performed, contexts_num is atomically set to '-1' after
+the check to indicate that no more contexts can be activated on the
+card.
+
+Before activating a context we atomically test whether contexts_num is
+non-negative and, if so, increment its value by one. If the value of
+contexts_num is negative, the card is about to be reset and context
+activation is errored out at that point.
+
+Fixes: 62fa19d4b4fd ("cxl: Add ability to reset the card")
+Acked-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
+Reviewed-by: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
+Signed-off-by: Vaibhav Jain <vaibhav@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/ABI/testing/sysfs-class-cxl | 7 +++--
+ drivers/misc/cxl/api.c | 9 ++++++
+ drivers/misc/cxl/context.c | 3 ++
+ drivers/misc/cxl/cxl.h | 24 +++++++++++++++++
+ drivers/misc/cxl/file.c | 11 +++++++
+ drivers/misc/cxl/guest.c | 3 ++
+ drivers/misc/cxl/main.c | 42 +++++++++++++++++++++++++++++-
+ drivers/misc/cxl/pci.c | 2 +
+ drivers/misc/cxl/sysfs.c | 27 ++++++++++++++++---
+ 9 files changed, 121 insertions(+), 7 deletions(-)
+
+--- a/Documentation/ABI/testing/sysfs-class-cxl
++++ b/Documentation/ABI/testing/sysfs-class-cxl
+@@ -220,8 +220,11 @@ What: /sys/class/cxl/<card>/re
+ Date: October 2014
+ Contact: linuxppc-dev@lists.ozlabs.org
+ Description: write only
+- Writing 1 will issue a PERST to card which may cause the card
+- to reload the FPGA depending on load_image_on_perst.
++ Writing 1 will issue a PERST to card provided there are no
++ contexts active on any one of the card AFUs. This may cause
++ the card to reload the FPGA depending on load_image_on_perst.
++ Writing -1 will do a force PERST irrespective of any active
++ contexts on the card AFUs.
+ Users: https://github.com/ibm-capi/libcxl
+
+ What: /sys/class/cxl/<card>/perst_reloads_same_image (not in a guest)
+--- a/drivers/misc/cxl/api.c
++++ b/drivers/misc/cxl/api.c
+@@ -229,6 +229,14 @@ int cxl_start_context(struct cxl_context
+ if (ctx->status == STARTED)
+ goto out; /* already started */
+
++ /*
++ * Increment the mapped context count for adapter. This also checks
++ * if adapter_context_lock is taken.
++ */
++ rc = cxl_adapter_context_get(ctx->afu->adapter);
++ if (rc)
++ goto out;
++
+ if (task) {
+ ctx->pid = get_task_pid(task, PIDTYPE_PID);
+ ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
+@@ -240,6 +248,7 @@ int cxl_start_context(struct cxl_context
+
+ if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
+ put_pid(ctx->pid);
++ cxl_adapter_context_put(ctx->afu->adapter);
+ cxl_ctx_put();
+ goto out;
+ }
+--- a/drivers/misc/cxl/context.c
++++ b/drivers/misc/cxl/context.c
+@@ -238,6 +238,9 @@ int __detach_context(struct cxl_context
+ put_pid(ctx->glpid);
+
+ cxl_ctx_put();
++
++ /* Decrease the attached context count on the adapter */
++ cxl_adapter_context_put(ctx->afu->adapter);
+ return 0;
+ }
+
+--- a/drivers/misc/cxl/cxl.h
++++ b/drivers/misc/cxl/cxl.h
+@@ -615,6 +615,14 @@ struct cxl {
+ bool perst_select_user;
+ bool perst_same_image;
+ bool psl_timebase_synced;
++
++ /*
++ * number of contexts mapped on to this card. Possible values are:
++ * >0: Number of contexts mapped and new one can be mapped.
++ * 0: No active contexts and new ones can be mapped.
++ * -1: No contexts mapped and new ones cannot be mapped.
++ */
++ atomic_t contexts_num;
+ };
+
+ int cxl_pci_alloc_one_irq(struct cxl *adapter);
+@@ -940,4 +948,20 @@ bool cxl_pci_is_vphb_device(struct pci_d
+
+ /* decode AFU error bits in the PSL register PSL_SERR_An */
+ void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr);
++
++/*
++ * Increments the number of attached contexts on an adapter.
++ * In case an adapter_context_lock is taken the return -EBUSY.
++ */
++int cxl_adapter_context_get(struct cxl *adapter);
++
++/* Decrements the number of attached contexts on an adapter */
++void cxl_adapter_context_put(struct cxl *adapter);
++
++/* If no active contexts then prevents contexts from being attached */
++int cxl_adapter_context_lock(struct cxl *adapter);
++
++/* Unlock the contexts-lock if taken. Warn and force unlock otherwise */
++void cxl_adapter_context_unlock(struct cxl *adapter);
++
+ #endif
+--- a/drivers/misc/cxl/file.c
++++ b/drivers/misc/cxl/file.c
+@@ -205,11 +205,22 @@ static long afu_ioctl_start_work(struct
+ ctx->pid = get_task_pid(current, PIDTYPE_PID);
+ ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
+
++ /*
++ * Increment the mapped context count for adapter. This also checks
++ * if adapter_context_lock is taken.
++ */
++ rc = cxl_adapter_context_get(ctx->afu->adapter);
++ if (rc) {
++ afu_release_irqs(ctx, ctx);
++ goto out;
++ }
++
+ trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
+
+ if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
+ amr))) {
+ afu_release_irqs(ctx, ctx);
++ cxl_adapter_context_put(ctx->afu->adapter);
+ goto out;
+ }
+
+--- a/drivers/misc/cxl/guest.c
++++ b/drivers/misc/cxl/guest.c
+@@ -1152,6 +1152,9 @@ struct cxl *cxl_guest_init_adapter(struc
+ if ((rc = cxl_sysfs_adapter_add(adapter)))
+ goto err_put1;
+
++ /* release the context lock as the adapter is configured */
++ cxl_adapter_context_unlock(adapter);
++
+ return adapter;
+
+ err_put1:
+--- a/drivers/misc/cxl/main.c
++++ b/drivers/misc/cxl/main.c
+@@ -243,8 +243,10 @@ struct cxl *cxl_alloc_adapter(void)
+ if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
+ goto err2;
+
+- return adapter;
++ /* start with context lock taken */
++ atomic_set(&adapter->contexts_num, -1);
+
++ return adapter;
+ err2:
+ cxl_remove_adapter_nr(adapter);
+ err1:
+@@ -286,6 +288,44 @@ int cxl_afu_select_best_mode(struct cxl_
+ return 0;
+ }
+
++int cxl_adapter_context_get(struct cxl *adapter)
++{
++ int rc;
++
++ rc = atomic_inc_unless_negative(&adapter->contexts_num);
++ return rc >= 0 ? 0 : -EBUSY;
++}
++
++void cxl_adapter_context_put(struct cxl *adapter)
++{
++ atomic_dec_if_positive(&adapter->contexts_num);
++}
++
++int cxl_adapter_context_lock(struct cxl *adapter)
++{
++ int rc;
++ /* no active contexts -> contexts_num == 0 */
++ rc = atomic_cmpxchg(&adapter->contexts_num, 0, -1);
++ return rc ? -EBUSY : 0;
++}
++
++void cxl_adapter_context_unlock(struct cxl *adapter)
++{
++ int val = atomic_cmpxchg(&adapter->contexts_num, -1, 0);
++
++ /*
++ * contexts lock taken -> contexts_num == -1
++ * If not true then show a warning and force reset the lock.
++ * This will happen when context_unlock was requested without
++ * doing a context_lock.
++ */
++ if (val != -1) {
++ atomic_set(&adapter->contexts_num, 0);
++ WARN(1, "Adapter context unlocked with %d active contexts",
++ val);
++ }
++}
++
+ static int __init init_cxl(void)
+ {
+ int rc = 0;
+--- a/drivers/misc/cxl/pci.c
++++ b/drivers/misc/cxl/pci.c
+@@ -1484,6 +1484,8 @@ static int cxl_configure_adapter(struct
+ if ((rc = cxl_native_register_psl_err_irq(adapter)))
+ goto err;
+
++ /* Release the context lock as adapter is configured */
++ cxl_adapter_context_unlock(adapter);
+ return 0;
+
+ err:
+--- a/drivers/misc/cxl/sysfs.c
++++ b/drivers/misc/cxl/sysfs.c
+@@ -75,12 +75,31 @@ static ssize_t reset_adapter_store(struc
+ int val;
+
+ rc = sscanf(buf, "%i", &val);
+- if ((rc != 1) || (val != 1))
++ if ((rc != 1) || (val != 1 && val != -1))
+ return -EINVAL;
+
+- if ((rc = cxl_ops->adapter_reset(adapter)))
+- return rc;
+- return count;
++ /*
++ * See if we can lock the context mapping that's only allowed
++ * when there are no contexts attached to the adapter. Once
++ * taken this will also prevent any context from getting activated.
++ */
++ if (val == 1) {
++ rc = cxl_adapter_context_lock(adapter);
++ if (rc)
++ goto out;
++
++ rc = cxl_ops->adapter_reset(adapter);
++ /* In case reset failed release context lock */
++ if (rc)
++ cxl_adapter_context_unlock(adapter);
++
++ } else if (val == -1) {
++ /* Perform a forced adapter reset */
++ rc = cxl_ops->adapter_reset(adapter);
++ }
++
++out:
++ return rc ? rc : count;
+ }
+
+ static ssize_t load_image_on_perst_show(struct device *device,
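The counting scheme described above can be modelled with standalone C11 atomics: the counter is -1 while "locked", 0 when idle, and positive while contexts are attached; context_get()/context_put() stand in for atomic_inc_unless_negative()/atomic_dec_if_positive(), and lock/unlock are compare-and-swap between 0 and -1. This is an illustrative model of the pattern, not the cxl driver code.

#include <stdatomic.h>
#include <stdio.h>

/*
 * Counter doubling as a lock, as in the patch above:
 *   >0: contexts attached,  0: idle,  -1: locked (no new contexts allowed).
 */
static atomic_int contexts_num = -1;	/* adapters start locked until configured */

static int context_get(void)		/* models atomic_inc_unless_negative() */
{
	int v = atomic_load(&contexts_num);

	while (v >= 0)
		if (atomic_compare_exchange_weak(&contexts_num, &v, v + 1))
			return 0;
	return -1;			/* -EBUSY in the kernel */
}

static void context_put(void)		/* models atomic_dec_if_positive() */
{
	int v = atomic_load(&contexts_num);

	while (v > 0 && !atomic_compare_exchange_weak(&contexts_num, &v, v - 1))
		;
}

static int context_lock(void)		/* succeeds only when nothing is attached */
{
	int expected = 0;

	return atomic_compare_exchange_strong(&contexts_num, &expected, -1) ? 0 : -1;
}

static void context_unlock(void)
{
	int expected = -1;

	if (!atomic_compare_exchange_strong(&contexts_num, &expected, 0)) {
		/* mirrors the WARN plus forced reset in cxl_adapter_context_unlock() */
		fprintf(stderr, "unlock with %d active contexts\n", expected);
		atomic_store(&contexts_num, 0);
	}
}

int main(void)
{
	printf("get while locked : %d\n", context_get());	/* -1: card not ready */
	context_unlock();					/* adapter configured */
	printf("get after unlock : %d\n", context_get());	/* 0 */
	printf("lock with one ctx: %d\n", context_lock());	/* -1: reset refused */
	context_put();
	printf("lock when idle   : %d\n", context_lock());	/* 0: reset may proceed */
	return 0;
}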
--- /dev/null
+From 9742805d6b1bfb45d7f267648c34fb5bcd347397 Mon Sep 17 00:00:00 2001
+From: Steve French <smfrench@gmail.com>
+Date: Mon, 19 Sep 2016 22:06:35 -0500
+Subject: Display number of credits available
+
+From: Steve French <smfrench@gmail.com>
+
+commit 9742805d6b1bfb45d7f267648c34fb5bcd347397 upstream.
+
+In debugging smb3, it is useful to display the number of credits
+available, so we can see when the server has not granted sufficient
+operations for the client to make progress, or, alternatively, when the
+client has requested too many credits (as we saw in a recent bug), so we
+can compare with the number of credits the server thinks we have.
+
+Add a /proc/fs/cifs/DebugData line to display the client view
+on how many credits are available.
+
+Signed-off-by: Steve French <steve.french@primarydata.com>
+Reported-by: Germano Percossi <germano.percossi@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/cifs_debug.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -152,6 +152,7 @@ static int cifs_debug_data_proc_show(str
+ list_for_each(tmp1, &cifs_tcp_ses_list) {
+ server = list_entry(tmp1, struct TCP_Server_Info,
+ tcp_ses_list);
++ seq_printf(m, "\nNumber of credits: %d", server->credits);
+ i++;
+ list_for_each(tmp2, &server->smb_ses_list) {
+ ses = list_entry(tmp2, struct cifs_ses,
--- /dev/null
+From 18dd8e1a65ddae2351d0f0d6dd4a334f441fc5fa Mon Sep 17 00:00:00 2001
+From: Steve French <smfrench@gmail.com>
+Date: Mon, 26 Sep 2016 14:23:08 -0500
+Subject: Do not send SMB3 SET_INFO request if nothing is changing
+
+From: Steve French <smfrench@gmail.com>
+
+commit 18dd8e1a65ddae2351d0f0d6dd4a334f441fc5fa upstream.
+
+[CIFS] We had cases where we sent an SMB2/SMB3 setinfo request with all
+timestamp (and DOS attribute) fields marked as 0 (i.e. do not change),
+e.g. on chmod or chown.
+
+Signed-off-by: Steve French <steve.french@primarydata.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2inode.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -266,9 +266,15 @@ smb2_set_file_info(struct inode *inode,
+ struct tcon_link *tlink;
+ int rc;
+
++ if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
++ (buf->LastWriteTime == 0) && (buf->ChangeTime) &&
++ (buf->Attributes == 0))
++ return 0; /* would be a no op, no sense sending this */
++
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
++
+ rc = smb2_open_op_close(xid, tlink_tcon(tlink), cifs_sb, full_path,
+ FILE_WRITE_ATTRIBUTES, FILE_OPEN, 0, buf,
+ SMB2_OP_SET_INFO);
--- /dev/null
+From d171356ff11ab1825e456dfb979755e01b3c54a1 Mon Sep 17 00:00:00 2001
+From: Sachin Prabhu <sprabhu@redhat.com>
+Date: Tue, 6 Sep 2016 13:22:34 +0100
+Subject: Fix regression which breaks DFS mounting
+
+From: Sachin Prabhu <sprabhu@redhat.com>
+
+commit d171356ff11ab1825e456dfb979755e01b3c54a1 upstream.
+
+Patch a6b5058 causes the -EREMOTE returned by is_path_accessible() in
+cifs_mount() to be ignored, which breaks DFS mounting.
+
+Signed-off-by: Sachin Prabhu <sprabhu@redhat.com>
+Reviewed-by: Aurelien Aptel <aaptel@suse.com>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/connect.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -3688,14 +3688,16 @@ remote_path_check:
+ goto mount_fail_check;
+ }
+
+- rc = cifs_are_all_path_components_accessible(server,
++ if (rc != -EREMOTE) {
++ rc = cifs_are_all_path_components_accessible(server,
+ xid, tcon, cifs_sb,
+ full_path);
+- if (rc != 0) {
+- cifs_dbg(VFS, "cannot query dirs between root and final path, "
+- "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
+- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+- rc = 0;
++ if (rc != 0) {
++ cifs_dbg(VFS, "cannot query dirs between root and final path, "
++ "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
++ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
++ rc = 0;
++ }
+ }
+ kfree(full_path);
+ }
--- /dev/null
+From 94f873717571c759b7928399cbbddfa3d569bd01 Mon Sep 17 00:00:00 2001
+From: Aurelien Aptel <aaptel@suse.com>
+Date: Thu, 22 Sep 2016 07:38:50 +0200
+Subject: fs/cifs: keep guid when assigning fid to fileinfo
+
+From: Aurelien Aptel <aaptel@suse.com>
+
+commit 94f873717571c759b7928399cbbddfa3d569bd01 upstream.
+
+When we open a durable handle we give a Globally Unique
+Identifier (GUID) to the server, which we must keep for later reference,
+e.g. when reopening persistent handles on reconnection.
+
+Without this, the GUID generated for a new persistent handle was lost and
+16 zero bytes were used instead on re-opening.
+
+Signed-off-by: Aurelien Aptel <aaptel@suse.com>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2ops.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -541,6 +541,7 @@ smb2_set_fid(struct cifsFileInfo *cfile,
+ server->ops->set_oplock_level(cinode, oplock, fid->epoch,
+ &fid->purge_cache);
+ cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
++ memcpy(cfile->fid.create_guid, fid->create_guid, 16);
+ }
+
+ static void
--- /dev/null
+From c0ca8df717061ae3d2ea624024033103c64210ae Mon Sep 17 00:00:00 2001
+From: Noam Camus <noamca@mellanox.com>
+Date: Thu, 13 Oct 2016 16:15:32 +0300
+Subject: irqchip/eznps: Acknowledge NPS_IPI before calling the handler
+
+From: Noam Camus <noamca@mellanox.com>
+
+commit c0ca8df717061ae3d2ea624024033103c64210ae upstream.
+
+IPI_IRQ (also TIMER0_IRQ) should be acked before the action->handler is called
+in handle_percpu_devid_irq.
+
+The IPI irq is edge sensitive and we might miss an IPI interrupt if it is
+triggered again while the handler runs.
+
+Fixes: 44df427c894a ("irqchip: add nps Internal and external irqchips")
+Signed-off-by: Noam Camus <noamca@mellanox.com>
+Cc: marc.zyngier@arm.com
+Cc: jason@lakedaemon.net
+Link: http://lkml.kernel.org/r/1476364532-12634-1-git-send-email-noamca@mellanox.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/irqchip/irq-eznps.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/irqchip/irq-eznps.c
++++ b/drivers/irqchip/irq-eznps.c
+@@ -85,7 +85,7 @@ static void nps400_irq_eoi_global(struct
+ nps_ack_gic();
+ }
+
+-static void nps400_irq_eoi(struct irq_data *irqd)
++static void nps400_irq_ack(struct irq_data *irqd)
+ {
+ unsigned int __maybe_unused irq = irqd_to_hwirq(irqd);
+
+@@ -103,7 +103,7 @@ static struct irq_chip nps400_irq_chip_p
+ .name = "NPS400 IC",
+ .irq_mask = nps400_irq_mask,
+ .irq_unmask = nps400_irq_unmask,
+- .irq_eoi = nps400_irq_eoi,
++ .irq_ack = nps400_irq_ack,
+ };
+
+ static int nps400_irq_map(struct irq_domain *d, unsigned int virq,
--- /dev/null
+From 9224eb77e63f70f16c0b6b7a20ca7d395f3bc077 Mon Sep 17 00:00:00 2001
+From: Vladimir Murzin <vladimir.murzin@arm.com>
+Date: Mon, 17 Oct 2016 16:00:46 +0100
+Subject: irqchip/gic-v3-its: Fix entry size mask for GITS_BASER
+
+From: Vladimir Murzin <vladimir.murzin@arm.com>
+
+commit 9224eb77e63f70f16c0b6b7a20ca7d395f3bc077 upstream.
+
+Entry Size in GITS_BASER<n> occupies 5 bits [52:48], but we mask it with
+8 bits (0xff).
+
+Fixes: cc2d3216f53c ("irqchip: GICv3: ITS command queue")
+Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/irqchip/arm-gic-v3.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/irqchip/arm-gic-v3.h
++++ b/include/linux/irqchip/arm-gic-v3.h
+@@ -290,7 +290,7 @@
+ #define GITS_BASER_TYPE_SHIFT (56)
+ #define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
+ #define GITS_BASER_ENTRY_SIZE_SHIFT (48)
+-#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
++#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
+ #define GITS_BASER_SHAREABILITY_SHIFT (10)
+ #define GITS_BASER_InnerShareable \
+ GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
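A quick standalone check of the mask change: with an 8-bit mask, anything set in bits [55:53] of the register leaks into the computed entry size. The register value below is made up purely to show the arithmetic; it does not claim to reflect the real GITS_BASER layout beyond the Entry_Size field.

#include <stdint.h>
#include <stdio.h>

#define GITS_BASER_ENTRY_SIZE_SHIFT	48

/* Entry_Size is a 5-bit field at [52:48], so the mask must be 0x1f, not 0xff. */
#define ENTRY_SIZE_FIXED(r)	((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
#define ENTRY_SIZE_BUGGY(r)	((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)

int main(void)
{
	/*
	 * Hypothetical value: the Entry_Size field holds 7 (i.e. 8-byte entries),
	 * and one bit just above the field (bit 53) happens to be set.
	 */
	uint64_t r = (7ULL << GITS_BASER_ENTRY_SIZE_SHIFT) | (1ULL << 53);

	printf("0x1f mask: %llu-byte entries\n", (unsigned long long)ENTRY_SIZE_FIXED(r));
	printf("0xff mask: %llu-byte entries\n", (unsigned long long)ENTRY_SIZE_BUGGY(r));
	return 0;
}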
--- /dev/null
+From d102eb5c1ac5e6743b1c6d145c06a25d98ad1375 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Fri, 14 Oct 2016 10:26:21 +0300
+Subject: irqchip/gicv3: Handle loop timeout proper
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit d102eb5c1ac5e6743b1c6d145c06a25d98ad1375 upstream.
+
+The timeout loop terminates when the loop count is zero, but the decrement
+of the count variable happens after the check. So count is -1 when we check
+for the timeout and therefore the error message is suppressed.
+
+Change it to pre-decrement, so the error message is emitted.
+
+[ tglx: Massaged changelog ]
+
+Fixes: a2c225101234 ("irqchip: gic-v3: Refactor gic_enable_redist to support both enabling and disabling")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Acked-by: Sudeep Holla <sudeep.holla@arm.com>
+Cc: Marc Zyngier <marc.zyngier@arm.com>
+Cc: kernel-janitors@vger.kernel.org
+Cc: Jason Cooper <jason@lakedaemon.net>
+Link: http://lkml.kernel.org/r/20161014072534.GA15168@mwanda
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/irqchip/irq-gic-v3.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -153,7 +153,7 @@ static void gic_enable_redist(bool enabl
+ return; /* No PM support in this redistributor */
+ }
+
+- while (count--) {
++ while (--count) {
+ val = readl_relaxed(rbase + GICR_WAKER);
+ if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
+ break;
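The post- versus pre-decrement behaviour the changelog describes is easy to verify in isolation. The sketch below models only the loop counter, not the GICR_WAKER polling; note that the pre-decrement form also iterates one time fewer, which is immaterial for a large timeout count.

#include <stdio.h>

int main(void)
{
	int count;

	/*
	 * Post-decrement: the body sees 2, 1, 0 and the loop exits with count == -1,
	 * so a later "if (!count) report timeout" check can never fire.
	 */
	count = 3;
	while (count--)
		;	/* poll hardware here */
	printf("post-decrement exits with count = %d\n", count);

	/* Pre-decrement: the loop exits with count == 0 and the timeout check works. */
	count = 3;
	while (--count)
		;	/* poll hardware here */
	printf("pre-decrement exits with count = %d\n", count);
	return 0;
}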
--- /dev/null
+From 8678654e3c7ad7b0f4beb03fa89691279cba71f9 Mon Sep 17 00:00:00 2001
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Thu, 13 Oct 2016 17:45:20 +0200
+Subject: kvm: x86: memset whole irq_eoi
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jiri Slaby <jslaby@suse.cz>
+
+commit 8678654e3c7ad7b0f4beb03fa89691279cba71f9 upstream.
+
+gcc 7 warns:
+arch/x86/kvm/ioapic.c: In function 'kvm_ioapic_reset':
+arch/x86/kvm/ioapic.c:597:2: warning: 'memset' used with length equal to number of elements without multiplication by element size [-Wmemset-elt-size]
+
+And it is right. Memset the whole array using the sizeof operator.
+
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+[Added x86 subject tag]
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/ioapic.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/ioapic.c
++++ b/arch/x86/kvm/ioapic.c
+@@ -594,7 +594,7 @@ static void kvm_ioapic_reset(struct kvm_
+ ioapic->irr = 0;
+ ioapic->irr_delivered = 0;
+ ioapic->id = 0;
+- memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS);
++ memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
+ rtc_irq_eoi_tracking_reset(ioapic);
+ }
+
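The warning catches a classic slip: passing the element count where memset() expects a byte count. A standalone sketch with a small int array; the sizes are illustrative, not the ioapic layout.

#include <stdio.h>
#include <string.h>

#define NUM_PINS 4

int main(void)
{
	int irq_eoi[NUM_PINS];

	memset(irq_eoi, 0xff, sizeof(irq_eoi));		/* pretend stale data is present */

	/*
	 * Buggy form: NUM_PINS is the element count, not the byte count,
	 * so only the first NUM_PINS bytes (one 4-byte int here) get cleared.
	 */
	memset(irq_eoi, 0x00, NUM_PINS);
	printf("count-only memset: irq_eoi[1] = 0x%x\n", (unsigned)irq_eoi[1]);

	/* Fixed form: sizeof() gives the byte count and clears the whole array. */
	memset(irq_eoi, 0x00, sizeof(irq_eoi));
	printf("sizeof memset:     irq_eoi[1] = 0x%x\n", (unsigned)irq_eoi[1]);
	return 0;
}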
--- /dev/null
+From a171bc51fa697021e1b2082d7e95c12a363bc0a9 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Mon, 3 Oct 2016 17:56:55 +0300
+Subject: pinctrl: baytrail: Fix lockdep
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit a171bc51fa697021e1b2082d7e95c12a363bc0a9 upstream.
+
+Initialize the spinlock before using it.
+
+INFO: trying to register non-static key.
+the code is fine but needs lockdep annotation.
+turning off the locking correctness validator.
+CPU: 2 PID: 1 Comm: swapper/0 Not tainted 4.8.0-dwc-bisect #4
+Hardware name: Intel Corp. VALLEYVIEW C0 PLATFORM/BYT-T FFD8, BIOS BLAKFF81.X64.0088.R10.1403240443 FFD8_X64_R_2014_13_1_00 03/24/2014
+ 0000000000000000 ffff8800788ff770 ffffffff8133d597 0000000000000000
+ 0000000000000000 ffff8800788ff7e0 ffffffff810cfb9e 0000000000000002
+ ffff8800788ff7d0 ffffffff8205b600 0000000000000002 ffff8800788ff7f0
+Call Trace:
+ [<ffffffff8133d597>] dump_stack+0x67/0x90
+ [<ffffffff810cfb9e>] register_lock_class+0x52e/0x540
+ [<ffffffff810d2081>] __lock_acquire+0x81/0x16b0
+ [<ffffffff810cede1>] ? save_trace+0x41/0xd0
+ [<ffffffff810d33b2>] ? __lock_acquire+0x13b2/0x16b0
+ [<ffffffff810cf05a>] ? __lock_is_held+0x4a/0x70
+ [<ffffffff810d3b1a>] lock_acquire+0xba/0x220
+ [<ffffffff8136f1fe>] ? byt_gpio_get_direction+0x3e/0x80
+ [<ffffffff81631567>] _raw_spin_lock_irqsave+0x47/0x60
+ [<ffffffff8136f1fe>] ? byt_gpio_get_direction+0x3e/0x80
+ [<ffffffff8136f1fe>] byt_gpio_get_direction+0x3e/0x80
+ [<ffffffff813740a9>] gpiochip_add_data+0x319/0x7d0
+ [<ffffffff81631723>] ? _raw_spin_unlock_irqrestore+0x43/0x70
+ [<ffffffff8136fe3b>] byt_pinctrl_probe+0x2fb/0x620
+ [<ffffffff8142fb0c>] platform_drv_probe+0x3c/0xa0
+...
+
+Based on the diff it looks like the problem was introduced in
+commit 71e6ca61e826 ("pinctrl: baytrail: Register pin control handling")
+but I wasn't able to verify that empirically as the parent commit
+just oopsed when I tried to boot it.
+
+Cc: Cristina Ciocan <cristina.ciocan@intel.com>
+Fixes: 71e6ca61e826 ("pinctrl: baytrail: Register pin control handling")
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pinctrl/intel/pinctrl-baytrail.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
+@@ -1808,6 +1808,8 @@ static int byt_pinctrl_probe(struct plat
+ return PTR_ERR(vg->pctl_dev);
+ }
+
++ raw_spin_lock_init(&vg->lock);
++
+ ret = byt_gpio_probe(vg);
+ if (ret) {
+ pinctrl_unregister(vg->pctl_dev);
+@@ -1815,7 +1817,6 @@ static int byt_pinctrl_probe(struct plat
+ }
+
+ platform_set_drvdata(pdev, vg);
+- raw_spin_lock_init(&vg->lock);
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
--- /dev/null
+From c538b9436751a0be2e1246b48353bc23156bdbcc Mon Sep 17 00:00:00 2001
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+Date: Mon, 10 Oct 2016 16:39:31 +0300
+Subject: pinctrl: intel: Only restore pins that are used by the driver
+
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+
+commit c538b9436751a0be2e1246b48353bc23156bdbcc upstream.
+
+Dell XPS 13 (and maybe some others) uses a GPIO (CPU_GP_1) during suspend
+to explicitly disable the USB touchscreen interrupt. This is done to prevent
+a situation where the lid is closed but the touchscreen is left functional.
+
+The pinctrl driver (wrongly) assumes it owns all pins which are owned by
+the host and not locked down. It is perfectly fine for the BIOS to use those
+pins, as it is also considered the host in this context.
+
+What happens is that when the lid of the Dell XPS 13 is closed, the BIOS
+configures CPU_GP_1 low, disabling the touchscreen interrupt. During resume
+we restore all host-owned pins to the known state, which includes CPU_GP_1,
+and this overwrites what the BIOS has programmed there, causing the
+touchscreen to fail as no interrupts reach the CPU anymore.
+
+Fix this by restoring only those pins we know are explicitly requested by
+the kernel one way or another.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=176361
+Reported-by: AceLan Kao <acelan.kao@canonical.com>
+Tested-by: AceLan Kao <acelan.kao@canonical.com>
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pinctrl/intel/pinctrl-intel.c | 25 +++++++++++++++++++++++--
+ 1 file changed, 23 insertions(+), 2 deletions(-)
+
+--- a/drivers/pinctrl/intel/pinctrl-intel.c
++++ b/drivers/pinctrl/intel/pinctrl-intel.c
+@@ -19,6 +19,7 @@
+ #include <linux/pinctrl/pinconf.h>
+ #include <linux/pinctrl/pinconf-generic.h>
+
++#include "../core.h"
+ #include "pinctrl-intel.h"
+
+ /* Offset from regs */
+@@ -1079,6 +1080,26 @@ int intel_pinctrl_remove(struct platform
+ EXPORT_SYMBOL_GPL(intel_pinctrl_remove);
+
+ #ifdef CONFIG_PM_SLEEP
++static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned pin)
++{
++ const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin);
++
++ if (!pd || !intel_pad_usable(pctrl, pin))
++ return false;
++
++ /*
++ * Only restore the pin if it is actually in use by the kernel (or
++ * by userspace). It is possible that some pins are used by the
++ * BIOS during resume and those are not always locked down so leave
++ * them alone.
++ */
++ if (pd->mux_owner || pd->gpio_owner ||
++ gpiochip_line_is_irq(&pctrl->chip, pin))
++ return true;
++
++ return false;
++}
++
+ int intel_pinctrl_suspend(struct device *dev)
+ {
+ struct platform_device *pdev = to_platform_device(dev);
+@@ -1092,7 +1113,7 @@ int intel_pinctrl_suspend(struct device
+ const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
+ u32 val;
+
+- if (!intel_pad_usable(pctrl, desc->number))
++ if (!intel_pinctrl_should_save(pctrl, desc->number))
+ continue;
+
+ val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG0));
+@@ -1153,7 +1174,7 @@ int intel_pinctrl_resume(struct device *
+ void __iomem *padcfg;
+ u32 val;
+
+- if (!intel_pad_usable(pctrl, desc->number))
++ if (!intel_pinctrl_should_save(pctrl, desc->number))
+ continue;
+
+ padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0);
--- /dev/null
+From b5a9b340789b2b24c6896bcf7a065c31a4db671c Mon Sep 17 00:00:00 2001
+From: Vincent Guittot <vincent.guittot@linaro.org>
+Date: Wed, 19 Oct 2016 14:45:23 +0200
+Subject: sched/fair: Fix incorrect task group ->load_avg
+
+From: Vincent Guittot <vincent.guittot@linaro.org>
+
+commit b5a9b340789b2b24c6896bcf7a065c31a4db671c upstream.
+
+A scheduler performance regression has been reported by Joseph Salisbury,
+which he bisected back to:
+
+ 3d30544f0212 ("sched/fair: Apply more PELT fixes")
+
+The regression triggers when several levels of task groups are involved
+(read: SystemD) and cpu_possible_mask != cpu_present_mask.
+
+The root cause is that a group entity's load (tg_child->se[i]->avg.load_avg)
+is initialized to scale_load_down(se->load.weight). During the creation of
+a child task group, its group entities on possible CPUs are attached to the
+parent's cfs_rq (tg_parent) and their loads are added to the parent's load
+(tg_parent->load_avg) with update_tg_load_avg().
+
+But only the load on online CPUs will then be updated to reflect real load,
+whereas load on other CPUs will stay at the initial value.
+
+The result is a tg_parent->load_avg that is higher than the real load, the
+weight of group entities (tg_parent->se[i]->load.weight) on online CPUs is
+smaller than it should be, and the task group gets less running time than
+it could expect.
+
+( This situation can be detected with /proc/sched_debug. The ".tg_load_avg"
+  of the task group will be much higher than the sum of ".tg_load_avg_contrib"
+  of the online cfs_rqs of the task group. )
+
+The load of group entities doesn't have to be initialized to anything
+other than 0, because their load will increase when an entity is attached.
+
+Reported-by: Joseph Salisbury <joseph.salisbury@canonical.com>
+Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
+Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: joonwoop@codeaurora.org
+Fixes: 3d30544f0212 ("sched/fair: Apply more PELT fixes")
+Link: http://lkml.kernel.org/r/1476881123-10159-1-git-send-email-vincent.guittot@linaro.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/fair.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -680,7 +680,14 @@ void init_entity_runnable_average(struct
+ * will definitely be update (after enqueue).
+ */
+ sa->period_contrib = 1023;
+- sa->load_avg = scale_load_down(se->load.weight);
++ /*
++ * Tasks are intialized with full load to be seen as heavy tasks until
++ * they get a chance to stabilize to their real load level.
++ * Group entities are intialized with zero load to reflect the fact that
++ * nothing has been attached to the task group yet.
++ */
++ if (entity_is_task(se))
++ sa->load_avg = scale_load_down(se->load.weight);
+ sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
+ /*
+ * At this point, util_avg won't be used in select_task_rq_fair anyway
--- /dev/null
+From b60205c7c558330e4e2b5df498355ec959457358 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 20 Sep 2016 21:58:12 +0200
+Subject: sched/fair: Fix min_vruntime tracking
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit b60205c7c558330e4e2b5df498355ec959457358 upstream.
+
+While going through enqueue/dequeue to review the movement of
+set_curr_task() I noticed that the (2nd) update_min_vruntime() call in
+dequeue_entity() is suspect.
+
+It turns out it's actually wrong because it will consider
+cfs_rq->curr, which could be the entry we just normalized. This mixes
+different vruntime forms and leads to failure.
+
+The purpose of the second update_min_vruntime() is to move
+min_vruntime forward if the entity we just removed is the one that was
+holding it back; _except_ for the DEQUEUE_SAVE case, because then we
+know it's a temporary removal and it will come back.
+
+However, since we do put_prev_task() _after_ dequeue(), cfs_rq->curr
+will still be set (and per the above, can be transformed into a
+different unit), so update_min_vruntime() should also consider
+curr->on_rq. This also fixes another corner case where the enqueue
+(which also does update_curr()->update_min_vruntime()) happens on the
+rq->lock break in schedule(), between dequeue and put_prev_task.
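+
+To make the "mixing" concrete, a toy userspace sketch with invented
+numbers; it only models the min/max arithmetic of update_min_vruntime(),
+not the scheduler itself:
+
+  #include <stdio.h>
+
+  typedef unsigned long long u64;
+
+  static u64 min_u64(u64 a, u64 b) { return a < b ? a : b; }
+  static u64 max_u64(u64 a, u64 b) { return a > b ? a : b; }
+
+  int main(void)
+  {
+          u64 min_vruntime = 1000000;  /* cfs_rq->min_vruntime */
+          u64 curr = 1000050;          /* entity being dequeued, still cfs_rq->curr */
+          u64 leftmost = 1002000;      /* leftmost entity still queued */
+
+          /* dequeue_entity() has already normalized @curr: */
+          u64 curr_norm = curr - min_vruntime;                      /* 50 */
+
+          /* Old code mixes the normalized value with absolute ones. */
+          u64 mixed = max_u64(min_vruntime, min_u64(curr_norm, leftmost));
+          /* Fixed code sees !curr->on_rq and only considers the leftmost. */
+          u64 fixed = max_u64(min_vruntime, leftmost);
+
+          printf("old:   min_vruntime stays at %llu\n", mixed);  /* 1000000 */
+          printf("fixed: min_vruntime moves to %llu\n", fixed);  /* 1002000 */
+          return 0;
+  }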
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-kernel@vger.kernel.org
+Fixes: 1e876231785d ("sched: Fix ->min_vruntime calculation in dequeue_entity()")
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/fair.c | 29 ++++++++++++++++++++++-------
+ 1 file changed, 22 insertions(+), 7 deletions(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -456,17 +456,23 @@ static inline int entity_before(struct s
+
+ static void update_min_vruntime(struct cfs_rq *cfs_rq)
+ {
++ struct sched_entity *curr = cfs_rq->curr;
++
+ u64 vruntime = cfs_rq->min_vruntime;
+
+- if (cfs_rq->curr)
+- vruntime = cfs_rq->curr->vruntime;
++ if (curr) {
++ if (curr->on_rq)
++ vruntime = curr->vruntime;
++ else
++ curr = NULL;
++ }
+
+ if (cfs_rq->rb_leftmost) {
+ struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
+ struct sched_entity,
+ run_node);
+
+- if (!cfs_rq->curr)
++ if (!curr)
+ vruntime = se->vruntime;
+ else
+ vruntime = min_vruntime(vruntime, se->vruntime);
+@@ -3466,9 +3472,10 @@ dequeue_entity(struct cfs_rq *cfs_rq, st
+ account_entity_dequeue(cfs_rq, se);
+
+ /*
+- * Normalize the entity after updating the min_vruntime because the
+- * update can refer to the ->curr item and we need to reflect this
+- * movement in our normalized position.
++ * Normalize after update_curr(); which will also have moved
++ * min_vruntime if @se is the one holding it back. But before doing
++ * update_min_vruntime() again, which will discount @se's position and
++ * can move min_vruntime forward still more.
+ */
+ if (!(flags & DEQUEUE_SLEEP))
+ se->vruntime -= cfs_rq->min_vruntime;
+@@ -3476,8 +3483,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, st
+ /* return excess runtime on last dequeue */
+ return_cfs_rq_runtime(cfs_rq);
+
+- update_min_vruntime(cfs_rq);
+ update_cfs_shares(cfs_rq);
++
++ /*
++ * Now advance min_vruntime if @se was the entity holding it back,
++ * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
++ * put back on, and if we advance min_vruntime, we'll be placed back
++ * further than we started -- ie. we'll be penalized.
++ */
++ if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
++ update_min_vruntime(cfs_rq);
+ }
+
+ /*
arm64-kvm-vhe-reset-pstate.pan-on-entry-to-el2.patch
arc-don-t-leak-bits-of-kernel-stack-into-coredump.patch
fs-super.c-fix-race-between-freeze_super-and-thaw_super.patch
+cifs-limit-the-overall-credit-acquired.patch
+fs-cifs-keep-guid-when-assigning-fid-to-fileinfo.patch
+clarify-locking-of-cifs-file-and-tcon-structures-and-make-more-granular.patch
+display-number-of-credits-available.patch
+set-previous-session-id-correctly-on-smb3-reconnect.patch
+smb3-guids-should-be-constructed-as-random-but-valid-uuids.patch
+do-not-send-smb3-set_info-request-if-nothing-is-changing.patch
+cleanup-missing-frees-on-some-ioctls.patch
+fix-regression-which-breaks-dfs-mounting.patch
+blkcg-unlock-blkcg_pol_mutex-only-once-when-cpd-null.patch
+x86-e820-don-t-merge-consecutive-e820_pram-ranges.patch
+kvm-x86-memset-whole-irq_eoi.patch
+x86-platform-uv-fix-support-for-efi_old_memmap-after-bios-callback-updates.patch
+x86-boot-smp-don-t-try-to-poke-disabled-non-existent-apic.patch
+pinctrl-intel-only-restore-pins-that-are-used-by-the-driver.patch
+pinctrl-baytrail-fix-lockdep.patch
+sched-fair-fix-incorrect-task-group-load_avg.patch
+sched-fair-fix-min_vruntime-tracking.patch
+irqchip-gicv3-handle-loop-timeout-proper.patch
+irqchip-eznps-acknowledge-nps_ipi-before-calling-the-handler.patch
+irqchip-gic-v3-its-fix-entry-size-mask-for-gits_baser.patch
+cxl-prevent-adapter-reset-if-an-active-context-exists.patch
--- /dev/null
+From c2afb8147e69819885493edf3a7c1ce03aaf2d4e Mon Sep 17 00:00:00 2001
+From: Steve French <smfrench@gmail.com>
+Date: Tue, 20 Sep 2016 22:56:13 -0500
+Subject: Set previous session id correctly on SMB3 reconnect
+
+From: Steve French <smfrench@gmail.com>
+
+commit c2afb8147e69819885493edf3a7c1ce03aaf2d4e upstream.
+
+Signed-off-by: Steve French <steve.french@primarydata.com>
+Reported-by: David Goebel <davidgoe@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2pdu.c | 5 +++++
+ fs/cifs/smb2pdu.h | 2 +-
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -604,6 +604,7 @@ SMB2_sess_setup(const unsigned int xid,
+ char *security_blob = NULL;
+ unsigned char *ntlmssp_blob = NULL;
+ bool use_spnego = false; /* else use raw ntlmssp */
++ u64 previous_session = ses->Suid;
+
+ cifs_dbg(FYI, "Session Setup\n");
+
+@@ -641,6 +642,10 @@ ssetup_ntlmssp_authenticate:
+ return rc;
+
+ req->hdr.SessionId = 0; /* First session, not a reauthenticate */
++
++ /* if reconnect, we need to send previous sess id, otherwise it is 0 */
++ req->PreviousSessionId = previous_session;
++
+ req->Flags = 0; /* MBZ */
+ /* to enable echos and oplocks */
+ req->hdr.CreditRequest = cpu_to_le16(3);
+--- a/fs/cifs/smb2pdu.h
++++ b/fs/cifs/smb2pdu.h
+@@ -276,7 +276,7 @@ struct smb2_sess_setup_req {
+ __le32 Channel;
+ __le16 SecurityBufferOffset;
+ __le16 SecurityBufferLength;
+- __le64 PreviousSessionId;
++ __u64 PreviousSessionId;
+ __u8 Buffer[1]; /* variable length GSS security buffer */
+ } __packed;
+
--- /dev/null
+From fa70b87cc6641978b20e12cc5d517e9ffc0086d4 Mon Sep 17 00:00:00 2001
+From: Steve French <smfrench@gmail.com>
+Date: Thu, 22 Sep 2016 00:39:34 -0500
+Subject: SMB3: GUIDs should be constructed as random but valid uuids
+
+From: Steve French <smfrench@gmail.com>
+
+commit fa70b87cc6641978b20e12cc5d517e9ffc0086d4 upstream.
+
+GUIDs, although random and 16 bytes long, need to be generated as
+proper UUIDs.
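+
+For context, "proper" here means a random (version 4) UUID: after the 16
+random bytes are generated, the version and variant bits must be set. A
+minimal userspace sketch of that fix-up (rand() is only a stand-in for
+the kernel's random source):
+
+  #include <stdlib.h>
+
+  static void random_v4_uuid(unsigned char uuid[16])
+  {
+          for (int i = 0; i < 16; i++)
+                  uuid[i] = rand() & 0xff;        /* stand-in for get_random_bytes() */
+
+          uuid[6] = (uuid[6] & 0x0F) | 0x40;      /* version 4: randomly generated */
+          uuid[8] = (uuid[8] & 0x3F) | 0x80;      /* variant: RFC 4122 */
+  }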
+
+Signed-off-by: Steve French <steve.french@primarydata.com>
+Reviewed-by: Aurelien Aptel <aaptel@suse.com>
+Reported-by: David Goebels <davidgoe@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/cifsfs.c | 2 +-
+ fs/cifs/connect.c | 2 +-
+ fs/cifs/smb2ops.c | 2 +-
+ fs/cifs/smb2pdu.c | 2 +-
+ 4 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -271,7 +271,7 @@ cifs_alloc_inode(struct super_block *sb)
+ cifs_inode->createtime = 0;
+ cifs_inode->epoch = 0;
+ #ifdef CONFIG_CIFS_SMB2
+- get_random_bytes(cifs_inode->lease_key, SMB2_LEASE_KEY_SIZE);
++ generate_random_uuid(cifs_inode->lease_key);
+ #endif
+ /*
+ * Can not set i_flags here - they get immediately overwritten to zero
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2163,7 +2163,7 @@ cifs_get_tcp_session(struct smb_vol *vol
+ memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr,
+ sizeof(tcp_ses->dstaddr));
+ #ifdef CONFIG_CIFS_SMB2
+- get_random_bytes(tcp_ses->client_guid, SMB2_CLIENT_GUID_SIZE);
++ generate_random_uuid(tcp_ses->client_guid);
+ #endif
+ /*
+ * at this point we are the only ones with the pointer
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -1042,7 +1042,7 @@ smb2_set_lease_key(struct inode *inode,
+ static void
+ smb2_new_lease_key(struct cifs_fid *fid)
+ {
+- get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE);
++ generate_random_uuid(fid->lease_key);
+ }
+
+ #define SMB2_SYMLINK_STRUCT_SIZE \
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1183,7 +1183,7 @@ create_durable_v2_buf(struct cifs_fid *p
+
+ buf->dcontext.Timeout = 0; /* Should this be configurable by workload */
+ buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
+- get_random_bytes(buf->dcontext.CreateGuid, 16);
++ generate_random_uuid(buf->dcontext.CreateGuid);
+ memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
+
+ /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
--- /dev/null
+From ff8560512b8d4b7ca3ef4fd69166634ac30b2525 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Sat, 22 Oct 2016 05:18:04 +0300
+Subject: x86/boot/smp: Don't try to poke disabled/non-existent APIC
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit ff8560512b8d4b7ca3ef4fd69166634ac30b2525 upstream.
+
+Apparently trying to poke a disabled or non-existent APIC
+leads to a box that doesn't even boot. Let's not do that.
+
+No real clue if this is the right fix, but at least my
+P3 machine boots again.
+
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: Eric Biederman <ebiederm@xmission.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: Len Brown <len.brown@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Prarit Bhargava <prarit@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Yinghai Lu <yinghai@kernel.org>
+Cc: dyoung@redhat.com
+Cc: kexec@lists.infradead.org
+Fixes: 2a51fe083eba ("arch/x86: Handle non enumerated CPU after physical hotplug")
+Link: http://lkml.kernel.org/r/1477102684-5092-1-git-send-email-ville.syrjala@linux.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/smpboot.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1408,15 +1408,17 @@ __init void prefill_possible_map(void)
+
+ /* No boot processor was found in mptable or ACPI MADT */
+ if (!num_processors) {
+- int apicid = boot_cpu_physical_apicid;
+- int cpu = hard_smp_processor_id();
++ if (boot_cpu_has(X86_FEATURE_APIC)) {
++ int apicid = boot_cpu_physical_apicid;
++ int cpu = hard_smp_processor_id();
+
+- pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);
++ pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);
+
+- /* Make sure boot cpu is enumerated */
+- if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
+- apic->apic_id_valid(apicid))
+- generic_processor_info(apicid, boot_cpu_apic_version);
++ /* Make sure boot cpu is enumerated */
++ if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
++ apic->apic_id_valid(apicid))
++ generic_processor_info(apicid, boot_cpu_apic_version);
++ }
+
+ if (!num_processors)
+ num_processors = 1;
--- /dev/null
+From 23446cb66c073b827779e5eb3dec301623299b32 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 12 Oct 2016 11:01:48 -0700
+Subject: x86/e820: Don't merge consecutive E820_PRAM ranges
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit 23446cb66c073b827779e5eb3dec301623299b32 upstream.
+
+Commit:
+
+ 917db484dc6a ("x86/boot: Fix kdump, cleanup aborted E820_PRAM max_pfn manipulation")
+
+... fixed up the broken manipulations of max_pfn in the presence of
+E820_PRAM ranges.
+
+However, it also broke the sanitize_e820_map() support for not merging
+E820_PRAM ranges.
+
+Re-introduce the check that keeps resource boundaries between
+consecutive defined ranges. Otherwise, for example, an environment that
+boots with memmap=2G!8G,2G!10G will end up with a single 4G /dev/pmem0
+device instead of two 2G devices, /dev/pmem0 and /dev/pmem1.
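+
+A toy userspace model of the boundary rule (not the kernel's
+sanitize_e820_map(), which works on change points; the two ranges mirror
+the memmap=2G!8G,2G!10G example):
+
+  #include <stdio.h>
+
+  #define E820_PRAM 12    /* persistent-memory range type */
+
+  struct range { unsigned long long start, end; int type; };
+
+  int main(void)
+  {
+          struct range in[] = {
+                  { 0x200000000ULL, 0x280000000ULL, E820_PRAM },  /*  8G..10G */
+                  { 0x280000000ULL, 0x300000000ULL, E820_PRAM },  /* 10G..12G */
+          };
+          struct range out[2];
+          int n = 0;
+
+          for (int i = 0; i < 2; i++) {
+                  /* Merge only if adjacent, same type, and *not* PRAM. */
+                  if (n && out[n - 1].end == in[i].start &&
+                      out[n - 1].type == in[i].type && in[i].type != E820_PRAM)
+                          out[n - 1].end = in[i].end;
+                  else
+                          out[n++] = in[i];
+          }
+
+          printf("%d pmem region(s)\n", n);  /* 2 -> /dev/pmem0 and /dev/pmem1 */
+          return 0;
+  }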
+
+Reported-by: Dave Chinner <david@fromorbit.com>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Jeff Moyer <jmoyer@redhat.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Zhang Yi <yizhan@redhat.com>
+Cc: linux-nvdimm@lists.01.org
+Fixes: 917db484dc6a ("x86/boot: Fix kdump, cleanup aborted E820_PRAM max_pfn manipulation")
+Link: http://lkml.kernel.org/r/147629530854.10618.10383744751594021268.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/e820.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/e820.c
++++ b/arch/x86/kernel/e820.c
+@@ -348,7 +348,7 @@ int __init sanitize_e820_map(struct e820
+ * continue building up new bios map based on this
+ * information
+ */
+- if (current_type != last_type) {
++ if (current_type != last_type || current_type == E820_PRAM) {
+ if (last_type != 0) {
+ new_bios[new_bios_entry].size =
+ change_point[chgidx]->addr - last_addr;
--- /dev/null
+From caef78b6cdeddf4ad364f95910bba6b43b8eb9bf Mon Sep 17 00:00:00 2001
+From: Alex Thorlton <athorlton@sgi.com>
+Date: Wed, 19 Oct 2016 20:48:51 -0500
+Subject: x86/platform/UV: Fix support for EFI_OLD_MEMMAP after BIOS callback updates
+
+From: Alex Thorlton <athorlton@sgi.com>
+
+commit caef78b6cdeddf4ad364f95910bba6b43b8eb9bf upstream.
+
+Some time ago, we brought our UV BIOS callback code up to speed with the
+new EFI memory mapping scheme, in commit:
+
+ d1be84a232e3 ("x86/uv: Update uv_bios_call() to use efi_call_virt_pointer()")
+
+By leveraging some changes that I made to a few of the EFI runtime
+callback mechanisms, in commit:
+
+ 80e75596079f ("efi: Convert efi_call_virt() to efi_call_virt_pointer()")
+
+This got everything running smoothly on UV, with the new EFI mapping
+code. However, this left one small loose end, in that EFI_OLD_MEMMAP
+(a.k.a. efi=old_map) will no longer work on UV, on kernels that include
+the aforementioned changes.
+
+At the time this was not a major issue (in fact, it still really isn't),
+but there's no reason that EFI_OLD_MEMMAP *shouldn't* work on our
+systems. This commit adds a check into uv_bios_call(), to see if we have
+the EFI_OLD_MEMMAP bit set in efi.flags. If it is set, we fall back to
+using our old callback method, which uses efi_call() directly on the __va()
+of our function pointer.
+
+Signed-off-by: Alex Thorlton <athorlton@sgi.com>
+Acked-by: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: Dimitri Sivanich <sivanich@sgi.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
+Cc: Mike Travis <travis@sgi.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Russ Anderson <rja@sgi.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1476928131-170101-1-git-send-email-athorlton@sgi.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/platform/uv/bios_uv.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/platform/uv/bios_uv.c
++++ b/arch/x86/platform/uv/bios_uv.c
+@@ -40,7 +40,15 @@ s64 uv_bios_call(enum uv_bios_cmd which,
+ */
+ return BIOS_STATUS_UNIMPLEMENTED;
+
+- ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
++ /*
++ * If EFI_OLD_MEMMAP is set, we need to fall back to using our old EFI
++ * callback method, which uses efi_call() directly, with the kernel page tables:
++ */
++ if (unlikely(test_bit(EFI_OLD_MEMMAP, &efi.flags)))
++ ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5);
++ else
++ ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
++
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(uv_bios_call);