--- /dev/null
+From 1a541b4e3cd6f5795022514114854b3e1345f24e Mon Sep 17 00:00:00 2001
+From: Steve Capper <steve.capper@linaro.org>
+Date: Thu, 1 Oct 2015 13:06:07 +0100
+Subject: arm64: Fix THP protection change logic
+
+From: Steve Capper <steve.capper@linaro.org>
+
+commit 1a541b4e3cd6f5795022514114854b3e1345f24e upstream.
+
+Commit 6910fa16dbe1 ("arm64: enable PTE type bit in the mask for
+pte_modify") fixes a problem whereby a large block of PROT_NONE mapped
+memory is incorrectly mapped as block descriptors when mprotect is
+called.
+
+Unfortunately, this fix introduced a subtle bug into the THP logic.
+
+If one mmaps a large block of memory, then faults it in so that it is
+collapsed into THPs, subsequent calls to mprotect on this area of
+memory will write incorrect table descriptors instead of block
+descriptors. This is because pmd_modify() calls pte_modify(), which is
+now allowed to modify the type of the page table entry.
+
+This patch reverts commit 6910fa16dbe1, and
+fixes the problem it was trying to address by adjusting PAGE_NONE to
+represent a table entry. Thus no change in pte type is required when
+moving from PROT_NONE to a different protection.
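+
+As an illustration, a userspace sequence of roughly this shape (a
+hedged sketch, not taken from the original report; the sizes and the
+use of MADV_HUGEPAGE are assumptions) exercises the bad path once the
+region has been collapsed into THPs:
+
+	void *p = mmap(NULL, 16UL << 20, PROT_READ | PROT_WRITE,
+		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+	madvise(p, 16UL << 20, MADV_HUGEPAGE);
+	memset(p, 1, 16UL << 20);		/* fault everything in */
+	/* ... khugepaged collapses the region into THPs ... */
+	mprotect(p, 16UL << 20, PROT_READ);	/* ends up in pmd_modify() */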
+
+Fixes: 6910fa16dbe1 ("arm64: enable PTE type bit in the mask for pte_modify")
+Cc: <stable@vger.kernel.org> # 4.0+
+Cc: Feng Kan <fkan@apm.com>
+Reported-by: Ganapatrao Kulkarni <Ganapatrao.Kulkarni@caviumnetworks.com>
+Tested-by: Ganapatrao Kulkarni <gkulkarni@caviumnetworks.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+[SteveC: backported 1a541b4e3cd6 to 4.1 and 4.2 stable. Just one minor
+ fix to the second part to allow the patch to apply cleanly; no logic
+ changed.]
+Signed-off-by: Steve Capper <steve.capper@linaro.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/pgtable.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -80,7 +80,7 @@ extern void __pgd_error(const char *file
+ #define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+ #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
+
+-#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
++#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+ #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+ #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+ #define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+@@ -460,7 +460,7 @@ static inline pud_t *pud_offset(pgd_t *p
+ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ {
+ const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
+- PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
++ PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
+ pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+ return pte;
+ }
--- /dev/null
+From bcd7f78d078ff6197715c1ed070c92aca57ec12c Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jeff.layton@primarydata.com>
+Date: Sat, 11 Jul 2015 06:43:02 -0400
+Subject: locks: have flock_lock_file take an inode pointer instead of a filp
+
+From: Jeff Layton <jeff.layton@primarydata.com>
+
+commit bcd7f78d078ff6197715c1ed070c92aca57ec12c upstream.
+
+...and rename it to better describe how it works.
+
+In order to fix a use-after-free in NFS, we need to be able to remove
+locks from an inode after the filp associated with them may have
+already been freed. flock_lock_file already only dereferences the filp
+to get to the inode, so just push that out to the callers.
+
+All of the callers already pass in a lock request that has fl_file set
+properly, so we don't need to pass the filp in separately.
+
+Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
+Reviewed-by: "J. Bruce Fields" <bfields@fieldses.org>
+Tested-by: "J. Bruce Fields" <bfields@fieldses.org>
+Cc: William Dauchy <william@gandi.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/locks.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -862,12 +862,11 @@ static int posix_locks_deadlock(struct f
+ * whether or not a lock was successfully freed by testing the return
+ * value for -ENOENT.
+ */
+-static int flock_lock_file(struct file *filp, struct file_lock *request)
++static int flock_lock_inode(struct inode *inode, struct file_lock *request)
+ {
+ struct file_lock *new_fl = NULL;
+ struct file_lock *fl;
+ struct file_lock_context *ctx;
+- struct inode *inode = file_inode(filp);
+ int error = 0;
+ bool found = false;
+ LIST_HEAD(dispose);
+@@ -890,7 +889,7 @@ static int flock_lock_file(struct file *
+ goto find_conflict;
+
+ list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
+- if (filp != fl->fl_file)
++ if (request->fl_file != fl->fl_file)
+ continue;
+ if (request->fl_type == fl->fl_type)
+ goto out;
+@@ -1862,7 +1861,7 @@ int flock_lock_file_wait(struct file *fi
+ int error;
+ might_sleep();
+ for (;;) {
+- error = flock_lock_file(filp, fl);
++ error = flock_lock_inode(file_inode(filp), fl);
+ if (error != FILE_LOCK_DEFERRED)
+ break;
+ error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
+@@ -2401,7 +2400,8 @@ locks_remove_flock(struct file *filp)
+ .fl_type = F_UNLCK,
+ .fl_end = OFFSET_MAX,
+ };
+- struct file_lock_context *flctx = file_inode(filp)->i_flctx;
++ struct inode *inode = file_inode(filp);
++ struct file_lock_context *flctx = inode->i_flctx;
+
+ if (list_empty(&flctx->flc_flock))
+ return;
+@@ -2409,7 +2409,7 @@ locks_remove_flock(struct file *filp)
+ if (filp->f_op->flock)
+ filp->f_op->flock(filp, F_SETLKW, &fl);
+ else
+- flock_lock_file(filp, &fl);
++ flock_lock_inode(inode, &fl);
+
+ if (fl.fl_ops && fl.fl_ops->fl_release_private)
+ fl.fl_ops->fl_release_private(&fl);
--- /dev/null
+From ee296d7c5709440f8abd36b5b65c6b3e388538d9 Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jeff.layton@primarydata.com>
+Date: Sat, 11 Jul 2015 06:43:03 -0400
+Subject: locks: inline posix_lock_file_wait and flock_lock_file_wait
+
+From: Jeff Layton <jeff.layton@primarydata.com>
+
+commit ee296d7c5709440f8abd36b5b65c6b3e388538d9 upstream.
+
+They just call file_inode and then the corresponding *_inode_wait
+function. Just make them static inlines instead.
+
+Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
+Cc: William Dauchy <william@gandi.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/locks.c | 28 ----------------------------
+ include/linux/fs.h | 32 ++++++++++++++------------------
+ 2 files changed, 14 insertions(+), 46 deletions(-)
+
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -1190,21 +1190,6 @@ int posix_lock_inode_wait(struct inode *
+ EXPORT_SYMBOL(posix_lock_inode_wait);
+
+ /**
+- * posix_lock_file_wait - Apply a POSIX-style lock to a file
+- * @filp: The file to apply the lock to
+- * @fl: The lock to be applied
+- *
+- * Add a POSIX style lock to a file.
+- * We merge adjacent & overlapping locks whenever possible.
+- * POSIX locks are sorted by owner task, then by starting address
+- */
+-int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+-{
+- return posix_lock_inode_wait(file_inode(filp), fl);
+-}
+-EXPORT_SYMBOL(posix_lock_file_wait);
+-
+-/**
+ * locks_mandatory_locked - Check for an active lock
+ * @file: the file to check
+ *
+@@ -1890,19 +1875,6 @@ int flock_lock_inode_wait(struct inode *
+ EXPORT_SYMBOL(flock_lock_inode_wait);
+
+ /**
+- * flock_lock_file_wait - Apply a FLOCK-style lock to a file
+- * @filp: The file to apply the lock to
+- * @fl: The lock to be applied
+- *
+- * Add a FLOCK style lock to a file.
+- */
+-int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
+-{
+- return flock_lock_inode_wait(file_inode(filp), fl);
+-}
+-EXPORT_SYMBOL(flock_lock_file_wait);
+-
+-/**
+ * sys_flock: - flock() system call.
+ * @fd: the file descriptor to lock.
+ * @cmd: the type of lock to apply.
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1037,13 +1037,11 @@ extern void locks_release_private(struct
+ extern void posix_test_lock(struct file *, struct file_lock *);
+ extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
+ extern int posix_lock_inode_wait(struct inode *, struct file_lock *);
+-extern int posix_lock_file_wait(struct file *, struct file_lock *);
+ extern int posix_unblock_lock(struct file_lock *);
+ extern int vfs_test_lock(struct file *, struct file_lock *);
+ extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
+ extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
+ extern int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl);
+-extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
+ extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
+ extern void lease_get_mtime(struct inode *, struct timespec *time);
+ extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
+@@ -1135,11 +1133,6 @@ static inline int posix_lock_inode_wait(
+ return -ENOLCK;
+ }
+
+-static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+-{
+- return -ENOLCK;
+-}
+-
+ static inline int posix_unblock_lock(struct file_lock *waiter)
+ {
+ return -ENOENT;
+@@ -1167,12 +1160,6 @@ static inline int flock_lock_inode_wait(
+ return -ENOLCK;
+ }
+
+-static inline int flock_lock_file_wait(struct file *filp,
+- struct file_lock *request)
+-{
+- return -ENOLCK;
+-}
+-
+ static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
+ {
+ return 0;
+@@ -1206,6 +1193,20 @@ static inline void show_fd_locks(struct
+ struct file *filp, struct files_struct *files) {}
+ #endif /* !CONFIG_FILE_LOCKING */
+
++static inline struct inode *file_inode(const struct file *f)
++{
++ return f->f_inode;
++}
++
++static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
++{
++ return posix_lock_inode_wait(file_inode(filp), fl);
++}
++
++static inline int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
++{
++ return flock_lock_inode_wait(file_inode(filp), fl);
++}
+
+ struct fasync_struct {
+ spinlock_t fa_lock;
+@@ -2005,11 +2006,6 @@ extern void ihold(struct inode * inode);
+ extern void iput(struct inode *);
+ extern int generic_update_time(struct inode *, struct timespec *, int);
+
+-static inline struct inode *file_inode(const struct file *f)
+-{
+- return f->f_inode;
+-}
+-
+ /* /sys/fs */
+ extern struct kobject *fs_kobj;
+
--- /dev/null
+From 29d01b22eaa18d8b46091d3c98c6001c49f78e4a Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jeff.layton@primarydata.com>
+Date: Sat, 11 Jul 2015 06:43:02 -0400
+Subject: locks: new helpers - flock_lock_inode_wait and posix_lock_inode_wait
+
+From: Jeff Layton <jeff.layton@primarydata.com>
+
+commit 29d01b22eaa18d8b46091d3c98c6001c49f78e4a upstream.
+
+Allow callers to pass in an inode instead of a filp.
+
+Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
+Reviewed-by: "J. Bruce Fields" <bfields@fieldses.org>
+Tested-by: "J. Bruce Fields" <bfields@fieldses.org>
+Cc: William Dauchy <william@gandi.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/locks.c | 50 ++++++++++++++++++++++++++++++++++++++------------
+ include/linux/fs.h | 14 ++++++++++++++
+ 2 files changed, 52 insertions(+), 12 deletions(-)
+
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -1163,20 +1163,19 @@ int posix_lock_file(struct file *filp, s
+ EXPORT_SYMBOL(posix_lock_file);
+
+ /**
+- * posix_lock_file_wait - Apply a POSIX-style lock to a file
+- * @filp: The file to apply the lock to
++ * posix_lock_inode_wait - Apply a POSIX-style lock to a file
++ * @inode: inode of file to which lock request should be applied
+ * @fl: The lock to be applied
+ *
+- * Add a POSIX style lock to a file.
+- * We merge adjacent & overlapping locks whenever possible.
+- * POSIX locks are sorted by owner task, then by starting address
++ * Variant of posix_lock_file_wait that does not take a filp, and so can be
++ * used after the filp has already been torn down.
+ */
+-int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
++int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
+ {
+ int error;
+ might_sleep ();
+ for (;;) {
+- error = posix_lock_file(filp, fl, NULL);
++ error = __posix_lock_file(inode, fl, NULL);
+ if (error != FILE_LOCK_DEFERRED)
+ break;
+ error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
+@@ -1188,6 +1187,21 @@ int posix_lock_file_wait(struct file *fi
+ }
+ return error;
+ }
++EXPORT_SYMBOL(posix_lock_inode_wait);
++
++/**
++ * posix_lock_file_wait - Apply a POSIX-style lock to a file
++ * @filp: The file to apply the lock to
++ * @fl: The lock to be applied
++ *
++ * Add a POSIX style lock to a file.
++ * We merge adjacent & overlapping locks whenever possible.
++ * POSIX locks are sorted by owner task, then by starting address
++ */
++int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
++{
++ return posix_lock_inode_wait(file_inode(filp), fl);
++}
+ EXPORT_SYMBOL(posix_lock_file_wait);
+
+ /**
+@@ -1850,18 +1864,18 @@ int fcntl_setlease(unsigned int fd, stru
+ }
+
+ /**
+- * flock_lock_file_wait - Apply a FLOCK-style lock to a file
+- * @filp: The file to apply the lock to
++ * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
++ * @inode: inode of the file to apply to
+ * @fl: The lock to be applied
+ *
+- * Add a FLOCK style lock to a file.
++ * Apply a FLOCK style lock request to an inode.
+ */
+-int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
++int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
+ {
+ int error;
+ might_sleep();
+ for (;;) {
+- error = flock_lock_inode(file_inode(filp), fl);
++ error = flock_lock_inode(inode, fl);
+ if (error != FILE_LOCK_DEFERRED)
+ break;
+ error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
+@@ -1873,7 +1887,19 @@ int flock_lock_file_wait(struct file *fi
+ }
+ return error;
+ }
++EXPORT_SYMBOL(flock_lock_inode_wait);
+
++/**
++ * flock_lock_file_wait - Apply a FLOCK-style lock to a file
++ * @filp: The file to apply the lock to
++ * @fl: The lock to be applied
++ *
++ * Add a FLOCK style lock to a file.
++ */
++int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
++{
++ return flock_lock_inode_wait(file_inode(filp), fl);
++}
+ EXPORT_SYMBOL(flock_lock_file_wait);
+
+ /**
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1036,11 +1036,13 @@ extern void locks_remove_file(struct fil
+ extern void locks_release_private(struct file_lock *);
+ extern void posix_test_lock(struct file *, struct file_lock *);
+ extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
++extern int posix_lock_inode_wait(struct inode *, struct file_lock *);
+ extern int posix_lock_file_wait(struct file *, struct file_lock *);
+ extern int posix_unblock_lock(struct file_lock *);
+ extern int vfs_test_lock(struct file *, struct file_lock *);
+ extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
+ extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
++extern int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl);
+ extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
+ extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
+ extern void lease_get_mtime(struct inode *, struct timespec *time);
+@@ -1127,6 +1129,12 @@ static inline int posix_lock_file(struct
+ return -ENOLCK;
+ }
+
++static inline int posix_lock_inode_wait(struct inode *inode,
++ struct file_lock *fl)
++{
++ return -ENOLCK;
++}
++
+ static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+ {
+ return -ENOLCK;
+@@ -1153,6 +1161,12 @@ static inline int vfs_cancel_lock(struct
+ return 0;
+ }
+
++static inline int flock_lock_inode_wait(struct inode *inode,
++ struct file_lock *request)
++{
++ return -ENOLCK;
++}
++
+ static inline int flock_lock_file_wait(struct file *filp,
+ struct file_lock *request)
+ {
--- /dev/null
+From 83bfff23e9ed19f37c4ef0bba84e75bd88e5cf21 Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jeff.layton@primarydata.com>
+Date: Sat, 11 Jul 2015 06:43:03 -0400
+Subject: nfs4: have do_vfs_lock take an inode pointer
+
+From: Jeff Layton <jeff.layton@primarydata.com>
+
+commit 83bfff23e9ed19f37c4ef0bba84e75bd88e5cf21 upstream.
+
+Now that we have file locking helpers that can deal with an inode
+instead of a filp, we can change the NFSv4 locking code to use them
+instead.
+
+This should fix the case where we have a filp that is closed while flock
+or OFD locks are set on it, and the task is signaled so that it doesn't
+wait for the LOCKU reply to come in before the filp is freed. At that
+point we can end up with a use-after-free with the current code, which
+relies on dereferencing the fl_file in the lock request.
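+
+Roughly, the use-after-free sequence (a schematic reconstruction of
+the race described above, not a verbatim trace):
+
+	flock(fd, LOCK_EX);		/* or an OFD lock */
+	close(fd);			/* LOCKU is sent to the server */
+	/* a signal arrives, so we don't wait for the LOCKU reply */
+	/* the last reference is dropped and the filp is freed */
+	...
+	nfs4_locku_done()
+	  do_vfs_lock(calldata->fl.fl_file, ...)	/* use-after-free */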
+
+Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
+Reviewed-by: "J. Bruce Fields" <bfields@fieldses.org>
+Tested-by: "J. Bruce Fields" <bfields@fieldses.org>
+Cc: William Dauchy <william@gandi.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/nfs4proc.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5367,15 +5367,15 @@ static int nfs4_proc_getlk(struct nfs4_s
+ return err;
+ }
+
+-static int do_vfs_lock(struct file *file, struct file_lock *fl)
++static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
+ {
+ int res = 0;
+ switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
+ case FL_POSIX:
+- res = posix_lock_file_wait(file, fl);
++ res = posix_lock_inode_wait(inode, fl);
+ break;
+ case FL_FLOCK:
+- res = flock_lock_file_wait(file, fl);
++ res = flock_lock_inode_wait(inode, fl);
+ break;
+ default:
+ BUG();
+@@ -5435,7 +5435,7 @@ static void nfs4_locku_done(struct rpc_t
+ switch (task->tk_status) {
+ case 0:
+ renew_lease(calldata->server, calldata->timestamp);
+- do_vfs_lock(calldata->fl.fl_file, &calldata->fl);
++ do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl);
+ if (nfs4_update_lock_stateid(calldata->lsp,
+ &calldata->res.stateid))
+ break;
+@@ -5543,7 +5543,7 @@ static int nfs4_proc_unlck(struct nfs4_s
+ mutex_lock(&sp->so_delegreturn_mutex);
+ /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
+ down_read(&nfsi->rwsem);
+- if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
++ if (do_vfs_lock(inode, request) == -ENOENT) {
+ up_read(&nfsi->rwsem);
+ mutex_unlock(&sp->so_delegreturn_mutex);
+ goto out;
+@@ -5684,7 +5684,7 @@ static void nfs4_lock_done(struct rpc_ta
+ data->timestamp);
+ if (data->arg.new_lock) {
+ data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
+- if (do_vfs_lock(data->fl.fl_file, &data->fl) < 0) {
++ if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) {
+ rpc_restart_call_prepare(task);
+ break;
+ }
+@@ -5926,7 +5926,7 @@ static int _nfs4_proc_setlk(struct nfs4_
+ if (status != 0)
+ goto out;
+ request->fl_flags |= FL_ACCESS;
+- status = do_vfs_lock(request->fl_file, request);
++ status = do_vfs_lock(state->inode, request);
+ if (status < 0)
+ goto out;
+ down_read(&nfsi->rwsem);
+@@ -5934,7 +5934,7 @@ static int _nfs4_proc_setlk(struct nfs4_
+ /* Yes: cache locks! */
+ /* ...but avoid races with delegation recall... */
+ request->fl_flags = fl_flags & ~FL_SLEEP;
+- status = do_vfs_lock(request->fl_file, request);
++ status = do_vfs_lock(state->inode, request);
+ up_read(&nfsi->rwsem);
+ goto out;
+ }
--- /dev/null
+From 9911a2d5e9d14e39692b751929a92cb5a1d9d0e0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= <u.kleine-koenig@pengutronix.de>
+Date: Wed, 23 Sep 2015 16:35:09 +0200
+Subject: pinctrl: imx25: ensure that a pin with id i is at position i in the info array
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+commit 9911a2d5e9d14e39692b751929a92cb5a1d9d0e0 upstream.
+
+The code in pinctrl-imx.c only works correctly if in the
+imx_pinctrl_soc_info passed to imx_pinctrl_probe we have:
+
+ info->pins[i].number = i
+ conf_reg(info->pins[i]) = 4 * i
+
+(with conf_reg(pin) being the offset of the pin's configuration
+register).
+
+When the imx25 specific part was introduced in b4a87c9b966f ("pinctrl:
+pinctrl-imx: add imx25 pinctrl driver") we had:
+
+ info->pins[i].number = i + 1
+ conf_reg(info->pins[i]) = 4 * i
+
+Commit 34027ca2bbc6 ("pinctrl: imx25: fix numbering for pins") tried
+to fix that, but made the situation:
+
+ info->pins[i-1].number = i
+ conf_reg(info->pins[i-1]) = 4 * i
+
+which is hardly better but fixed the error seen back then.
+
+So insert another reserved entry in the array to finally yield:
+
+ info->pins[i].number = i
+ conf_reg(info->pins[i]) = 4 * i
+
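+For instance, with this change MX25_PAD_A10 (= 2) sits at index 2 of
+imx25_pinctrl_pads[] and, per the relation above, its configuration
+register lives at offset 4 * 2 = 8.
+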
+Fixes: 34027ca2bbc6 ("pinctrl: imx25: fix numbering for pins")
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pinctrl/freescale/pinctrl-imx25.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/pinctrl/freescale/pinctrl-imx25.c
++++ b/drivers/pinctrl/freescale/pinctrl-imx25.c
+@@ -26,7 +26,8 @@
+ #include "pinctrl-imx.h"
+
+ enum imx25_pads {
+- MX25_PAD_RESERVE0 = 1,
++ MX25_PAD_RESERVE0 = 0,
++ MX25_PAD_RESERVE1 = 1,
+ MX25_PAD_A10 = 2,
+ MX25_PAD_A13 = 3,
+ MX25_PAD_A14 = 4,
+@@ -169,6 +170,7 @@ enum imx25_pads {
+ /* Pad names for the pinmux subsystem */
+ static const struct pinctrl_pin_desc imx25_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX25_PAD_RESERVE0),
++ IMX_PINCTRL_PIN(MX25_PAD_RESERVE1),
+ IMX_PINCTRL_PIN(MX25_PAD_A10),
+ IMX_PINCTRL_PIN(MX25_PAD_A13),
+ IMX_PINCTRL_PIN(MX25_PAD_A14),
--- /dev/null
+From 3ebe138ac642a195c7f2efdb918f464734421fd6 Mon Sep 17 00:00:00 2001
+From: Ilya Dryomov <idryomov@gmail.com>
+Date: Mon, 31 Aug 2015 15:21:39 +0300
+Subject: rbd: fix double free on rbd_dev->header_name
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+commit 3ebe138ac642a195c7f2efdb918f464734421fd6 upstream.
+
+If rbd_dev_image_probe() in rbd_dev_probe_parent() fails, header_name
+is freed twice: once in rbd_dev_probe_parent() and then in its caller
+rbd_dev_image_probe() (rbd_dev_image_probe() is called recursively to
+handle parent images).
+
+rbd_dev_probe_parent() is responsible for probing the parent, so it
+shouldn't muck with clone's fields.
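+
+Schematically (function names from the code, flow as described above):
+
+	rbd_dev_image_probe(rbd_dev)		/* the clone */
+	  rbd_dev_probe_parent(rbd_dev)
+	    rbd_dev_image_probe(parent)		/* fails */
+	    kfree(rbd_dev->header_name)		/* 1st free, removed below */
+	  <error path in rbd_dev_image_probe>
+	  kfree(rbd_dev->header_name)		/* 2nd free, in the caller */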
+
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Reviewed-by: Alex Elder <elder@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/rbd.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -5174,7 +5174,6 @@ static int rbd_dev_probe_parent(struct r
+ out_err:
+ if (parent) {
+ rbd_dev_unparent(rbd_dev);
+- kfree(rbd_dev->header_name);
+ rbd_dev_destroy(parent);
+ } else {
+ rbd_put_client(rbdc);
--- /dev/null
+From fe32d3cd5e8eb0f82e459763374aa80797023403 Mon Sep 17 00:00:00 2001
+From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Date: Wed, 15 Jul 2015 12:52:04 +0300
+Subject: sched/preempt: Fix cond_resched_lock() and cond_resched_softirq()
+
+From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+
+commit fe32d3cd5e8eb0f82e459763374aa80797023403 upstream.
+
+These functions check should_resched() before unlocking the spinlock
+or re-enabling bottom halves, but at that point preempt_count is
+always non-zero, so should_resched() always returns false. As a
+result, cond_resched_lock() rescheduled only when spin_needbreak() was
+set.
+
+This patch adds a "preempt_offset" argument to should_resched(), with
+preempt_count offset constants for the expected contexts:
+
+ PREEMPT_DISABLE_OFFSET - offset after preempt_disable()
+ PREEMPT_LOCK_OFFSET - offset after spin_lock()
+ SOFTIRQ_DISABLE_OFFSET - offset after local_bh_disable()
+ SOFTIRQ_LOCK_OFFSET - offset after spin_lock_bh()
+
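+For example, __cond_resched_lock() can now detect "need to reschedule
+and exactly one spinlock held" (a sketch of the intended semantics,
+mirroring the hunk below; assumes CONFIG_PREEMPT_COUNT and no other
+preemption nesting):
+
+	spin_lock(&lock);	/* preempt_count == PREEMPT_LOCK_OFFSET */
+	if (should_resched(PREEMPT_LOCK_OFFSET)) {
+		spin_unlock(&lock);
+		preempt_schedule_common();	/* safe: lock dropped */
+		spin_lock(&lock);
+	}
+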
+Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Alexander Graf <agraf@suse.de>
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: David Vrabel <david.vrabel@citrix.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Paul Mackerras <paulus@samba.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: bdb438065890 ("sched: Extract the basic add/sub preempt_count modifiers")
+Link: http://lkml.kernel.org/r/20150715095204.12246.98268.stgit@buzz
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/preempt.h | 4 ++--
+ include/asm-generic/preempt.h | 5 +++--
+ include/linux/preempt.h | 5 +++--
+ include/linux/preempt_mask.h | 14 +++++++++++---
+ include/linux/sched.h | 6 ------
+ kernel/sched/core.c | 6 +++---
+ 6 files changed, 22 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -90,9 +90,9 @@ static __always_inline bool __preempt_co
+ /*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+-static __always_inline bool should_resched(void)
++static __always_inline bool should_resched(int preempt_offset)
+ {
+- return unlikely(!raw_cpu_read_4(__preempt_count));
++ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
+ }
+
+ #ifdef CONFIG_PREEMPT
+--- a/include/asm-generic/preempt.h
++++ b/include/asm-generic/preempt.h
+@@ -71,9 +71,10 @@ static __always_inline bool __preempt_co
+ /*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+-static __always_inline bool should_resched(void)
++static __always_inline bool should_resched(int preempt_offset)
+ {
+- return unlikely(!preempt_count() && tif_need_resched());
++ return unlikely(preempt_count() == preempt_offset &&
++ tif_need_resched());
+ }
+
+ #ifdef CONFIG_PREEMPT
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -20,7 +20,8 @@
+ #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
+ extern void preempt_count_add(int val);
+ extern void preempt_count_sub(int val);
+-#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
++#define preempt_count_dec_and_test() \
++ ({ preempt_count_sub(1); should_resched(0); })
+ #else
+ #define preempt_count_add(val) __preempt_count_add(val)
+ #define preempt_count_sub(val) __preempt_count_sub(val)
+@@ -59,7 +60,7 @@ do { \
+
+ #define preempt_check_resched() \
+ do { \
+- if (should_resched()) \
++ if (should_resched(0)) \
+ __preempt_schedule(); \
+ } while (0)
+
+--- a/include/linux/preempt_mask.h
++++ b/include/linux/preempt_mask.h
+@@ -71,13 +71,21 @@
+ */
+ #define in_nmi() (preempt_count() & NMI_MASK)
+
++/*
++ * The preempt_count offset after preempt_disable();
++ */
+ #if defined(CONFIG_PREEMPT_COUNT)
+-# define PREEMPT_DISABLE_OFFSET 1
++# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET
+ #else
+-# define PREEMPT_DISABLE_OFFSET 0
++# define PREEMPT_DISABLE_OFFSET 0
+ #endif
+
+ /*
++ * The preempt_count offset after spin_lock()
++ */
++#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
++
++/*
+ * The preempt_count offset needed for things like:
+ *
+ * spin_lock_bh()
+@@ -90,7 +98,7 @@
+ *
+ * Work as expected.
+ */
+-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
++#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
+
+ /*
+ * Are we running in atomic context? WARNING: this macro cannot
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2834,12 +2834,6 @@ extern int _cond_resched(void);
+
+ extern int __cond_resched_lock(spinlock_t *lock);
+
+-#ifdef CONFIG_PREEMPT_COUNT
+-#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
+-#else
+-#define PREEMPT_LOCK_OFFSET 0
+-#endif
+-
+ #define cond_resched_lock(lock) ({ \
+ ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
+ __cond_resched_lock(lock); \
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4232,7 +4232,7 @@ SYSCALL_DEFINE0(sched_yield)
+
+ int __sched _cond_resched(void)
+ {
+- if (should_resched()) {
++ if (should_resched(0)) {
+ preempt_schedule_common();
+ return 1;
+ }
+@@ -4250,7 +4250,7 @@ EXPORT_SYMBOL(_cond_resched);
+ */
+ int __cond_resched_lock(spinlock_t *lock)
+ {
+- int resched = should_resched();
++ int resched = should_resched(PREEMPT_LOCK_OFFSET);
+ int ret = 0;
+
+ lockdep_assert_held(lock);
+@@ -4272,7 +4272,7 @@ int __sched __cond_resched_softirq(void)
+ {
+ BUG_ON(!in_softirq());
+
+- if (should_resched()) {
++ if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
+ local_bh_enable();
+ preempt_schedule_common();
+ local_bh_disable();
--- /dev/null
+From 90b62b5129d5cb50f62f40e684de7a1961e57197 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <fweisbec@gmail.com>
+Date: Tue, 12 May 2015 16:41:48 +0200
+Subject: sched/preempt: Rename PREEMPT_CHECK_OFFSET to PREEMPT_DISABLE_OFFSET
+
+From: Frederic Weisbecker <fweisbec@gmail.com>
+
+commit 90b62b5129d5cb50f62f40e684de7a1961e57197 upstream.
+
+"CHECK" suggests it's only used as a comparison mask. But now it's used
+further as a config-conditional preempt disabler offset. Lets
+disambiguate this name.
+
+Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1431441711-29753-4-git-send-email-fweisbec@gmail.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/preempt_mask.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/include/linux/preempt_mask.h
++++ b/include/linux/preempt_mask.h
+@@ -72,9 +72,9 @@
+ #define in_nmi() (preempt_count() & NMI_MASK)
+
+ #if defined(CONFIG_PREEMPT_COUNT)
+-# define PREEMPT_CHECK_OFFSET 1
++# define PREEMPT_DISABLE_OFFSET 1
+ #else
+-# define PREEMPT_CHECK_OFFSET 0
++# define PREEMPT_DISABLE_OFFSET 0
+ #endif
+
+ /*
+@@ -90,7 +90,7 @@
+ *
+ * Work as expected.
+ */
+-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_CHECK_OFFSET)
++#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
+
+ /*
+ * Are we running in atomic context? WARNING: this macro cannot
+@@ -106,7 +106,7 @@
+ * (used by the scheduler, *after* releasing the kernel lock)
+ */
+ #define in_atomic_preempt_off() \
+- ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
++ ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET)
+
+ #ifdef CONFIG_PREEMPT_COUNT
+ # define preemptible() (preempt_count() == 0 && !irqs_disabled())
drm-radeon-attach-tile-property-to-mst-connector.patch
drm-radeon-add-pm-sysfs-files-late.patch
dm-thin-fix-missing-pool-reference-count-decrement-in-pool_ctr-error-path.patch
+rbd-fix-double-free-on-rbd_dev-header_name.patch
+sched-preempt-rename-preempt_check_offset-to-preempt_disable_offset.patch
+sched-preempt-fix-cond_resched_lock-and-cond_resched_softirq.patch
+pinctrl-imx25-ensure-that-a-pin-with-id-i-is-at-position-i-in-the-info-array.patch
+arm64-fix-thp-protection-change-logic.patch
+svcrdma-handle-rdma-read-with-a-non-zero-initial-page-offset.patch
+locks-have-flock_lock_file-take-an-inode-pointer-instead-of-a-filp.patch
+locks-new-helpers-flock_lock_inode_wait-and-posix_lock_inode_wait.patch
+locks-inline-posix_lock_file_wait-and-flock_lock_file_wait.patch
+nfs4-have-do_vfs_lock-take-an-inode-pointer.patch
--- /dev/null
+From c91aed9896946721bb30705ea2904edb3725dd61 Mon Sep 17 00:00:00 2001
+From: Steve Wise <swise@opengridcomputing.com>
+Date: Mon, 28 Sep 2015 16:46:06 -0500
+Subject: svcrdma: handle rdma read with a non-zero initial page offset
+
+From: Steve Wise <swise@opengridcomputing.com>
+
+commit c91aed9896946721bb30705ea2904edb3725dd61 upstream.
+
+The server rdma_read_chunk_lcl() and rdma_read_chunk_frmr() functions
+were not taking into account the initial page_offset when determining
+the rdma read length. This resulted in a read whose starting address
+and length exceeded the base/bounds of the frmr.
+
+The server gets an async error from the rdma device and kills the
+connection, and the client then reconnects and resends. This repeats
+indefinitely, and the application hangs.
+
+Most workloads apparently don't trigger this bug, but one test hit it
+every time: building the Linux kernel on a 16-core node with
+'make -j 16 O=/mnt/0', where /mnt/0 is a ramdisk mounted via NFSRDMA.
+
+This bug seems to be tripped only by devices with small fastreg page
+list depths. I didn't see it with mlx4, for instance.
+
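+Worked through with 4KiB pages (illustrative numbers, not from the
+report): if *page_offset is 512 and pages_needed is 2, the old code
+allowed a read of up to 2 * 4096 = 8192 bytes starting 512 bytes into
+the 8192-byte region mapped by the frmr, overrunning it by 512 bytes.
+Subtracting the initial offset caps the read at 8192 - 512 = 7680
+bytes.
+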
+Fixes: 0bf4828983df ('svcrdma: refactor marshalling logic')
+Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+Tested-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -146,7 +146,8 @@ int rdma_read_chunk_lcl(struct svcxprt_r
+ ctxt->read_hdr = head;
+ pages_needed =
+ min_t(int, pages_needed, rdma_read_max_sge(xprt, pages_needed));
+- read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);
++ read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
++ rs_length);
+
+ for (pno = 0; pno < pages_needed; pno++) {
+ int len = min_t(int, rs_length, PAGE_SIZE - pg_off);
+@@ -245,7 +246,8 @@ int rdma_read_chunk_frmr(struct svcxprt_
+ ctxt->direction = DMA_FROM_DEVICE;
+ ctxt->frmr = frmr;
+ pages_needed = min_t(int, pages_needed, xprt->sc_frmr_pg_list_len);
+- read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);
++ read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
++ rs_length);
+
+ frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]);
+ frmr->direction = DMA_FROM_DEVICE;