--- /dev/null
+From b84477d3ebb96294f87dc3161e53fa8fe22d9bfd Mon Sep 17 00:00:00 2001
+From: Harshad Shirwadkar <harshadshirwadkar@gmail.com>
+Date: Sat, 5 Oct 2019 11:59:27 -0700
+Subject: blk-wbt: fix performance regression in wbt scale_up/scale_down
+
+From: Harshad Shirwadkar <harshadshirwadkar@gmail.com>
+
+commit b84477d3ebb96294f87dc3161e53fa8fe22d9bfd upstream.
+
+scale_up() wakes up waiters after scaling up. But once the maximum has
+been reached, it should not wake up more waiters, as they will have
+nothing to do. Fix this by making scale_up() (and also scale_down())
+return a boolean, so that the callers can bail out once the threshold
+has been reached.
+
+This bug causes increased fdatasync latency when fdatasync and dd
+conv=sync are performed in parallel on 4.19 compared to 4.14. This
+bug was introduced during refactoring of blk-wbt code.
+
+Fixes: a79050434b45 ("blk-rq-qos: refactor out common elements of blk-wbt")
+Cc: stable@vger.kernel.org
+Cc: Josef Bacik <jbacik@fb.com>
+Signed-off-by: Harshad Shirwadkar <harshadshirwadkar@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-rq-qos.c | 14 +++++++++-----
+ block/blk-rq-qos.h | 4 ++--
+ block/blk-wbt.c | 6 ++++--
+ 3 files changed, 15 insertions(+), 9 deletions(-)
+
+--- a/block/blk-rq-qos.c
++++ b/block/blk-rq-qos.c
+@@ -142,24 +142,27 @@ bool rq_depth_calc_max_depth(struct rq_d
+ return ret;
+ }
+
+-void rq_depth_scale_up(struct rq_depth *rqd)
++/* Returns true on success and false if scaling up wasn't possible */
++bool rq_depth_scale_up(struct rq_depth *rqd)
+ {
+ /*
+ * Hit max in previous round, stop here
+ */
+ if (rqd->scaled_max)
+- return;
++ return false;
+
+ rqd->scale_step--;
+
+ rqd->scaled_max = rq_depth_calc_max_depth(rqd);
++ return true;
+ }
+
+ /*
+ * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
+- * had a latency violation.
++ * had a latency violation. Returns true on success and returns false if
++ * scaling down wasn't possible.
+ */
+-void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
++bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
+ {
+ /*
+ * Stop scaling down when we've hit the limit. This also prevents
+@@ -167,7 +170,7 @@ void rq_depth_scale_down(struct rq_depth
+ * keep up.
+ */
+ if (rqd->max_depth == 1)
+- return;
++ return false;
+
+ if (rqd->scale_step < 0 && hard_throttle)
+ rqd->scale_step = 0;
+@@ -176,6 +179,7 @@ void rq_depth_scale_down(struct rq_depth
+
+ rqd->scaled_max = false;
+ rq_depth_calc_max_depth(rqd);
++ return true;
+ }
+
+ struct rq_qos_wait_data {
+--- a/block/blk-rq-qos.h
++++ b/block/blk-rq-qos.h
+@@ -125,8 +125,8 @@ void rq_qos_wait(struct rq_wait *rqw, vo
+ acquire_inflight_cb_t *acquire_inflight_cb,
+ cleanup_cb_t *cleanup_cb);
+ bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
+-void rq_depth_scale_up(struct rq_depth *rqd);
+-void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
++bool rq_depth_scale_up(struct rq_depth *rqd);
++bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
+ bool rq_depth_calc_max_depth(struct rq_depth *rqd);
+
+ void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
+--- a/block/blk-wbt.c
++++ b/block/blk-wbt.c
+@@ -308,7 +308,8 @@ static void calc_wb_limits(struct rq_wb
+
+ static void scale_up(struct rq_wb *rwb)
+ {
+- rq_depth_scale_up(&rwb->rq_depth);
++ if (!rq_depth_scale_up(&rwb->rq_depth))
++ return;
+ calc_wb_limits(rwb);
+ rwb->unknown_cnt = 0;
+ rwb_wake_all(rwb);
+@@ -317,7 +318,8 @@ static void scale_up(struct rq_wb *rwb)
+
+ static void scale_down(struct rq_wb *rwb, bool hard_throttle)
+ {
+- rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
++ if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
++ return;
+ calc_wb_limits(rwb);
+ rwb->unknown_cnt = 0;
+ rwb_trace_step(rwb, "scale down");
--- /dev/null
+From 0b3d0ef9840f7be202393ca9116b857f6f793715 Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <piastryyy@gmail.com>
+Date: Mon, 30 Sep 2019 10:06:20 -0700
+Subject: CIFS: Force reval dentry if LOOKUP_REVAL flag is set
+
+From: Pavel Shilovsky <piastryyy@gmail.com>
+
+commit 0b3d0ef9840f7be202393ca9116b857f6f793715 upstream.
+
+Mark the inode for forced revalidation if the LOOKUP_REVAL flag is set.
+This tells the client to actually send a QueryInfo request to the
+server to obtain the latest metadata in case a directory or a file was
+changed remotely. Only do that if the client doesn't have a lease for
+the file, to avoid unneeded round trips to the server.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/dir.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -738,10 +738,16 @@ cifs_lookup(struct inode *parent_dir_ino
+ static int
+ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
+ {
++ struct inode *inode;
++
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ if (d_really_is_positive(direntry)) {
++ inode = d_inode(direntry);
++ if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
++ CIFS_I(inode)->time = 0; /* force reval */
++
+ if (cifs_revalidate_dentry(direntry))
+ return 0;
+ else {
+@@ -752,7 +758,7 @@ cifs_d_revalidate(struct dentry *direntr
+ * attributes will have been updated by
+ * cifs_revalidate_dentry().
+ */
+- if (IS_AUTOMOUNT(d_inode(direntry)) &&
++ if (IS_AUTOMOUNT(inode) &&
+ !(direntry->d_flags & DCACHE_NEED_AUTOMOUNT)) {
+ spin_lock(&direntry->d_lock);
+ direntry->d_flags |= DCACHE_NEED_AUTOMOUNT;
--- /dev/null
+From c82e5ac7fe3570a269c0929bf7899f62048e7dbc Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <piastryyy@gmail.com>
+Date: Mon, 30 Sep 2019 10:06:19 -0700
+Subject: CIFS: Force revalidate inode when dentry is stale
+
+From: Pavel Shilovsky <piastryyy@gmail.com>
+
+commit c82e5ac7fe3570a269c0929bf7899f62048e7dbc upstream.
+
+Currently the client marks a dentry as stale when the inode numbers or
+file types of the local inode and the remote file don't match. In that
+case the attributes are not copied from remote to local, so it is
+already known that the local copy has stale metadata. That's why the
+inode needs to be marked for revalidation, to tell the VFS to look up
+the dentry again before opening a file. This prevents unexpected stale
+errors from being returned to user space when opening a file.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/inode.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -414,6 +414,7 @@ int cifs_get_inode_info_unix(struct inod
+ /* if uniqueid is different, return error */
+ if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
+ CIFS_I(*pinode)->uniqueid != fattr.cf_uniqueid)) {
++ CIFS_I(*pinode)->time = 0; /* force reval */
+ rc = -ESTALE;
+ goto cgiiu_exit;
+ }
+@@ -421,6 +422,7 @@ int cifs_get_inode_info_unix(struct inod
+ /* if filetype is different, return error */
+ if (unlikely(((*pinode)->i_mode & S_IFMT) !=
+ (fattr.cf_mode & S_IFMT))) {
++ CIFS_I(*pinode)->time = 0; /* force reval */
+ rc = -ESTALE;
+ goto cgiiu_exit;
+ }
+@@ -924,6 +926,7 @@ cifs_get_inode_info(struct inode **inode
+ /* if uniqueid is different, return error */
+ if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
+ CIFS_I(*inode)->uniqueid != fattr.cf_uniqueid)) {
++ CIFS_I(*inode)->time = 0; /* force reval */
+ rc = -ESTALE;
+ goto cgii_exit;
+ }
+@@ -931,6 +934,7 @@ cifs_get_inode_info(struct inode **inode
+ /* if filetype is different, return error */
+ if (unlikely(((*inode)->i_mode & S_IFMT) !=
+ (fattr.cf_mode & S_IFMT))) {
++ CIFS_I(*inode)->time = 0; /* force reval */
+ rc = -ESTALE;
+ goto cgii_exit;
+ }
--- /dev/null
+From 30573a82fb179420b8aac30a3a3595aa96a93156 Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <piastryyy@gmail.com>
+Date: Mon, 30 Sep 2019 10:06:18 -0700
+Subject: CIFS: Gracefully handle QueryInfo errors during open
+
+From: Pavel Shilovsky <piastryyy@gmail.com>
+
+commit 30573a82fb179420b8aac30a3a3595aa96a93156 upstream.
+
+Currently, if the client identifies problems when processing metadata
+returned in the CREATE response, the open handle is leaked. This causes
+multiple problems, such as the file missing a lease break by that
+client, which leads to high latencies for other clients accessing the
+file. Another side effect is that the file can't be deleted.
+
+Fix this by closing the file when the client hits an error after the
+file was opened but before the open descriptor was returned to user
+space. Also convert -ESTALE to -EOPENSTALE to allow the VFS to
+revalidate the dentry and retry the open.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/file.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -253,6 +253,12 @@ cifs_nt_open(char *full_path, struct ino
+ rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
+ xid, fid);
+
++ if (rc) {
++ server->ops->close(xid, tcon, fid);
++ if (rc == -ESTALE)
++ rc = -EOPENSTALE;
++ }
++
+ out:
+ kfree(buf);
+ return rc;
--- /dev/null
+From cb248819d209d113e45fed459773991518e8e80b Mon Sep 17 00:00:00 2001
+From: Dave Wysochanski <dwysocha@redhat.com>
+Date: Thu, 3 Oct 2019 15:16:27 +1000
+Subject: cifs: use cifsInodeInfo->open_file_lock while iterating to avoid a panic
+
+From: Dave Wysochanski <dwysocha@redhat.com>
+
+commit cb248819d209d113e45fed459773991518e8e80b upstream.
+
+Commit 487317c99477 ("cifs: add spinlock for the openFileList to
+cifsInodeInfo") added the cifsInodeInfo->open_file_lock spinlock to
+protect the openFileList, but missed a few places where
+cifs_inode->openFileList is enumerated. Change the remaining uses of
+tcon->open_file_lock to cifsInodeInfo->open_file_lock to avoid a panic
+in is_size_safe_to_change().
+
+[17313.245641] RIP: 0010:is_size_safe_to_change+0x57/0xb0 [cifs]
+[17313.245645] Code: 68 40 48 89 ef e8 19 67 b7 f1 48 8b 43 40 48 8d 4b 40 48 8d 50 f0 48 39 c1 75 0f eb 47 48 8b 42 10 48 8d 50 f0 48 39 c1 74 3a <8b> 80 88 00 00 00 83 c0 01 a8 02 74 e6 48 89 ef c6 07 00 0f 1f 40
+[17313.245649] RSP: 0018:ffff94ae1baefa30 EFLAGS: 00010202
+[17313.245654] RAX: dead000000000100 RBX: ffff88dc72243300 RCX: ffff88dc72243340
+[17313.245657] RDX: dead0000000000f0 RSI: 00000000098f7940 RDI: ffff88dd3102f040
+[17313.245659] RBP: ffff88dd3102f040 R08: 0000000000000000 R09: ffff94ae1baefc40
+[17313.245661] R10: ffffcdc8bb1c4e80 R11: ffffcdc8b50adb08 R12: 00000000098f7940
+[17313.245663] R13: ffff88dc72243300 R14: ffff88dbc8f19600 R15: ffff88dc72243428
+[17313.245667] FS: 00007fb145485700(0000) GS:ffff88dd3e000000(0000) knlGS:0000000000000000
+[17313.245670] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[17313.245672] CR2: 0000026bb46c6000 CR3: 0000004edb110003 CR4: 00000000007606e0
+[17313.245753] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[17313.245756] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[17313.245759] PKRU: 55555554
+[17313.245761] Call Trace:
+[17313.245803] cifs_fattr_to_inode+0x16b/0x580 [cifs]
+[17313.245838] cifs_get_inode_info+0x35c/0xa60 [cifs]
+[17313.245852] ? kmem_cache_alloc_trace+0x151/0x1d0
+[17313.245885] cifs_open+0x38f/0x990 [cifs]
+[17313.245921] ? cifs_revalidate_dentry_attr+0x3e/0x350 [cifs]
+[17313.245953] ? cifsFileInfo_get+0x30/0x30 [cifs]
+[17313.245960] ? do_dentry_open+0x132/0x330
+[17313.245963] do_dentry_open+0x132/0x330
+[17313.245969] path_openat+0x573/0x14d0
+[17313.245974] do_filp_open+0x93/0x100
+[17313.245979] ? __check_object_size+0xa3/0x181
+[17313.245986] ? audit_alloc_name+0x7e/0xd0
+[17313.245992] do_sys_open+0x184/0x220
+[17313.245999] do_syscall_64+0x5b/0x1b0
+
+Fixes: 487317c99477 ("cifs: add spinlock for the openFileList to cifsInodeInfo")
+
+CC: Stable <stable@vger.kernel.org>
+Signed-off-by: Dave Wysochanski <dwysocha@redhat.com>
+Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/file.c | 27 +++++++++++----------------
+ 1 file changed, 11 insertions(+), 16 deletions(-)
+
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -1853,13 +1853,12 @@ struct cifsFileInfo *find_readable_file(
+ {
+ struct cifsFileInfo *open_file = NULL;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+- struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+
+ /* only filter by fsuid on multiuser mounts */
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
+ fsuid_only = false;
+
+- spin_lock(&tcon->open_file_lock);
++ spin_lock(&cifs_inode->open_file_lock);
+ /* we could simply get the first_list_entry since write-only entries
+ are always at the end of the list but since the first entry might
+ have a close pending, we go through the whole list */
+@@ -1871,7 +1870,7 @@ struct cifsFileInfo *find_readable_file(
+ /* found a good file */
+ /* lock it so it will not be closed on us */
+ cifsFileInfo_get(open_file);
+- spin_unlock(&tcon->open_file_lock);
++ spin_unlock(&cifs_inode->open_file_lock);
+ return open_file;
+ } /* else might as well continue, and look for
+ another, or simply have the caller reopen it
+@@ -1879,7 +1878,7 @@ struct cifsFileInfo *find_readable_file(
+ } else /* write only file */
+ break; /* write only files are last so must be done */
+ }
+- spin_unlock(&tcon->open_file_lock);
++ spin_unlock(&cifs_inode->open_file_lock);
+ return NULL;
+ }
+
+@@ -1890,7 +1889,6 @@ cifs_get_writable_file(struct cifsInodeI
+ {
+ struct cifsFileInfo *open_file, *inv_file = NULL;
+ struct cifs_sb_info *cifs_sb;
+- struct cifs_tcon *tcon;
+ bool any_available = false;
+ int rc = -EBADF;
+ unsigned int refind = 0;
+@@ -1910,16 +1908,15 @@ cifs_get_writable_file(struct cifsInodeI
+ }
+
+ cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+- tcon = cifs_sb_master_tcon(cifs_sb);
+
+ /* only filter by fsuid on multiuser mounts */
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
+ fsuid_only = false;
+
+- spin_lock(&tcon->open_file_lock);
++ spin_lock(&cifs_inode->open_file_lock);
+ refind_writable:
+ if (refind > MAX_REOPEN_ATT) {
+- spin_unlock(&tcon->open_file_lock);
++ spin_unlock(&cifs_inode->open_file_lock);
+ return rc;
+ }
+ list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
+@@ -1931,7 +1928,7 @@ refind_writable:
+ if (!open_file->invalidHandle) {
+ /* found a good writable file */
+ cifsFileInfo_get(open_file);
+- spin_unlock(&tcon->open_file_lock);
++ spin_unlock(&cifs_inode->open_file_lock);
+ *ret_file = open_file;
+ return 0;
+ } else {
+@@ -1951,7 +1948,7 @@ refind_writable:
+ cifsFileInfo_get(inv_file);
+ }
+
+- spin_unlock(&tcon->open_file_lock);
++ spin_unlock(&cifs_inode->open_file_lock);
+
+ if (inv_file) {
+ rc = cifs_reopen_file(inv_file, false);
+@@ -1966,7 +1963,7 @@ refind_writable:
+ cifsFileInfo_put(inv_file);
+ ++refind;
+ inv_file = NULL;
+- spin_lock(&tcon->open_file_lock);
++ spin_lock(&cifs_inode->open_file_lock);
+ goto refind_writable;
+ }
+
+@@ -4405,17 +4402,15 @@ static int cifs_readpage(struct file *fi
+ static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
+ {
+ struct cifsFileInfo *open_file;
+- struct cifs_tcon *tcon =
+- cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
+
+- spin_lock(&tcon->open_file_lock);
++ spin_lock(&cifs_inode->open_file_lock);
+ list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
+ if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
+- spin_unlock(&tcon->open_file_lock);
++ spin_unlock(&cifs_inode->open_file_lock);
+ return 1;
+ }
+ }
+- spin_unlock(&tcon->open_file_lock);
++ spin_unlock(&cifs_inode->open_file_lock);
+ return 0;
+ }
+
--- /dev/null
+From 6c76a93c453643e11a1063906c7c39168dd8d163 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Mon, 9 Sep 2019 12:00:08 +0100
+Subject: drm/i915: Perform GGTT restore much earlier during resume
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 6c76a93c453643e11a1063906c7c39168dd8d163 upstream.
+
+As soon as we re-enable the various functions within the HW, they may go
+off and read data via a GGTT offset. Hence, if we have not yet restored
+the GGTT PTE before then, they may read and even *write* random locations
+in memory.
+
+Detected by DMAR faults during resume.
+
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Cc: Martin Peres <martin.peres@linux.intel.com>
+Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190909110011.8958-4-chris@chris-wilson.co.uk
+(cherry picked from commit cec5ca08e36fd18d2939b98055346b3b06f56c6c)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gem/i915_gem_pm.c | 3 ---
+ drivers/gpu/drm/i915/i915_drv.c | 5 +++++
+ drivers/gpu/drm/i915/selftests/i915_gem.c | 6 ++++++
+ 3 files changed, 11 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+@@ -250,9 +250,6 @@ void i915_gem_resume(struct drm_i915_pri
+ mutex_lock(&i915->drm.struct_mutex);
+ intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
+
+- i915_gem_restore_gtt_mappings(i915);
+- i915_gem_restore_fences(i915);
+-
+ if (i915_gem_init_hw(i915))
+ goto err_wedged;
+
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -2238,6 +2238,11 @@ static int i915_drm_resume(struct drm_de
+ if (ret)
+ DRM_ERROR("failed to re-enable GGTT\n");
+
++ mutex_lock(&dev_priv->drm.struct_mutex);
++ i915_gem_restore_gtt_mappings(dev_priv);
++ i915_gem_restore_fences(dev_priv);
++ mutex_unlock(&dev_priv->drm.struct_mutex);
++
+ intel_csr_ucode_resume(dev_priv);
+
+ i915_restore_state(dev_priv);
+--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
++++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
+@@ -117,6 +117,12 @@ static void pm_resume(struct drm_i915_pr
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ intel_gt_sanitize(i915, false);
+ i915_gem_sanitize(i915);
++
++ mutex_lock(&i915->drm.struct_mutex);
++ i915_gem_restore_gtt_mappings(i915);
++ i915_gem_restore_fences(i915);
++ mutex_unlock(&i915->drm.struct_mutex);
++
+ i915_gem_resume(i915);
+ }
+ }
--- /dev/null
+From 047d50aee341d940350897c85799e56ae57c3849 Mon Sep 17 00:00:00 2001
+From: Peter Jones <pjones@redhat.com>
+Date: Wed, 2 Oct 2019 18:59:00 +0200
+Subject: efi/tpm: Don't access event->count when it isn't mapped
+
+From: Peter Jones <pjones@redhat.com>
+
+commit 047d50aee341d940350897c85799e56ae57c3849 upstream.
+
+Some machines generate a lot of event log entries. When we're
+iterating over them, the code removes the old mapping and adds a
+new one, so once we cross the page boundary we're unmapping the page
+with the count on it. Hilarity ensues.
+
+This patch keeps the info from the header in local variables so we don't
+need to access that page again or keep track of whether it's mapped.
+
+Tested-by: Lyude Paul <lyude@redhat.com>
+Signed-off-by: Peter Jones <pjones@redhat.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Acked-by: Matthew Garrett <mjg59@google.com>
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Ben Dooks <ben.dooks@codethink.co.uk>
+Cc: Dave Young <dyoung@redhat.com>
+Cc: Jerry Snitselaar <jsnitsel@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Lukas Wunner <lukas@wunner.de>
+Cc: Octavian Purdila <octavian.purdila@intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Scott Talbert <swt@techie.net>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Cc: linux-integrity@vger.kernel.org
+Cc: stable@vger.kernel.org
+Fixes: 44038bc514a2 ("tpm: Abstract crypto agile event size calculations")
+Link: https://lkml.kernel.org/r/20191002165904.8819-4-ard.biesheuvel@linaro.org
+[ Minor edits. ]
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/tpm_eventlog.h | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+--- a/include/linux/tpm_eventlog.h
++++ b/include/linux/tpm_eventlog.h
+@@ -170,6 +170,7 @@ static inline int __calc_tpm2_event_size
+ u16 halg;
+ int i;
+ int j;
++ u32 count, event_type;
+
+ marker = event;
+ marker_start = marker;
+@@ -190,16 +191,22 @@ static inline int __calc_tpm2_event_size
+ }
+
+ event = (struct tcg_pcr_event2_head *)mapping;
++ /*
++ * The loop below will unmap these fields if the log is larger than
++ * one page, so save them here for reference:
++ */
++ count = READ_ONCE(event->count);
++ event_type = READ_ONCE(event->event_type);
+
+ efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
+
+ /* Check if event is malformed. */
+- if (event->count > efispecid->num_algs) {
++ if (count > efispecid->num_algs) {
+ size = 0;
+ goto out;
+ }
+
+- for (i = 0; i < event->count; i++) {
++ for (i = 0; i < count; i++) {
+ halg_size = sizeof(event->digests[i].alg_id);
+
+ /* Map the digest's algorithm identifier */
+@@ -256,8 +263,9 @@ static inline int __calc_tpm2_event_size
+ + event_field->event_size;
+ size = marker - marker_start;
+
+- if ((event->event_type == 0) && (event_field->event_size == 0))
++ if (event_type == 0 && event_field->event_size == 0)
+ size = 0;
++
+ out:
+ if (do_mapping)
+ TPM_MEMUNMAP(mapping, mapping_size);
--- /dev/null
+From 05c8c1ff81ed2eb9bad7c27cf92e55c864c16df8 Mon Sep 17 00:00:00 2001
+From: Peter Jones <pjones@redhat.com>
+Date: Wed, 2 Oct 2019 18:59:01 +0200
+Subject: efi/tpm: Don't traverse an event log with no events
+
+From: Peter Jones <pjones@redhat.com>
+
+commit 05c8c1ff81ed2eb9bad7c27cf92e55c864c16df8 upstream.
+
+When there are no entries to put into the final event log, some machines
+will return the template they would have populated anyway. In this case
+the nr_events field is 0, but the rest of the log is just garbage.
+
+This patch stops us from trying to iterate the table with
+__calc_tpm2_event_size() when the number of events in the table is 0.
+
+Tested-by: Lyude Paul <lyude@redhat.com>
+Signed-off-by: Peter Jones <pjones@redhat.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Acked-by: Matthew Garrett <mjg59@google.com>
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Ben Dooks <ben.dooks@codethink.co.uk>
+Cc: Dave Young <dyoung@redhat.com>
+Cc: Jerry Snitselaar <jsnitsel@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Lukas Wunner <lukas@wunner.de>
+Cc: Octavian Purdila <octavian.purdila@intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Scott Talbert <swt@techie.net>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Cc: linux-integrity@vger.kernel.org
+Cc: stable@vger.kernel.org
+Fixes: c46f3405692d ("tpm: Reserve the TPM final events table")
+Link: https://lkml.kernel.org/r/20191002165904.8819-5-ard.biesheuvel@linaro.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/firmware/efi/tpm.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/drivers/firmware/efi/tpm.c
++++ b/drivers/firmware/efi/tpm.c
+@@ -75,11 +75,16 @@ int __init efi_tpm_eventlog_init(void)
+ goto out;
+ }
+
+- tbl_size = tpm2_calc_event_log_size((void *)efi.tpm_final_log
+- + sizeof(final_tbl->version)
+- + sizeof(final_tbl->nr_events),
+- final_tbl->nr_events,
+- log_tbl->log);
++ tbl_size = 0;
++ if (final_tbl->nr_events != 0) {
++ void *events = (void *)efi.tpm_final_log
++ + sizeof(final_tbl->version)
++ + sizeof(final_tbl->nr_events);
++
++ tbl_size = tpm2_calc_event_log_size(events,
++ final_tbl->nr_events,
++ log_tbl->log);
++ }
+ memblock_reserve((unsigned long)final_tbl,
+ tbl_size + sizeof(*final_tbl));
+ early_memunmap(final_tbl, sizeof(*final_tbl));
--- /dev/null
+From e658c82be5561412c5e83b5e74e9da4830593f3e Mon Sep 17 00:00:00 2001
+From: Jerry Snitselaar <jsnitsel@redhat.com>
+Date: Wed, 2 Oct 2019 18:59:02 +0200
+Subject: efi/tpm: Only set 'efi_tpm_final_log_size' after successful event log parsing
+
+From: Jerry Snitselaar <jsnitsel@redhat.com>
+
+commit e658c82be5561412c5e83b5e74e9da4830593f3e upstream.
+
+If __calc_tpm2_event_size() fails to parse an event it will return 0,
+resulting in tpm2_calc_event_log_size() returning -1. Currently there
+is no check of this return value, and 'efi_tpm_final_log_size' can end
+up being set to this negative value, resulting in a crash like this one:
+
+ BUG: unable to handle page fault for address: ffffbc8fc00866ad
+ #PF: supervisor read access in kernel mode
+ #PF: error_code(0x0000) - not-present page
+
+ RIP: 0010:memcpy_erms+0x6/0x10
+ Call Trace:
+ tpm_read_log_efi()
+ tpm_bios_log_setup()
+ tpm_chip_register()
+ tpm_tis_core_init.cold.9+0x28c/0x466
+ tpm_tis_plat_probe()
+ platform_drv_probe()
+ ...
+
+Also, __calc_tpm2_event_size() returns a size of 0 when it fails to
+parse an event, so update the function documentation to reflect this.
+
+The root cause of the event-parsing failure in this case is resolved by
+Peter Jones' patchset dealing with large event logs, where crossing a
+page boundary causes the page with the event count to be unmapped.
+
+Signed-off-by: Jerry Snitselaar <jsnitsel@redhat.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Ben Dooks <ben.dooks@codethink.co.uk>
+Cc: Dave Young <dyoung@redhat.com>
+Cc: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Lukas Wunner <lukas@wunner.de>
+Cc: Lyude Paul <lyude@redhat.com>
+Cc: Matthew Garrett <mjg59@google.com>
+Cc: Octavian Purdila <octavian.purdila@intel.com>
+Cc: Peter Jones <pjones@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Scott Talbert <swt@techie.net>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Cc: linux-integrity@vger.kernel.org
+Cc: stable@vger.kernel.org
+Fixes: c46f3405692de ("tpm: Reserve the TPM final events table")
+Link: https://lkml.kernel.org/r/20191002165904.8819-6-ard.biesheuvel@linaro.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/firmware/efi/tpm.c | 9 ++++++++-
+ include/linux/tpm_eventlog.h | 2 +-
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/firmware/efi/tpm.c
++++ b/drivers/firmware/efi/tpm.c
+@@ -85,11 +85,18 @@ int __init efi_tpm_eventlog_init(void)
+ final_tbl->nr_events,
+ log_tbl->log);
+ }
++
++ if (tbl_size < 0) {
++ pr_err(FW_BUG "Failed to parse event in TPM Final Events Log\n");
++ goto out_calc;
++ }
++
+ memblock_reserve((unsigned long)final_tbl,
+ tbl_size + sizeof(*final_tbl));
+- early_memunmap(final_tbl, sizeof(*final_tbl));
+ efi_tpm_final_log_size = tbl_size;
+
++out_calc:
++ early_memunmap(final_tbl, sizeof(*final_tbl));
+ out:
+ early_memunmap(log_tbl, sizeof(*log_tbl));
+ return ret;
+--- a/include/linux/tpm_eventlog.h
++++ b/include/linux/tpm_eventlog.h
+@@ -152,7 +152,7 @@ struct tcg_algorithm_info {
+ * total. Once we've done this we know the offset of the data length field,
+ * and can calculate the total size of the event.
+ *
+- * Return: size of the event on success, <0 on failure
++ * Return: size of the event on success, 0 on failure
+ */
+
+ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
--- /dev/null
+From c05f8f92b701576b615f30aac31fabdc0648649b Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Wed, 2 Oct 2019 18:58:59 +0200
+Subject: efivar/ssdt: Don't iterate over EFI vars if no SSDT override was specified
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit c05f8f92b701576b615f30aac31fabdc0648649b upstream.
+
+The kernel command line option efivar_ssdt= allows specifying the name
+of an EFI variable containing an ACPI SSDT table that should be loaded
+into memory by the OS and treated as if it had been provided by the
+firmware.
+
+Currently, that code will always iterate over the EFI variables and
+compare each name with the provided name, even if the command line
+option wasn't set to begin with.
+
+So bail out early when no variable name was provided. This works around
+a boot regression on the 2012 Mac Pro, as reported by Scott.
+
+Tested-by: Scott Talbert <swt@techie.net>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: <stable@vger.kernel.org> # v4.9+
+Cc: Ben Dooks <ben.dooks@codethink.co.uk>
+Cc: Dave Young <dyoung@redhat.com>
+Cc: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Cc: Jerry Snitselaar <jsnitsel@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Lukas Wunner <lukas@wunner.de>
+Cc: Lyude Paul <lyude@redhat.com>
+Cc: Matthew Garrett <mjg59@google.com>
+Cc: Octavian Purdila <octavian.purdila@intel.com>
+Cc: Peter Jones <pjones@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Cc: linux-integrity@vger.kernel.org
+Fixes: 475fb4e8b2f4 ("efi / ACPI: load SSTDs from EFI variables")
+Link: https://lkml.kernel.org/r/20191002165904.8819-3-ard.biesheuvel@linaro.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/firmware/efi/efi.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -282,6 +282,9 @@ static __init int efivar_ssdt_load(void)
+ void *data;
+ int ret;
+
++ if (!efivar_ssdt[0])
++ return 0;
++
+ ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
+
+ list_for_each_entry_safe(entry, aux, &entries, list) {
--- /dev/null
+From 1cbe866cbcb53338de33cf67262e73f9315a9725 Mon Sep 17 00:00:00 2001
+From: Mohamad Heib <mohamadh@mellanox.com>
+Date: Wed, 2 Oct 2019 15:21:27 +0300
+Subject: IB/core: Fix wrong iterating on ports
+
+From: Mohamad Heib <mohamadh@mellanox.com>
+
+commit 1cbe866cbcb53338de33cf67262e73f9315a9725 upstream.
+
+rdma_for_each_port() already increments the iterator it receives;
+therefore, incrementing it again in the loop body advances it by 2 per
+iteration, which eventually causes wrong queries and possible traces.
+
+Fix this by removing the old, now redundant increment that was used
+before the rdma_for_each_port() macro was introduced.
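+
+A minimal userspace sketch of the double-increment pattern (the macro
+below is only an illustrative stand-in for rdma_for_each_port(), which
+likewise advances the iterator itself):
+
+  #include <stdio.h>
+
+  #define for_each_port(port, nports) \
+      for ((port) = 1; (port) <= (nports); (port)++)
+
+  int main(void)
+  {
+      unsigned int i;
+
+      /* Buggy shape: the body increments i as well, so only ports
+       * 1, 3 and 5 are visited. */
+      for_each_port(i, 6)
+          printf("buggy visit: port %u\n", i++);
+
+      /* Fixed shape: the macro performs the only increment. */
+      for_each_port(i, 6)
+          printf("fixed visit: port %u\n", i);
+
+      return 0;
+  }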
+
+Cc: <stable@vger.kernel.org>
+Fixes: ea1075edcbab ("RDMA: Add and use rdma_for_each_port")
+Link: https://lore.kernel.org/r/20191002122127.17571-1-leon@kernel.org
+Signed-off-by: Mohamad Heib <mohamadh@mellanox.com>
+Reviewed-by: Erez Alfasi <ereza@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/core/security.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/core/security.c
++++ b/drivers/infiniband/core/security.c
+@@ -426,7 +426,7 @@ int ib_create_qp_security(struct ib_qp *
+ int ret;
+
+ rdma_for_each_port (dev, i) {
+- is_ib = rdma_protocol_ib(dev, i++);
++ is_ib = rdma_protocol_ib(dev, i);
+ if (is_ib)
+ break;
+ }
--- /dev/null
+From 62df81b74393079debf04961c48cb22268fc5fab Mon Sep 17 00:00:00 2001
+From: Stefan Popa <stefan.popa@analog.com>
+Date: Tue, 10 Sep 2019 17:44:21 +0300
+Subject: iio: accel: adxl372: Fix push to buffers lost samples
+
+From: Stefan Popa <stefan.popa@analog.com>
+
+commit 62df81b74393079debf04961c48cb22268fc5fab upstream.
+
+One in two sample sets was lost because the loop stride multiplied
+fifo_set_size by sizeof(u16) while the buffer is indexed in u16
+elements. Also, since the loop bound was scaled the same way, twice the
+number of available samples was pushed to the IIO buffers.
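+
+As an illustration only (a hypothetical userspace sketch, not driver
+code; the names merely mirror the driver's variables), the element/byte
+mix-up looks like this:
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+      /* 4 sample sets of 3-axis data, one u16 sample per axis */
+      unsigned int fifo_entries = 12, fifo_set_size = 3, i;
+
+      /* Buggy shape: the index is in u16 elements, but the bound and
+       * stride are scaled by sizeof(u16), so every other set is skipped
+       * and the loop walks past the valid data. */
+      for (i = 0; i < fifo_entries * sizeof(uint16_t);
+           i += fifo_set_size * sizeof(uint16_t))
+          printf("buggy push at element %u\n", i);
+
+      /* Fixed shape: keep everything in elements. */
+      for (i = 0; i < fifo_entries; i += fifo_set_size)
+          printf("fixed push at element %u\n", i);
+
+      return 0;
+  }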
+
+Signed-off-by: Stefan Popa <stefan.popa@analog.com>
+Fixes: f4f55ce38e5f ("iio:adxl372: Add FIFO and interrupts support")
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/accel/adxl372.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/iio/accel/adxl372.c
++++ b/drivers/iio/accel/adxl372.c
+@@ -553,8 +553,7 @@ static irqreturn_t adxl372_trigger_handl
+ goto err;
+
+ /* Each sample is 2 bytes */
+- for (i = 0; i < fifo_entries * sizeof(u16);
+- i += st->fifo_set_size * sizeof(u16))
++ for (i = 0; i < fifo_entries; i += st->fifo_set_size)
+ iio_push_to_buffers(indio_dev, &st->fifo_buf[i]);
+ }
+ err:
--- /dev/null
+From d202ce4787e446556c6b9d01f84734c3f8174ba3 Mon Sep 17 00:00:00 2001
+From: Stefan Popa <stefan.popa@analog.com>
+Date: Tue, 10 Sep 2019 17:43:32 +0300
+Subject: iio: accel: adxl372: Fix/remove limitation for FIFO samples
+
+From: Stefan Popa <stefan.popa@analog.com>
+
+commit d202ce4787e446556c6b9d01f84734c3f8174ba3 upstream.
+
+Currently, the driver sets the FIFO_SAMPLES register with the number of
+sample sets (maximum of 170 for 3-axis data, 256 for 2-axis and 512 for
+single-axis). However, the FIFO_SAMPLES register should store the
+number of samples, regardless of how the FIFO format is configured.
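+
+A minimal sketch of the intended arithmetic (illustrative only, not
+driver code): the watermark counts sample sets, while FIFO_SAMPLES must
+be programmed with individual samples:
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+      unsigned int watermark = 100;    /* sample sets requested */
+      unsigned int fifo_set_size = 3;  /* samples per set, 3-axis data */
+      unsigned int fifo_samples = watermark * fifo_set_size;
+
+      /* FIFO_SAMPLES must hold 300 here, not 100 */
+      printf("FIFO_SAMPLES = %u\n", fifo_samples);
+      return 0;
+  }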
+
+Signed-off-by: Stefan Popa <stefan.popa@analog.com>
+Fixes: f4f55ce38e5f ("iio:adxl372: Add FIFO and interrupts support")
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/accel/adxl372.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/drivers/iio/accel/adxl372.c
++++ b/drivers/iio/accel/adxl372.c
+@@ -474,12 +474,17 @@ static int adxl372_configure_fifo(struct
+ if (ret < 0)
+ return ret;
+
+- fifo_samples = st->watermark & 0xFF;
++ /*
++ * watermark stores the number of sets; we need to write the FIFO
++ * registers with the number of samples
++ */
++ fifo_samples = (st->watermark * st->fifo_set_size);
+ fifo_ctl = ADXL372_FIFO_CTL_FORMAT_MODE(st->fifo_format) |
+ ADXL372_FIFO_CTL_MODE_MODE(st->fifo_mode) |
+- ADXL372_FIFO_CTL_SAMPLES_MODE(st->watermark);
++ ADXL372_FIFO_CTL_SAMPLES_MODE(fifo_samples);
+
+- ret = regmap_write(st->regmap, ADXL372_FIFO_SAMPLES, fifo_samples);
++ ret = regmap_write(st->regmap,
++ ADXL372_FIFO_SAMPLES, fifo_samples & 0xFF);
+ if (ret < 0)
+ return ret;
+
--- /dev/null
+From d9a997bd4d762d5bd8cc548d762902f58b5e0a74 Mon Sep 17 00:00:00 2001
+From: Stefan Popa <stefan.popa@analog.com>
+Date: Tue, 10 Sep 2019 17:44:46 +0300
+Subject: iio: accel: adxl372: Perform a reset at start up
+
+From: Stefan Popa <stefan.popa@analog.com>
+
+commit d9a997bd4d762d5bd8cc548d762902f58b5e0a74 upstream.
+
+We need to perform a reset at start up to make sure that the chip is in
+a consistent state. This reset also disables all the interrupts, which
+should only be enabled together with the IIO buffer. Not doing this
+sometimes caused unwanted interrupts to trigger.
+
+Signed-off-by: Stefan Popa <stefan.popa@analog.com>
+Fixes: f4f55ce38e5f ("iio:adxl372: Add FIFO and interrupts support")
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/accel/adxl372.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/iio/accel/adxl372.c
++++ b/drivers/iio/accel/adxl372.c
+@@ -575,6 +575,14 @@ static int adxl372_setup(struct adxl372_
+ return -ENODEV;
+ }
+
++ /*
++ * Perform a software reset to make sure the device is in a consistent
++ * state after start up.
++ */
++ ret = regmap_write(st->regmap, ADXL372_RESET, ADXL372_RESET_CODE);
++ if (ret < 0)
++ return ret;
++
+ ret = adxl372_set_op_mode(st, ADXL372_STANDBY);
+ if (ret < 0)
+ return ret;
--- /dev/null
+From 7fd1c2606508eb384992251e87d50591393a48d0 Mon Sep 17 00:00:00 2001
+From: Marco Felsch <m.felsch@pengutronix.de>
+Date: Tue, 17 Sep 2019 16:56:37 +0200
+Subject: iio: light: add missing vcnl4040 of_compatible
+
+From: Marco Felsch <m.felsch@pengutronix.de>
+
+commit 7fd1c2606508eb384992251e87d50591393a48d0 upstream.
+
+Commit 5a441aade5b3 ("iio: light: vcnl4000 add support for the VCNL4040
+proximity and light sensor") added the support for the vcnl4040 but
+forgot to add the of_compatible. Fix this by adding it now.
+
+Signed-off-by: Marco Felsch <m.felsch@pengutronix.de>
+Fixes: 5a441aade5b3 ("iio: light: vcnl4000 add support for the VCNL4040 proximity and light sensor")
+Reviewed-by: Angus Ainslie (Purism) angus@akkea.ca
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/light/vcnl4000.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/iio/light/vcnl4000.c
++++ b/drivers/iio/light/vcnl4000.c
+@@ -409,6 +409,10 @@ static const struct of_device_id vcnl_40
+ .data = "VCNL4020",
+ },
+ {
++ .compatible = "vishay,vcnl4040",
++ .data = (void *)VCNL4040,
++ },
++ {
+ .compatible = "vishay,vcnl4200",
+ .data = "VCNL4200",
+ },
--- /dev/null
+From 82f3015635249a8c8c45bac303fd84905066f04f Mon Sep 17 00:00:00 2001
+From: David Frey <dpfrey@gmail.com>
+Date: Thu, 19 Sep 2019 15:54:18 -0700
+Subject: iio: light: opt3001: fix mutex unlock race
+
+From: David Frey <dpfrey@gmail.com>
+
+commit 82f3015635249a8c8c45bac303fd84905066f04f upstream.
+
+When an end-of-conversion interrupt is received after performing a
+single-shot reading of the light sensor, the driver was waking up the
+result ready queue before checking opt->ok_to_ignore_lock to determine
+if it should unlock the mutex. The problem occurred in the case where
+the other thread woke up and changed the value of opt->ok_to_ignore_lock
+to false prior to the interrupt thread performing its read of the
+variable. In this case, the mutex would be unlocked twice.
+
+Signed-off-by: David Frey <dpfrey@gmail.com>
+Reviewed-by: Andreas Dannenberg <dannenberg@ti.com>
+Fixes: 94a9b7b1809f ("iio: light: add support for TI's opt3001 light sensor")
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/light/opt3001.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/iio/light/opt3001.c
++++ b/drivers/iio/light/opt3001.c
+@@ -686,6 +686,7 @@ static irqreturn_t opt3001_irq(int irq,
+ struct iio_dev *iio = _iio;
+ struct opt3001 *opt = iio_priv(iio);
+ int ret;
++ bool wake_result_ready_queue = false;
+
+ if (!opt->ok_to_ignore_lock)
+ mutex_lock(&opt->lock);
+@@ -720,13 +721,16 @@ static irqreturn_t opt3001_irq(int irq,
+ }
+ opt->result = ret;
+ opt->result_ready = true;
+- wake_up(&opt->result_ready_queue);
++ wake_result_ready_queue = true;
+ }
+
+ out:
+ if (!opt->ok_to_ignore_lock)
+ mutex_unlock(&opt->lock);
+
++ if (wake_result_ready_queue)
++ wake_up(&opt->result_ready_queue);
++
+ return IRQ_HANDLED;
+ }
+
--- /dev/null
+From 8a99734081775c012a4a6c442fdef0379fe52bdf Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Wed, 9 Oct 2019 14:40:13 -0600
+Subject: io_uring: only flush workqueues on fileset removal
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 8a99734081775c012a4a6c442fdef0379fe52bdf upstream.
+
+We should not remove the workqueues here; we just need to ensure that
+they are synced. The workqueues are torn down on ctx removal.
+
+Cc: stable@vger.kernel.org
+Fixes: 6b06314c47e1 ("io_uring: add file set registration")
+Reported-by: Stefan Hajnoczi <stefanha@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/io_uring.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -2565,8 +2565,12 @@ static void io_finish_async(struct io_ri
+ static void io_destruct_skb(struct sk_buff *skb)
+ {
+ struct io_ring_ctx *ctx = skb->sk->sk_user_data;
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++)
++ if (ctx->sqo_wq[i])
++ flush_workqueue(ctx->sqo_wq[i]);
+
+- io_finish_async(ctx);
+ unix_destruct_scm(skb);
+ }
+
--- /dev/null
+From b0f53dbc4bc4c371f38b14c391095a3bb8a0bb40 Mon Sep 17 00:00:00 2001
+From: Michal Hocko <mhocko@suse.com>
+Date: Sun, 6 Oct 2019 17:58:19 -0700
+Subject: kernel/sysctl.c: do not override max_threads provided by userspace
+
+From: Michal Hocko <mhocko@suse.com>
+
+commit b0f53dbc4bc4c371f38b14c391095a3bb8a0bb40 upstream.
+
+Partially revert 16db3d3f1170 ("kernel/sysctl.c: threads-max observe
+limits") because the patch causes a regression for any workload that
+needs to override the auto-tuning of the limit provided by the kernel.
+
+set_max_threads() implements a boot-time guesstimate to provide a
+sensible limit on the number of concurrently running threads so that
+runaways will not deplete all the memory. This is a good thing in
+general, but there are workloads which might need to increase this
+limit for an application to run (reportedly WebSphere MQ is affected),
+and that is simply not possible after the mentioned change. It is also
+very dubious to override an admin decision by an estimation that
+doesn't have any direct relation to the correctness of the kernel
+operation.
+
+Fix this by dropping set_max_threads() from sysctl_max_threads() so any
+value is accepted as long as it fits into MAX_THREADS, which is
+important to check because allowing more threads could break the
+internal robust futex restriction. While at it, do not use MIN_THREADS
+as the lower boundary, because it is also only a heuristic for the
+automatic estimation and an admin might have a good reason to stop new
+threads from being created even below this limit.
+
+This became more severe when we switched x86 from 8k to 16k kernel
+stacks: since 6538b8ea886e ("x86_64: expand kernel stack to 16K")
+(3.16) we use THREAD_SIZE_ORDER = 2, and that halved the auto-tuned
+value.
+
+In the particular case
+
+ 3.12
+ kernel.threads-max = 515561
+
+ 4.4
+ kernel.threads-max = 200000
+
+Neither of the two values is really insane on a 32GB machine.
+
+I am not sure we want/need to tune the max_threads value further. If
+anything, the tuning should be removed altogether if it proves not
+useful in general. But we definitely need a way to override this
+auto-tuning.
+
+Link: http://lkml.kernel.org/r/20190922065801.GB18814@dhcp22.suse.cz
+Fixes: 16db3d3f1170 ("kernel/sysctl.c: threads-max observe limits")
+Signed-off-by: Michal Hocko <mhocko@suse.com>
+Reviewed-by: "Eric W. Biederman" <ebiederm@xmission.com>
+Cc: Heinrich Schuchardt <xypron.glpk@gmx.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/fork.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -2939,7 +2939,7 @@ int sysctl_max_threads(struct ctl_table
+ struct ctl_table t;
+ int ret;
+ int threads = max_threads;
+- int min = MIN_THREADS;
++ int min = 1;
+ int max = MAX_THREADS;
+
+ t = *table;
+@@ -2951,7 +2951,7 @@ int sysctl_max_threads(struct ctl_table
+ if (ret || !write)
+ return ret;
+
+- set_max_threads(threads);
++ max_threads = threads;
+
+ return 0;
+ }
--- /dev/null
+From 234fdce892f905cbc2674349a9eb4873e288e5b3 Mon Sep 17 00:00:00 2001
+From: Qian Cai <cai@lca.pw>
+Date: Sun, 6 Oct 2019 17:58:25 -0700
+Subject: mm/page_alloc.c: fix a crash in free_pages_prepare()
+
+From: Qian Cai <cai@lca.pw>
+
+commit 234fdce892f905cbc2674349a9eb4873e288e5b3 upstream.
+
+On architectures like s390, arch_free_page() could mark the page unused
+(set_page_unused()) and any later access would trigger a kernel panic.
+Fix it by moving arch_free_page() after all calls that may still access
+the page contents.
+
+ Hardware name: IBM 2964 N96 400 (z/VM 6.4.0)
+ Krnl PSW : 0404e00180000000 0000000026c2b96e (__free_pages_ok+0x34e/0x5d8)
+ R:0 T:1 IO:0 EX:0 Key:0 M:1 W:0 P:0 AS:3 CC:2 PM:0 RI:0 EA:3
+ Krnl GPRS: 0000000088d43af7 0000000000484000 000000000000007c 000000000000000f
+ 000003d080012100 000003d080013fc0 0000000000000000 0000000000100000
+ 00000000275cca48 0000000000000100 0000000000000008 000003d080010000
+ 00000000000001d0 000003d000000000 0000000026c2b78a 000000002717fdb0
+ Krnl Code: 0000000026c2b95c: ec1100b30659 risbgn %r1,%r1,0,179,6
+ 0000000026c2b962: e32014000036 pfd 2,1024(%r1)
+ #0000000026c2b968: d7ff10001000 xc 0(256,%r1),0(%r1)
+ >0000000026c2b96e: 41101100 la %r1,256(%r1)
+ 0000000026c2b972: a737fff8 brctg %r3,26c2b962
+ 0000000026c2b976: d7ff10001000 xc 0(256,%r1),0(%r1)
+ 0000000026c2b97c: e31003400004 lg %r1,832
+ 0000000026c2b982: ebff1430016a asi 5168(%r1),-1
+ Call Trace:
+ __free_pages_ok+0x16a/0x5d8)
+ memblock_free_all+0x206/0x290
+ mem_init+0x58/0x120
+ start_kernel+0x2b0/0x570
+ startup_continue+0x6a/0xc0
+ INFO: lockdep is turned off.
+ Last Breaking-Event-Address:
+ __free_pages_ok+0x372/0x5d8
+ Kernel panic - not syncing: Fatal exception: panic_on_oops
+ 00: HCPGIR450W CP entered; disabled wait PSW 00020001 80000000 00000000 26A2379C
+
+In the past, only kernel_poison_pages() would trigger this but it needs
+"page_poison=on" kernel cmdline, and I suspect nobody tested that on
+s390. Recently, kernel_init_free_pages() (commit 6471384af2a6 ("mm:
+security: introduce init_on_alloc=1 and init_on_free=1 boot options"))
+was added and could trigger this as well.
+
+[akpm@linux-foundation.org: add comment]
+Link: http://lkml.kernel.org/r/1569613623-16820-1-git-send-email-cai@lca.pw
+Fixes: 8823b1dbc05f ("mm/page_poison.c: enable PAGE_POISONING as a separate option")
+Fixes: 6471384af2a6 ("mm: security: introduce init_on_alloc=1 and init_on_free=1 boot options")
+Signed-off-by: Qian Cai <cai@lca.pw>
+Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
+Cc: Vasily Gorbik <gor@linux.ibm.com>
+Cc: Alexander Duyck <alexander.duyck@gmail.com>
+Cc: <stable@vger.kernel.org> [5.3+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1174,11 +1174,17 @@ static __always_inline bool free_pages_p
+ debug_check_no_obj_freed(page_address(page),
+ PAGE_SIZE << order);
+ }
+- arch_free_page(page, order);
+ if (want_init_on_free())
+ kernel_init_free_pages(page, 1 << order);
+
+ kernel_poison_pages(page, 1 << order, 0);
++ /*
++ * arch_free_page() can make the page's contents inaccessible. s390
++ * does this. So nothing which can access the page's contents should
++ * happen after this.
++ */
++ arch_free_page(page, order);
++
+ if (debug_pagealloc_enabled())
+ kernel_map_pages(page, 1 << order, 0);
+
--- /dev/null
+From 518a86713078168acd67cf50bc0b45d54b4cce6c Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Sun, 6 Oct 2019 17:58:28 -0700
+Subject: mm/vmpressure.c: fix a signedness bug in vmpressure_register_event()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 518a86713078168acd67cf50bc0b45d54b4cce6c upstream.
+
+The "mode" and "level" variables are enums and in this context GCC will
+treat them as unsigned ints so the error handling is never triggered.
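+
+A minimal userspace sketch of the signedness issue (illustrative names
+only; it relies on GCC's usual choice of an unsigned underlying type
+for enums with no negative enumerators, as noted above):
+
+  #include <stdio.h>
+
+  enum level { LOW, MEDIUM, CRITICAL };
+
+  static int match_string_stub(const char *s)
+  {
+      (void)s;
+      return -22;          /* mimic match_string() failing */
+  }
+
+  int main(void)
+  {
+      /* Buggy shape: the negative error wraps to a huge unsigned
+       * value, so this check never fires. */
+      enum level level = match_string_stub("bogus");
+      if (level < 0)
+          printf("error caught\n");
+      else
+          printf("error missed, level=%u\n", (unsigned int)level);
+
+      /* Fixed shape: keep the return value in an int first. */
+      int ret = match_string_stub("bogus");
+      if (ret < 0)
+          printf("error caught: %d\n", ret);
+      return 0;
+  }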
+
+I also removed the bogus initializer because it isn't required any more
+and it's sort of confusing.
+
+[akpm@linux-foundation.org: reduce implicit and explicit typecasting]
+[akpm@linux-foundation.org: fix return value, add comment, per Matthew]
+Link: http://lkml.kernel.org/r/20190925110449.GO3264@mwanda
+Fixes: 3cadfa2b9497 ("mm/vmpressure.c: convert to use match_string() helper")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Acked-by: David Rientjes <rientjes@google.com>
+Reviewed-by: Matthew Wilcox <willy@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Enrico Weigelt <info@metux.net>
+Cc: Kate Stewart <kstewart@linuxfoundation.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/vmpressure.c | 20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+--- a/mm/vmpressure.c
++++ b/mm/vmpressure.c
+@@ -355,6 +355,9 @@ void vmpressure_prio(gfp_t gfp, struct m
+ * "hierarchy" or "local").
+ *
+ * To be used as memcg event method.
++ *
++ * Return: 0 on success, -ENOMEM on memory failure or -EINVAL if @args could
++ * not be parsed.
+ */
+ int vmpressure_register_event(struct mem_cgroup *memcg,
+ struct eventfd_ctx *eventfd, const char *args)
+@@ -362,7 +365,7 @@ int vmpressure_register_event(struct mem
+ struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
+ struct vmpressure_event *ev;
+ enum vmpressure_modes mode = VMPRESSURE_NO_PASSTHROUGH;
+- enum vmpressure_levels level = -1;
++ enum vmpressure_levels level;
+ char *spec, *spec_orig;
+ char *token;
+ int ret = 0;
+@@ -375,20 +378,18 @@ int vmpressure_register_event(struct mem
+
+ /* Find required level */
+ token = strsep(&spec, ",");
+- level = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
+- if (level < 0) {
+- ret = level;
++ ret = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
++ if (ret < 0)
+ goto out;
+- }
++ level = ret;
+
+ /* Find optional mode */
+ token = strsep(&spec, ",");
+ if (token) {
+- mode = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
+- if (mode < 0) {
+- ret = mode;
++ ret = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
++ if (ret < 0)
+ goto out;
+- }
++ mode = ret;
+ }
+
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+@@ -404,6 +405,7 @@ int vmpressure_register_event(struct mem
+ mutex_lock(&vmpr->events_lock);
+ list_add(&ev->node, &vmpr->events);
+ mutex_unlock(&vmpr->events_lock);
++ ret = 0;
+ out:
+ kfree(spec_orig);
+ return ret;
--- /dev/null
+From 5b6807de11445c05b537df8324f5d7ab1c2782f9 Mon Sep 17 00:00:00 2001
+From: Vitaly Wool <vitalywool@gmail.com>
+Date: Sun, 6 Oct 2019 17:58:22 -0700
+Subject: mm/z3fold.c: claim page in the beginning of free
+
+From: Vitaly Wool <vitalywool@gmail.com>
+
+commit 5b6807de11445c05b537df8324f5d7ab1c2782f9 upstream.
+
+There's a really hard-to-reproduce race in z3fold between z3fold_free()
+and z3fold_reclaim_page(). z3fold_reclaim_page() can claim the page
+after z3fold_free() has checked whether the page was claimed, and
+z3fold_free() will then schedule this page for compaction, which may in
+turn lead to random page faults (since that page would have been
+reclaimed by then).
+
+Fix that by claiming the page at the beginning of z3fold_free() and not
+forgetting to clear the claim at the end.
+
+[vitalywool@gmail.com: v2]
+ Link: http://lkml.kernel.org/r/20190928113456.152742cf@bigdell
+Link: http://lkml.kernel.org/r/20190926104844.4f0c6efa1366b8f5741eaba9@gmail.com
+Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
+Reported-by: Markus Linnala <markus.linnala@gmail.com>
+Cc: Dan Streetman <ddstreet@ieee.org>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Henry Burns <henrywolfeburns@gmail.com>
+Cc: Shakeel Butt <shakeelb@google.com>
+Cc: Markus Linnala <markus.linnala@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/z3fold.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/mm/z3fold.c
++++ b/mm/z3fold.c
+@@ -998,9 +998,11 @@ static void z3fold_free(struct z3fold_po
+ struct z3fold_header *zhdr;
+ struct page *page;
+ enum buddy bud;
++ bool page_claimed;
+
+ zhdr = handle_to_z3fold_header(handle);
+ page = virt_to_page(zhdr);
++ page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);
+
+ if (test_bit(PAGE_HEADLESS, &page->private)) {
+ /* if a headless page is under reclaim, just leave.
+@@ -1008,7 +1010,7 @@ static void z3fold_free(struct z3fold_po
+ * has not been set before, we release this page
+ * immediately so we don't care about its value any more.
+ */
+- if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
++ if (!page_claimed) {
+ spin_lock(&pool->lock);
+ list_del(&page->lru);
+ spin_unlock(&pool->lock);
+@@ -1044,13 +1046,15 @@ static void z3fold_free(struct z3fold_po
+ atomic64_dec(&pool->pages_nr);
+ return;
+ }
+- if (test_bit(PAGE_CLAIMED, &page->private)) {
++ if (page_claimed) {
++ /* the page has not been claimed by us */
+ z3fold_page_unlock(zhdr);
+ return;
+ }
+ if (unlikely(PageIsolated(page)) ||
+ test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
+ z3fold_page_unlock(zhdr);
++ clear_bit(PAGE_CLAIMED, &page->private);
+ return;
+ }
+ if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
+@@ -1060,10 +1064,12 @@ static void z3fold_free(struct z3fold_po
+ zhdr->cpu = -1;
+ kref_get(&zhdr->refcount);
+ do_compact_page(zhdr, true);
++ clear_bit(PAGE_CLAIMED, &page->private);
+ return;
+ }
+ kref_get(&zhdr->refcount);
+ queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
++ clear_bit(PAGE_CLAIMED, &page->private);
+ z3fold_page_unlock(zhdr);
+ }
+
--- /dev/null
+From b59711e9b0d22fd47abfa00602fd8c365cdd3ab7 Mon Sep 17 00:00:00 2001
+From: Steve MacLean <Steve.MacLean@microsoft.com>
+Date: Sat, 28 Sep 2019 01:41:18 +0000
+Subject: perf inject jit: Fix JIT_CODE_MOVE filename
+
+From: Steve MacLean <Steve.MacLean@microsoft.com>
+
+commit b59711e9b0d22fd47abfa00602fd8c365cdd3ab7 upstream.
+
+During 'perf inject --jit', JIT_CODE_MOVE records were injecting MMAP
+records with an incorrect filename; specifically, it was missing the
+".so" suffix.
+
+Further, the JIT_CODE_LOAD records were silently truncating the
+jr->load.code_index field to 32 bits before generating the filename.
+
+Make both records emit the same filename based on the full 64-bit
+code_index field.
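+
+A small userspace sketch of the truncation (hypothetical values, not
+the jitdump code itself):
+
+  #include <inttypes.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+      uint64_t code_index = 0x100000002ULL; /* needs more than 32 bits */
+      char name[64];
+
+      /* Old behaviour: a 32-bit intermediate silently truncates. */
+      uint32_t count = code_index;
+      snprintf(name, sizeof(name), "jitted-%d-%u.so", 1234, count);
+      printf("truncated: %s\n", name);   /* jitted-1234-2.so */
+
+      /* Fixed behaviour: keep 64 bits and format with PRIu64. */
+      snprintf(name, sizeof(name), "jitted-%d-%" PRIu64 ".so",
+               1234, code_index);
+      printf("full:      %s\n", name);   /* jitted-1234-4294967298.so */
+      return 0;
+  }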
+
+Fixes: 9b07e27f88b9 ("perf inject: Add jitdump mmap injection support")
+Cc: stable@vger.kernel.org # v4.6+
+Signed-off-by: Steve MacLean <Steve.MacLean@Microsoft.com>
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Brian Robbins <brianrob@microsoft.com>
+Cc: Davidlohr Bueso <dave@stgolabs.net>
+Cc: Eric Saint-Etienne <eric.saint.etienne@oracle.com>
+Cc: John Keeping <john@metanate.com>
+Cc: John Salem <josalem@microsoft.com>
+Cc: Leo Yan <leo.yan@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Song Liu <songliubraving@fb.com>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Tom McDonald <thomas.mcdonald@microsoft.com>
+Link: http://lore.kernel.org/lkml/BN8PR21MB1362FF8F127B31DBF4121528F7800@BN8PR21MB1362.namprd21.prod.outlook.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/util/jitdump.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/tools/perf/util/jitdump.c
++++ b/tools/perf/util/jitdump.c
+@@ -396,7 +396,7 @@ static int jit_repipe_code_load(struct j
+ size_t size;
+ u16 idr_size;
+ const char *sym;
+- uint32_t count;
++ uint64_t count;
+ int ret, csize, usize;
+ pid_t pid, tid;
+ struct {
+@@ -419,7 +419,7 @@ static int jit_repipe_code_load(struct j
+ return -1;
+
+ filename = event->mmap2.filename;
+- size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%u.so",
++ size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
+ jd->dir,
+ pid,
+ count);
+@@ -530,7 +530,7 @@ static int jit_repipe_code_move(struct j
+ return -1;
+
+ filename = event->mmap2.filename;
+- size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%"PRIu64,
++ size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
+ jd->dir,
+ pid,
+ jr->move.code_index);
--- /dev/null
+From 7d4c85b7035eb2f9ab217ce649dcd1bfaf0cacd3 Mon Sep 17 00:00:00 2001
+From: Ian Rogers <irogers@google.com>
+Date: Thu, 26 Sep 2019 15:00:18 -0700
+Subject: perf llvm: Don't access out-of-scope array
+
+From: Ian Rogers <irogers@google.com>
+
+commit 7d4c85b7035eb2f9ab217ce649dcd1bfaf0cacd3 upstream.
+
+The 'test_dir' variable is assigned the address of the 'release' array,
+which goes out of scope 3 lines later.
+
+Extend the scope of the 'release' array so that an out-of-scope array
+isn't accessed.
+
+Bug detected by clang's address sanitizer.
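+
+A minimal sketch of the dangling-pointer shape and of the fix (the
+names are illustrative, not the actual perf code):
+
+  #include <stdio.h>
+  #include <string.h>
+
+  int main(void)
+  {
+      const char *test_dir = NULL;
+
+      /* Buggy shape: 'release' only lives inside this block, but the
+       * pointer escapes it, so any later use is undefined behaviour. */
+      {
+          char release[128];
+          strcpy(release, "5.3.0");
+          test_dir = release;
+      }
+
+      /* Fixed shape: declare the buffer in the enclosing scope so it
+       * stays alive for as long as the pointer is used. */
+      char release[128];
+      strcpy(release, "5.3.0");
+      test_dir = release;
+      printf("%s\n", test_dir);
+      return 0;
+  }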
+
+Fixes: 07bc5c699a3d ("perf tools: Make fetch_kernel_version() publicly available")
+Cc: stable@vger.kernel.org # v4.4+
+Signed-off-by: Ian Rogers <irogers@google.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Wang Nan <wangnan0@huawei.com>
+Link: http://lore.kernel.org/lkml/20190926220018.25402-1-irogers@google.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/util/llvm-utils.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/tools/perf/util/llvm-utils.c
++++ b/tools/perf/util/llvm-utils.c
+@@ -231,14 +231,14 @@ static int detect_kbuild_dir(char **kbui
+ const char *prefix_dir = "";
+ const char *suffix_dir = "";
+
++ /* _UTSNAME_LENGTH is 65 */
++ char release[128];
++
+ char *autoconf_path;
+
+ int err;
+
+ if (!test_dir) {
+- /* _UTSNAME_LENGTH is 65 */
+- char release[128];
+-
+ err = fetch_kernel_version(NULL, release,
+ sizeof(release));
+ if (err)
--- /dev/null
+From 2a5243937c700ffe6a28e6557a4562a9ab0a17a4 Mon Sep 17 00:00:00 2001
+From: Ondrej Mosnacek <omosnace@redhat.com>
+Date: Thu, 3 Oct 2019 15:59:22 +0200
+Subject: selinux: fix context string corruption in convert_context()
+
+From: Ondrej Mosnacek <omosnace@redhat.com>
+
+commit 2a5243937c700ffe6a28e6557a4562a9ab0a17a4 upstream.
+
+string_to_context_struct() may garble the context string, so we need to
+copy back the contents again from the old context struct to avoid
+storing the corrupted context.
+
+Since string_to_context_struct() tokenizes (and therefore truncates) the
+context string and we are later potentially copying it with kstrdup(),
+this may eventually cause pieces of uninitialized kernel memory to be
+disclosed to userspace (when copying to userspace based on the stored
+length and not the null character).
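+
+A minimal userspace sketch of the truncation problem (illustrative
+only; strsep() stands in for the tokenizing done by
+string_to_context_struct()):
+
+  #include <stdio.h>
+  #include <stdlib.h>
+  #include <string.h>
+
+  int main(void)
+  {
+      const char ctx[] = "system_u:system_r:init_t:s0";
+      size_t len = sizeof(ctx) - 1;
+      char *s = malloc(len + 1);
+      char *p, *tok;
+
+      memcpy(s, ctx, len + 1);
+
+      /* Tokenizing in place replaces each ':' with '\0'. */
+      p = s;
+      while ((tok = strsep(&p, ":")) != NULL)
+          ;
+
+      /* The stored length still says 27, but the string now ends at
+       * the first NUL; a length-based copy would expose whatever
+       * bytes follow it. */
+      printf("garbled: \"%s\" (stored len %zu)\n", s, len);
+
+      /* The fix: copy the original bytes back before keeping the
+       * (string, length) pair around. */
+      memcpy(s, ctx, len);
+      printf("restored: \"%.*s\"\n", (int)len, s);
+      free(s);
+      return 0;
+  }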
+
+How to reproduce on Fedora and similar:
+ # dnf install -y memcached
+ # systemctl start memcached
+ # semodule -d memcached
+ # load_policy
+ # load_policy
+ # systemctl stop memcached
+ # ausearch -m AVC
+ type=AVC msg=audit(1570090572.648:313): avc: denied { signal } for pid=1 comm="systemd" scontext=system_u:system_r:init_t:s0 tcontext=system_u:object_r:unlabeled_t:s0 tclass=process permissive=0 trawcon=73797374656D5F75007400000000000070BE6E847296FFFF726F6D000096FFFF76
+
+Cc: stable@vger.kernel.org
+Reported-by: Milos Malik <mmalik@redhat.com>
+Fixes: ee1a84fdfeed ("selinux: overhaul sidtab to fix bug and improve performance")
+Signed-off-by: Ondrej Mosnacek <omosnace@redhat.com>
+Acked-by: Stephen Smalley <sds@tycho.nsa.gov>
+Signed-off-by: Paul Moore <paul@paul-moore.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ security/selinux/ss/services.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -1946,7 +1946,14 @@ static int convert_context(struct contex
+ rc = string_to_context_struct(args->newp, NULL, s,
+ newc, SECSID_NULL);
+ if (rc == -EINVAL) {
+- /* Retain string representation for later mapping. */
++ /*
++ * Retain string representation for later mapping.
++ *
++ * IMPORTANT: We need to copy the contents of oldc->str
++ * back into s again because string_to_context_struct()
++ * may have garbled it.
++ */
++ memcpy(s, oldc->str, oldc->len);
+ context_init(newc);
+ newc->str = s;
+ newc->len = oldc->len;
iio-adc-axp288-override-ts-pin-bias-current-for-some-models.patch
iio-adc-stm32-adc-move-registers-definitions.patch
iio-adc-stm32-adc-fix-a-race-when-using-several-adcs-with-dma-and-irq.patch
+iio-light-opt3001-fix-mutex-unlock-race.patch
+iio-light-add-missing-vcnl4040-of_compatible.patch
+iio-accel-adxl372-fix-remove-limitation-for-fifo-samples.patch
+iio-accel-adxl372-fix-push-to-buffers-lost-samples.patch
+iio-accel-adxl372-perform-a-reset-at-start-up.patch
+efivar-ssdt-don-t-iterate-over-efi-vars-if-no-ssdt-override-was-specified.patch
+efi-tpm-don-t-access-event-count-when-it-isn-t-mapped.patch
+efi-tpm-don-t-traverse-an-event-log-with-no-events.patch
+efi-tpm-only-set-efi_tpm_final_log_size-after-successful-event-log-parsing.patch
+perf-llvm-don-t-access-out-of-scope-array.patch
+perf-inject-jit-fix-jit_code_move-filename.patch
+drm-i915-perform-ggtt-restore-much-earlier-during-resume.patch
+blk-wbt-fix-performance-regression-in-wbt-scale_up-scale_down.patch
+selinux-fix-context-string-corruption-in-convert_context.patch
+io_uring-only-flush-workqueues-on-fileset-removal.patch
+cifs-gracefully-handle-queryinfo-errors-during-open.patch
+cifs-force-revalidate-inode-when-dentry-is-stale.patch
+cifs-force-reval-dentry-if-lookup_reval-flag-is-set.patch
+cifs-use-cifsinodeinfo-open_file_lock-while-iterating-to-avoid-a-panic.patch
+kernel-sysctl.c-do-not-override-max_threads-provided-by-userspace.patch
+mm-z3fold.c-claim-page-in-the-beginning-of-free.patch
+mm-page_alloc.c-fix-a-crash-in-free_pages_prepare.patch
+mm-vmpressure.c-fix-a-signedness-bug-in-vmpressure_register_event.patch
+ib-core-fix-wrong-iterating-on-ports.patch