--- /dev/null
+From 91c380624d5828f96fb5a390dd20475a47005857 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Aug 2019 18:03:25 +0800
+Subject: 9p: avoid attaching writeback_fid on mmap with type PRIVATE
+
+From: Chengguang Xu <cgxu519@zoho.com.cn>
+
+[ Upstream commit c87a37ebd40b889178664c2c09cc187334146292 ]
+
+Currently with the mmap cache policy, we always attach a writeback_fid
+whether the mmap type is SHARED or PRIVATE. However, in the use case
+of kata-containers, which combine 9p (guest OS) with overlayfs (host OS),
+this behavior triggers overlayfs' copy-up when executing a command
+inside the container.
+
+Link: http://lkml.kernel.org/r/20190820100325.10313-1-cgxu519@zoho.com.cn
+Signed-off-by: Chengguang Xu <cgxu519@zoho.com.cn>
+Signed-off-by: Dominique Martinet <dominique.martinet@cea.fr>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/9p/vfs_file.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
+index 79ff727254bb6..e963b83afc717 100644
+--- a/fs/9p/vfs_file.c
++++ b/fs/9p/vfs_file.c
+@@ -528,6 +528,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
+ v9inode = V9FS_I(inode);
+ mutex_lock(&v9inode->v_mutex);
+ if (!v9inode->writeback_fid &&
++ (vma->vm_flags & VM_SHARED) &&
+ (vma->vm_flags & VM_WRITE)) {
+ /*
+ * clone a fid and add it to writeback_fid
+@@ -629,6 +630,8 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
+ (vma->vm_end - vma->vm_start - 1),
+ };
+
++ if (!(vma->vm_flags & VM_SHARED))
++ return;
+
+ p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
+
+--
+2.20.1
+
--- /dev/null
+From 9ff1828f1d2ebaf759ce81776535496078f92a83 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Jul 2019 16:50:20 +0100
+Subject: ceph: fix directories inode i_blkbits initialization
+
+From: Luis Henriques <lhenriques@suse.com>
+
+[ Upstream commit 750670341a24cb714e624e0fd7da30900ad93752 ]
+
+When filling an inode with info from the MDS, i_blkbits is being
+initialized using fl_stripe_unit, which contains the stripe unit in
+bytes. Unfortunately, this doesn't make sense for directories as they
+have fl_stripe_unit set to '0'. This means that i_blkbits will be set
+to 0xff, causing an UBSAN undefined behaviour in i_blocksize():
+
+ UBSAN: Undefined behaviour in ./include/linux/fs.h:731:12
+ shift exponent 255 is too large for 32-bit type 'int'
+
+Fix this by initializing i_blkbits to CEPH_BLOCK_SHIFT if fl_stripe_unit
+is zero.
+
+Signed-off-by: Luis Henriques <lhenriques@suse.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/inode.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index 339fdf6355df7..7fcddaaca8a5d 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -800,7 +800,12 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
+ ci->i_version = le64_to_cpu(info->version);
+ inode->i_version++;
+ inode->i_rdev = le32_to_cpu(info->rdev);
+- inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
++ /* directories have fl_stripe_unit set to zero */
++ if (le32_to_cpu(info->layout.fl_stripe_unit))
++ inode->i_blkbits =
++ fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
++ else
++ inode->i_blkbits = CEPH_BLOCK_SHIFT;
+
+ if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
+ (issued & CEPH_CAP_AUTH_EXCL) == 0) {
+--
+2.20.1
+
--- /dev/null
+From 8a71b29bd754612434b07c16cd4afab3e223a06a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Aug 2019 21:22:45 +0800
+Subject: ceph: reconnect connection if session hang in opening state
+
+From: Erqi Chen <chenerqi@gmail.com>
+
+[ Upstream commit 71a228bc8d65900179e37ac309e678f8c523f133 ]
+
+If a client's mds session is evicted in CEPH_MDS_SESSION_OPENING state,
+the mds won't send a session msg to the client, and delayed_work skips
+sessions in CEPH_MDS_SESSION_OPENING state, so the session hangs forever.
+
+Allow ceph_con_keepalive to reconnect a session in OPENING to avoid
+session hang. Also, ensure that we skip sessions in RESTARTING and
+REJECTED states since those states can't be resurrected by issuing
+a keepalive.
+
+Link: https://tracker.ceph.com/issues/41551
+Signed-off-by: Erqi Chen <chenerqi@gmail.com>
+Reviewed-by: "Yan, Zheng" <zyan@redhat.com>
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/mds_client.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 67cb9d078bfa7..3139fbd4c34e3 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -3410,7 +3410,9 @@ static void delayed_work(struct work_struct *work)
+ pr_info("mds%d hung\n", s->s_mds);
+ }
+ }
+- if (s->s_state < CEPH_MDS_SESSION_OPEN) {
++ if (s->s_state == CEPH_MDS_SESSION_NEW ||
++ s->s_state == CEPH_MDS_SESSION_RESTARTING ||
++ s->s_state == CEPH_MDS_SESSION_REJECTED) {
+ /* this mds is failed or recovering, just wait */
+ ceph_put_mds_session(s);
+ continue;
+--
+2.20.1
+
--- /dev/null
+From 984060b86ac1601e3713e83209b5b0f77e0e34a8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 31 Aug 2019 21:25:36 +0200
+Subject: drm/amdgpu: Check for valid number of registers to read
+
+From: Trek <trek00@inbox.ru>
+
+[ Upstream commit 73d8e6c7b841d9bf298c8928f228fb433676635c ]
+
+Do not try to allocate any amount of memory requested by the user.
+Instead, limit it to 128 registers. In practice the longest series of
+consecutive allowed registers is 48: mmGB_TILE_MODE0-31 and
+mmGB_MACROTILE_MODE0-15 (0x2644-0x2673).
+
+Bug: https://bugs.freedesktop.org/show_bug.cgi?id=111273
+Signed-off-by: Trek <trek00@inbox.ru>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 3938fca1ea8e5..24941a7b659f4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -430,6 +430,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
+ sh_num = 0xffffffff;
+
++ if (info->read_mmr_reg.count > 128)
++ return -EINVAL;
++
+ regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
+ if (!regs)
+ return -ENOMEM;
+--
+2.20.1
+
--- /dev/null
+From da7d6bc1a6cfac16136855bb8c26c841402d0109 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 8 Jul 2019 15:03:27 +0100
+Subject: drm/i915/userptr: Acquire the page lock around set_page_dirty()
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+[ Upstream commit cb6d7c7dc7ff8cace666ddec66334117a6068ce2 ]
+
+set_page_dirty says:
+
+ For pages with a mapping this should be done under the page lock
+ for the benefit of asynchronous memory errors who prefer a
+ consistent dirty state. This rule can be broken in some special
+ cases, but should be better not to.
+
+Under those rules, it is only safe for us to use the plain set_page_dirty
+calls for shmemfs/anonymous memory. Userptr may be used with real
+mappings and so needs to use the locked version (set_page_dirty_lock).
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=203317
+Fixes: 5cc9ed4b9a7a ("drm/i915: Introduce mapping of user pages into video memory (userptr) ioctl")
+References: 6dcc693bc57f ("ext4: warn when page is dirtied without buffers")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190708140327.26825-1-chris@chris-wilson.co.uk
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/i915_gem_userptr.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
+index 555fd47c1831d..a761038384efb 100644
+--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
++++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
+@@ -689,7 +689,15 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
+
+ for_each_sgt_page(page, sgt_iter, obj->pages) {
+ if (obj->dirty)
+- set_page_dirty(page);
++ /*
++ * As this may not be anonymous memory (e.g. shmem)
++ * but exist on a real mapping, we have to lock
++ * the page in order to dirty it -- holding
++ * the page reference is not sufficient to
++ * prevent the inode from being truncated.
++ * Play safe and take the lock.
++ */
++ set_page_dirty_lock(page);
+
+ mark_page_accessed(page);
+ put_page(page);
+--
+2.20.1
+
--- /dev/null
+From 24035364c0494385ae85bd1944bd3715ea30c672 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Jul 2019 15:48:53 +0800
+Subject: fs: nfs: Fix possible null-pointer dereferences in encode_attrs()
+
+From: Jia-Ju Bai <baijiaju1990@gmail.com>
+
+[ Upstream commit e2751463eaa6f9fec8fea80abbdc62dbc487b3c5 ]
+
+In encode_attrs(), there is an if statement on line 1145 to check
+whether label is NULL:
+ if (label && (attrmask[2] & FATTR4_WORD2_SECURITY_LABEL))
+
+When label is NULL, it is used on lines 1178-1181:
+ *p++ = cpu_to_be32(label->lfs);
+ *p++ = cpu_to_be32(label->pi);
+ *p++ = cpu_to_be32(label->len);
+ p = xdr_encode_opaque_fixed(p, label->label, label->len);
+
+To fix these bugs, label is checked before being used.
+
+These bugs were found by STCheck, a static analysis tool written by us.
+
+Signed-off-by: Jia-Ju Bai <baijiaju1990@gmail.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/nfs4xdr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index 5e2724a928ed3..d7f8d5ce30e3e 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -1123,7 +1123,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
+ } else
+ *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
+ }
+- if (bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
++ if (label && (bmval[2] & FATTR4_WORD2_SECURITY_LABEL)) {
+ *p++ = cpu_to_be32(label->lfs);
+ *p++ = cpu_to_be32(label->pi);
+ *p++ = cpu_to_be32(label->len);
+--
+2.20.1
+
--- /dev/null
+From bb9ca63900b8f0e9a2dd7c569a87309d8a52bfb3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Aug 2019 15:59:09 +0800
+Subject: fuse: fix memleak in cuse_channel_open
+
+From: zhengbin <zhengbin13@huawei.com>
+
+[ Upstream commit 9ad09b1976c562061636ff1e01bfc3a57aebe56b ]
+
+If cuse_send_init() fails, we need to call fuse_conn_put() on cc->fc.
+
+cuse_channel_open->fuse_conn_init->refcount_set(&fc->count, 1)
+ ->fuse_dev_alloc->fuse_conn_get
+ ->fuse_dev_free->fuse_conn_put
+
+Fixes: cc080e9e9be1 ("fuse: introduce per-instance fuse_dev structure")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: zhengbin <zhengbin13@huawei.com>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/fuse/cuse.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
+index c5b6b71654893..d9aba97007267 100644
+--- a/fs/fuse/cuse.c
++++ b/fs/fuse/cuse.c
+@@ -513,6 +513,7 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
+ rc = cuse_send_init(cc);
+ if (rc) {
+ fuse_dev_free(fud);
++ fuse_conn_put(&cc->fc);
+ return rc;
+ }
+ file->private_data = fud;
+--
+2.20.1
+
--- /dev/null
+From b15af6d56bb8db697d42581d8c53b40825f97a2d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Jul 2019 10:00:40 +0200
+Subject: ima: always return negative code for error
+
+From: Sascha Hauer <s.hauer@pengutronix.de>
+
+[ Upstream commit f5e1040196dbfe14c77ce3dfe3b7b08d2d961e88 ]
+
+integrity_kernel_read() returns the number of bytes read. If this is
+a short read, that positive value is returned from
+ima_calc_file_hash_atfm(). Currently this is only called indirectly from
+ima_calc_file_hash(), which only tests whether the return value is zero
+or nonzero and doesn't forward it. Nevertheless, there's no point in
+returning a positive value as an error, so translate a short read into
+-EINVAL.
+
+Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ security/integrity/ima/ima_crypto.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
+index 20e66291ca99a..5155c343406e0 100644
+--- a/security/integrity/ima/ima_crypto.c
++++ b/security/integrity/ima/ima_crypto.c
+@@ -298,8 +298,11 @@ static int ima_calc_file_hash_atfm(struct file *file,
+ rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
+ rc = integrity_kernel_read(file, offset, rbuf[active],
+ rbuf_len);
+- if (rc != rbuf_len)
++ if (rc != rbuf_len) {
++ if (rc >= 0)
++ rc = -EINVAL;
+ goto out3;
++ }
+
+ if (rbuf[1] && offset) {
+ /* Using two buffers, and it is not the first
+--
+2.20.1
+
--- /dev/null
+From 24d65eb995fcdeb9a59fdfe16d1448545282bd5a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Sep 2019 16:45:59 -0700
+Subject: kernel/elfcore.c: include proper prototypes
+
+From: Valdis Kletnieks <valdis.kletnieks@vt.edu>
+
+[ Upstream commit 0f74914071ab7e7b78731ed62bf350e3a344e0a5 ]
+
+When building with W=1, gcc properly complains that there's no prototypes:
+
+ CC kernel/elfcore.o
+kernel/elfcore.c:7:17: warning: no previous prototype for 'elf_core_extra_phdrs' [-Wmissing-prototypes]
+ 7 | Elf_Half __weak elf_core_extra_phdrs(void)
+ | ^~~~~~~~~~~~~~~~~~~~
+kernel/elfcore.c:12:12: warning: no previous prototype for 'elf_core_write_extra_phdrs' [-Wmissing-prototypes]
+ 12 | int __weak elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~
+kernel/elfcore.c:17:12: warning: no previous prototype for 'elf_core_write_extra_data' [-Wmissing-prototypes]
+ 17 | int __weak elf_core_write_extra_data(struct coredump_params *cprm)
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~
+kernel/elfcore.c:22:15: warning: no previous prototype for 'elf_core_extra_data_size' [-Wmissing-prototypes]
+ 22 | size_t __weak elf_core_extra_data_size(void)
+ | ^~~~~~~~~~~~~~~~~~~~~~~~
+
+Provide the include file so gcc is happy, and we don't have potential code drift.
+
+Link: http://lkml.kernel.org/r/29875.1565224705@turing-police
+Signed-off-by: Valdis Kletnieks <valdis.kletnieks@vt.edu>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/elfcore.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/kernel/elfcore.c b/kernel/elfcore.c
+index e556751d15d94..a2b29b9bdfcb2 100644
+--- a/kernel/elfcore.c
++++ b/kernel/elfcore.c
+@@ -2,6 +2,7 @@
+ #include <linux/fs.h>
+ #include <linux/mm.h>
+ #include <linux/binfmts.h>
++#include <linux/elfcore.h>
+
+ Elf_Half __weak elf_core_extra_phdrs(void)
+ {
+--
+2.20.1
+
--- /dev/null
+From 101c8bf5706229c6e1fb07fde261d8c32da32dd9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Sep 2019 15:17:38 +0530
+Subject: perf stat: Fix a segmentation fault when using repeat forever
+
+From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+
+[ Upstream commit 443f2d5ba13d65ccfd879460f77941875159d154 ]
+
+Observe a segmentation fault when 'perf stat' is asked to repeat forever
+with the interval option.
+
+Without fix:
+
+ # perf stat -r 0 -I 5000 -e cycles -a sleep 10
+ # time counts unit events
+ 5.000211692 3,13,89,82,34,157 cycles
+ 10.000380119 1,53,98,52,22,294 cycles
+ 10.040467280 17,16,79,265 cycles
+ Segmentation fault
+
+This problem was only observed with the forever option, aka -r 0; it
+works fine with a limited number of repeats. Calling print_counter()
+with ts set to NULL is not correct when interval is set. Hence avoid
+print_counter(NULL, ...) if interval is set.
+
+With fix:
+
+ # perf stat -r 0 -I 5000 -e cycles -a sleep 10
+ # time counts unit events
+ 5.019866622 3,15,14,43,08,697 cycles
+ 10.039865756 3,15,16,31,95,261 cycles
+ 10.059950628 1,26,05,47,158 cycles
+ 5.009902655 3,14,52,62,33,932 cycles
+ 10.019880228 3,14,52,22,89,154 cycles
+ 10.030543876 66,90,18,333 cycles
+ 5.009848281 3,14,51,98,25,437 cycles
+ 10.029854402 3,15,14,93,04,918 cycles
+ 5.009834177 3,14,51,95,92,316 cycles
+
+Committer notes:
+
+Did the 'git bisect' to find the cset introducing the problem to add the
+Fixes tag below, and at that time the problem reproduced as:
+
+ (gdb) run stat -r0 -I500 sleep 1
+ <SNIP>
+ Program received signal SIGSEGV, Segmentation fault.
+ print_interval (prefix=prefix@entry=0x7fffffffc8d0 "", ts=ts@entry=0x0) at builtin-stat.c:866
+ 866 sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep);
+ (gdb) bt
+ #0 print_interval (prefix=prefix@entry=0x7fffffffc8d0 "", ts=ts@entry=0x0) at builtin-stat.c:866
+ #1 0x000000000041860a in print_counters (ts=ts@entry=0x0, argc=argc@entry=2, argv=argv@entry=0x7fffffffd640) at builtin-stat.c:938
+ #2 0x0000000000419a7f in cmd_stat (argc=2, argv=0x7fffffffd640, prefix=<optimized out>) at builtin-stat.c:1411
+ #3 0x000000000045c65a in run_builtin (p=p@entry=0x6291b8 <commands+216>, argc=argc@entry=5, argv=argv@entry=0x7fffffffd640) at perf.c:370
+ #4 0x000000000045c893 in handle_internal_command (argc=5, argv=0x7fffffffd640) at perf.c:429
+ #5 0x000000000045c8f1 in run_argv (argcp=argcp@entry=0x7fffffffd4ac, argv=argv@entry=0x7fffffffd4a0) at perf.c:473
+ #6 0x000000000045cac9 in main (argc=<optimized out>, argv=<optimized out>) at perf.c:588
+ (gdb)
+
+Mostly the same as just before this patch:
+
+ Program received signal SIGSEGV, Segmentation fault.
+ 0x00000000005874a7 in print_interval (config=0xa1f2a0 <stat_config>, evlist=0xbc9b90, prefix=0x7fffffffd1c0 "`", ts=0x0) at util/stat-display.c:964
+ 964 sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, config->csv_sep);
+ (gdb) bt
+ #0 0x00000000005874a7 in print_interval (config=0xa1f2a0 <stat_config>, evlist=0xbc9b90, prefix=0x7fffffffd1c0 "`", ts=0x0) at util/stat-display.c:964
+ #1 0x0000000000588047 in perf_evlist__print_counters (evlist=0xbc9b90, config=0xa1f2a0 <stat_config>, _target=0xa1f0c0 <target>, ts=0x0, argc=2, argv=0x7fffffffd670)
+ at util/stat-display.c:1172
+ #2 0x000000000045390f in print_counters (ts=0x0, argc=2, argv=0x7fffffffd670) at builtin-stat.c:656
+ #3 0x0000000000456bb5 in cmd_stat (argc=2, argv=0x7fffffffd670) at builtin-stat.c:1960
+ #4 0x00000000004dd2e0 in run_builtin (p=0xa30e00 <commands+288>, argc=5, argv=0x7fffffffd670) at perf.c:310
+ #5 0x00000000004dd54d in handle_internal_command (argc=5, argv=0x7fffffffd670) at perf.c:362
+ #6 0x00000000004dd694 in run_argv (argcp=0x7fffffffd4cc, argv=0x7fffffffd4c0) at perf.c:406
+ #7 0x00000000004dda11 in main (argc=5, argv=0x7fffffffd670) at perf.c:531
+ (gdb)
+
+Fixes: d4f63a4741a8 ("perf stat: Introduce print_counters function")
+Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Tested-by: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Cc: stable@vger.kernel.org # v4.2+
+Link: http://lore.kernel.org/lkml/20190904094738.9558-3-srikar@linux.vnet.ibm.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/builtin-stat.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 43d5f35e90747..e55dbceadad6c 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -2565,7 +2565,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
+ run_idx + 1);
+
+ status = run_perf_stat(argc, argv);
+- if (forever && status != -1) {
++ if (forever && status != -1 && !interval) {
+ print_counters(NULL, argc, argv);
+ perf_stat__reset_stats();
+ }
+--
+2.20.1
+
--- /dev/null
+From 70f228064c32b9ee642c2ff9cc03cf3a587fa876 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Sep 2019 15:17:37 +0530
+Subject: perf stat: Reset previous counts on repeat with interval
+
+From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+
+[ Upstream commit b63fd11cced17fcb8e133def29001b0f6aaa5e06 ]
+
+When using 'perf stat' with the repeat and interval options, it shows
+wrong values for events.
+
+The wrong values will be shown for the first interval on the second and
+subsequent repetitions.
+
+Without the fix:
+
+ # perf stat -r 3 -I 2000 -e faults -e sched:sched_switch -a sleep 5
+
+ 2.000282489 53 faults
+ 2.000282489 513 sched:sched_switch
+ 4.005478208 3,721 faults
+ 4.005478208 2,666 sched:sched_switch
+ 5.025470933 395 faults
+ 5.025470933 1,307 sched:sched_switch
+ 2.009602825 1,84,46,74,40,73,70,95,47,520 faults <------
+ 2.009602825 1,84,46,74,40,73,70,95,49,568 sched:sched_switch <------
+ 4.019612206 4,730 faults
+ 4.019612206 2,746 sched:sched_switch
+ 5.039615484 3,953 faults
+ 5.039615484 1,496 sched:sched_switch
+ 2.000274620 1,84,46,74,40,73,70,95,47,520 faults <------
+ 2.000274620 1,84,46,74,40,73,70,95,47,520 sched:sched_switch <------
+ 4.000480342 4,282 faults
+ 4.000480342 2,303 sched:sched_switch
+ 5.000916811 1,322 faults
+ 5.000916811 1,064 sched:sched_switch
+ #
+
+prev_raw_counts is allocated when using intervals. It is used when
+calculating the difference in the event counts between intervals.
+
+The current counts are stored in prev_raw_counts to calculate the
+differences in the next iteration.
+
+On the first interval of the second and subsequent repetitions,
+prev_raw_counts would be the values stored in the last interval of the
+previous repetitions, while the current counts will only be for the
+first interval of the current repetition.
+
+Hence there is a possibility of events showing up as huge numbers.
+
+Fix this by resetting prev_raw_counts whenever perf stat repeats the
+command.
+
+With the fix:
+
+ # perf stat -r 3 -I 2000 -e faults -e sched:sched_switch -a sleep 5
+
+ 2.019349347 2,597 faults
+ 2.019349347 2,753 sched:sched_switch
+ 4.019577372 3,098 faults
+ 4.019577372 2,532 sched:sched_switch
+ 5.019415481 1,879 faults
+ 5.019415481 1,356 sched:sched_switch
+ 2.000178813 8,468 faults
+ 2.000178813 2,254 sched:sched_switch
+ 4.000404621 7,440 faults
+ 4.000404621 1,266 sched:sched_switch
+ 5.040196079 2,458 faults
+ 5.040196079 556 sched:sched_switch
+ 2.000191939 6,870 faults
+ 2.000191939 1,170 sched:sched_switch
+ 4.000414103 541 faults
+ 4.000414103 902 sched:sched_switch
+ 5.000809863 450 faults
+ 5.000809863 364 sched:sched_switch
+ #
+
+Committer notes:
+
+This was broken since the cset introducing the --interval feature, i.e.
+--repeat + --interval wasn't tested at that point; add the Fixes tag so
+that automatic scripts can pick this up.
+
+Fixes: 13370a9b5bb8 ("perf stat: Add interval printing")
+Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Tested-by: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: stable@vger.kernel.org # v3.9+
+Link: http://lore.kernel.org/lkml/20190904094738.9558-2-srikar@linux.vnet.ibm.com
+[ Fixed up conflicts with libperf, i.e. some perf_{evsel,evlist} lost the 'perf' prefix ]
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/builtin-stat.c | 3 +++
+ tools/perf/util/stat.c | 17 +++++++++++++++++
+ tools/perf/util/stat.h | 1 +
+ 3 files changed, 21 insertions(+)
+
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index e55dbceadad6c..5cb58f3afa355 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -2564,6 +2564,9 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
+ fprintf(output, "[ perf stat: executing run #%d ... ]\n",
+ run_idx + 1);
+
++ if (run_idx != 0)
++ perf_evlist__reset_prev_raw_counts(evsel_list);
++
+ status = run_perf_stat(argc, argv);
+ if (forever && status != -1 && !interval) {
+ print_counters(NULL, argc, argv);
+diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
+index 39345c2ddfc22..d4f872f1750e6 100644
+--- a/tools/perf/util/stat.c
++++ b/tools/perf/util/stat.c
+@@ -145,6 +145,15 @@ static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
+ evsel->prev_raw_counts = NULL;
+ }
+
++static void perf_evsel__reset_prev_raw_counts(struct perf_evsel *evsel)
++{
++ if (evsel->prev_raw_counts) {
++ evsel->prev_raw_counts->aggr.val = 0;
++ evsel->prev_raw_counts->aggr.ena = 0;
++ evsel->prev_raw_counts->aggr.run = 0;
++ }
++}
++
+ static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
+ {
+ int ncpus = perf_evsel__nr_cpus(evsel);
+@@ -195,6 +204,14 @@ void perf_evlist__reset_stats(struct perf_evlist *evlist)
+ }
+ }
+
++void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist)
++{
++ struct perf_evsel *evsel;
++
++ evlist__for_each_entry(evlist, evsel)
++ perf_evsel__reset_prev_raw_counts(evsel);
++}
++
+ static void zero_per_pkg(struct perf_evsel *counter)
+ {
+ if (counter->per_pkg_mask)
+diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
+index c29bb94c48a4b..b8845aceac31a 100644
+--- a/tools/perf/util/stat.h
++++ b/tools/perf/util/stat.h
+@@ -94,6 +94,7 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
+ int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw);
+ void perf_evlist__free_stats(struct perf_evlist *evlist);
+ void perf_evlist__reset_stats(struct perf_evlist *evlist);
++void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist);
+
+ int perf_stat_process_counter(struct perf_stat_config *config,
+ struct perf_evsel *counter);
+--
+2.20.1
+
--- /dev/null
+From c1fdb5a253088cc79ee9911f324df30143d2e20c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Sep 2019 12:52:35 +0200
+Subject: perf tools: Fix segfault in cpu_cache_level__read()
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+[ Upstream commit 0216234c2eed1367a318daeb9f4a97d8217412a0 ]
+
+We release the wrong pointer on the error path in the
+cpu_cache_level__read() function, leading to a segfault:
+
+ (gdb) r record ls
+ Starting program: /root/perf/tools/perf/perf record ls
+ ...
+ [ perf record: Woken up 1 times to write data ]
+ double free or corruption (out)
+
+ Thread 1 "perf" received signal SIGABRT, Aborted.
+ 0x00007ffff7463798 in raise () from /lib64/power9/libc.so.6
+ (gdb) bt
+ #0 0x00007ffff7463798 in raise () from /lib64/power9/libc.so.6
+ #1 0x00007ffff7443bac in abort () from /lib64/power9/libc.so.6
+ #2 0x00007ffff74af8bc in __libc_message () from /lib64/power9/libc.so.6
+ #3 0x00007ffff74b92b8 in malloc_printerr () from /lib64/power9/libc.so.6
+ #4 0x00007ffff74bb874 in _int_free () from /lib64/power9/libc.so.6
+ #5 0x0000000010271260 in __zfree (ptr=0x7fffffffa0b0) at ../../lib/zalloc..
+ #6 0x0000000010139340 in cpu_cache_level__read (cache=0x7fffffffa090, cac..
+ #7 0x0000000010143c90 in build_caches (cntp=0x7fffffffa118, size=<optimiz..
+ ...
+
+Release the proper pointer instead.
+
+Fixes: 720e98b5faf1 ("perf tools: Add perf data cache feature")
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Michael Petlan <mpetlan@redhat.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: stable@vger.kernel.org # v4.6+
+Link: http://lore.kernel.org/lkml/20190912105235.10689-1-jolsa@kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/header.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index 258b19b251a82..b3d947b98a7c2 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -949,7 +949,7 @@ static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 lev
+
+ scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
+ if (sysfs__read_str(file, &cache->map, &len)) {
+- free(cache->map);
++ free(cache->size);
+ free(cache->type);
+ return -1;
+ }
+--
+2.20.1
+
--- /dev/null
+From dfea0daf2da837f3be5d9b2e81d245fd03e40066 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Sep 2019 06:53:28 +0000
+Subject: sched/core: Fix migration to invalid CPU in __set_cpus_allowed_ptr()
+
+From: KeMeng Shi <shikemeng@huawei.com>
+
+[ Upstream commit 714e501e16cd473538b609b3e351b2cc9f7f09ed ]
+
+An oops can be triggered in the scheduler when running qemu on arm64:
+
+ Unable to handle kernel paging request at virtual address ffff000008effe40
+ Internal error: Oops: 96000007 [#1] SMP
+ Process migration/0 (pid: 12, stack limit = 0x00000000084e3736)
+ pstate: 20000085 (nzCv daIf -PAN -UAO)
+ pc : __ll_sc___cmpxchg_case_acq_4+0x4/0x20
+ lr : move_queued_task.isra.21+0x124/0x298
+ ...
+ Call trace:
+ __ll_sc___cmpxchg_case_acq_4+0x4/0x20
+ __migrate_task+0xc8/0xe0
+ migration_cpu_stop+0x170/0x180
+ cpu_stopper_thread+0xec/0x178
+ smpboot_thread_fn+0x1ac/0x1e8
+ kthread+0x134/0x138
+ ret_from_fork+0x10/0x18
+
+__set_cpus_allowed_ptr() will choose an active dest_cpu in the affinity mask
+to migrate the process to if the process is not currently running on any of
+the CPUs specified in the affinity mask. __set_cpus_allowed_ptr() will choose
+an invalid dest_cpu (dest_cpu >= nr_cpu_ids, 1024 in my virtual machine) if
+the CPUs in the affinity mask are deactivated by cpu_down after the
+cpumask_intersects check. The subsequent cpumask_test_cpu() of dest_cpu then
+reads beyond the end of the cpumask and may pass if the corresponding bit is
+coincidentally set. As a consequence, the kernel will access an invalid rq
+address associated with the invalid CPU in
+migration_cpu_stop->__migrate_task->move_queued_task and the Oops occurs.
+
+To reproduce the crash:
+
+ 1) A process repeatedly binds itself to cpu0 and cpu1 in turn by calling
+ sched_setaffinity.
+
+ 2) A shell script repeatedly does "echo 0 > /sys/devices/system/cpu/cpu1/online"
+ and "echo 1 > /sys/devices/system/cpu/cpu1/online" in turn.
+
+ 3) The Oops appears if the bit for the invalid CPU happens to be set in
+    the memory following the tested cpumask.
+
+Signed-off-by: KeMeng Shi <shikemeng@huawei.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lkml.kernel.org/r/1568616808-16808-1-git-send-email-shikemeng@huawei.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 63be0bcfa286d..82cec9a666e7b 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1162,7 +1162,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
+ if (cpumask_equal(&p->cpus_allowed, new_mask))
+ goto out;
+
+- if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
++ dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
++ if (dest_cpu >= nr_cpu_ids) {
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -1183,7 +1184,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
+ if (cpumask_test_cpu(task_cpu(p), new_mask))
+ goto out;
+
+- dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
+ if (task_running(rq, p) || p->state == TASK_WAKING) {
+ struct migration_arg arg = { p, dest_cpu };
+ /* Need help from migration thread: drop lock and wait. */
+--
+2.20.1
+
mmc-sdhci-of-esdhc-set-dma-snooping-based-on-dma-coherence.patch
ieee802154-atusb-fix-use-after-free-at-disconnect.patch
cfg80211-initialize-on-stack-chandefs.patch
+ima-always-return-negative-code-for-error.patch
+fs-nfs-fix-possible-null-pointer-dereferences-in-enc.patch
+9p-avoid-attaching-writeback_fid-on-mmap-with-type-p.patch
+xen-pci-reserve-mcfg-areas-earlier.patch
+ceph-fix-directories-inode-i_blkbits-initialization.patch
+ceph-reconnect-connection-if-session-hang-in-opening.patch
+drm-amdgpu-check-for-valid-number-of-registers-to-re.patch
+thermal-fix-use-after-free-when-unregistering-therma.patch
+fuse-fix-memleak-in-cuse_channel_open.patch
+sched-core-fix-migration-to-invalid-cpu-in-__set_cpu.patch
+kernel-elfcore.c-include-proper-prototypes.patch
+tools-lib-traceevent-do-not-free-tep-cmdlines-in-add.patch
+perf-tools-fix-segfault-in-cpu_cache_level__read.patch
+perf-stat-fix-a-segmentation-fault-when-using-repeat.patch
+perf-stat-reset-previous-counts-on-repeat-with-inter.patch
+drm-i915-userptr-acquire-the-page-lock-around-set_pa.patch
--- /dev/null
+From 189a6663d53b87b1edb90774d2e222d56704e4b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Jul 2019 13:14:52 +0300
+Subject: thermal: Fix use-after-free when unregistering thermal zone device
+
+From: Ido Schimmel <idosch@mellanox.com>
+
+[ Upstream commit 1851799e1d2978f68eea5d9dff322e121dcf59c1 ]
+
+thermal_zone_device_unregister() cancels the delayed work that polls the
+thermal zone, but it does not wait for it to finish. This is racy with
+respect to the freeing of the thermal zone device, which can result in a
+use-after-free [1].
+
+Fix this by waiting for the delayed work to finish before freeing the
+thermal zone device. Note that thermal_zone_device_set_polling() is
+never invoked from an atomic context, so it is safe to call
+cancel_delayed_work_sync() that can block.
+
+[1]
+[ +0.002221] ==================================================================
+[ +0.000064] BUG: KASAN: use-after-free in __mutex_lock+0x1076/0x11c0
+[ +0.000016] Read of size 8 at addr ffff8881e48e0450 by task kworker/1:0/17
+
+[ +0.000023] CPU: 1 PID: 17 Comm: kworker/1:0 Not tainted 5.2.0-rc6-custom-02495-g8e73ca3be4af #1701
+[ +0.000010] Hardware name: Mellanox Technologies Ltd. MSN2100-CB2FO/SA001017, BIOS 5.6.5 06/07/2016
+[ +0.000016] Workqueue: events_freezable_power_ thermal_zone_device_check
+[ +0.000012] Call Trace:
+[ +0.000021] dump_stack+0xa9/0x10e
+[ +0.000020] print_address_description.cold.2+0x9/0x25e
+[ +0.000018] __kasan_report.cold.3+0x78/0x9d
+[ +0.000016] kasan_report+0xe/0x20
+[ +0.000016] __mutex_lock+0x1076/0x11c0
+[ +0.000014] step_wise_throttle+0x72/0x150
+[ +0.000018] handle_thermal_trip+0x167/0x760
+[ +0.000019] thermal_zone_device_update+0x19e/0x5f0
+[ +0.000019] process_one_work+0x969/0x16f0
+[ +0.000017] worker_thread+0x91/0xc40
+[ +0.000014] kthread+0x33d/0x400
+[ +0.000015] ret_from_fork+0x3a/0x50
+
+[ +0.000020] Allocated by task 1:
+[ +0.000015] save_stack+0x19/0x80
+[ +0.000015] __kasan_kmalloc.constprop.4+0xc1/0xd0
+[ +0.000014] kmem_cache_alloc_trace+0x152/0x320
+[ +0.000015] thermal_zone_device_register+0x1b4/0x13a0
+[ +0.000015] mlxsw_thermal_init+0xc92/0x23d0
+[ +0.000014] __mlxsw_core_bus_device_register+0x659/0x11b0
+[ +0.000013] mlxsw_core_bus_device_register+0x3d/0x90
+[ +0.000013] mlxsw_pci_probe+0x355/0x4b0
+[ +0.000014] local_pci_probe+0xc3/0x150
+[ +0.000013] pci_device_probe+0x280/0x410
+[ +0.000013] really_probe+0x26a/0xbb0
+[ +0.000013] driver_probe_device+0x208/0x2e0
+[ +0.000013] device_driver_attach+0xfe/0x140
+[ +0.000013] __driver_attach+0x110/0x310
+[ +0.000013] bus_for_each_dev+0x14b/0x1d0
+[ +0.000013] driver_register+0x1c0/0x400
+[ +0.000015] mlxsw_sp_module_init+0x5d/0xd3
+[ +0.000014] do_one_initcall+0x239/0x4dd
+[ +0.000013] kernel_init_freeable+0x42b/0x4e8
+[ +0.000012] kernel_init+0x11/0x18b
+[ +0.000013] ret_from_fork+0x3a/0x50
+
+[ +0.000015] Freed by task 581:
+[ +0.000013] save_stack+0x19/0x80
+[ +0.000014] __kasan_slab_free+0x125/0x170
+[ +0.000013] kfree+0xf3/0x310
+[ +0.000013] thermal_release+0xc7/0xf0
+[ +0.000014] device_release+0x77/0x200
+[ +0.000014] kobject_put+0x1a8/0x4c0
+[ +0.000014] device_unregister+0x38/0xc0
+[ +0.000014] thermal_zone_device_unregister+0x54e/0x6a0
+[ +0.000014] mlxsw_thermal_fini+0x184/0x35a
+[ +0.000014] mlxsw_core_bus_device_unregister+0x10a/0x640
+[ +0.000013] mlxsw_devlink_core_bus_device_reload+0x92/0x210
+[ +0.000015] devlink_nl_cmd_reload+0x113/0x1f0
+[ +0.000014] genl_family_rcv_msg+0x700/0xee0
+[ +0.000013] genl_rcv_msg+0xca/0x170
+[ +0.000013] netlink_rcv_skb+0x137/0x3a0
+[ +0.000012] genl_rcv+0x29/0x40
+[ +0.000013] netlink_unicast+0x49b/0x660
+[ +0.000013] netlink_sendmsg+0x755/0xc90
+[ +0.000013] __sys_sendto+0x3de/0x430
+[ +0.000013] __x64_sys_sendto+0xe2/0x1b0
+[ +0.000013] do_syscall_64+0xa4/0x4d0
+[ +0.000013] entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+[ +0.000017] The buggy address belongs to the object at ffff8881e48e0008
+ which belongs to the cache kmalloc-2k of size 2048
+[ +0.000012] The buggy address is located 1096 bytes inside of
+ 2048-byte region [ffff8881e48e0008, ffff8881e48e0808)
+[ +0.000007] The buggy address belongs to the page:
+[ +0.000012] page:ffffea0007923800 refcount:1 mapcount:0 mapping:ffff88823680d0c0 index:0x0 compound_mapcount: 0
+[ +0.000020] flags: 0x200000000010200(slab|head)
+[ +0.000019] raw: 0200000000010200 ffffea0007682008 ffffea00076ab808 ffff88823680d0c0
+[ +0.000016] raw: 0000000000000000 00000000000d000d 00000001ffffffff 0000000000000000
+[ +0.000007] page dumped because: kasan: bad access detected
+
+[ +0.000012] Memory state around the buggy address:
+[ +0.000012] ffff8881e48e0300: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[ +0.000012] ffff8881e48e0380: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[ +0.000012] >ffff8881e48e0400: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[ +0.000008] ^
+[ +0.000012] ffff8881e48e0480: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[ +0.000012] ffff8881e48e0500: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[ +0.000007] ==================================================================
+
+Fixes: b1569e99c795 ("ACPI: move thermal trip handling to generic thermal layer")
+Reported-by: Jiri Pirko <jiri@mellanox.com>
+Signed-off-by: Ido Schimmel <idosch@mellanox.com>
+Acked-by: Jiri Pirko <jiri@mellanox.com>
+Signed-off-by: Zhang Rui <rui.zhang@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/thermal/thermal_core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index cd82ae34ddfa3..57603be42c507 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -402,7 +402,7 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
+ mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+ msecs_to_jiffies(delay));
+ else
+- cancel_delayed_work(&tz->poll_queue);
++ cancel_delayed_work_sync(&tz->poll_queue);
+ }
+
+ static void monitor_thermal_zone(struct thermal_zone_device *tz)
+--
+2.20.1
+
--- /dev/null
+From 945e113f829f673b2e2da48663abf66a2ab1f7c9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Aug 2019 15:05:28 -0400
+Subject: tools lib traceevent: Do not free tep->cmdlines in add_new_comm() on
+ failure
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+[ Upstream commit e0d2615856b2046c2e8d5bfd6933f37f69703b0b ]
+
+If the re-allocation of tep->cmdlines succeeds, then the previous
+allocation of tep->cmdlines will be freed. If we later fail in
+add_new_comm(), we must not free cmdlines, and also should assign
+tep->cmdlines to the new allocation. Otherwise, when freeing tep,
+tep->cmdlines will be pointing to garbage.
+
+Fixes: a6d2a61ac653a ("tools lib traceevent: Remove some die() calls")
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: linux-trace-devel@vger.kernel.org
+Cc: stable@vger.kernel.org
+Link: http://lkml.kernel.org/r/20190828191819.970121417@goodmis.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/lib/traceevent/event-parse.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
+index def61125ac36d..62f4cacf253ab 100644
+--- a/tools/lib/traceevent/event-parse.c
++++ b/tools/lib/traceevent/event-parse.c
+@@ -267,10 +267,10 @@ static int add_new_comm(struct pevent *pevent, const char *comm, int pid)
+ errno = ENOMEM;
+ return -1;
+ }
++ pevent->cmdlines = cmdlines;
+
+ cmdlines[pevent->cmdline_count].comm = strdup(comm);
+ if (!cmdlines[pevent->cmdline_count].comm) {
+- free(cmdlines);
+ errno = ENOMEM;
+ return -1;
+ }
+@@ -281,7 +281,6 @@ static int add_new_comm(struct pevent *pevent, const char *comm, int pid)
+ pevent->cmdline_count++;
+
+ qsort(cmdlines, pevent->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
+- pevent->cmdlines = cmdlines;
+
+ return 0;
+ }
+--
+2.20.1
+
--- /dev/null
+From cd32e40d6ce57af8be8a81644c58f8d903379493 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Sep 2019 19:31:51 +0100
+Subject: xen/pci: reserve MCFG areas earlier
+
+From: Igor Druzhinin <igor.druzhinin@citrix.com>
+
+[ Upstream commit a4098bc6eed5e31e0391bcc068e61804c98138df ]
+
+If the MCFG area is not reserved in E820, Xen by default will defer its usage
+until Dom0 registers it explicitly after the ACPI parser recognizes it as
+a reserved resource in the DSDT. Having it reserved in E820 is not
+mandatory according to "PCI Firmware Specification, rev 3.2" (par. 4.1.2),
+and firmware is free to keep a hole in E820 in that place. Xen doesn't know
+what exactly is inside this hole since it lacks a full ACPI view of the
+platform; therefore it's potentially harmful to access the MCFG region
+without additional checks, as some machines are known to provide
+inconsistent information on the size of the region.
+
+Currently xen_mcfg_late() runs after acpi_init(), which is too late, as some
+basic PCI enumeration starts exactly there as well. Trying to register a
+device prior to MCFG reservation causes multiple problems with PCIe extended
+capability initializations in Xen (e.g. SR-IOV VF BAR sizing). There are
+no convenient hooks for us to subscribe to, so register MCFG areas earlier
+upon the first invocation of xen_add_device(). It should be safe to do this
+once, since all the boot-time buses must have their MCFG areas in the MCFG
+table already and we don't support PCI bus hot-plug.
+
+Signed-off-by: Igor Druzhinin <igor.druzhinin@citrix.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/xen/pci.c | 21 +++++++++++++++------
+ 1 file changed, 15 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
+index 7494dbeb4409c..db58aaa4dc598 100644
+--- a/drivers/xen/pci.c
++++ b/drivers/xen/pci.c
+@@ -29,6 +29,8 @@
+ #include "../pci/pci.h"
+ #ifdef CONFIG_PCI_MMCONFIG
+ #include <asm/pci_x86.h>
++
++static int xen_mcfg_late(void);
+ #endif
+
+ static bool __read_mostly pci_seg_supported = true;
+@@ -40,7 +42,18 @@ static int xen_add_device(struct device *dev)
+ #ifdef CONFIG_PCI_IOV
+ struct pci_dev *physfn = pci_dev->physfn;
+ #endif
+-
++#ifdef CONFIG_PCI_MMCONFIG
++ static bool pci_mcfg_reserved = false;
++ /*
++ * Reserve MCFG areas in Xen on first invocation due to this being
++ * potentially called from inside of acpi_init immediately after
++ * MCFG table has been finally parsed.
++ */
++ if (!pci_mcfg_reserved) {
++ xen_mcfg_late();
++ pci_mcfg_reserved = true;
++ }
++#endif
+ if (pci_seg_supported) {
+ struct {
+ struct physdev_pci_device_add add;
+@@ -213,7 +226,7 @@ static int __init register_xen_pci_notifier(void)
+ arch_initcall(register_xen_pci_notifier);
+
+ #ifdef CONFIG_PCI_MMCONFIG
+-static int __init xen_mcfg_late(void)
++static int xen_mcfg_late(void)
+ {
+ struct pci_mmcfg_region *cfg;
+ int rc;
+@@ -252,8 +265,4 @@ static int __init xen_mcfg_late(void)
+ }
+ return 0;
+ }
+-/*
+- * Needs to be done after acpi_init which are subsys_initcall.
+- */
+-subsys_initcall_sync(xen_mcfg_late);
+ #endif
+--
+2.20.1
+