--- /dev/null
+From 59463eb88829f646aed13283fd84d02a475334fe Mon Sep 17 00:00:00 2001
+From: Aurelien Aptel <aaptel@suse.com>
+Date: Thu, 3 Dec 2020 19:46:08 +0100
+Subject: cifs: add NULL check for ses->tcon_ipc
+
+From: Aurelien Aptel <aaptel@suse.com>
+
+commit 59463eb88829f646aed13283fd84d02a475334fe upstream.
+
+In some scenarios (DFS and BAD_NETWORK_NAME) set_root_ses() can be
+called with a NULL ses->tcon_ipc.
+
+Signed-off-by: Aurelien Aptel <aaptel@suse.com>
+Reviewed-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
+CC: Stable <stable@vger.kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/connect.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -4768,7 +4768,8 @@ static void set_root_ses(struct cifs_sb_
+ if (ses) {
+ spin_lock(&cifs_tcp_ses_lock);
+ ses->ses_count++;
+- ses->tcon_ipc->remap = cifs_remap(cifs_sb);
++ if (ses->tcon_ipc)
++ ses->tcon_ipc->remap = cifs_remap(cifs_sb);
+ spin_unlock(&cifs_tcp_ses_lock);
+ }
+ *root_ses = ses;
--- /dev/null
+From 6988a619f5b79e4efadea6e19dcfe75fbcd350b5 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@cjr.nz>
+Date: Sat, 28 Nov 2020 15:57:06 -0300
+Subject: cifs: allow syscalls to be restarted in __smb_send_rqst()
+
+From: Paulo Alcantara <pc@cjr.nz>
+
+commit 6988a619f5b79e4efadea6e19dcfe75fbcd350b5 upstream.
+
+A customer has reported that several files in their multi-threaded app
+were left with size of 0 because most of the read(2) calls returned
+-EINTR and they assumed no bytes were read. Obviously, they could
+have fixed it by simply retrying on -EINTR.
+
+We noticed that most of the -EINTR on read(2) were due to real-time
+signals sent by glibc to process wide credential changes (SIGRT_1),
+and its signal handler had been established with SA_RESTART, in which
+case those calls could have been automatically restarted by the
+kernel.
+
+Let the kernel decide whether or not to restart the syscalls when
+there is a signal pending in __smb_send_rqst() by returning
+-ERESTARTSYS. If it can't, it will return -EINTR anyway.
+
+Signed-off-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
+CC: Stable <stable@vger.kernel.org>
+Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Reviewed-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/transport.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -339,8 +339,8 @@ __smb_send_rqst(struct TCP_Server_Info *
+ return -EAGAIN;
+
+ if (signal_pending(current)) {
+- cifs_dbg(FYI, "signal is pending before sending any data\n");
+- return -EINTR;
++ cifs_dbg(FYI, "signal pending before send request\n");
++ return -ERESTARTSYS;
+ }
+
+ /* cork the socket */
--- /dev/null
+From 212253367dc7b49ed3fc194ce71b0992eacaecf2 Mon Sep 17 00:00:00 2001
+From: Paulo Alcantara <pc@cjr.nz>
+Date: Sat, 28 Nov 2020 16:54:02 -0300
+Subject: cifs: fix potential use-after-free in cifs_echo_request()
+
+From: Paulo Alcantara <pc@cjr.nz>
+
+commit 212253367dc7b49ed3fc194ce71b0992eacaecf2 upstream.
+
+This patch fixes a potential use-after-free bug in
+cifs_echo_request().
+
+For instance,
+
+ thread 1
+ --------
+ cifs_demultiplex_thread()
+ clean_demultiplex_info()
+ kfree(server)
+
+ thread 2 (workqueue)
+ --------
+ apic_timer_interrupt()
+ smp_apic_timer_interrupt()
+ irq_exit()
+ __do_softirq()
+ run_timer_softirq()
+ call_timer_fn()
+ cifs_echo_request() <- use-after-free in server ptr
+
+Signed-off-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
+CC: Stable <stable@vger.kernel.org>
+Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/connect.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -935,6 +935,8 @@ static void clean_demultiplex_info(struc
+ list_del_init(&server->tcp_ses_list);
+ spin_unlock(&cifs_tcp_ses_lock);
+
++ cancel_delayed_work_sync(&server->echo);
++
+ spin_lock(&GlobalMid_Lock);
+ server->tcpStatus = CifsExiting;
+ spin_unlock(&GlobalMid_Lock);
--- /dev/null
+From ea64370bcae126a88cd26a16f1abcc23ab2b9a55 Mon Sep 17 00:00:00 2001
+From: Ronnie Sahlberg <lsahlber@redhat.com>
+Date: Mon, 30 Nov 2020 11:29:20 +1000
+Subject: cifs: refactor create_sd_buf() and avoid corrupting the buffer
+
+From: Ronnie Sahlberg <lsahlber@redhat.com>
+
+commit ea64370bcae126a88cd26a16f1abcc23ab2b9a55 upstream.
+
+When mounting with "idsfromsid" mount option, Azure
+corrupted the owner SIDs due to excessive padding
+caused by placing the owner fields at the end of the
+security descriptor on create. Placing owners at the
+front of the security descriptor (rather than the end)
+is also safer, as the number of ACEs (that follow it)
+are variable.
+
+Signed-off-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Suggested-by: Rohith Surabattula <rohiths@microsoft.com>
+CC: Stable <stable@vger.kernel.org> # v5.8
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2pdu.c | 69 ++++++++++++++++++++++++++++--------------------------
+ fs/cifs/smb2pdu.h | 2 -
+ 2 files changed, 37 insertions(+), 34 deletions(-)
+
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2237,17 +2237,15 @@ static struct crt_sd_ctxt *
+ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
+ {
+ struct crt_sd_ctxt *buf;
+- struct cifs_ace *pace;
+- unsigned int sdlen, acelen;
++ __u8 *ptr, *aclptr;
++ unsigned int acelen, acl_size, ace_count;
+ unsigned int owner_offset = 0;
+ unsigned int group_offset = 0;
++ struct smb3_acl acl;
+
+- *len = roundup(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 2), 8);
++ *len = roundup(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8);
+
+ if (set_owner) {
+- /* offset fields are from beginning of security descriptor not of create context */
+- owner_offset = sizeof(struct smb3_acl) + (sizeof(struct cifs_ace) * 2);
+-
+ /* sizeof(struct owner_group_sids) is already multiple of 8 so no need to round */
+ *len += sizeof(struct owner_group_sids);
+ }
+@@ -2256,26 +2254,22 @@ create_sd_buf(umode_t mode, bool set_own
+ if (buf == NULL)
+ return buf;
+
++ ptr = (__u8 *)&buf[1];
+ if (set_owner) {
++ /* offset fields are from beginning of security descriptor not of create context */
++ owner_offset = ptr - (__u8 *)&buf->sd;
+ buf->sd.OffsetOwner = cpu_to_le32(owner_offset);
+- group_offset = owner_offset + sizeof(struct owner_sid);
++ group_offset = owner_offset + offsetof(struct owner_group_sids, group);
+ buf->sd.OffsetGroup = cpu_to_le32(group_offset);
++
++ setup_owner_group_sids(ptr);
++ ptr += sizeof(struct owner_group_sids);
+ } else {
+ buf->sd.OffsetOwner = 0;
+ buf->sd.OffsetGroup = 0;
+ }
+
+- sdlen = sizeof(struct smb3_sd) + sizeof(struct smb3_acl) +
+- 2 * sizeof(struct cifs_ace);
+- if (set_owner) {
+- sdlen += sizeof(struct owner_group_sids);
+- setup_owner_group_sids(owner_offset + sizeof(struct create_context) + 8 /* name */
+- + (char *)buf);
+- }
+-
+- buf->ccontext.DataOffset = cpu_to_le16(offsetof
+- (struct crt_sd_ctxt, sd));
+- buf->ccontext.DataLength = cpu_to_le32(sdlen);
++ buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, sd));
+ buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, Name));
+ buf->ccontext.NameLength = cpu_to_le16(4);
+ /* SMB2_CREATE_SD_BUFFER_TOKEN is "SecD" */
+@@ -2284,6 +2278,7 @@ create_sd_buf(umode_t mode, bool set_own
+ buf->Name[2] = 'c';
+ buf->Name[3] = 'D';
+ buf->sd.Revision = 1; /* Must be one see MS-DTYP 2.4.6 */
++
+ /*
+ * ACL is "self relative" ie ACL is stored in contiguous block of memory
+ * and "DP" ie the DACL is present
+@@ -2291,28 +2286,38 @@ create_sd_buf(umode_t mode, bool set_own
+ buf->sd.Control = cpu_to_le16(ACL_CONTROL_SR | ACL_CONTROL_DP);
+
+ /* offset owner, group and Sbz1 and SACL are all zero */
+- buf->sd.OffsetDacl = cpu_to_le32(sizeof(struct smb3_sd));
+- buf->acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */
++ buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd);
++ /* Ship the ACL for now. we will copy it into buf later. */
++ aclptr = ptr;
++ ptr += sizeof(struct cifs_acl);
+
+ /* create one ACE to hold the mode embedded in reserved special SID */
+- pace = (struct cifs_ace *)(sizeof(struct crt_sd_ctxt) + (char *)buf);
+- acelen = setup_special_mode_ACE(pace, (__u64)mode);
++ acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode);
++ ptr += acelen;
++ acl_size = acelen + sizeof(struct smb3_acl);
++ ace_count = 1;
+
+ if (set_owner) {
+ /* we do not need to reallocate buffer to add the two more ACEs. plenty of space */
+- pace = (struct cifs_ace *)(acelen + (sizeof(struct crt_sd_ctxt) + (char *)buf));
+- acelen += setup_special_user_owner_ACE(pace);
+- /* it does not appear necessary to add an ACE for the NFS group SID */
+- buf->acl.AceCount = cpu_to_le16(3);
+- } else
+- buf->acl.AceCount = cpu_to_le16(2);
++ acelen = setup_special_user_owner_ACE((struct cifs_ace *)ptr);
++ ptr += acelen;
++ acl_size += acelen;
++ ace_count += 1;
++ }
+
+ /* and one more ACE to allow access for authenticated users */
+- pace = (struct cifs_ace *)(acelen + (sizeof(struct crt_sd_ctxt) +
+- (char *)buf));
+- acelen += setup_authusers_ACE(pace);
++ acelen = setup_authusers_ACE((struct cifs_ace *)ptr);
++ ptr += acelen;
++ acl_size += acelen;
++ ace_count += 1;
++
++ acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */
++ acl.AclSize = cpu_to_le16(acl_size);
++ acl.AceCount = cpu_to_le16(ace_count);
++ memcpy(aclptr, &acl, sizeof(struct cifs_acl));
+
+- buf->acl.AclSize = cpu_to_le16(sizeof(struct cifs_acl) + acelen);
++ buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
++ *len = ptr - (__u8 *)buf;
+
+ return buf;
+ }
+--- a/fs/cifs/smb2pdu.h
++++ b/fs/cifs/smb2pdu.h
+@@ -900,8 +900,6 @@ struct crt_sd_ctxt {
+ struct create_context ccontext;
+ __u8 Name[8];
+ struct smb3_sd sd;
+- struct smb3_acl acl;
+- /* Followed by at least 4 ACEs */
+ } __packed;
+
+
--- /dev/null
+From 2bf509d96d84c3336d08375e8af34d1b85ee71c8 Mon Sep 17 00:00:00 2001
+From: Menglong Dong <dong.menglong@zte.com.cn>
+Date: Sat, 5 Dec 2020 22:14:42 -0800
+Subject: coredump: fix core_pattern parse error
+
+From: Menglong Dong <dong.menglong@zte.com.cn>
+
+commit 2bf509d96d84c3336d08375e8af34d1b85ee71c8 upstream.
+
+'format_corename()' will split 'core_pattern' on spaces when it is in
+pipe mode, and take helper_argv[0] as the path to usermode executable.
+It works fine in most cases.
+
+However, if there is a space between '|' and '/file/path', such as
+'| /usr/lib/systemd/systemd-coredump %P %u %g', then helper_argv[0] will
+be parsed as '', and users will get a 'Core dump to | disabled'.
+
+It is not friendly to users, as the pattern above was valid previously.
+Fix this by ignoring the spaces between '|' and '/file/path'.
+
+Fixes: 315c69261dd3 ("coredump: split pipe command whitespace before expanding template")
+Signed-off-by: Menglong Dong <dong.menglong@zte.com.cn>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Paul Wise <pabs3@bonedaddy.net>
+Cc: Jakub Wilk <jwilk@jwilk.net> [https://bugs.debian.org/924398]
+Cc: Neil Horman <nhorman@tuxdriver.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/5fb62870.1c69fb81.8ef5d.af76@mx.google.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/coredump.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -229,7 +229,8 @@ static int format_corename(struct core_n
+ */
+ if (ispipe) {
+ if (isspace(*pat_ptr)) {
+- was_space = true;
++ if (cn->used != 0)
++ was_space = true;
+ pat_ptr++;
+ continue;
+ } else if (was_space) {
--- /dev/null
+From 89478335718c98557f10470a9bc5c555b9261c4e Mon Sep 17 00:00:00 2001
+From: Sergei Shtepa <sergei.shtepa@veeam.com>
+Date: Wed, 11 Nov 2020 15:55:46 +0300
+Subject: dm: fix bug with RCU locking in dm_blk_report_zones
+
+From: Sergei Shtepa <sergei.shtepa@veeam.com>
+
+commit 89478335718c98557f10470a9bc5c555b9261c4e upstream.
+
+The dm_get_live_table() function makes RCU read lock so
+dm_put_live_table() must be called even if dm_table map is not found.
+
+Fixes: e76239a3748c9 ("block: add a report_zones method")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sergei Shtepa <sergei.shtepa@veeam.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -491,8 +491,10 @@ static int dm_blk_report_zones(struct ge
+ return -EAGAIN;
+
+ map = dm_get_live_table(md, &srcu_idx);
+- if (!map)
+- return -EIO;
++ if (!map) {
++ ret = -EIO;
++ goto out;
++ }
+
+ do {
+ struct dm_target *tgt;
--- /dev/null
+From f05c4403db5bba881d4964e731f6da35be46aabd Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Fri, 4 Dec 2020 15:19:27 -0500
+Subject: dm: fix double RCU unlock in dm_dax_zero_page_range() error path
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit f05c4403db5bba881d4964e731f6da35be46aabd upstream.
+
+Remove redundant dm_put_live_table() in dm_dax_zero_page_range() error
+path to fix sparse warning:
+drivers/md/dm.c:1208:9: warning: context imbalance in 'dm_dax_zero_page_range' - unexpected unlock
+
+Fixes: cdf6cdcd3b99a ("dm,dax: Add dax zero_page_range operation")
+Cc: stable@vger.kernel.org
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1219,11 +1219,9 @@ static int dm_dax_zero_page_range(struct
+ * ->zero_page_range() is mandatory dax operation. If we are
+ * here, something is wrong.
+ */
+- dm_put_live_table(md, srcu_idx);
+ goto out;
+ }
+ ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
+-
+ out:
+ dm_put_live_table(md, srcu_idx);
+
--- /dev/null
+From bde3808bc8c2741ad3d804f84720409aee0c2972 Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Fri, 4 Dec 2020 15:25:18 -0500
+Subject: dm: remove invalid sparse __acquires and __releases annotations
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit bde3808bc8c2741ad3d804f84720409aee0c2972 upstream.
+
+Fixes sparse warnings:
+drivers/md/dm.c:508:12: warning: context imbalance in 'dm_prepare_ioctl' - wrong count at exit
+drivers/md/dm.c:543:13: warning: context imbalance in 'dm_unprepare_ioctl' - wrong count at exit
+
+Fixes: 971888c46993f ("dm: hold DM table for duration of ioctl rather than use blkdev_get")
+Cc: stable@vger.kernel.org
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -524,7 +524,6 @@ out:
+
+ static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
+ struct block_device **bdev)
+- __acquires(md->io_barrier)
+ {
+ struct dm_target *tgt;
+ struct dm_table *map;
+@@ -558,7 +557,6 @@ retry:
+ }
+
+ static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
+- __releases(md->io_barrier)
+ {
+ dm_put_live_table(md, srcu_idx);
+ }
--- /dev/null
+From e5d41cbca1b2036362c9e29d705d3a175a01eff8 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Tue, 10 Nov 2020 07:44:01 -0500
+Subject: dm writecache: advance the number of arguments when reporting max_age
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit e5d41cbca1b2036362c9e29d705d3a175a01eff8 upstream.
+
+When reporting the "max_age" value the number of arguments must
+advance by two.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Fixes: 3923d4854e18 ("dm writecache: implement gradual cleanup")
+Cc: stable@vger.kernel.org # v5.7+
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-writecache.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/md/dm-writecache.c
++++ b/drivers/md/dm-writecache.c
+@@ -2479,6 +2479,8 @@ static void writecache_status(struct dm_
+ extra_args += 2;
+ if (wc->autocommit_time_set)
+ extra_args += 2;
++ if (wc->max_age != MAX_AGE_UNSPECIFIED)
++ extra_args += 2;
+ if (wc->cleaner)
+ extra_args++;
+ if (wc->writeback_fua_set)
--- /dev/null
+From 67aa3ec3dbc43d6e34401d9b2a40040ff7bb57af Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Tue, 10 Nov 2020 07:45:13 -0500
+Subject: dm writecache: fix the maximum number of arguments
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 67aa3ec3dbc43d6e34401d9b2a40040ff7bb57af upstream.
+
+Advance the maximum number of arguments to 16.
+This fixes issue where certain operations, combined with table
+configured args, exceed 10 arguments.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Fixes: 48debafe4f2f ("dm: add writecache target")
+Cc: stable@vger.kernel.org # v4.18+
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-writecache.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/dm-writecache.c
++++ b/drivers/md/dm-writecache.c
+@@ -2041,7 +2041,7 @@ static int writecache_ctr(struct dm_targ
+ struct wc_memory_superblock s;
+
+ static struct dm_arg _args[] = {
+- {0, 10, "Invalid number of feature args"},
++ {0, 16, "Invalid number of feature args"},
+ };
+
+ as.argc = argc;
--- /dev/null
+From efd6d85a18102241538dd1cc257948a0dbe6fae6 Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Tue, 19 May 2020 11:38:44 -0400
+Subject: drm/amdgpu/vcn3.0: remove old DPG workaround
+
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+
+commit efd6d85a18102241538dd1cc257948a0dbe6fae6 upstream.
+
+Port from VCN2.5
+SCRATCH2 is used to keep decode wptr as a workaround
+which fix a hardware DPG decode wptr update bug for
+vcn2.5 beforehand.
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Reviewed-by: James Zhu <James.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org # 5.9.x
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 7 -------
+ 1 file changed, 7 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+@@ -1587,9 +1587,6 @@ static int vcn_v3_0_pause_dpg_mode(struc
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+
+- WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
+- RREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
+-
+ /* Unstall DPG */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+@@ -1650,10 +1647,6 @@ static void vcn_v3_0_dec_ring_set_wptr(s
+ {
+ struct amdgpu_device *adev = ring->adev;
+
+- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+- WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
+- lower_32_bits(ring->wptr) | 0x80000000);
+-
+ if (ring->use_doorbell) {
+ adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
--- /dev/null
+From ac2db9488cf21de0be7899c1e5963e5ac0ff351f Mon Sep 17 00:00:00 2001
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+Date: Sun, 10 May 2020 15:47:03 -0400
+Subject: drm/amdgpu/vcn3.0: stall DPG when WPTR/RPTR reset
+
+From: Boyuan Zhang <boyuan.zhang@amd.com>
+
+commit ac2db9488cf21de0be7899c1e5963e5ac0ff351f upstream.
+
+Port from VCN2.5
+Add vcn dpg hardware synchronization to fix race condition
+issue between vcn driver and hardware.
+
+Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
+Reviewed-by: James Zhu <James.Zhu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org # 5.9.x
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+@@ -1011,6 +1011,11 @@ static int vcn_v3_0_start_dpg_mode(struc
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
+
++ /* Stall DPG before WPTR/RPTR reset */
++ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
++ UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
++ ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
++
+ /* set the write pointer delay */
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
+
+@@ -1033,6 +1038,10 @@ static int vcn_v3_0_start_dpg_mode(struc
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
+ lower_32_bits(ring->wptr));
+
++ /* Unstall DPG */
++ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
++ 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
++
+ return 0;
+ }
+
+@@ -1556,8 +1565,14 @@ static int vcn_v3_0_pause_dpg_mode(struc
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
+ UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+
++ /* Stall DPG before WPTR/RPTR reset */
++ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
++ UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
++ ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
++
+ /* Restore */
+ ring = &adev->vcn.inst[inst_idx].ring_enc[0];
++ ring->wptr = 0;
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
+@@ -1565,6 +1580,7 @@ static int vcn_v3_0_pause_dpg_mode(struc
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+
+ ring = &adev->vcn.inst[inst_idx].ring_enc[1];
++ ring->wptr = 0;
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
+@@ -1574,6 +1590,10 @@ static int vcn_v3_0_pause_dpg_mode(struc
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
+ RREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
+
++ /* Unstall DPG */
++ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
++ 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
++
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
+ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+ }
--- /dev/null
+From aff76ab795364569b1cac58c1d0bc7df956e3899 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Tue, 24 Nov 2020 18:35:21 +0000
+Subject: drm/i915/gt: Limit frequency drop to RPe on parking
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit aff76ab795364569b1cac58c1d0bc7df956e3899 upstream.
+
+We treat idling the GT (intel_rps_park) as a downclock event, and reduce
+the frequency we intend to restart the GT with. Since the two workloads
+are likely related (e.g. a compositor rendering every 16ms), we want to
+carry the frequency and load information from across the idling.
+However, we do also need to update the frequencies so that workloads
+that run for less than 1ms are autotuned by RPS (otherwise we leave
+compositors running at max clocks, draining excess power). Conversely,
+if we try to run too slowly, the next workload has to run longer. Since
+there is a hysteresis in the power graph, below a certain frequency
+running a short workload for longer consumes more energy than running it
+slightly higher for less time. The exact balance point is unknown
+beforehand, but measurements with 30fps media playback indicate that RPe
+is a better choice.
+
+Reported-by: Edward Baker <edward.baker@intel.com>
+Tested-by: Edward Baker <edward.baker@intel.com>
+Fixes: 043cd2d14ede ("drm/i915/gt: Leave rps->cur_freq on unpark")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Edward Baker <edward.baker@intel.com>
+Cc: Andi Shyti <andi.shyti@intel.com>
+Cc: Lyude Paul <lyude@redhat.com>
+Cc: <stable@vger.kernel.org> # v5.8+
+Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Reviewed-by: Andi Shyti <andi.shyti@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20201124183521.28623-1-chris@chris-wilson.co.uk
+(cherry picked from commit f7ed83cc1925f0b8ce2515044d674354035c3af9)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gt/intel_rps.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/gpu/drm/i915/gt/intel_rps.c
++++ b/drivers/gpu/drm/i915/gt/intel_rps.c
+@@ -882,6 +882,10 @@ void intel_rps_park(struct intel_rps *rp
+ adj = -2;
+ rps->last_adj = adj;
+ rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);
++ if (rps->cur_freq < rps->efficient_freq) {
++ rps->cur_freq = rps->efficient_freq;
++ rps->last_adj = 0;
++ }
+
+ GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
+ }
--- /dev/null
+From 777a7717d60ccdc9b84f35074f848d3f746fc3bf Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Thu, 26 Nov 2020 14:08:41 +0000
+Subject: drm/i915/gt: Program mocs:63 for cache eviction on gen9
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 777a7717d60ccdc9b84f35074f848d3f746fc3bf upstream.
+
+Ville noticed that the last mocs entry is used unconditionally by the HW
+when it performs cache evictions, and noted that while the value is not
+meant to be writable by the driver, we should program it to a reasonable
+value nevertheless.
+
+As it turns out, we can change the value of mocs:63 and the value we
+were programming into it would cause hard hangs in conjunction with
+atomic operations.
+
+v2: Add details from bspec about how it is used by HW
+
+Suggested-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/2707
+Fixes: 3bbaba0ceaa2 ("drm/i915: Added Programming of the MOCS")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Cc: Jason Ekstrand <jason@jlekstrand.net>
+Cc: <stable@vger.kernel.org> # v4.3+
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20201126140841.1982-1-chris@chris-wilson.co.uk
+(cherry picked from commit 977933b5da7c16f39295c4c1d4259a58ece65dbe)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gt/intel_mocs.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
++++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
+@@ -131,7 +131,19 @@ static const struct drm_i915_mocs_entry
+ GEN9_MOCS_ENTRIES,
+ MOCS_ENTRY(I915_MOCS_CACHED,
+ LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3),
+- L3_3_WB)
++ L3_3_WB),
++
++ /*
++ * mocs:63
++ * - used by the L3 for all of its evictions.
++ * Thus it is expected to allow LLC cacheability to enable coherent
++ * flows to be maintained.
++ * - used to force L3 uncachable cycles.
++ * Thus it is expected to make the surface L3 uncacheable.
++ */
++ MOCS_ENTRY(63,
++ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
++ L3_1_UC)
+ };
+
+ /* NOTE: the LE_TGT_CACHE is not used on Broxton */
--- /dev/null
+From 78b2eb8a1f10f366681acad8d21c974c1f66791a Mon Sep 17 00:00:00 2001
+From: Venkata Ramana Nayana <venkata.ramana.nayana@intel.com>
+Date: Fri, 27 Nov 2020 12:07:16 +0000
+Subject: drm/i915/gt: Retain default context state across shrinking
+
+From: Venkata Ramana Nayana <venkata.ramana.nayana@intel.com>
+
+commit 78b2eb8a1f10f366681acad8d21c974c1f66791a upstream.
+
+As we use a shmemfs file to hold the context state, when not in use it
+may be swapped out, such as across suspend. Since we wrote into the
+shmemfs without marking the pages as dirty, the contents may be dropped
+instead of being written back to swap. On re-using the shmemfs file,
+such as creating a new context after resume, the contents of that file
+were likely garbage and so the new context could then hang the GPU.
+
+Simply mark the page as being written when copying into the shmemfs
+file, and the new contents will be retained across swapout.
+
+Fixes: be1cb55a07bf ("drm/i915/gt: Keep a no-frills swappable copy of the default context state")
+Cc: Sudeep Dutt <sudeep.dutt@intel.com>
+Cc: Matthew Auld <matthew.auld@intel.com>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Cc: Ramalingam C <ramalingam.c@intel.com>
+Signed-off-by: CQ Tang <cq.tang@intel.com>
+Signed-off-by: Venkata Ramana Nayana <venkata.ramana.nayana@intel.com>
+Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: <stable@vger.kernel.org> # v5.8+
+Link: https://patchwork.freedesktop.org/patch/msgid/20201127120718.454037-161-matthew.auld@intel.com
+(cherry picked from commit a9d71f76ccfd309f3bd5f7c9b60e91a4decae792)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gt/shmem_utils.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
++++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
+@@ -143,10 +143,13 @@ static int __shmem_rw(struct file *file,
+ return PTR_ERR(page);
+
+ vaddr = kmap(page);
+- if (write)
++ if (write) {
+ memcpy(vaddr + offset_in_page(off), ptr, this);
+- else
++ set_page_dirty(page);
++ } else {
+ memcpy(ptr, vaddr + offset_in_page(off), this);
++ }
++ mark_page_accessed(page);
+ kunmap(page);
+ put_page(page);
+
--- /dev/null
+From fd4e788e971ce763e50762d7b1a0048992949dd0 Mon Sep 17 00:00:00 2001
+From: Tomi Valkeinen <tomi.valkeinen@ti.com>
+Date: Fri, 27 Nov 2020 10:52:41 +0200
+Subject: drm/omap: sdi: fix bridge enable/disable
+
+From: Tomi Valkeinen <tomi.valkeinen@ti.com>
+
+commit fd4e788e971ce763e50762d7b1a0048992949dd0 upstream.
+
+When the SDI output was converted to DRM bridge, the atomic versions of
+enable and disable funcs were used. This was not intended, as that would
+require implementing other atomic funcs too. This leads to:
+
+WARNING: CPU: 0 PID: 18 at drivers/gpu/drm/drm_bridge.c:708 drm_atomic_helper_commit_modeset_enables+0x134/0x268
+
+and display not working.
+
+Fix this by using the legacy enable/disable funcs.
+
+Fixes: 8bef8a6d5da81b909a190822b96805a47348146f ("drm/omap: sdi: Register a drm_bridge")
+Reported-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
+Tested-by: Ivaylo Dimitrov <ivo.g.dimitrov.75@gmail.com>
+Tested-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Cc: stable@vger.kernel.org # v5.7+
+Link: https://patchwork.freedesktop.org/patch/msgid/20201127085241.848461-1-tomi.valkeinen@ti.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/omapdrm/dss/sdi.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
++++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
+@@ -195,8 +195,7 @@ static void sdi_bridge_mode_set(struct d
+ sdi->pixelclock = adjusted_mode->clock * 1000;
+ }
+
+-static void sdi_bridge_enable(struct drm_bridge *bridge,
+- struct drm_bridge_state *bridge_state)
++static void sdi_bridge_enable(struct drm_bridge *bridge)
+ {
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+ struct dispc_clock_info dispc_cinfo;
+@@ -259,8 +258,7 @@ err_get_dispc:
+ regulator_disable(sdi->vdds_sdi_reg);
+ }
+
+-static void sdi_bridge_disable(struct drm_bridge *bridge,
+- struct drm_bridge_state *bridge_state)
++static void sdi_bridge_disable(struct drm_bridge *bridge)
+ {
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+
+@@ -278,8 +276,8 @@ static const struct drm_bridge_funcs sdi
+ .mode_valid = sdi_bridge_mode_valid,
+ .mode_fixup = sdi_bridge_mode_fixup,
+ .mode_set = sdi_bridge_mode_set,
+- .atomic_enable = sdi_bridge_enable,
+- .atomic_disable = sdi_bridge_disable,
++ .enable = sdi_bridge_enable,
++ .disable = sdi_bridge_disable,
+ };
+
+ static void sdi_bridge_init(struct sdi_device *sdi)
--- /dev/null
+From 49a962c075dfa41c78e34784772329bc8784d217 Mon Sep 17 00:00:00 2001
+From: "Naveen N. Rao" <naveen.n.rao@linux.vnet.ibm.com>
+Date: Thu, 26 Nov 2020 23:38:39 +0530
+Subject: ftrace: Fix DYNAMIC_FTRACE_WITH_DIRECT_CALLS dependency
+
+From: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+
+commit 49a962c075dfa41c78e34784772329bc8784d217 upstream.
+
+DYNAMIC_FTRACE_WITH_DIRECT_CALLS should depend on
+DYNAMIC_FTRACE_WITH_REGS since we need ftrace_regs_caller().
+
+Link: https://lkml.kernel.org/r/fc4b257ea8689a36f086d2389a9ed989496ca63a.1606412433.git.naveen.n.rao@linux.vnet.ibm.com
+
+Cc: stable@vger.kernel.org
+Fixes: 763e34e74bb7d5c ("ftrace: Add register_ftrace_direct()")
+Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/trace/Kconfig
++++ b/kernel/trace/Kconfig
+@@ -202,7 +202,7 @@ config DYNAMIC_FTRACE_WITH_REGS
+
+ config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ def_bool y
+- depends on DYNAMIC_FTRACE
++ depends on DYNAMIC_FTRACE_WITH_REGS
+ depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+
+ config FUNCTION_PROFILER
--- /dev/null
+From 4c75b0ff4e4bf7a45b5aef9639799719c28d0073 Mon Sep 17 00:00:00 2001
+From: "Naveen N. Rao" <naveen.n.rao@linux.vnet.ibm.com>
+Date: Thu, 26 Nov 2020 23:38:38 +0530
+Subject: ftrace: Fix updating FTRACE_FL_TRAMP
+
+From: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+
+commit 4c75b0ff4e4bf7a45b5aef9639799719c28d0073 upstream.
+
+On powerpc, kprobe-direct.tc triggered FTRACE_WARN_ON() in
+ftrace_get_addr_new() followed by the below message:
+ Bad trampoline accounting at: 000000004222522f (wake_up_process+0xc/0x20) (f0000001)
+
+The set of steps leading to this involved:
+- modprobe ftrace-direct-too
+- enable_probe
+- modprobe ftrace-direct
+- rmmod ftrace-direct <-- trigger
+
+The problem turned out to be that we were not updating flags in the
+ftrace record properly. From the above message about the trampoline
+accounting being bad, it can be seen that the ftrace record still has
+FTRACE_FL_TRAMP set though ftrace-direct module is going away. This
+happens because we are checking if any ftrace_ops has the
+FTRACE_FL_TRAMP flag set _before_ updating the filter hash.
+
+The fix for this is to look for any _other_ ftrace_ops that also needs
+FTRACE_FL_TRAMP.
+
+Link: https://lkml.kernel.org/r/56c113aa9c3e10c19144a36d9684c7882bf09af5.1606412433.git.naveen.n.rao@linux.vnet.ibm.com
+
+Cc: stable@vger.kernel.org
+Fixes: a124692b698b0 ("ftrace: Enable trampoline when rec count returns back to one")
+Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/ftrace.c | 22 +++++++++++++++++++++-
+ 1 file changed, 21 insertions(+), 1 deletion(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1629,6 +1629,8 @@ static bool test_rec_ops_needs_regs(stru
+ static struct ftrace_ops *
+ ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
+ static struct ftrace_ops *
++ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
++static struct ftrace_ops *
+ ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
+
+ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
+@@ -1778,7 +1780,7 @@ static bool __ftrace_hash_rec_update(str
+ * to it.
+ */
+ if (ftrace_rec_count(rec) == 1 &&
+- ftrace_find_tramp_ops_any(rec))
++ ftrace_find_tramp_ops_any_other(rec, ops))
+ rec->flags |= FTRACE_FL_TRAMP;
+ else
+ rec->flags &= ~FTRACE_FL_TRAMP;
+@@ -2238,6 +2240,24 @@ ftrace_find_tramp_ops_any(struct dyn_ftr
+ continue;
+
+ if (hash_contains_ip(ip, op->func_hash))
++ return op;
++ } while_for_each_ftrace_op(op);
++
++ return NULL;
++}
++
++static struct ftrace_ops *
++ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
++{
++ struct ftrace_ops *op;
++ unsigned long ip = rec->ip;
++
++ do_for_each_ftrace_op(op, ftrace_ops_list) {
++
++ if (op == op_exclude || !op->trampoline)
++ continue;
++
++ if (hash_contains_ip(ip, op->func_hash))
+ return op;
+ } while_for_each_ftrace_op(op);
+
--- /dev/null
+From dd0ecf544125639e54056d851e4887dbb94b6d2f Mon Sep 17 00:00:00 2001
+From: Andreas Gruenbacher <agruenba@redhat.com>
+Date: Mon, 30 Nov 2020 16:07:25 +0100
+Subject: gfs2: Fix deadlock between gfs2_{create_inode,inode_lookup} and delete_work_func
+
+From: Andreas Gruenbacher <agruenba@redhat.com>
+
+commit dd0ecf544125639e54056d851e4887dbb94b6d2f upstream.
+
+In gfs2_create_inode and gfs2_inode_lookup, make sure to cancel any pending
+delete work before taking the inode glock. Otherwise, gfs2_cancel_delete_work
+may block waiting for delete_work_func to complete, and delete_work_func may
+block trying to acquire the inode glock in gfs2_inode_lookup.
+
+Reported-by: Alexander Aring <aahringo@redhat.com>
+Fixes: a0e3cc65fa29 ("gfs2: Turn gl_delete into a delayed work")
+Cc: stable@vger.kernel.org # v5.8+
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/gfs2/inode.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -150,6 +150,8 @@ struct inode *gfs2_inode_lookup(struct s
+ error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
+ if (unlikely(error))
+ goto fail;
++ if (blktype != GFS2_BLKST_UNLINKED)
++ gfs2_cancel_delete_work(io_gl);
+
+ if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) {
+ /*
+@@ -180,8 +182,6 @@ struct inode *gfs2_inode_lookup(struct s
+ error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
+ if (unlikely(error))
+ goto fail;
+- if (blktype != GFS2_BLKST_UNLINKED)
+- gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl);
+ glock_set_object(ip->i_iopen_gh.gh_gl, ip);
+ gfs2_glock_put(io_gl);
+ io_gl = NULL;
+@@ -725,13 +725,19 @@ static int gfs2_create_inode(struct inod
+ flush_delayed_work(&ip->i_gl->gl_work);
+ glock_set_object(ip->i_gl, ip);
+
+- error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
++ error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
+ if (error)
+ goto fail_free_inode;
++ gfs2_cancel_delete_work(io_gl);
++ glock_set_object(io_gl, ip);
++
++ error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
++ if (error)
++ goto fail_gunlock2;
+
+ error = gfs2_trans_begin(sdp, blocks, 0);
+ if (error)
+- goto fail_free_inode;
++ goto fail_gunlock2;
+
+ if (blocks > 1) {
+ ip->i_eattr = ip->i_no_addr + 1;
+@@ -740,18 +746,12 @@ static int gfs2_create_inode(struct inod
+ init_dinode(dip, ip, symname);
+ gfs2_trans_end(sdp);
+
+- error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
+- if (error)
+- goto fail_free_inode;
+-
+ BUG_ON(test_and_set_bit(GLF_INODE_CREATING, &io_gl->gl_flags));
+
+ error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
+ if (error)
+ goto fail_gunlock2;
+
+- gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl);
+- glock_set_object(ip->i_iopen_gh.gh_gl, ip);
+ gfs2_set_iop(inode);
+ insert_inode_hash(inode);
+
+@@ -803,6 +803,7 @@ fail_gunlock3:
+ gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+ fail_gunlock2:
+ clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
++ glock_clear_object(io_gl, ip);
+ gfs2_glock_put(io_gl);
+ fail_free_inode:
+ if (ip->i_gl) {
--- /dev/null
+From 82e938bd5382b322ce81e6cb8fd030987f2da022 Mon Sep 17 00:00:00 2001
+From: Andreas Gruenbacher <agruenba@redhat.com>
+Date: Wed, 25 Nov 2020 23:37:18 +0100
+Subject: gfs2: Upgrade shared glocks for atime updates
+
+From: Andreas Gruenbacher <agruenba@redhat.com>
+
+commit 82e938bd5382b322ce81e6cb8fd030987f2da022 upstream.
+
+Commit 20f829999c38 ("gfs2: Rework read and page fault locking") lifted
+the glock lock taking from the low-level ->readpage and ->readahead
+address space operations to the higher-level ->read_iter file and
+->fault vm operations. The glocks are still taken in LM_ST_SHARED mode
+only. On filesystems mounted without the noatime option, ->read_iter
+sometimes needs to update the atime as well, though. Right now, this
+leads to a failed locking mode assertion in gfs2_dirty_inode.
+
+Fix that by introducing a new update_time inode operation. There, if
+the glock is held non-exclusively, upgrade it to an exclusive lock.
+
+Reported-by: Alexander Aring <aahringo@redhat.com>
+Fixes: 20f829999c38 ("gfs2: Rework read and page fault locking")
+Cc: stable@vger.kernel.org # v5.8+
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/gfs2/inode.c | 21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -2116,6 +2116,25 @@ loff_t gfs2_seek_hole(struct file *file,
+ return vfs_setpos(file, ret, inode->i_sb->s_maxbytes);
+ }
+
++static int gfs2_update_time(struct inode *inode, struct timespec64 *time,
++ int flags)
++{
++ struct gfs2_inode *ip = GFS2_I(inode);
++ struct gfs2_glock *gl = ip->i_gl;
++ struct gfs2_holder *gh;
++ int error;
++
++ gh = gfs2_glock_is_locked_by_me(gl);
++ if (gh && !gfs2_glock_is_held_excl(gl)) {
++ gfs2_glock_dq(gh);
++ gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, gh);
++ error = gfs2_glock_nq(gh);
++ if (error)
++ return error;
++ }
++ return generic_update_time(inode, time, flags);
++}
++
+ const struct inode_operations gfs2_file_iops = {
+ .permission = gfs2_permission,
+ .setattr = gfs2_setattr,
+@@ -2124,6 +2143,7 @@ const struct inode_operations gfs2_file_
+ .fiemap = gfs2_fiemap,
+ .get_acl = gfs2_get_acl,
+ .set_acl = gfs2_set_acl,
++ .update_time = gfs2_update_time,
+ };
+
+ const struct inode_operations gfs2_dir_iops = {
+@@ -2143,6 +2163,7 @@ const struct inode_operations gfs2_dir_i
+ .fiemap = gfs2_fiemap,
+ .get_acl = gfs2_get_acl,
+ .set_acl = gfs2_set_acl,
++ .update_time = gfs2_update_time,
+ .atomic_open = gfs2_atomic_open,
+ };
+
--- /dev/null
+From 1de67a3dee7a279ebe4d892b359fe3696938ec15 Mon Sep 17 00:00:00 2001
+From: Christian Eggers <ceggers@arri.de>
+Date: Fri, 9 Oct 2020 13:03:19 +0200
+Subject: i2c: imx: Check for I2SR_IAL after every byte
+
+From: Christian Eggers <ceggers@arri.de>
+
+commit 1de67a3dee7a279ebe4d892b359fe3696938ec15 upstream.
+
+Arbitration Lost (IAL) can happen after every single byte transfer. If
+arbitration is lost, the I2C hardware will autonomously switch from
+master mode to slave. If a transfer is not aborted in this state,
+consecutive transfers will not be executed by the hardware and will
+timeout.
+
+Signed-off-by: Christian Eggers <ceggers@arri.de>
+Tested (not extensively) on Vybrid VF500 (Toradex VF50):
+Tested-by: Krzysztof Kozlowski <krzk@kernel.org>
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Wolfram Sang <wsa@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/i2c/busses/i2c-imx.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -490,6 +490,16 @@ static int i2c_imx_trx_complete(struct i
+ dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
++
++ /* check for arbitration lost */
++ if (i2c_imx->i2csr & I2SR_IAL) {
++ dev_dbg(&i2c_imx->adapter.dev, "<%s> Arbitration lost\n", __func__);
++ i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
++
++ i2c_imx->i2csr = 0;
++ return -EAGAIN;
++ }
++
+ dev_dbg(&i2c_imx->adapter.dev, "<%s> TRX complete\n", __func__);
+ i2c_imx->i2csr = 0;
+ return 0;
--- /dev/null
+From 61e6fe59ede155881a622f5901551b1cc8748f6a Mon Sep 17 00:00:00 2001
+From: Christian Eggers <ceggers@arri.de>
+Date: Fri, 9 Oct 2020 13:03:20 +0200
+Subject: i2c: imx: Don't generate STOP condition if arbitration has been lost
+
+From: Christian Eggers <ceggers@arri.de>
+
+commit 61e6fe59ede155881a622f5901551b1cc8748f6a upstream.
+
+If arbitration is lost, the master automatically changes to slave mode.
+I2SR_IBB may or may not be reset by hardware. Raising a STOP condition
+by resetting I2CR_MSTA has no effect and will not clear I2SR_IBB.
+
+So calling i2c_imx_bus_busy() is not required and would busy-wait until
+timeout.
+
+Signed-off-by: Christian Eggers <ceggers@arri.de>
+Tested (not extensively) on Vybrid VF500 (Toradex VF50):
+Tested-by: Krzysztof Kozlowski <krzk@kernel.org>
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Cc: stable@vger.kernel.org # Requires trivial backporting, simple remove
+ # the 3rd argument from the calls to
+ # i2c_imx_bus_busy().
+Signed-off-by: Wolfram Sang <wsa@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/i2c/busses/i2c-imx.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -615,6 +615,8 @@ static void i2c_imx_stop(struct imx_i2c_
+ /* Stop I2C transaction */
+ dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
++ if (!(temp & I2CR_MSTA))
++ i2c_imx->stopped = 1;
+ temp &= ~(I2CR_MSTA | I2CR_MTX);
+ if (i2c_imx->dma)
+ temp &= ~I2CR_DMAEN;
+@@ -778,9 +780,12 @@ static int i2c_imx_dma_read(struct imx_i
+ */
+ dev_dbg(dev, "<%s> clear MSTA\n", __func__);
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
++ if (!(temp & I2CR_MSTA))
++ i2c_imx->stopped = 1;
+ temp &= ~(I2CR_MSTA | I2CR_MTX);
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+- i2c_imx_bus_busy(i2c_imx, 0, false);
++ if (!i2c_imx->stopped)
++ i2c_imx_bus_busy(i2c_imx, 0, false);
+ } else {
+ /*
+ * For i2c master receiver repeat restart operation like:
+@@ -905,9 +910,12 @@ static int i2c_imx_read(struct imx_i2c_s
+ dev_dbg(&i2c_imx->adapter.dev,
+ "<%s> clear MSTA\n", __func__);
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
++ if (!(temp & I2CR_MSTA))
++ i2c_imx->stopped = 1;
+ temp &= ~(I2CR_MSTA | I2CR_MTX);
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+- i2c_imx_bus_busy(i2c_imx, 0, atomic);
++ if (!i2c_imx->stopped)
++ i2c_imx_bus_busy(i2c_imx, 0, atomic);
+ } else {
+ /*
+ * For i2c master receiver repeat restart operation like:
--- /dev/null
+From 384a9565f70a876c2e78e58c5ca0bbf0547e4f6d Mon Sep 17 00:00:00 2001
+From: Christian Eggers <ceggers@arri.de>
+Date: Fri, 9 Oct 2020 13:03:18 +0200
+Subject: i2c: imx: Fix reset of I2SR_IAL flag
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christian Eggers <ceggers@arri.de>
+
+commit 384a9565f70a876c2e78e58c5ca0bbf0547e4f6d upstream.
+
+According to the "VFxxx Controller Reference Manual" (and the comment
+block starting at line 97), Vybrid requires writing a one for clearing
+an interrupt flag. Syncing the method for clearing I2SR_IIF in
+i2c_imx_isr().
+
+Signed-off-by: Christian Eggers <ceggers@arri.de>
+Fixes: 4b775022f6fd ("i2c: imx: add struct to hold more configurable quirks")
+Reviewed-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Acked-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Wolfram Sang <wsa@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/i2c/busses/i2c-imx.c | 22 ++++++++++++++++------
+ 1 file changed, 16 insertions(+), 6 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -412,6 +412,19 @@ static void i2c_imx_dma_free(struct imx_
+ dma->chan_using = NULL;
+ }
+
++static void i2c_imx_clear_irq(struct imx_i2c_struct *i2c_imx, unsigned int bits)
++{
++ unsigned int temp;
++
++ /*
++ * i2sr_clr_opcode is the value to clear all interrupts. Here we want to
++ * clear only <bits>, so we write ~i2sr_clr_opcode with just <bits>
++ * toggled. This is required because i.MX needs W0C and Vybrid uses W1C.
++ */
++ temp = ~i2c_imx->hwdata->i2sr_clr_opcode ^ bits;
++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
++}
++
+ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool atomic)
+ {
+ unsigned long orig_jiffies = jiffies;
+@@ -424,8 +437,7 @@ static int i2c_imx_bus_busy(struct imx_i
+
+ /* check for arbitration lost */
+ if (temp & I2SR_IAL) {
+- temp &= ~I2SR_IAL;
+- imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
++ i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
+ return -EAGAIN;
+ }
+
+@@ -469,7 +481,7 @@ static int i2c_imx_trx_complete(struct i
+ */
+ readb_poll_timeout_atomic(addr, regval, regval & I2SR_IIF, 5, 1000 + 100);
+ i2c_imx->i2csr = regval;
+- imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
++ i2c_imx_clear_irq(i2c_imx, I2SR_IIF | I2SR_IAL);
+ } else {
+ wait_event_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10);
+ }
+@@ -623,9 +635,7 @@ static irqreturn_t i2c_imx_isr(int irq,
+ if (temp & I2SR_IIF) {
+ /* save status register */
+ i2c_imx->i2csr = temp;
+- temp &= ~I2SR_IIF;
+- temp |= (i2c_imx->hwdata->i2sr_clr_opcode & I2SR_IIF);
+- imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
++ i2c_imx_clear_irq(i2c_imx, I2SR_IIF);
+ wake_up(&i2c_imx->queue);
+ return IRQ_HANDLED;
+ }
--- /dev/null
+From 2d280bc8930ba9ed1705cfd548c6c8924949eaf1 Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Sun, 29 Nov 2020 18:33:32 +0000
+Subject: io_uring: fix recvmsg setup with compat buf-select
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+commit 2d280bc8930ba9ed1705cfd548c6c8924949eaf1 upstream.
+
+__io_compat_recvmsg_copy_hdr() with REQ_F_BUFFER_SELECT reads out iov
+len but never assigns it to iov/fast_iov, leaving sr->len with garbage.
+Hopefully, following io_buffer_select() truncates it to the selected
+buffer size, but the value still may be below what was specified.
+
+Cc: <stable@vger.kernel.org> # 5.7
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/io_uring.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -4300,7 +4300,8 @@ static int __io_compat_recvmsg_copy_hdr(
+ return -EFAULT;
+ if (clen < 0)
+ return -EINVAL;
+- sr->len = iomsg->iov[0].iov_len;
++ sr->len = clen;
++ iomsg->iov[0].iov_len = clen;
+ iomsg->iov = NULL;
+ } else {
+ ret = compat_import_iovec(READ, uiov, len, UIO_FASTIOV,
--- /dev/null
+From f54db39fbe40731c40aefdd3bc26e7d56d668c64 Mon Sep 17 00:00:00 2001
+From: Greg Kurz <groug@kaod.org>
+Date: Mon, 30 Nov 2020 13:19:27 +0100
+Subject: KVM: PPC: Book3S HV: XIVE: Fix vCPU id sanity check
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Greg Kurz <groug@kaod.org>
+
+commit f54db39fbe40731c40aefdd3bc26e7d56d668c64 upstream.
+
+Commit 062cfab7069f ("KVM: PPC: Book3S HV: XIVE: Make VP block size
+configurable") updated kvmppc_xive_vcpu_id_valid() in a way that
+allows userspace to trigger an assertion in skiboot and crash the host:
+
+[ 696.186248988,3] XIVE[ IC 08 ] eq_blk != vp_blk (0 vs. 1) for target 0x4300008c/0
+[ 696.186314757,0] Assert fail: hw/xive.c:2370:0
+[ 696.186342458,0] Aborting!
+xive-kvCPU 0043 Backtrace:
+ S: 0000000031e2b8f0 R: 0000000030013840 .backtrace+0x48
+ S: 0000000031e2b990 R: 000000003001b2d0 ._abort+0x4c
+ S: 0000000031e2ba10 R: 000000003001b34c .assert_fail+0x34
+ S: 0000000031e2ba90 R: 0000000030058984 .xive_eq_for_target.part.20+0xb0
+ S: 0000000031e2bb40 R: 0000000030059fdc .xive_setup_silent_gather+0x2c
+ S: 0000000031e2bc20 R: 000000003005a334 .opal_xive_set_vp_info+0x124
+ S: 0000000031e2bd20 R: 00000000300051a4 opal_entry+0x134
+ --- OPAL call token: 0x8a caller R1: 0xc000001f28563850 ---
+
+XIVE maintains the interrupt context state of non-dispatched vCPUs in
+an internal VP structure. We allocate a bunch of those on startup to
+accommodate all possible vCPUs. Each VP has an id, that we derive from
+the vCPU id for efficiency:
+
+static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server)
+{
+ return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
+}
+
+The KVM XIVE device used to allocate KVM_MAX_VCPUS VPs. This was
+limiting the number of concurrent VMs because the VP space is
+limited on the HW. Since most of the time, VMs run with a lot less
+vCPUs, commit 062cfab7069f ("KVM: PPC: Book3S HV: XIVE: Make VP
+block size configurable") gave the possibility for userspace to
+tune the size of the VP block through the KVM_DEV_XIVE_NR_SERVERS
+attribute.
+
+The check in kvmppc_pack_vcpu_id() was changed from
+
+ cpu < KVM_MAX_VCPUS * xive->kvm->arch.emul_smt_mode
+
+to
+
+ cpu < xive->nr_servers * xive->kvm->arch.emul_smt_mode
+
+The previous check was based on the fact that the VP block had
+KVM_MAX_VCPUS entries and that kvmppc_pack_vcpu_id() guarantees
+that packed vCPU ids are below KVM_MAX_VCPUS. We've changed the
+size of the VP block, but kvmppc_pack_vcpu_id() has nothing to
+do with it and it certainly doesn't ensure that the packed vCPU
+ids are below xive->nr_servers. kvmppc_xive_vcpu_id_valid() might
+thus return true when the VM was configured with a non-standard
+VSMT mode, even if the packed vCPU id is higher than what we
+expect. We end up using an unallocated VP id, which confuses
+OPAL. The assert in OPAL is probably abusive and should be
+converted to a regular error that the kernel can handle, but
+we shouldn't really use broken VP ids in the first place.
+
+Fix kvmppc_xive_vcpu_id_valid() so that it checks the packed
+vCPU id is below xive->nr_servers, which is explicitly what we
+want.
+
+Fixes: 062cfab7069f ("KVM: PPC: Book3S HV: XIVE: Make VP block size configurable")
+Cc: stable@vger.kernel.org # v5.5+
+Signed-off-by: Greg Kurz <groug@kaod.org>
+Reviewed-by: Cédric Le Goater <clg@kaod.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/160673876747.695514.1809676603724514920.stgit@bahia.lan
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_xive.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/arch/powerpc/kvm/book3s_xive.c
++++ b/arch/powerpc/kvm/book3s_xive.c
+@@ -1214,12 +1214,9 @@ void kvmppc_xive_cleanup_vcpu(struct kvm
+ static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
+ {
+ /* We have a block of xive->nr_servers VPs. We just need to check
+- * raw vCPU ids are below the expected limit for this guest's
+- * core stride ; kvmppc_pack_vcpu_id() will pack them down to an
+- * index that can be safely used to compute a VP id that belongs
+- * to the VP block.
++ * packed vCPU ids are below that.
+ */
+- return cpu < xive->nr_servers * xive->kvm->arch.emul_smt_mode;
++ return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers;
+ }
+
+ int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
--- /dev/null
+From a1ee28117077c3bf24e5ab6324c835eaab629c45 Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Sat, 28 Nov 2020 17:07:21 +1000
+Subject: powerpc/64s/powernv: Fix memory corruption when saving SLB entries on MCE
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit a1ee28117077c3bf24e5ab6324c835eaab629c45 upstream.
+
+This can be hit by an HPT guest running on an HPT host and bring down
+the host, so it's quite important to fix.
+
+Fixes: 7290f3b3d3e6 ("powerpc/64s/powernv: machine check dump SLB contents")
+Cc: stable@vger.kernel.org # v5.4+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Acked-by: Mahesh Salgaonkar <mahesh@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20201128070728.825934-2-npiggin@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/powernv/setup.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -186,11 +186,16 @@ static void __init pnv_init(void)
+ add_preferred_console("hvc", 0, NULL);
+
+ if (!radix_enabled()) {
++ size_t size = sizeof(struct slb_entry) * mmu_slb_size;
+ int i;
+
+ /* Allocate per cpu area to save old slb contents during MCE */
+- for_each_possible_cpu(i)
+- paca_ptrs[i]->mce_faulty_slbs = memblock_alloc_node(mmu_slb_size, __alignof__(*paca_ptrs[i]->mce_faulty_slbs), cpu_to_node(i));
++ for_each_possible_cpu(i) {
++ paca_ptrs[i]->mce_faulty_slbs =
++ memblock_alloc_node(size,
++ __alignof__(struct slb_entry),
++ cpu_to_node(i));
++ }
+ }
+ }
+
--- /dev/null
+From 9ea69a55b3b9a71cded9726af591949c1138f235 Mon Sep 17 00:00:00 2001
+From: Laurent Vivier <lvivier@redhat.com>
+Date: Thu, 26 Nov 2020 09:28:52 +0100
+Subject: powerpc/pseries: Pass MSI affinity to irq_create_mapping()
+
+From: Laurent Vivier <lvivier@redhat.com>
+
+commit 9ea69a55b3b9a71cded9726af591949c1138f235 upstream.
+
+With virtio multiqueue, normally each queue IRQ is mapped to a CPU.
+
+Commit 0d9f0a52c8b9f ("virtio_scsi: use virtio IRQ affinity") exposed
+an existing shortcoming of the arch code by moving virtio_scsi to
+the automatic IRQ affinity assignment.
+
+The affinity is correctly computed in msi_desc but this is not applied
+to the system IRQs.
+
+It appears the affinity is correctly passed to rtas_setup_msi_irqs() but
+lost at this point and never passed to irq_domain_alloc_descs()
+(see commit 06ee6d571f0e ("genirq: Add affinity hint to irq allocation"))
+because irq_create_mapping() doesn't take an affinity parameter.
+
+Use the new irq_create_mapping_affinity() function, which allows to forward
+the affinity setting from rtas_setup_msi_irqs() to irq_domain_alloc_descs().
+
+With this change, the virtqueues are correctly dispatched between the CPUs
+on pseries.
+
+Fixes: e75eafb9b039 ("genirq/msi: Switch to new irq spreading infrastructure")
+Signed-off-by: Laurent Vivier <lvivier@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kurz <groug@kaod.org>
+Acked-by: Michael Ellerman <mpe@ellerman.id.au>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20201126082852.1178497-3-lvivier@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/pseries/msi.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/platforms/pseries/msi.c
++++ b/arch/powerpc/platforms/pseries/msi.c
+@@ -458,7 +458,8 @@ again:
+ return hwirq;
+ }
+
+- virq = irq_create_mapping(NULL, hwirq);
++ virq = irq_create_mapping_affinity(NULL, hwirq,
++ entry->affinity);
+
+ if (!virq) {
+ pr_debug("rtas_msi: Failed mapping hwirq %d\n", hwirq);
--- /dev/null
+From 68e10d5ff512b503dcba1246ad5620f32035e135 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Mon, 30 Nov 2020 23:16:03 -0500
+Subject: ring-buffer: Always check to put back before stamp when crossing pages
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 68e10d5ff512b503dcba1246ad5620f32035e135 upstream.
+
+The current ring buffer logic checks to see if the updating of the event
+buffer was interrupted, and if it is, it will try to fix up the before stamp
+with the write stamp to make them equal again. This logic is flawed, because
+if it is not interrupted, the two are guaranteed to be different, as the
+current event just updated the before stamp before allocation. This
+guarantees that the next event (this one or another interrupting one) will
+think it interrupted the time updates of a previous event and inject an
+absolute time stamp to compensate.
+
+The correct logic is to always update the timestamps when traversing to a
+new sub buffer.
+
+Cc: stable@vger.kernel.org
+Fixes: a389d86f7fd09 ("ring-buffer: Have nested events still record running time stamp")
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/ring_buffer.c | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -3234,14 +3234,12 @@ __rb_reserve_next(struct ring_buffer_per
+
+ /* See if we shot pass the end of this buffer page */
+ if (unlikely(write > BUF_PAGE_SIZE)) {
+- if (tail != w) {
+- /* before and after may now different, fix it up*/
+- b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
+- a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
+- if (a_ok && b_ok && info->before != info->after)
+- (void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
+- info->before, info->after);
+- }
++ /* before and after may now different, fix it up*/
++ b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
++ a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
++ if (a_ok && b_ok && info->before != info->after)
++ (void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
++ info->before, info->after);
+ return rb_move_tail(cpu_buffer, tail, info);
+ }
+
--- /dev/null
+From 8785f51a17083eee7c37606079c6447afc6ba102 Mon Sep 17 00:00:00 2001
+From: Andrea Righi <andrea.righi@canonical.com>
+Date: Sat, 28 Nov 2020 10:15:17 +0100
+Subject: ring-buffer: Set the right timestamp in the slow path of __rb_reserve_next()
+
+From: Andrea Righi <andrea.righi@canonical.com>
+
+commit 8785f51a17083eee7c37606079c6447afc6ba102 upstream.
+
+In the slow path of __rb_reserve_next() a nested event(s) can happen
+between evaluating the timestamp delta of the current event and updating
+write_stamp via local_cmpxchg(); in this case the delta is not valid
+anymore and it should be set to 0 (same timestamp as the interrupting
+event), since the event that we are currently processing is not the last
+event in the buffer.
+
+Link: https://lkml.kernel.org/r/X8IVJcp1gRE+FJCJ@xps-13-7390
+
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lwn.net/Articles/831207
+Fixes: a389d86f7fd0 ("ring-buffer: Have nested events still record running time stamp")
+Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/ring_buffer.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -3287,11 +3287,11 @@ __rb_reserve_next(struct ring_buffer_per
+ ts = rb_time_stamp(cpu_buffer->buffer);
+ barrier();
+ /*E*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
+- info->after < ts) {
++ info->after < ts &&
++ rb_time_cmpxchg(&cpu_buffer->write_stamp,
++ info->after, ts)) {
+ /* Nothing came after this event between C and E */
+ info->delta = ts - info->after;
+- (void)rb_time_cmpxchg(&cpu_buffer->write_stamp,
+- info->after, ts);
+ info->ts = ts;
+ } else {
+ /*
--- /dev/null
+From 55ea4cf403800af2ce6b125bc3d853117e0c0456 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Fri, 27 Nov 2020 11:20:58 -0500
+Subject: ring-buffer: Update write stamp with the correct ts
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 55ea4cf403800af2ce6b125bc3d853117e0c0456 upstream.
+
+The write stamp, used to calculate deltas between events, was updated with
+the stale "ts" value in the "info" structure, and not with the updated "ts"
+variable. This caused the deltas between events to be inaccurate, and when
+crossing into a new sub buffer, had time go backwards.
+
+Link: https://lkml.kernel.org/r/20201124223917.795844-1-elavila@google.com
+
+Cc: stable@vger.kernel.org
+Fixes: a389d86f7fd09 ("ring-buffer: Have nested events still record running time stamp")
+Reported-by: "J. Avila" <elavila@google.com>
+Tested-by: Daniel Mentz <danielmentz@google.com>
+Tested-by: Will McVicker <willmcvicker@google.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/ring_buffer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -3291,7 +3291,7 @@ __rb_reserve_next(struct ring_buffer_per
+ /* Nothing came after this event between C and E */
+ info->delta = ts - info->after;
+ (void)rb_time_cmpxchg(&cpu_buffer->write_stamp,
+- info->after, info->ts);
++ info->after, ts);
+ info->ts = ts;
+ } else {
+ /*
--- /dev/null
+From a2bd4097b3ec242f4de4924db463a9c94530e03a Mon Sep 17 00:00:00 2001
+From: Alexander Gordeev <agordeev@linux.ibm.com>
+Date: Thu, 26 Nov 2020 18:00:37 +0100
+Subject: s390/pci: fix CPU address in MSI for directed IRQ
+
+From: Alexander Gordeev <agordeev@linux.ibm.com>
+
+commit a2bd4097b3ec242f4de4924db463a9c94530e03a upstream.
+
+The directed MSIs are delivered to CPUs whose address is
+written to the MSI message address. The current code assumes
+that a CPU logical number (as it is seen by the kernel)
+is also the CPU address.
+
+The above assumption is not correct, as the CPU address
+is rather the value returned by STAP instruction. That
+value does not necessarily match the kernel logical CPU
+number.
+
+Fixes: e979ce7bced2 ("s390/pci: provide support for CPU directed interrupts")
+Cc: <stable@vger.kernel.org> # v5.2+
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Reviewed-by: Halil Pasic <pasic@linux.ibm.com>
+Reviewed-by: Niklas Schnelle <schnelle@linux.ibm.com>
+Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/pci/pci_irq.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+--- a/arch/s390/pci/pci_irq.c
++++ b/arch/s390/pci/pci_irq.c
+@@ -103,9 +103,10 @@ static int zpci_set_irq_affinity(struct
+ {
+ struct msi_desc *entry = irq_get_msi_desc(data->irq);
+ struct msi_msg msg = entry->msg;
++ int cpu_addr = smp_cpu_get_cpu_address(cpumask_first(dest));
+
+ msg.address_lo &= 0xff0000ff;
+- msg.address_lo |= (cpumask_first(dest) << 8);
++ msg.address_lo |= (cpu_addr << 8);
+ pci_write_msi_msg(data->irq, &msg);
+
+ return IRQ_SET_MASK_OK;
+@@ -238,6 +239,7 @@ int arch_setup_msi_irqs(struct pci_dev *
+ unsigned long bit;
+ struct msi_desc *msi;
+ struct msi_msg msg;
++ int cpu_addr;
+ int rc, irq;
+
+ zdev->aisb = -1UL;
+@@ -287,9 +289,15 @@ int arch_setup_msi_irqs(struct pci_dev *
+ handle_percpu_irq);
+ msg.data = hwirq - bit;
+ if (irq_delivery == DIRECTED) {
++ if (msi->affinity)
++ cpu = cpumask_first(&msi->affinity->mask);
++ else
++ cpu = 0;
++ cpu_addr = smp_cpu_get_cpu_address(cpu);
++
+ msg.address_lo = zdev->msi_addr & 0xff0000ff;
+- msg.address_lo |= msi->affinity ?
+- (cpumask_first(&msi->affinity->mask) << 8) : 0;
++ msg.address_lo |= (cpu_addr << 8);
++
+ for_each_possible_cpu(cpu) {
+ airq_iv_set_data(zpci_ibv[cpu], hwirq, irq);
+ }
--- /dev/null
+From 42f687038bcc34aa919e0e4c29b04e4cda3f6a79 Mon Sep 17 00:00:00 2001
+From: Suganath Prabu S <suganath-prabu.subramani@broadcom.com>
+Date: Wed, 25 Nov 2020 15:18:38 +0530
+Subject: scsi: mpt3sas: Fix ioctl timeout
+
+From: Suganath Prabu S <suganath-prabu.subramani@broadcom.com>
+
+commit 42f687038bcc34aa919e0e4c29b04e4cda3f6a79 upstream.
+
+Commit c1a6c5ac4278 ("scsi: mpt3sas: For NVME device, issue a protocol
+level reset") modified the ioctl path 'timeout' variable type to u8 from
+unsigned long, limiting the maximum timeout value that the driver can
+support to 255 seconds.
+
+If the management application is requesting a higher value the resulting
+timeout will be zero. The operation times out immediately and the ioctl
+request fails.
+
+Change datatype back to unsigned long.
+
+Link: https://lore.kernel.org/r/20201125094838.4340-1-suganath-prabu.subramani@broadcom.com
+Fixes: c1a6c5ac4278 ("scsi: mpt3sas: For NVME device, issue a protocol level reset")
+Cc: <stable@vger.kernel.org> #v4.18+
+Signed-off-by: Suganath Prabu S <suganath-prabu.subramani@broadcom.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/mpt3sas/mpt3sas_ctl.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+@@ -664,7 +664,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPT
+ Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
+ struct _pcie_device *pcie_device = NULL;
+ u16 smid;
+- u8 timeout;
++ unsigned long timeout;
+ u8 issue_reset;
+ u32 sz, sz_arg;
+ void *psge;
alsa-hda-realtek-add-new-codec-supported-for-alc897.patch
alsa-hda-realtek-fixed-dell-aio-wrong-sound-tone.patch
alsa-hda-generic-add-option-to-enforce-preferred_dacs-pairs.patch
+ring-buffer-update-write-stamp-with-the-correct-ts.patch
+ring-buffer-set-the-right-timestamp-in-the-slow-path-of-__rb_reserve_next.patch
+ring-buffer-always-check-to-put-back-before-stamp-when-crossing-pages.patch
+ftrace-fix-updating-ftrace_fl_tramp.patch
+ftrace-fix-dynamic_ftrace_with_direct_calls-dependency.patch
+cifs-allow-syscalls-to-be-restarted-in-__smb_send_rqst.patch
+cifs-fix-potential-use-after-free-in-cifs_echo_request.patch
+cifs-refactor-create_sd_buf-and-and-avoid-corrupting-the-buffer.patch
+cifs-add-null-check-for-ses-tcon_ipc.patch
+gfs2-upgrade-shared-glocks-for-atime-updates.patch
+gfs2-fix-deadlock-between-gfs2_-create_inode-inode_lookup-and-delete_work_func.patch
+s390-pci-fix-cpu-address-in-msi-for-directed-irq.patch
+i2c-imx-fix-reset-of-i2sr_ial-flag.patch
+i2c-imx-check-for-i2sr_ial-after-every-byte.patch
+i2c-imx-don-t-generate-stop-condition-if-arbitration-has-been-lost.patch
+tracing-fix-userstacktrace-option-for-instances.patch
+thunderbolt-fix-use-after-free-in-remove_unplugged_switch.patch
+drm-omap-sdi-fix-bridge-enable-disable.patch
+drm-amdgpu-vcn3.0-stall-dpg-when-wptr-rptr-reset.patch
+drm-amdgpu-vcn3.0-remove-old-dpg-workaround.patch
+drm-i915-gt-retain-default-context-state-across-shrinking.patch
+drm-i915-gt-limit-frequency-drop-to-rpe-on-parking.patch
+drm-i915-gt-program-mocs-63-for-cache-eviction-on-gen9.patch
+kvm-ppc-book3s-hv-xive-fix-vcpu-id-sanity-check.patch
+scsi-mpt3sas-fix-ioctl-timeout.patch
+io_uring-fix-recvmsg-setup-with-compat-buf-select.patch
+dm-writecache-advance-the-number-of-arguments-when-reporting-max_age.patch
+dm-writecache-fix-the-maximum-number-of-arguments.patch
+powerpc-64s-powernv-fix-memory-corruption-when-saving-slb-entries-on-mce.patch
+powerpc-pseries-pass-msi-affinity-to-irq_create_mapping.patch
+dm-fix-bug-with-rcu-locking-in-dm_blk_report_zones.patch
+dm-fix-double-rcu-unlock-in-dm_dax_zero_page_range-error-path.patch
+dm-remove-invalid-sparse-__acquires-and-__releases-annotations.patch
+x86-uprobes-do-not-use-prefixes.nbytes-when-looping-over-prefixes.bytes.patch
+coredump-fix-core_pattern-parse-error.patch
--- /dev/null
+From 600c0849cf86b75d86352f59745226273290986a Mon Sep 17 00:00:00 2001
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+Date: Wed, 18 Nov 2020 13:08:21 +0200
+Subject: thunderbolt: Fix use-after-free in remove_unplugged_switch()
+
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+
+commit 600c0849cf86b75d86352f59745226273290986a upstream.
+
+Paulian reported a crash that happens when a dock is unplugged during
+hibernation:
+
+[78436.228217] thunderbolt 0-1: device disconnected
+[78436.228365] BUG: kernel NULL pointer dereference, address: 00000000000001e0
+...
+[78436.228397] RIP: 0010:icm_free_unplugged_children+0x109/0x1a0
+...
+[78436.228432] Call Trace:
+[78436.228439] icm_rescan_work+0x24/0x30
+[78436.228444] process_one_work+0x1a3/0x3a0
+[78436.228449] worker_thread+0x30/0x370
+[78436.228454] ? process_one_work+0x3a0/0x3a0
+[78436.228457] kthread+0x13d/0x160
+[78436.228461] ? kthread_park+0x90/0x90
+[78436.228465] ret_from_fork+0x1f/0x30
+
+This happens because remove_unplugged_switch() calls tb_switch_remove()
+that releases the memory pointed by sw so the following lines reference
+to a memory that might be released already.
+
+Fix this by saving pointer to the parent device before calling
+tb_switch_remove().
+
+Reported-by: Paulian Bogdan Marinca <paulian@marinca.net>
+Fixes: 4f7c2e0d8765 ("thunderbolt: Make sure device runtime resume completes before taking domain lock")
+Cc: stable@vger.kernel.org
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/thunderbolt/icm.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/drivers/thunderbolt/icm.c
++++ b/drivers/thunderbolt/icm.c
+@@ -1973,7 +1973,9 @@ static int complete_rpm(struct device *d
+
+ static void remove_unplugged_switch(struct tb_switch *sw)
+ {
+- pm_runtime_get_sync(sw->dev.parent);
++ struct device *parent = get_device(sw->dev.parent);
++
++ pm_runtime_get_sync(parent);
+
+ /*
+ * Signal this and switches below for rpm_complete because
+@@ -1984,8 +1986,10 @@ static void remove_unplugged_switch(stru
+ bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm);
+ tb_switch_remove(sw);
+
+- pm_runtime_mark_last_busy(sw->dev.parent);
+- pm_runtime_put_autosuspend(sw->dev.parent);
++ pm_runtime_mark_last_busy(parent);
++ pm_runtime_put_autosuspend(parent);
++
++ put_device(parent);
+ }
+
+ static void icm_free_unplugged_children(struct tb_switch *sw)
--- /dev/null
+From bcee5278958802b40ee8b26679155a6d9231783e Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Fri, 4 Dec 2020 16:36:16 -0500
+Subject: tracing: Fix userstacktrace option for instances
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit bcee5278958802b40ee8b26679155a6d9231783e upstream.
+
+When the instances were able to use their own options, the userstacktrace
+option was left hardcoded for the top level. This made the instance
+userstacktrace option bascially into a nop, and will confuse users that set
+it, but nothing happens (I was confused when it happened to me!)
+
+Cc: stable@vger.kernel.org
+Fixes: 16270145ce6b ("tracing: Add trace options for core options to instances")
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -163,7 +163,8 @@ static union trace_eval_map_item *trace_
+ #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
+
+ int tracing_set_tracer(struct trace_array *tr, const char *buf);
+-static void ftrace_trace_userstack(struct trace_buffer *buffer,
++static void ftrace_trace_userstack(struct trace_array *tr,
++ struct trace_buffer *buffer,
+ unsigned long flags, int pc);
+
+ #define MAX_TRACER_SIZE 100
+@@ -2729,7 +2730,7 @@ void trace_buffer_unlock_commit_regs(str
+ * two. They are not that meaningful.
+ */
+ ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
+- ftrace_trace_userstack(buffer, flags, pc);
++ ftrace_trace_userstack(tr, buffer, flags, pc);
+ }
+
+ /*
+@@ -3038,13 +3039,14 @@ EXPORT_SYMBOL_GPL(trace_dump_stack);
+ static DEFINE_PER_CPU(int, user_stack_count);
+
+ static void
+-ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
++ftrace_trace_userstack(struct trace_array *tr,
++ struct trace_buffer *buffer, unsigned long flags, int pc)
+ {
+ struct trace_event_call *call = &event_user_stack;
+ struct ring_buffer_event *event;
+ struct userstack_entry *entry;
+
+- if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
++ if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
+ return;
+
+ /*
+@@ -3083,7 +3085,8 @@ ftrace_trace_userstack(struct trace_buff
+ preempt_enable();
+ }
+ #else /* CONFIG_USER_STACKTRACE_SUPPORT */
+-static void ftrace_trace_userstack(struct trace_buffer *buffer,
++static void ftrace_trace_userstack(struct trace_array *tr,
++ struct trace_buffer *buffer,
+ unsigned long flags, int pc)
+ {
+ }
--- /dev/null
+From 4e9a5ae8df5b3365183150f6df49e49dece80d8c Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Thu, 3 Dec 2020 13:50:37 +0900
+Subject: x86/uprobes: Do not use prefixes.nbytes when looping over prefixes.bytes
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit 4e9a5ae8df5b3365183150f6df49e49dece80d8c upstream.
+
+Since insn.prefixes.nbytes can be bigger than the size of
+insn.prefixes.bytes[] when a prefix is repeated, the proper check must
+be
+
+ insn.prefixes.bytes[i] != 0 and i < 4
+
+instead of using insn.prefixes.nbytes.
+
+Introduce a for_each_insn_prefix() macro for this purpose. Debugged by
+Kees Cook <keescook@chromium.org>.
+
+ [ bp: Massage commit message, sync with the respective header in tools/
+ and drop "we". ]
+
+Fixes: 2b1444983508 ("uprobes, mm, x86: Add the ability to install and remove uprobes breakpoints")
+Reported-by: syzbot+9b64b619f10f19d19a7c@syzkaller.appspotmail.com
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/160697103739.3146288.7437620795200799020.stgit@devnote2
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/insn.h | 15 +++++++++++++++
+ arch/x86/kernel/uprobes.c | 10 ++++++----
+ tools/arch/x86/include/asm/insn.h | 15 +++++++++++++++
+ 3 files changed, 36 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/include/asm/insn.h
++++ b/arch/x86/include/asm/insn.h
+@@ -201,6 +201,21 @@ static inline int insn_offset_immediate(
+ return insn_offset_displacement(insn) + insn->displacement.nbytes;
+ }
+
++/**
++ * for_each_insn_prefix() -- Iterate prefixes in the instruction
++ * @insn: Pointer to struct insn.
++ * @idx: Index storage.
++ * @prefix: Prefix byte.
++ *
++ * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix
++ * and the index is stored in @idx (note that this @idx is just for a cursor,
++ * do not change it.)
++ * Since prefixes.nbytes can be bigger than 4 if some prefixes
++ * are repeated, it cannot be used for looping over the prefixes.
++ */
++#define for_each_insn_prefix(insn, idx, prefix) \
++ for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++)
++
+ #define POP_SS_OPCODE 0x1f
+ #define MOV_SREG_OPCODE 0x8e
+
+--- a/arch/x86/kernel/uprobes.c
++++ b/arch/x86/kernel/uprobes.c
+@@ -255,12 +255,13 @@ static volatile u32 good_2byte_insns[256
+
+ static bool is_prefix_bad(struct insn *insn)
+ {
++ insn_byte_t p;
+ int i;
+
+- for (i = 0; i < insn->prefixes.nbytes; i++) {
++ for_each_insn_prefix(insn, i, p) {
+ insn_attr_t attr;
+
+- attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
++ attr = inat_get_opcode_attribute(p);
+ switch (attr) {
+ case INAT_MAKE_PREFIX(INAT_PFX_ES):
+ case INAT_MAKE_PREFIX(INAT_PFX_CS):
+@@ -715,6 +716,7 @@ static const struct uprobe_xol_ops push_
+ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
+ {
+ u8 opc1 = OPCODE1(insn);
++ insn_byte_t p;
+ int i;
+
+ switch (opc1) {
+@@ -746,8 +748,8 @@ static int branch_setup_xol_ops(struct a
+ * Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix.
+ * No one uses these insns, reject any branch insns with such prefix.
+ */
+- for (i = 0; i < insn->prefixes.nbytes; i++) {
+- if (insn->prefixes.bytes[i] == 0x66)
++ for_each_insn_prefix(insn, i, p) {
++ if (p == 0x66)
+ return -ENOTSUPP;
+ }
+
+--- a/tools/arch/x86/include/asm/insn.h
++++ b/tools/arch/x86/include/asm/insn.h
+@@ -201,6 +201,21 @@ static inline int insn_offset_immediate(
+ return insn_offset_displacement(insn) + insn->displacement.nbytes;
+ }
+
++/**
++ * for_each_insn_prefix() -- Iterate prefixes in the instruction
++ * @insn: Pointer to struct insn.
++ * @idx: Index storage.
++ * @prefix: Prefix byte.
++ *
++ * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix
++ * and the index is stored in @idx (note that this @idx is just for a cursor,
++ * do not change it.)
++ * Since prefixes.nbytes can be bigger than 4 if some prefixes
++ * are repeated, it cannot be used for looping over the prefixes.
++ */
++#define for_each_insn_prefix(insn, idx, prefix) \
++ for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++)
++
+ #define POP_SS_OPCODE 0x1f
+ #define MOV_SREG_OPCODE 0x8e
+