--- /dev/null
+From bb0c18f726150f446238a46800665d16b1fc718b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 15 Aug 2020 03:21:03 +0300
+Subject: ALSA: usb-audio: Add capture support for Saffire 6 (USB 1.1)
+
+From: Alexander Tsoy <alexander@tsoy.me>
+
+[ Upstream commit 470757f5b3a46bd85741bb0d8c1fd3f21048a2af ]
+
+Capture and playback endpoints on Saffire 6 (USB 1.1) resides on the same
+interface. This was not supported by the composite quirk back in the day
+when initial support for this device was added, thus only playback was
+enabled until now.
+
+Fixes: 11e424e88bd4 ("ALSA: usb-audio: Add support for Focusrite Saffire 6 USB")
+Signed-off-by: Alexander Tsoy <alexander@tsoy.me>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20200815002103.29247-1-alexander@tsoy.me
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/quirks-table.h | 30 ++++++++++++++++++++++++++++++
+ 1 file changed, 30 insertions(+)
+
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 1573229d8cf4c..2d335fdae28ed 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -2695,6 +2695,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
++ {
++ .ifnum = 0,
++ .type = QUIRK_AUDIO_STANDARD_MIXER,
++ },
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+@@ -2707,6 +2711,32 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .attributes = UAC_EP_CS_ATTR_SAMPLE_RATE,
+ .endpoint = 0x01,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC,
++ .datainterval = 1,
++ .maxpacksize = 0x024c,
++ .rates = SNDRV_PCM_RATE_44100 |
++ SNDRV_PCM_RATE_48000,
++ .rate_min = 44100,
++ .rate_max = 48000,
++ .nr_rates = 2,
++ .rate_table = (unsigned int[]) {
++ 44100, 48000
++ }
++ }
++ },
++ {
++ .ifnum = 0,
++ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
++ .data = &(const struct audioformat) {
++ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
++ .channels = 2,
++ .iface = 0,
++ .altsetting = 1,
++ .altset_idx = 1,
++ .attributes = 0,
++ .endpoint = 0x82,
++ .ep_attr = USB_ENDPOINT_XFER_ISOC,
++ .datainterval = 1,
++ .maxpacksize = 0x0126,
+ .rates = SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000,
+ .rate_min = 44100,
+--
+2.25.1
+
--- /dev/null
+From cff0ac37b546635ccc05c1b4e42f3916e00efc7e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Mar 2020 12:41:07 +0000
+Subject: btrfs: factor out inode items copy loop from btrfs_log_inode()
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit da447009a25609327309f695b244710f12794471 ]
+
+The function btrfs_log_inode() is quite large and so is its loop which
+iterates the inode items from the fs/subvolume tree and copies them into
+a log tree. Because this is a large loop inside a very large function
+and because an upcoming patch in this series needs to add some more logic
+inside that loop, move the loop into a helper function to make it a bit
+more manageable.
+
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-log.c | 274 ++++++++++++++++++++++----------------------
+ 1 file changed, 138 insertions(+), 136 deletions(-)
+
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 3c090549ed07d..cc407c68d356f 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -4994,6 +4994,138 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
+ return ret;
+ }
+
++static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
++ struct btrfs_inode *inode,
++ struct btrfs_key *min_key,
++ const struct btrfs_key *max_key,
++ struct btrfs_path *path,
++ struct btrfs_path *dst_path,
++ const u64 logged_isize,
++ const bool recursive_logging,
++ const int inode_only,
++ struct btrfs_log_ctx *ctx,
++ bool *need_log_inode_item)
++{
++ struct btrfs_root *root = inode->root;
++ int ins_start_slot = 0;
++ int ins_nr = 0;
++ int ret;
++
++ while (1) {
++ ret = btrfs_search_forward(root, min_key, path, trans->transid);
++ if (ret < 0)
++ return ret;
++ if (ret > 0) {
++ ret = 0;
++ break;
++ }
++again:
++ /* Note, ins_nr might be > 0 here, cleanup outside the loop */
++ if (min_key->objectid != max_key->objectid)
++ break;
++ if (min_key->type > max_key->type)
++ break;
++
++ if (min_key->type == BTRFS_INODE_ITEM_KEY)
++ *need_log_inode_item = false;
++
++ if ((min_key->type == BTRFS_INODE_REF_KEY ||
++ min_key->type == BTRFS_INODE_EXTREF_KEY) &&
++ inode->generation == trans->transid &&
++ !recursive_logging) {
++ u64 other_ino = 0;
++ u64 other_parent = 0;
++
++ ret = btrfs_check_ref_name_override(path->nodes[0],
++ path->slots[0], min_key, inode,
++ &other_ino, &other_parent);
++ if (ret < 0) {
++ return ret;
++ } else if (ret > 0 && ctx &&
++ other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
++ if (ins_nr > 0) {
++ ins_nr++;
++ } else {
++ ins_nr = 1;
++ ins_start_slot = path->slots[0];
++ }
++ ret = copy_items(trans, inode, dst_path, path,
++ ins_start_slot, ins_nr,
++ inode_only, logged_isize);
++ if (ret < 0)
++ return ret;
++ ins_nr = 0;
++
++ ret = log_conflicting_inodes(trans, root, path,
++ ctx, other_ino, other_parent);
++ if (ret)
++ return ret;
++ btrfs_release_path(path);
++ goto next_key;
++ }
++ }
++
++ /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
++ if (min_key->type == BTRFS_XATTR_ITEM_KEY) {
++ if (ins_nr == 0)
++ goto next_slot;
++ ret = copy_items(trans, inode, dst_path, path,
++ ins_start_slot,
++ ins_nr, inode_only, logged_isize);
++ if (ret < 0)
++ return ret;
++ ins_nr = 0;
++ goto next_slot;
++ }
++
++ if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
++ ins_nr++;
++ goto next_slot;
++ } else if (!ins_nr) {
++ ins_start_slot = path->slots[0];
++ ins_nr = 1;
++ goto next_slot;
++ }
++
++ ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
++ ins_nr, inode_only, logged_isize);
++ if (ret < 0)
++ return ret;
++ ins_nr = 1;
++ ins_start_slot = path->slots[0];
++next_slot:
++ path->slots[0]++;
++ if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
++ btrfs_item_key_to_cpu(path->nodes[0], min_key,
++ path->slots[0]);
++ goto again;
++ }
++ if (ins_nr) {
++ ret = copy_items(trans, inode, dst_path, path,
++ ins_start_slot, ins_nr, inode_only,
++ logged_isize);
++ if (ret < 0)
++ return ret;
++ ins_nr = 0;
++ }
++ btrfs_release_path(path);
++next_key:
++ if (min_key->offset < (u64)-1) {
++ min_key->offset++;
++ } else if (min_key->type < max_key->type) {
++ min_key->type++;
++ min_key->offset = 0;
++ } else {
++ break;
++ }
++ }
++ if (ins_nr)
++ ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
++ ins_nr, inode_only, logged_isize);
++
++ return ret;
++}
++
+ /* log a single inode in the tree log.
+ * At least one parent directory for this inode must exist in the tree
+ * or be logged already.
+@@ -5023,9 +5155,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_root *log = root->log_root;
+ int err = 0;
+ int ret;
+- int nritems;
+- int ins_start_slot = 0;
+- int ins_nr;
+ bool fast_search = false;
+ u64 ino = btrfs_ino(inode);
+ struct extent_map_tree *em_tree = &inode->extent_tree;
+@@ -5156,139 +5285,12 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
+ goto out_unlock;
+ }
+
+- while (1) {
+- ins_nr = 0;
+- ret = btrfs_search_forward(root, &min_key,
+- path, trans->transid);
+- if (ret < 0) {
+- err = ret;
+- goto out_unlock;
+- }
+- if (ret != 0)
+- break;
+-again:
+- /* note, ins_nr might be > 0 here, cleanup outside the loop */
+- if (min_key.objectid != ino)
+- break;
+- if (min_key.type > max_key.type)
+- break;
+-
+- if (min_key.type == BTRFS_INODE_ITEM_KEY)
+- need_log_inode_item = false;
+-
+- if ((min_key.type == BTRFS_INODE_REF_KEY ||
+- min_key.type == BTRFS_INODE_EXTREF_KEY) &&
+- inode->generation == trans->transid &&
+- !recursive_logging) {
+- u64 other_ino = 0;
+- u64 other_parent = 0;
+-
+- ret = btrfs_check_ref_name_override(path->nodes[0],
+- path->slots[0], &min_key, inode,
+- &other_ino, &other_parent);
+- if (ret < 0) {
+- err = ret;
+- goto out_unlock;
+- } else if (ret > 0 && ctx &&
+- other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
+- if (ins_nr > 0) {
+- ins_nr++;
+- } else {
+- ins_nr = 1;
+- ins_start_slot = path->slots[0];
+- }
+- ret = copy_items(trans, inode, dst_path, path,
+- ins_start_slot,
+- ins_nr, inode_only,
+- logged_isize);
+- if (ret < 0) {
+- err = ret;
+- goto out_unlock;
+- }
+- ins_nr = 0;
+-
+- err = log_conflicting_inodes(trans, root, path,
+- ctx, other_ino, other_parent);
+- if (err)
+- goto out_unlock;
+- btrfs_release_path(path);
+- goto next_key;
+- }
+- }
+-
+- /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
+- if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
+- if (ins_nr == 0)
+- goto next_slot;
+- ret = copy_items(trans, inode, dst_path, path,
+- ins_start_slot,
+- ins_nr, inode_only, logged_isize);
+- if (ret < 0) {
+- err = ret;
+- goto out_unlock;
+- }
+- ins_nr = 0;
+- goto next_slot;
+- }
+-
+- if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
+- ins_nr++;
+- goto next_slot;
+- } else if (!ins_nr) {
+- ins_start_slot = path->slots[0];
+- ins_nr = 1;
+- goto next_slot;
+- }
+-
+- ret = copy_items(trans, inode, dst_path, path,
+- ins_start_slot, ins_nr, inode_only,
+- logged_isize);
+- if (ret < 0) {
+- err = ret;
+- goto out_unlock;
+- }
+- ins_nr = 1;
+- ins_start_slot = path->slots[0];
+-next_slot:
+-
+- nritems = btrfs_header_nritems(path->nodes[0]);
+- path->slots[0]++;
+- if (path->slots[0] < nritems) {
+- btrfs_item_key_to_cpu(path->nodes[0], &min_key,
+- path->slots[0]);
+- goto again;
+- }
+- if (ins_nr) {
+- ret = copy_items(trans, inode, dst_path, path,
+- ins_start_slot,
+- ins_nr, inode_only, logged_isize);
+- if (ret < 0) {
+- err = ret;
+- goto out_unlock;
+- }
+- ins_nr = 0;
+- }
+- btrfs_release_path(path);
+-next_key:
+- if (min_key.offset < (u64)-1) {
+- min_key.offset++;
+- } else if (min_key.type < max_key.type) {
+- min_key.type++;
+- min_key.offset = 0;
+- } else {
+- break;
+- }
+- }
+- if (ins_nr) {
+- ret = copy_items(trans, inode, dst_path, path,
+- ins_start_slot, ins_nr, inode_only,
+- logged_isize);
+- if (ret < 0) {
+- err = ret;
+- goto out_unlock;
+- }
+- ins_nr = 0;
+- }
++ err = copy_inode_items_to_log(trans, inode, &min_key, &max_key,
++ path, dst_path, logged_isize,
++ recursive_logging, inode_only, ctx,
++ &need_log_inode_item);
++ if (err)
++ goto out_unlock;
+
+ btrfs_release_path(path);
+ btrfs_release_path(dst_path);
+--
+2.25.1
+
--- /dev/null
+From 9975a0bea696268fecab6b2e19bc03fcfe14cba0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Jul 2020 12:32:20 +0100
+Subject: btrfs: only commit delayed items at fsync if we are logging a
+ directory
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 5aa7d1a7f4a2f8ca6be1f32415e9365d026e8fa7 ]
+
+When logging an inode we are committing its delayed items if either the
+inode is a directory or if it is a new inode, created in the current
+transaction.
+
+We need to do it for directories, since new directory indexes are stored
+as delayed items of the inode and when logging a directory we need to be
+able to access all indexes from the fs/subvolume tree in order to figure
+out which index ranges need to be logged.
+
+However for new inodes that are not directories, we do not need to do it
+because the only type of delayed item they can have is the inode item, and
+we are guaranteed to always log an up to date version of the inode item:
+
+*) for a full fsync we do it by committing the delayed inode and then
+ copying the item from the fs/subvolume tree with
+ copy_inode_items_to_log();
+
+*) for a fast fsync we always log the inode item based on the contents of
+ the in-memory struct btrfs_inode. We guarantee this is always done since
+ commit e4545de5b035c7 ("Btrfs: fix fsync data loss after append write").
+
+So stop running delayed items for a new inodes that are not directories,
+since that forces committing the delayed inode into the fs/subvolume tree,
+wasting time and adding contention to the tree when a full fsync is not
+required. We will only do it in case a fast fsync is needed.
+
+This patch is part of a series that has the following patches:
+
+1/4 btrfs: only commit the delayed inode when doing a full fsync
+2/4 btrfs: only commit delayed items at fsync if we are logging a directory
+3/4 btrfs: stop incremening log_batch for the log root tree when syncing log
+4/4 btrfs: remove no longer needed use of log_writers for the log root tree
+
+After the entire patchset applied I saw about 12% decrease on max latency
+reported by dbench. The test was done on a qemu vm, with 8 cores, 16Gb of
+ram, using kvm and using a raw NVMe device directly (no intermediary fs on
+the host). The test was invoked like the following:
+
+ mkfs.btrfs -f /dev/sdk
+ mount -o ssd -o nospace_cache /dev/sdk /mnt/sdk
+ dbench -D /mnt/sdk -t 300 8
+ umount /mnt/dsk
+
+CC: stable@vger.kernel.org # 5.4+
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-log.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 0525b191843e1..709026698d915 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -5147,7 +5147,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
+ const loff_t end,
+ struct btrfs_log_ctx *ctx)
+ {
+- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_path *path;
+ struct btrfs_path *dst_path;
+ struct btrfs_key min_key;
+@@ -5190,15 +5189,17 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
+ max_key.offset = (u64)-1;
+
+ /*
+- * Only run delayed items if we are a dir or a new file.
++ * Only run delayed items if we are a directory. We want to make sure
++ * all directory indexes hit the fs/subvolume tree so we can find them
++ * and figure out which index ranges have to be logged.
++ *
+ * Otherwise commit the delayed inode only if the full sync flag is set,
+ * as we want to make sure an up to date version is in the subvolume
+ * tree so copy_inode_items_to_log() / copy_items() can find it and copy
+ * it to the log tree. For a non full sync, we always log the inode item
+ * based on the in-memory struct btrfs_inode which is always up to date.
+ */
+- if (S_ISDIR(inode->vfs_inode.i_mode) ||
+- inode->generation > fs_info->last_trans_committed)
++ if (S_ISDIR(inode->vfs_inode.i_mode))
+ ret = btrfs_commit_inode_delayed_items(trans, inode);
+ else if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
+ ret = btrfs_commit_inode_delayed_inode(inode);
+--
+2.25.1
+
--- /dev/null
+From cfc29caaaf8e90a6ab62bb7d4b4bba8bf8f0df65 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Jul 2020 12:31:59 +0100
+Subject: btrfs: only commit the delayed inode when doing a full fsync
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 8c8648dd1f6d62aeb912deeb788b6ac33cb782e7 ]
+
+Commit 2c2c452b0cafdc ("Btrfs: fix fsync when extend references are added
+to an inode") forced a commit of the delayed inode when logging an inode
+in order to ensure we would end up logging the inode item during a full
+fsync. By committing the delayed inode, we updated the inode item in the
+fs/subvolume tree and then later when copying items from leafs modified in
+the current transaction into the log tree (with copy_inode_items_to_log())
+we ended up copying the inode item from the fs/subvolume tree into the log
+tree. Logging an up to date version of the inode item is required to make
+sure at log replay time we get the link count fixup triggered among other
+things (replay xattr deletes, etc). The test case generic/040 from fstests
+exercises the bug which that commit fixed.
+
+However for a fast fsync we don't need to commit the delayed inode because
+we always log an up to date version of the inode item based on the struct
+btrfs_inode we have in-memory. We started doing this for fast fsyncs since
+commit e4545de5b035c7 ("Btrfs: fix fsync data loss after append write").
+
+So just stop committing the delayed inode if we are doing a fast fsync,
+we are only wasting time and adding contention on fs/subvolume tree.
+
+This patch is part of a series that has the following patches:
+
+1/4 btrfs: only commit the delayed inode when doing a full fsync
+2/4 btrfs: only commit delayed items at fsync if we are logging a directory
+3/4 btrfs: stop incremening log_batch for the log root tree when syncing log
+4/4 btrfs: remove no longer needed use of log_writers for the log root tree
+
+After the entire patchset applied I saw about 12% decrease on max latency
+reported by dbench. The test was done on a qemu vm, with 8 cores, 16Gb of
+ram, using kvm and using a raw NVMe device directly (no intermediary fs on
+the host). The test was invoked like the following:
+
+ mkfs.btrfs -f /dev/sdk
+ mount -o ssd -o nospace_cache /dev/sdk /mnt/sdk
+ dbench -D /mnt/sdk -t 300 8
+ umount /mnt/dsk
+
+CC: stable@vger.kernel.org # 5.4+
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-log.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index cc407c68d356f..0525b191843e1 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -5154,7 +5154,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_key max_key;
+ struct btrfs_root *log = root->log_root;
+ int err = 0;
+- int ret;
++ int ret = 0;
+ bool fast_search = false;
+ u64 ino = btrfs_ino(inode);
+ struct extent_map_tree *em_tree = &inode->extent_tree;
+@@ -5191,14 +5191,16 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
+
+ /*
+ * Only run delayed items if we are a dir or a new file.
+- * Otherwise commit the delayed inode only, which is needed in
+- * order for the log replay code to mark inodes for link count
+- * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
++ * Otherwise commit the delayed inode only if the full sync flag is set,
++ * as we want to make sure an up to date version is in the subvolume
++ * tree so copy_inode_items_to_log() / copy_items() can find it and copy
++ * it to the log tree. For a non full sync, we always log the inode item
++ * based on the in-memory struct btrfs_inode which is always up to date.
+ */
+ if (S_ISDIR(inode->vfs_inode.i_mode) ||
+ inode->generation > fs_info->last_trans_committed)
+ ret = btrfs_commit_inode_delayed_items(trans, inode);
+- else
++ else if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
+ ret = btrfs_commit_inode_delayed_inode(inode);
+
+ if (ret) {
+--
+2.25.1
+
--- /dev/null
+From cde23cc6ede5be8b7b506f3d3bc4d79c9b89e207 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Aug 2020 18:32:03 -0700
+Subject: cma: don't quit at first error when activating reserved areas
+
+From: Mike Kravetz <mike.kravetz@oracle.com>
+
+[ Upstream commit 3a5139f1c5bb76d69756fb8f13fffa173e261153 ]
+
+The routine cma_init_reserved_areas is designed to activate all
+reserved cma areas. It quits when it first encounters an error.
+This can leave some areas in a state where they are reserved but
+not activated. There is no feedback to code which performed the
+reservation. Attempting to allocate memory from areas in such a
+state will result in a BUG.
+
+Modify cma_init_reserved_areas to always attempt to activate all
+areas. The called routine, cma_activate_area is responsible for
+leaving the area in a valid state. No one is making active use
+of returned error codes, so change the routine to void.
+
+How to reproduce: This example uses kernelcore, hugetlb and cma
+as an easy way to reproduce. However, this is a more general cma
+issue.
+
+Two node x86 VM 16GB total, 8GB per node
+Kernel command line parameters, kernelcore=4G hugetlb_cma=8G
+Related boot time messages,
+ hugetlb_cma: reserve 8192 MiB, up to 4096 MiB per node
+ cma: Reserved 4096 MiB at 0x0000000100000000
+ hugetlb_cma: reserved 4096 MiB on node 0
+ cma: Reserved 4096 MiB at 0x0000000300000000
+ hugetlb_cma: reserved 4096 MiB on node 1
+ cma: CMA area hugetlb could not be activated
+
+ # echo 8 > /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000000
+ #PF: supervisor read access in kernel mode
+ #PF: error_code(0x0000) - not-present page
+ PGD 0 P4D 0
+ Oops: 0000 [#1] SMP PTI
+ ...
+ Call Trace:
+ bitmap_find_next_zero_area_off+0x51/0x90
+ cma_alloc+0x1a5/0x310
+ alloc_fresh_huge_page+0x78/0x1a0
+ alloc_pool_huge_page+0x6f/0xf0
+ set_max_huge_pages+0x10c/0x250
+ nr_hugepages_store_common+0x92/0x120
+ ? __kmalloc+0x171/0x270
+ kernfs_fop_write+0xc1/0x1a0
+ vfs_write+0xc7/0x1f0
+ ksys_write+0x5f/0xe0
+ do_syscall_64+0x4d/0x90
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Fixes: c64be2bb1c6e ("drivers: add Contiguous Memory Allocator")
+Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Roman Gushchin <guro@fb.com>
+Acked-by: Barry Song <song.bao.hua@hisilicon.com>
+Cc: Marek Szyprowski <m.szyprowski@samsung.com>
+Cc: Michal Nazarewicz <mina86@mina86.com>
+Cc: Kyungmin Park <kyungmin.park@samsung.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200730163123.6451-1-mike.kravetz@oracle.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/cma.c | 23 +++++++++--------------
+ 1 file changed, 9 insertions(+), 14 deletions(-)
+
+diff --git a/mm/cma.c b/mm/cma.c
+index be55d1988c675..7de520c0a1db6 100644
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -93,17 +93,15 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
+ mutex_unlock(&cma->lock);
+ }
+
+-static int __init cma_activate_area(struct cma *cma)
++static void __init cma_activate_area(struct cma *cma)
+ {
+ unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
+ unsigned i = cma->count >> pageblock_order;
+ struct zone *zone;
+
+ cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
+- if (!cma->bitmap) {
+- cma->count = 0;
+- return -ENOMEM;
+- }
++ if (!cma->bitmap)
++ goto out_error;
+
+ WARN_ON_ONCE(!pfn_valid(pfn));
+ zone = page_zone(pfn_to_page(pfn));
+@@ -133,25 +131,22 @@ static int __init cma_activate_area(struct cma *cma)
+ spin_lock_init(&cma->mem_head_lock);
+ #endif
+
+- return 0;
++ return;
+
+ not_in_zone:
+- pr_err("CMA area %s could not be activated\n", cma->name);
+ bitmap_free(cma->bitmap);
++out_error:
+ cma->count = 0;
+- return -EINVAL;
++ pr_err("CMA area %s could not be activated\n", cma->name);
++ return;
+ }
+
+ static int __init cma_init_reserved_areas(void)
+ {
+ int i;
+
+- for (i = 0; i < cma_area_count; i++) {
+- int ret = cma_activate_area(&cma_areas[i]);
+-
+- if (ret)
+- return ret;
+- }
++ for (i = 0; i < cma_area_count; i++)
++ cma_activate_area(&cma_areas[i]);
+
+ return 0;
+ }
+--
+2.25.1
+
--- /dev/null
+From 19289c2bceec65a4f5424e32409c06390ec6a06a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Jul 2020 19:09:32 +0200
+Subject: cpufreq: intel_pstate: Fix EPP setting via sysfs in active mode
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+[ Upstream commit de002c55cadfc2f6cdf0ed427526f6085d240238 ]
+
+Because intel_pstate_set_energy_pref_index() reads and writes the
+MSR_HWP_REQUEST register without using the cached value of it used by
+intel_pstate_hwp_boost_up() and intel_pstate_hwp_boost_down(), those
+functions may overwrite the value written by it and so the EPP value
+set via sysfs may be lost.
+
+To avoid that, make intel_pstate_set_energy_pref_index() take the
+cached value of MSR_HWP_REQUEST just like the other two routines
+mentioned above and update it with the new EPP value coming from
+user space in addition to updating the MSR.
+
+Note that the MSR itself still needs to be updated too in case
+hwp_boost is unset or the boosting mechanism is not active at the
+EPP change time.
+
+Fixes: e0efd5be63e8 ("cpufreq: intel_pstate: Add HWP boost utility and sched util hooks")
+Reported-by: Francisco Jerez <currojerez@riseup.net>
+Cc: 4.18+ <stable@vger.kernel.org> # 4.18+: 3da97d4db8ee cpufreq: intel_pstate: Rearrange ...
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Francisco Jerez <currojerez@riseup.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cpufreq/intel_pstate.c | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 53dc0fd6f6d3c..270ad1b5ae61b 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -649,11 +649,12 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
+ mutex_lock(&intel_pstate_limits_lock);
+
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
+- u64 value;
+-
+- ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
+- if (ret)
+- goto return_pref;
++ /*
++ * Use the cached HWP Request MSR value, because the register
++ * itself may be updated by intel_pstate_hwp_boost_up() or
++ * intel_pstate_hwp_boost_down() at any time.
++ */
++ u64 value = READ_ONCE(cpu_data->hwp_req_cached);
+
+ value &= ~GENMASK_ULL(31, 24);
+
+@@ -661,6 +662,12 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
+ epp = epp_values[pref_index - 1];
+
+ value |= (u64)epp << 24;
++ /*
++ * The only other updater of hwp_req_cached in the active mode,
++ * intel_pstate_hwp_set(), is called under the same lock as this
++ * function, so it cannot run in parallel with the update below.
++ */
++ WRITE_ONCE(cpu_data->hwp_req_cached, value);
+ ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
+ } else {
+ if (epp == -EINVAL)
+--
+2.25.1
+
--- /dev/null
+From cf612356eb70fd9b3d040c340633ae212df393c8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Jul 2020 12:37:35 -0400
+Subject: drm/amd/display: Add additional config guards for DCN
+
+From: Aurabindo Pillai <aurabindo.pillai@amd.com>
+
+[ Upstream commit e10517b3cb93f90c8a790def6ae884d1e2b65ee7 ]
+
+[Why&How]
+
+Fix build error by protecting code with config guard
+to enable building amdgpu without CONFIG_DRM_AMD_DC_DCN
+enabled. This option is disabled by default for allmodconfig.
+
+Signed-off-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
+Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
+Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 5bde49a13f8c7..3eb77f343bbfa 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -7229,7 +7229,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
+ *out_type = update_type;
+ return ret;
+ }
+-
++#if defined(CONFIG_DRM_AMD_DC_DCN)
+ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
+ {
+ struct drm_connector *connector;
+@@ -7252,6 +7252,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
+
+ return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
+ }
++#endif
+
+ /**
+ * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
+@@ -7305,6 +7306,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ if (ret)
+ goto fail;
+
++#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (adev->asic_type >= CHIP_NAVI10) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+@@ -7314,7 +7316,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ }
+ }
+ }
+-
++#endif
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+ !new_crtc_state->color_mgmt_changed &&
+--
+2.25.1
+
--- /dev/null
+From e38617b9173c96686e2db15095c9cbcd0ac3ee24 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Jun 2020 17:55:29 +0800
+Subject: drm/amd/display: Fix dmesg warning from setting abm level
+
+From: Stylon Wang <stylon.wang@amd.com>
+
+[ Upstream commit c5892a10218214d729699ab61bad6fc109baf0ce ]
+
+[Why]
+Setting abm level does not correctly update CRTC state. As a result
+no surface update is added to dc stream state and triggers warning.
+
+[How]
+Correctly update CRTC state when setting abm level property.
+
+CC: Stable <stable@vger.kernel.org>
+Signed-off-by: Stylon Wang <stylon.wang@amd.com>
+Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
+Acked-by: Eryk Brol <eryk.brol@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 23 +++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 3eb77f343bbfa..247f53d41993d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -7306,6 +7306,29 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ if (ret)
+ goto fail;
+
++ /* Check connector changes */
++ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
++ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
++ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
++
++ /* Skip connectors that are disabled or part of modeset already. */
++ if (!old_con_state->crtc && !new_con_state->crtc)
++ continue;
++
++ if (!new_con_state->crtc)
++ continue;
++
++ new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
++ if (IS_ERR(new_crtc_state)) {
++ ret = PTR_ERR(new_crtc_state);
++ goto fail;
++ }
++
++ if (dm_old_con_state->abm_level !=
++ dm_new_con_state->abm_level)
++ new_crtc_state->connectors_changed = true;
++ }
++
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (adev->asic_type >= CHIP_NAVI10) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+--
+2.25.1
+
--- /dev/null
+From 60284bc7a74788adf8dcc48f3215a320ae570672 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 Nov 2019 09:14:15 -0500
+Subject: drm/amd/display: Trigger modesets on MST DSC connectors
+
+From: Mikita Lipski <mikita.lipski@amd.com>
+
+[ Upstream commit 44be939ff7ac5858f0dbd8a2a4af1fe198e14db1 ]
+
+Whenever a connector on an MST network is attached, detached, or
+undergoes a modeset, the DSC configs for each stream on that
+topology will be recalculated. This can change their required
+bandwidth, requiring a full reprogramming, as though a modeset
+was performed, even if that stream did not change timing.
+
+Therefore, whenever a crtc has drm_atomic_crtc_needs_modeset,
+for each crtc that shares a MST topology with that stream and
+supports DSC, add that crtc (and all affected connectors and
+planes) to the atomic state and set mode_changed on its state
+
+v2: Do this check only on Navi and before adding connectors
+and planes on modesetting crtcs
+
+v3: Call the drm_dp_mst_add_affected_dsc_crtcs() to update
+all affected CRTCs
+
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+Signed-off-by: David Francis <David.Francis@amd.com>
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 33 +++++++++++++++++++
+ 1 file changed, 33 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 2c0eb7140ca0e..5bde49a13f8c7 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -7230,6 +7230,29 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
+ return ret;
+ }
+
++static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
++{
++ struct drm_connector *connector;
++ struct drm_connector_state *conn_state;
++ struct amdgpu_dm_connector *aconnector = NULL;
++ int i;
++ for_each_new_connector_in_state(state, connector, conn_state, i) {
++ if (conn_state->crtc != crtc)
++ continue;
++
++ aconnector = to_amdgpu_dm_connector(connector);
++ if (!aconnector->port || !aconnector->mst_port)
++ aconnector = NULL;
++ else
++ break;
++ }
++
++ if (!aconnector)
++ return 0;
++
++ return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
++}
++
+ /**
+ * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
+ * @dev: The DRM device
+@@ -7282,6 +7305,16 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ if (ret)
+ goto fail;
+
++ if (adev->asic_type >= CHIP_NAVI10) {
++ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
++ if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
++ ret = add_affected_mst_dsc_crtcs(state, crtc);
++ if (ret)
++ goto fail;
++ }
++ }
++ }
++
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+ !new_crtc_state->color_mgmt_changed &&
+--
+2.25.1
+
--- /dev/null
+From 9bb7d4607538a97c61ec84c7fcca646d68d1b664 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Jul 2020 18:38:35 +0200
+Subject: drm/ingenic: Fix incorrect assumption about plane->index
+
+From: Paul Cercueil <paul@crapouillou.net>
+
+[ Upstream commit ca43f274e03f91c533643299ae4984965ce03205 ]
+
+plane->index is NOT the index of the color plane in a YUV frame.
+Actually, a YUV frame is represented by a single drm_plane, even though
+it contains three Y, U, V planes.
+
+v2-v3: No change
+
+Cc: stable@vger.kernel.org # v5.3
+Fixes: 90b86fcc47b4 ("DRM: Add KMS driver for the Ingenic JZ47xx SoCs")
+Signed-off-by: Paul Cercueil <paul@crapouillou.net>
+Reviewed-by: Sam Ravnborg <sam@ravnborg.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200716163846.174790-1-paul@crapouillou.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/ingenic/ingenic-drm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
+index e746b3a6f7cbc..7e6179fe63f86 100644
+--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
++++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
+@@ -377,7 +377,7 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
+ addr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
+ width = state->src_w >> 16;
+ height = state->src_h >> 16;
+- cpp = state->fb->format->cpp[plane->index];
++ cpp = state->fb->format->cpp[0];
+
+ priv->dma_hwdesc->addr = addr;
+ priv->dma_hwdesc->cmd = width * height * cpp / 4;
+--
+2.25.1
+
--- /dev/null
+From 91180ead4416c7a6dc84bb00dedc79a64c8dcf77 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Mar 2020 17:59:07 +0800
+Subject: drm/xen: fix passing zero to 'PTR_ERR' warning
+
+From: Ding Xiang <dingxiang@cmss.chinamobile.com>
+
+[ Upstream commit 4c1cb04e0e7ac4ba1ef5457929ef9b5671d9eed3 ]
+
+Fix a static code checker warning:
+ drivers/gpu/drm/xen/xen_drm_front.c:404 xen_drm_drv_dumb_create()
+ warn: passing zero to 'PTR_ERR'
+
+Signed-off-by: Ding Xiang <dingxiang@cmss.chinamobile.com>
+Reviewed-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: https://patchwork.freedesktop.org/patch/msgid/1585562347-30214-1-git-send-email-dingxiang@cmss.chinamobile.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xen/xen_drm_front.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
+index 4be49c1aef518..374142018171c 100644
+--- a/drivers/gpu/drm/xen/xen_drm_front.c
++++ b/drivers/gpu/drm/xen/xen_drm_front.c
+@@ -401,7 +401,7 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp,
+
+ obj = xen_drm_front_gem_create(dev, args->size);
+ if (IS_ERR_OR_NULL(obj)) {
+- ret = PTR_ERR(obj);
++ ret = PTR_ERR_OR_ZERO(obj);
+ goto fail;
+ }
+
+--
+2.25.1
+
--- /dev/null
+From 3473efa62ededab9a0e922111faeec60f4ee37cc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Aug 2020 09:21:10 +0300
+Subject: drm/xen-front: Fix misused IS_ERR_OR_NULL checks
+
+From: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+
+[ Upstream commit 14dee058610446aa464254fc5c8e88c7535195e0 ]
+
+The patch c575b7eeb89f: "drm/xen-front: Add support for Xen PV
+display frontend" from Apr 3, 2018, leads to the following static
+checker warning:
+
+ drivers/gpu/drm/xen/xen_drm_front_gem.c:140 xen_drm_front_gem_create()
+ warn: passing zero to 'ERR_CAST'
+
+drivers/gpu/drm/xen/xen_drm_front_gem.c
+ 133 struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
+ 134 size_t size)
+ 135 {
+ 136 struct xen_gem_object *xen_obj;
+ 137
+ 138 xen_obj = gem_create(dev, size);
+ 139 if (IS_ERR_OR_NULL(xen_obj))
+ 140 return ERR_CAST(xen_obj);
+
+Fix this and the rest of misused places with IS_ERR_OR_NULL in the
+driver.
+
+Fixes: c575b7eeb89f: "drm/xen-front: Add support for Xen PV display frontend"
+
+Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Dan Carpenter <dan.carpenter@oracle.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20200813062113.11030-3-andr2000@gmail.com
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xen/xen_drm_front.c | 4 ++--
+ drivers/gpu/drm/xen/xen_drm_front_gem.c | 8 ++++----
+ drivers/gpu/drm/xen/xen_drm_front_kms.c | 2 +-
+ 3 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
+index 374142018171c..09894a1d343f3 100644
+--- a/drivers/gpu/drm/xen/xen_drm_front.c
++++ b/drivers/gpu/drm/xen/xen_drm_front.c
+@@ -400,8 +400,8 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp,
+ args->size = args->pitch * args->height;
+
+ obj = xen_drm_front_gem_create(dev, args->size);
+- if (IS_ERR_OR_NULL(obj)) {
+- ret = PTR_ERR_OR_ZERO(obj);
++ if (IS_ERR(obj)) {
++ ret = PTR_ERR(obj);
+ goto fail;
+ }
+
+diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
+index f0b85e0941114..4ec8a49241e17 100644
+--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
++++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
+@@ -83,7 +83,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
+
+ size = round_up(size, PAGE_SIZE);
+ xen_obj = gem_create_obj(dev, size);
+- if (IS_ERR_OR_NULL(xen_obj))
++ if (IS_ERR(xen_obj))
+ return xen_obj;
+
+ if (drm_info->front_info->cfg.be_alloc) {
+@@ -117,7 +117,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
+ */
+ xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
+ xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
+- if (IS_ERR_OR_NULL(xen_obj->pages)) {
++ if (IS_ERR(xen_obj->pages)) {
+ ret = PTR_ERR(xen_obj->pages);
+ xen_obj->pages = NULL;
+ goto fail;
+@@ -136,7 +136,7 @@ struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
+ struct xen_gem_object *xen_obj;
+
+ xen_obj = gem_create(dev, size);
+- if (IS_ERR_OR_NULL(xen_obj))
++ if (IS_ERR(xen_obj))
+ return ERR_CAST(xen_obj);
+
+ return &xen_obj->base;
+@@ -194,7 +194,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
+
+ size = attach->dmabuf->size;
+ xen_obj = gem_create_obj(dev, size);
+- if (IS_ERR_OR_NULL(xen_obj))
++ if (IS_ERR(xen_obj))
+ return ERR_CAST(xen_obj);
+
+ ret = gem_alloc_pages_array(xen_obj, size);
+diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
+index 21ad1c359b613..e4dedbb184ab7 100644
+--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
++++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
+@@ -60,7 +60,7 @@ fb_create(struct drm_device *dev, struct drm_file *filp,
+ int ret;
+
+ fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
+- if (IS_ERR_OR_NULL(fb))
++ if (IS_ERR(fb))
+ return fb;
+
+ gem_obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
+--
+2.25.1
+
--- /dev/null
+From de55ea8b7b8cedd5226c9bec5a883949253701fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Jul 2020 12:43:24 -0700
+Subject: EDAC/{i7core,sb,pnd2,skx}: Fix error event severity
+
+From: Tony Luck <tony.luck@intel.com>
+
+[ Upstream commit 45bc6098a3e279d8e391d22428396687562797e2 ]
+
+IA32_MCG_STATUS.RIPV indicates whether the return RIP value pushed onto
+the stack as part of machine check delivery is valid or not.
+
+Various drivers copied a code fragment that uses the RIPV bit to
+determine the severity of the error as either HW_EVENT_ERR_UNCORRECTED
+or HW_EVENT_ERR_FATAL, but this check is reversed (marking errors where
+RIPV is set as "FATAL").
+
+Reverse the tests so that the error is marked fatal when RIPV is not set.
+
+Reported-by: Gabriele Paoloni <gabriele.paoloni@intel.com>
+Signed-off-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/20200707194324.14884-1-tony.luck@intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/edac/i7core_edac.c | 4 ++--
+ drivers/edac/pnd2_edac.c | 2 +-
+ drivers/edac/sb_edac.c | 4 ++--
+ drivers/edac/skx_common.c | 4 ++--
+ 4 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
+index a71cca6eeb333..6be7e65f7389d 100644
+--- a/drivers/edac/i7core_edac.c
++++ b/drivers/edac/i7core_edac.c
+@@ -1711,9 +1711,9 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
+ if (uncorrected_error) {
+ core_err_cnt = 1;
+ if (ripv)
+- tp_event = HW_EVENT_ERR_FATAL;
+- else
+ tp_event = HW_EVENT_ERR_UNCORRECTED;
++ else
++ tp_event = HW_EVENT_ERR_FATAL;
+ } else {
+ tp_event = HW_EVENT_ERR_CORRECTED;
+ }
+diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
+index b1193be1ef1d8..dac45e2071b3f 100644
+--- a/drivers/edac/pnd2_edac.c
++++ b/drivers/edac/pnd2_edac.c
+@@ -1155,7 +1155,7 @@ static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
+ u32 optypenum = GET_BITFIELD(m->status, 4, 6);
+ int rc;
+
+- tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
++ tp_event = uc_err ? (ripv ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_FATAL) :
+ HW_EVENT_ERR_CORRECTED;
+
+ /*
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index a2fd39d330d67..b557a53c75c46 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -2982,9 +2982,9 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
+ if (uncorrected_error) {
+ core_err_cnt = 1;
+ if (ripv) {
+- tp_event = HW_EVENT_ERR_FATAL;
+- } else {
+ tp_event = HW_EVENT_ERR_UNCORRECTED;
++ } else {
++ tp_event = HW_EVENT_ERR_FATAL;
+ }
+ } else {
+ tp_event = HW_EVENT_ERR_CORRECTED;
+diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
+index 4ca87723dcdcd..99dea4f66b5e9 100644
+--- a/drivers/edac/skx_common.c
++++ b/drivers/edac/skx_common.c
+@@ -490,9 +490,9 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
+ if (uncorrected_error) {
+ core_err_cnt = 1;
+ if (ripv) {
+- tp_event = HW_EVENT_ERR_FATAL;
+- } else {
+ tp_event = HW_EVENT_ERR_UNCORRECTED;
++ } else {
++ tp_event = HW_EVENT_ERR_FATAL;
+ }
+ } else {
+ tp_event = HW_EVENT_ERR_CORRECTED;
+--
+2.25.1
+
--- /dev/null
+From 162fb7aef6d13e87c50cc5841c059ff8b59b7e3e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Sep 2019 11:17:16 -0300
+Subject: EDAC: sb_edac: get rid of unused vars
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+
+[ Upstream commit 323014d85d2699b2879ecb15cd06a15408e3e801 ]
+
+There are several vars unused on this driver, probably because
+it was a modified copy of another driver. Get rid of them.
+
+ drivers/edac/sb_edac.c: In function ‘knl_get_dimm_capacity’:
+ drivers/edac/sb_edac.c:1343:16: warning: variable ‘sad_size’ set but not used [-Wunused-but-set-variable]
+ 1343 | u64 sad_base, sad_size, sad_limit = 0;
+ | ^~~~~~~~
+ drivers/edac/sb_edac.c: In function ‘sbridge_mce_output_error’:
+ drivers/edac/sb_edac.c:2955:8: warning: variable ‘type’ set but not used [-Wunused-but-set-variable]
+ 2955 | char *type, *optype, msg[256];
+ | ^~~~
+ drivers/edac/sb_edac.c: In function ‘sbridge_unregister_mci’:
+ drivers/edac/sb_edac.c:3203:22: warning: variable ‘pvt’ set but not used [-Wunused-but-set-variable]
+ 3203 | struct sbridge_pvt *pvt;
+ | ^~~
+ At top level:
+ drivers/edac/sb_edac.c:266:18: warning: ‘correrrthrsld’ defined but not used [-Wunused-const-variable=]
+ 266 | static const u32 correrrthrsld[] = {
+ | ^~~~~~~~~~~~~
+ drivers/edac/sb_edac.c:257:18: warning: ‘correrrcnt’ defined but not used [-Wunused-const-variable=]
+ 257 | static const u32 correrrcnt[] = {
+ | ^~~~~~~~~~
+
+Acked-by: Borislav Petkov <bp@alien8.de>
+Acked-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/edac/sb_edac.c | 21 ++++++++-------------
+ 1 file changed, 8 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index f743502ca9b72..a2fd39d330d67 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -254,18 +254,20 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
+ * FIXME: Implement the error count reads directly
+ */
+
+-static const u32 correrrcnt[] = {
+- 0x104, 0x108, 0x10c, 0x110,
+-};
+-
+ #define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31)
+ #define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30)
+ #define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15)
+ #define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14)
+
++#if 0 /* Currently unused*/
++static const u32 correrrcnt[] = {
++ 0x104, 0x108, 0x10c, 0x110,
++};
++
+ static const u32 correrrthrsld[] = {
+ 0x11c, 0x120, 0x124, 0x128,
+ };
++#endif
+
+ #define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30)
+ #define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14)
+@@ -1340,7 +1342,7 @@ static void knl_show_mc_route(u32 reg, char *s)
+ */
+ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
+ {
+- u64 sad_base, sad_size, sad_limit = 0;
++ u64 sad_base, sad_limit = 0;
+ u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace;
+ int sad_rule = 0;
+ int tad_rule = 0;
+@@ -1427,7 +1429,6 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
+ edram_only = KNL_EDRAM_ONLY(dram_rule);
+
+ sad_limit = pvt->info.sad_limit(dram_rule)+1;
+- sad_size = sad_limit - sad_base;
+
+ pci_read_config_dword(pvt->pci_sad0,
+ pvt->info.interleave_list[sad_rule], &interleave_reg);
+@@ -2952,7 +2953,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
+ struct mem_ctl_info *new_mci;
+ struct sbridge_pvt *pvt = mci->pvt_info;
+ enum hw_event_mc_err_type tp_event;
+- char *type, *optype, msg[256];
++ char *optype, msg[256];
+ bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
+ bool overflow = GET_BITFIELD(m->status, 62, 62);
+ bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
+@@ -2981,14 +2982,11 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
+ if (uncorrected_error) {
+ core_err_cnt = 1;
+ if (ripv) {
+- type = "FATAL";
+ tp_event = HW_EVENT_ERR_FATAL;
+ } else {
+- type = "NON_FATAL";
+ tp_event = HW_EVENT_ERR_UNCORRECTED;
+ }
+ } else {
+- type = "CORRECTED";
+ tp_event = HW_EVENT_ERR_CORRECTED;
+ }
+
+@@ -3200,7 +3198,6 @@ static struct notifier_block sbridge_mce_dec = {
+ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
+ {
+ struct mem_ctl_info *mci = sbridge_dev->mci;
+- struct sbridge_pvt *pvt;
+
+ if (unlikely(!mci || !mci->pvt_info)) {
+ edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
+@@ -3209,8 +3206,6 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
+ return;
+ }
+
+- pvt = mci->pvt_info;
+-
+ edac_dbg(0, "MC: mci = %p, dev = %p\n",
+ mci, &sbridge_dev->pdev[0]->dev);
+
+--
+2.25.1
+
--- /dev/null
+From 2f9c9e01c93ad0a98bd68b37a37dca6eda55d527 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Sep 2019 11:23:08 -0300
+Subject: EDAC: skx_common: get rid of unused type var
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+
+[ Upstream commit f05390d30e20cccd8f8de981dee42bcdd8d2d137 ]
+
+ drivers/edac/skx_common.c: In function ‘skx_mce_output_error’:
+ drivers/edac/skx_common.c:478:8: warning: variable ‘type’ set but not used [-Wunused-but-set-variable]
+ 478 | char *type, *optype;
+ | ^~~~
+
+Acked-by: Borislav Petkov <bp@alien8.de>
+Acked-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/edac/skx_common.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
+index 2177ad765bd16..4ca87723dcdcd 100644
+--- a/drivers/edac/skx_common.c
++++ b/drivers/edac/skx_common.c
+@@ -475,7 +475,7 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
+ struct decoded_addr *res)
+ {
+ enum hw_event_mc_err_type tp_event;
+- char *type, *optype;
++ char *optype;
+ bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
+ bool overflow = GET_BITFIELD(m->status, 62, 62);
+ bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
+@@ -490,14 +490,11 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
+ if (uncorrected_error) {
+ core_err_cnt = 1;
+ if (ripv) {
+- type = "FATAL";
+ tp_event = HW_EVENT_ERR_FATAL;
+ } else {
+- type = "NON_FATAL";
+ tp_event = HW_EVENT_ERR_UNCORRECTED;
+ }
+ } else {
+- type = "CORRECTED";
+ tp_event = HW_EVENT_ERR_CORRECTED;
+ }
+
+--
+2.25.1
+
--- /dev/null
+From 203dd6d91722766b4a8d7e4a28d5dffefd9957f8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Dec 2019 15:41:39 +0100
+Subject: gpu/drm: ingenic: Use the plane's src_[x,y] to configure DMA length
+
+From: Paul Cercueil <paul@crapouillou.net>
+
+[ Upstream commit 52e4607dace1eeeb2e012fca291dc4e6cb449bff ]
+
+Instead of obtaining the width/height of the framebuffer from the CRTC
+state, obtain it from the current plane state.
+
+v2: No change
+
+Signed-off-by: Paul Cercueil <paul@crapouillou.net>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191210144142.33143-3-paul@crapouillou.net
+# *** extracted tags ***
+Acked-by: Sam Ravnborg <sam@ravnborg.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/ingenic/ingenic-drm.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
+index 376fca6ca9f47..e746b3a6f7cbc 100644
+--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
++++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
+@@ -375,8 +375,8 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
+
+ if (state && state->fb) {
+ addr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
+- width = state->crtc->state->adjusted_mode.hdisplay;
+- height = state->crtc->state->adjusted_mode.vdisplay;
++ width = state->src_w >> 16;
++ height = state->src_h >> 16;
+ cpp = state->fb->format->cpp[plane->index];
+
+ priv->dma_hwdesc->addr = addr;
+--
+2.25.1
+
--- /dev/null
+From 5ab4f827dedfb8f3d4b39a849530646c8aaa7493 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 30 Nov 2019 17:57:22 -0800
+Subject: mm/cma.c: switch to bitmap_zalloc() for cma bitmap allocation
+
+From: Yunfeng Ye <yeyunfeng@huawei.com>
+
+[ Upstream commit 2184f9928ab52f26c2ae5e9ba37faf29c78f50b8 ]
+
+kzalloc() is used for cma bitmap allocation in cma_activate_area(),
+switch to bitmap_zalloc() for clarity.
+
+Link: http://lkml.kernel.org/r/895d4627-f115-c77a-d454-c0a196116426@huawei.com
+Signed-off-by: Yunfeng Ye <yeyunfeng@huawei.com>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Mike Rapoport <rppt@linux.ibm.com>
+Cc: Yue Hu <huyue2@yulong.com>
+Cc: Peng Fan <peng.fan@nxp.com>
+Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Cc: Ryohei Suzuki <ryh.szk.cmnty@gmail.com>
+Cc: Andrey Konovalov <andreyknvl@google.com>
+Cc: Doug Berger <opendmb@gmail.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/cma.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/mm/cma.c b/mm/cma.c
+index 7fe0b8356775f..be55d1988c675 100644
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -95,13 +95,11 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
+
+ static int __init cma_activate_area(struct cma *cma)
+ {
+- int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
+ unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
+ unsigned i = cma->count >> pageblock_order;
+ struct zone *zone;
+
+- cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+-
++ cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
+ if (!cma->bitmap) {
+ cma->count = 0;
+ return -ENOMEM;
+@@ -139,7 +137,7 @@ static int __init cma_activate_area(struct cma *cma)
+
+ not_in_zone:
+ pr_err("CMA area %s could not be activated\n", cma->name);
+- kfree(cma->bitmap);
++ bitmap_free(cma->bitmap);
+ cma->count = 0;
+ return -EINVAL;
+ }
+--
+2.25.1
+
--- /dev/null
+From 56d1393566834697afb8abc437058df80293de05 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Aug 2020 23:17:16 -0700
+Subject: mm: fix kthread_use_mm() vs TLB invalidate
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 38cf307c1f2011d413750c5acb725456f47d9172 ]
+
+For SMP systems using IPI based TLB invalidation, looking at
+current->active_mm is entirely reasonable. This then presents the
+following race condition:
+
+ CPU0 CPU1
+
+ flush_tlb_mm(mm) use_mm(mm)
+ <send-IPI>
+ tsk->active_mm = mm;
+ <IPI>
+ if (tsk->active_mm == mm)
+ // flush TLBs
+ </IPI>
+ switch_mm(old_mm,mm,tsk);
+
+Where it is possible the IPI flushed the TLBs for @old_mm, not @mm,
+because the IPI lands before we actually switched.
+
+Avoid this by disabling IRQs across changing ->active_mm and
+switch_mm().
+
+Of the (SMP) architectures that have IPI based TLB invalidate:
+
+ Alpha - checks active_mm
+ ARC - ASID specific
+ IA64 - checks active_mm
+ MIPS - ASID specific flush
+ OpenRISC - shoots down world
+ PARISC - shoots down world
+ SH - ASID specific
+ SPARC - ASID specific
+ x86 - N/A
+ xtensa - checks active_mm
+
+So at the very least Alpha, IA64 and Xtensa are suspect.
+
+On top of this, for scheduler consistency we need at least preemption
+disabled across changing tsk->mm and doing switch_mm(), which is
+currently provided by task_lock(), but that's not sufficient for
+PREEMPT_RT.
+
+[akpm@linux-foundation.org: add comment]
+
+Reported-by: Andy Lutomirski <luto@amacapital.net>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Nicholas Piggin <npiggin@gmail.com>
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Jann Horn <jannh@google.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200721154106.GE10769@hirez.programming.kicks-ass.net
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/mmu_context.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/mm/mmu_context.c b/mm/mmu_context.c
+index 3e612ae748e96..a1da47e027479 100644
+--- a/mm/mmu_context.c
++++ b/mm/mmu_context.c
+@@ -25,13 +25,16 @@ void use_mm(struct mm_struct *mm)
+ struct task_struct *tsk = current;
+
+ task_lock(tsk);
++ /* Hold off tlb flush IPIs while switching mm's */
++ local_irq_disable();
+ active_mm = tsk->active_mm;
+ if (active_mm != mm) {
+ mmgrab(mm);
+ tsk->active_mm = mm;
+ }
+ tsk->mm = mm;
+- switch_mm(active_mm, mm, tsk);
++ switch_mm_irqs_off(active_mm, mm, tsk);
++ local_irq_enable();
+ task_unlock(tsk);
+ #ifdef finish_arch_post_lock_switch
+ finish_arch_post_lock_switch();
+@@ -56,9 +59,11 @@ void unuse_mm(struct mm_struct *mm)
+
+ task_lock(tsk);
+ sync_mm_rss(mm);
++ local_irq_disable();
+ tsk->mm = NULL;
+ /* active_mm is still 'mm' */
+ enter_lazy_tlb(mm, tsk);
++ local_irq_enable();
+ task_unlock(tsk);
+ }
+ EXPORT_SYMBOL_GPL(unuse_mm);
+--
+2.25.1
+
--- /dev/null
+From 6438a6013b9df7304281cf33953e8c7166e18fc2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Aug 2020 21:30:41 -0700
+Subject: mm/memory.c: skip spurious TLB flush for retried page fault
+
+From: Yang Shi <shy828301@gmail.com>
+
+[ Upstream commit b7333b58f358f38d90d78e00c1ee5dec82df10ad ]
+
+Recently we found regression when running will_it_scale/page_fault3 test
+on ARM64. Over 70% down for the multi processes cases and over 20% down
+for the multi threads cases. It turns out the regression is caused by
+commit 89b15332af7c ("mm: drop mmap_sem before calling
+balance_dirty_pages() in write fault").
+
+The test mmaps a memory size file then write to the mapping, this would
+make all memory dirty and trigger dirty pages throttle, that upstream
+commit would release mmap_sem then retry the page fault. The retried
+page fault would see correct PTEs installed then just fall through to
+spurious TLB flush. The regression is caused by the excessive spurious
+TLB flush. It is fine on x86 since x86's spurious TLB flush is no-op.
+
+We could just skip the spurious TLB flush to mitigate the regression.
+
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Reported-by: Xu Yu <xuyu@linux.alibaba.com>
+Debugged-by: Xu Yu <xuyu@linux.alibaba.com>
+Tested-by: Xu Yu <xuyu@linux.alibaba.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Yang Shi <shy828301@gmail.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/memory.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/mm/memory.c b/mm/memory.c
+index cb7c940cf800c..4d4c4d6782a22 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3886,6 +3886,9 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
+ vmf->flags & FAULT_FLAG_WRITE)) {
+ update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
+ } else {
++ /* Skip spurious TLB flush for retried page fault */
++ if (vmf->flags & FAULT_FLAG_TRIED)
++ goto unlock;
+ /*
+ * This is needed only for protection faults but the arch code
+ * is not yet telling us if this is a protection fault or not.
+--
+2.25.1
+
--- /dev/null
+From b526f279d61d5cf18c94ef26ff888ee2cec1ad28 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Aug 2020 23:17:13 -0700
+Subject: mm/shuffle: don't move pages between zones and don't read garbage
+ memmaps
+
+From: David Hildenbrand <david@redhat.com>
+
+[ Upstream commit 4a93025cbe4a0b19d1a25a2d763a3d2018bad0d9 ]
+
+Especially with memory hotplug, we can have offline sections (with a
+garbage memmap) and overlapping zones. We have to make sure to only touch
+initialized memmaps (online sections managed by the buddy) and that the
+zone matches, to not move pages between zones.
+
+To test if this can actually happen, I added a simple
+
+ BUG_ON(page_zone(page_i) != page_zone(page_j));
+
+right before the swap. When hotplugging a 256M DIMM to a 4G x86-64 VM and
+onlining the first memory block "online_movable" and the second memory
+block "online_kernel", it will trigger the BUG, as both zones (NORMAL and
+MOVABLE) overlap.
+
+This might result in all kinds of weird situations (e.g., double
+allocations, list corruptions, unmovable allocations ending up in the
+movable zone).
+
+Fixes: e900a918b098 ("mm: shuffle initial free memory to improve memory-side-cache utilization")
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Wei Yang <richard.weiyang@linux.alibaba.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Dan Williams <dan.j.williams@intel.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Huang Ying <ying.huang@intel.com>
+Cc: Wei Yang <richard.weiyang@gmail.com>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: <stable@vger.kernel.org> [5.2+]
+Link: http://lkml.kernel.org/r/20200624094741.9918-2-david@redhat.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/shuffle.c | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/mm/shuffle.c b/mm/shuffle.c
+index b3fe97fd66541..56958ffa5a3a9 100644
+--- a/mm/shuffle.c
++++ b/mm/shuffle.c
+@@ -58,25 +58,25 @@ module_param_call(shuffle, shuffle_store, shuffle_show, &shuffle_param, 0400);
+ * For two pages to be swapped in the shuffle, they must be free (on a
+ * 'free_area' lru), have the same order, and have the same migratetype.
+ */
+-static struct page * __meminit shuffle_valid_page(unsigned long pfn, int order)
++static struct page * __meminit shuffle_valid_page(struct zone *zone,
++ unsigned long pfn, int order)
+ {
+- struct page *page;
++ struct page *page = pfn_to_online_page(pfn);
+
+ /*
+ * Given we're dealing with randomly selected pfns in a zone we
+ * need to ask questions like...
+ */
+
+- /* ...is the pfn even in the memmap? */
+- if (!pfn_valid_within(pfn))
++ /* ... is the page managed by the buddy? */
++ if (!page)
+ return NULL;
+
+- /* ...is the pfn in a present section or a hole? */
+- if (!pfn_present(pfn))
++ /* ... is the page assigned to the same zone? */
++ if (page_zone(page) != zone)
+ return NULL;
+
+ /* ...is the page free and currently on a free_area list? */
+- page = pfn_to_page(pfn);
+ if (!PageBuddy(page))
+ return NULL;
+
+@@ -123,7 +123,7 @@ void __meminit __shuffle_zone(struct zone *z)
+ * page_j randomly selected in the span @zone_start_pfn to
+ * @spanned_pages.
+ */
+- page_i = shuffle_valid_page(i, order);
++ page_i = shuffle_valid_page(z, i, order);
+ if (!page_i)
+ continue;
+
+@@ -137,7 +137,7 @@ void __meminit __shuffle_zone(struct zone *z)
+ j = z->zone_start_pfn +
+ ALIGN_DOWN(get_random_long() % z->spanned_pages,
+ order_pages);
+- page_j = shuffle_valid_page(j, order);
++ page_j = shuffle_valid_page(z, j, order);
+ if (page_j && page_j != page_i)
+ break;
+ }
+--
+2.25.1
+
--- /dev/null
+From 4eab8a1ff6b99ff194b8f0efda18ac5265c5514e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Aug 2020 15:27:12 -0400
+Subject: mm/vunmap: add cond_resched() in vunmap_pmd_range
+
+[ Upstream commit e47110e90584a22e9980510b00d0dfad3a83354e ]
+
+Like zap_pte_range, add cond_resched() so that we can avoid softlockups, as
+reported below. On a non-preemptible kernel with a large I/O map region (like
+the one we get when using persistent memory with sector mode), an unmap of
+the namespace can report below softlockups.
+
+[22724.027334] watchdog: BUG: soft lockup - CPU#49 stuck for 23s! [ndctl:50777]
+ NIP [c0000000000dc224] plpar_hcall+0x38/0x58
+ LR [c0000000000d8898] pSeries_lpar_hpte_invalidate+0x68/0xb0
+ Call Trace:
+ flush_hash_page+0x114/0x200
+ hpte_need_flush+0x2dc/0x540
+ vunmap_page_range+0x538/0x6f0
+ free_unmap_vmap_area+0x30/0x70
+ remove_vm_area+0xfc/0x140
+ __vunmap+0x68/0x270
+ __iounmap.part.0+0x34/0x60
+ memunmap+0x54/0x70
+ release_nodes+0x28c/0x300
+ device_release_driver_internal+0x16c/0x280
+ unbind_store+0x124/0x170
+ drv_attr_store+0x44/0x60
+ sysfs_kf_write+0x64/0x90
+ kernfs_fop_write+0x1b0/0x290
+ __vfs_write+0x3c/0x70
+ vfs_write+0xd8/0x260
+ ksys_write+0xdc/0x130
+ system_call+0x5c/0x70
+
+Reported-by: Harish Sriram <harish@linux.ibm.com>
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200807075933.310240-1-aneesh.kumar@linux.ibm.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/vmalloc.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index ad4d00bd79147..5797e1eeaa7e6 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -85,6 +85,8 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
+ if (pmd_none_or_clear_bad(pmd))
+ continue;
+ vunmap_pte_range(pmd, addr, next);
++
++ cond_resched();
+ } while (pmd++, addr = next, addr != end);
+ }
+
+--
+2.25.1
+
--- /dev/null
+From 3115a1f0c03789b88fb3b0ed25cf71ac69847cbb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jun 2020 23:05:57 +0200
+Subject: PCI: qcom: Add missing ipq806x clocks in PCIe driver
+
+From: Ansuel Smith <ansuelsmth@gmail.com>
+
+[ Upstream commit 8b6f0330b5f9a7543356bfa9e76d580f03aa2c1e ]
+
+Aux and Ref clk are missing in PCIe qcom driver. Add support for this
+optional clks for ipq8064/apq8064 SoC.
+
+Link: https://lore.kernel.org/r/20200615210608.21469-2-ansuelsmth@gmail.com
+Fixes: 82a823833f4e ("PCI: qcom: Add Qualcomm PCIe controller driver")
+Signed-off-by: Sham Muthayyan <smuthayy@codeaurora.org>
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Reviewed-by: Rob Herring <robh@kernel.org>
+Acked-by: Stanimir Varbanov <svarbanov@mm-sol.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-qcom.c | 38 ++++++++++++++++++++++----
+ 1 file changed, 33 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 270d502b8cd50..380a77a914fa0 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -103,6 +103,8 @@ struct qcom_pcie_resources_2_1_0 {
+ struct clk *iface_clk;
+ struct clk *core_clk;
+ struct clk *phy_clk;
++ struct clk *aux_clk;
++ struct clk *ref_clk;
+ struct reset_control *pci_reset;
+ struct reset_control *axi_reset;
+ struct reset_control *ahb_reset;
+@@ -253,6 +255,14 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
+ if (IS_ERR(res->phy_clk))
+ return PTR_ERR(res->phy_clk);
+
++ res->aux_clk = devm_clk_get_optional(dev, "aux");
++ if (IS_ERR(res->aux_clk))
++ return PTR_ERR(res->aux_clk);
++
++ res->ref_clk = devm_clk_get_optional(dev, "ref");
++ if (IS_ERR(res->ref_clk))
++ return PTR_ERR(res->ref_clk);
++
+ res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
+ if (IS_ERR(res->pci_reset))
+ return PTR_ERR(res->pci_reset);
+@@ -285,6 +295,8 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
+ clk_disable_unprepare(res->iface_clk);
+ clk_disable_unprepare(res->core_clk);
+ clk_disable_unprepare(res->phy_clk);
++ clk_disable_unprepare(res->aux_clk);
++ clk_disable_unprepare(res->ref_clk);
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+ }
+
+@@ -315,16 +327,28 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+ goto err_assert_ahb;
+ }
+
++ ret = clk_prepare_enable(res->core_clk);
++ if (ret) {
++ dev_err(dev, "cannot prepare/enable core clock\n");
++ goto err_clk_core;
++ }
++
+ ret = clk_prepare_enable(res->phy_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable phy clock\n");
+ goto err_clk_phy;
+ }
+
+- ret = clk_prepare_enable(res->core_clk);
++ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+- dev_err(dev, "cannot prepare/enable core clock\n");
+- goto err_clk_core;
++ dev_err(dev, "cannot prepare/enable aux clock\n");
++ goto err_clk_aux;
++ }
++
++ ret = clk_prepare_enable(res->ref_clk);
++ if (ret) {
++ dev_err(dev, "cannot prepare/enable ref clock\n");
++ goto err_clk_ref;
+ }
+
+ ret = reset_control_deassert(res->ahb_reset);
+@@ -400,10 +424,14 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+ return 0;
+
+ err_deassert_ahb:
+- clk_disable_unprepare(res->core_clk);
+-err_clk_core:
++ clk_disable_unprepare(res->ref_clk);
++err_clk_ref:
++ clk_disable_unprepare(res->aux_clk);
++err_clk_aux:
+ clk_disable_unprepare(res->phy_clk);
+ err_clk_phy:
++ clk_disable_unprepare(res->core_clk);
++err_clk_core:
+ clk_disable_unprepare(res->iface_clk);
+ err_assert_ahb:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+--
+2.25.1
+
--- /dev/null
+From 3a55a786c208d8fb6d3dd2e11906a16ce0f30f87 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jun 2020 23:06:00 +0200
+Subject: PCI: qcom: Add missing reset for ipq806x
+
+From: Ansuel Smith <ansuelsmth@gmail.com>
+
+[ Upstream commit ee367e2cdd2202b5714982739e684543cd2cee0e ]
+
+Add missing ext reset used by ipq8064 SoC in PCIe qcom driver.
+
+Link: https://lore.kernel.org/r/20200615210608.21469-5-ansuelsmth@gmail.com
+Fixes: 82a823833f4e ("PCI: qcom: Add Qualcomm PCIe controller driver")
+Signed-off-by: Sham Muthayyan <smuthayy@codeaurora.org>
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Reviewed-by: Rob Herring <robh@kernel.org>
+Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
+Acked-by: Stanimir Varbanov <svarbanov@mm-sol.com>
+Cc: stable@vger.kernel.org # v4.5+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-qcom.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 9cf7599a198c4..374db5d59cf87 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -110,6 +110,7 @@ struct qcom_pcie_resources_2_1_0 {
+ struct reset_control *ahb_reset;
+ struct reset_control *por_reset;
+ struct reset_control *phy_reset;
++ struct reset_control *ext_reset;
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
+ };
+
+@@ -279,6 +280,10 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
+ if (IS_ERR(res->por_reset))
+ return PTR_ERR(res->por_reset);
+
++ res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
++ if (IS_ERR(res->ext_reset))
++ return PTR_ERR(res->ext_reset);
++
+ res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+ return PTR_ERR_OR_ZERO(res->phy_reset);
+ }
+@@ -292,6 +297,7 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
+ reset_control_assert(res->axi_reset);
+ reset_control_assert(res->ahb_reset);
+ reset_control_assert(res->por_reset);
++ reset_control_assert(res->ext_reset);
+ reset_control_assert(res->phy_reset);
+ clk_disable_unprepare(res->iface_clk);
+ clk_disable_unprepare(res->core_clk);
+@@ -351,6 +357,12 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+ goto err_deassert_ahb;
+ }
+
++ ret = reset_control_deassert(res->ext_reset);
++ if (ret) {
++ dev_err(dev, "cannot deassert ext reset\n");
++ goto err_deassert_ahb;
++ }
++
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+--
+2.25.1
+
--- /dev/null
+From fff6e416373a4b1e712d0e0e95412d6c6137c927 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jun 2020 23:05:59 +0200
+Subject: PCI: qcom: Change duplicate PCI reset to phy reset
+
+From: Abhishek Sahu <absahu@codeaurora.org>
+
+[ Upstream commit dd58318c019f10bc94db36df66af6c55d4c0cbba ]
+
+The deinit path issues reset_control_assert for the PCI reset twice and
+does not assert the phy reset.
+
+Link: https://lore.kernel.org/r/20200615210608.21469-4-ansuelsmth@gmail.com
+Signed-off-by: Abhishek Sahu <absahu@codeaurora.org>
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Reviewed-by: Rob Herring <robh@kernel.org>
+Acked-by: Stanimir Varbanov <svarbanov@mm-sol.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/dwc/pcie-qcom.c | 18 ++++++++----------
+ 1 file changed, 8 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 380a77a914fa0..9cf7599a198c4 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -287,14 +287,14 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
+ {
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+
++ clk_disable_unprepare(res->phy_clk);
+ reset_control_assert(res->pci_reset);
+ reset_control_assert(res->axi_reset);
+ reset_control_assert(res->ahb_reset);
+ reset_control_assert(res->por_reset);
+- reset_control_assert(res->pci_reset);
++ reset_control_assert(res->phy_reset);
+ clk_disable_unprepare(res->iface_clk);
+ clk_disable_unprepare(res->core_clk);
+- clk_disable_unprepare(res->phy_clk);
+ clk_disable_unprepare(res->aux_clk);
+ clk_disable_unprepare(res->ref_clk);
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+@@ -333,12 +333,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+ goto err_clk_core;
+ }
+
+- ret = clk_prepare_enable(res->phy_clk);
+- if (ret) {
+- dev_err(dev, "cannot prepare/enable phy clock\n");
+- goto err_clk_phy;
+- }
+-
+ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clock\n");
+@@ -411,6 +405,12 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+ return ret;
+ }
+
++ ret = clk_prepare_enable(res->phy_clk);
++ if (ret) {
++ dev_err(dev, "cannot prepare/enable phy clock\n");
++ goto err_deassert_ahb;
++ }
++
+ /* wait for clock acquisition */
+ usleep_range(1000, 1500);
+
+@@ -428,8 +428,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+ err_clk_ref:
+ clk_disable_unprepare(res->aux_clk);
+ err_clk_aux:
+- clk_disable_unprepare(res->phy_clk);
+-err_clk_phy:
+ clk_disable_unprepare(res->core_clk);
+ err_clk_core:
+ clk_disable_unprepare(res->iface_clk);
+--
+2.25.1
+
--- /dev/null
+From f8f615f1faac9a64cd07f787ba7cfbcdbd82aa3d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 14 Dec 2019 18:11:30 +0000
+Subject: PM / devfreq: rk3399_dmc: Add missing of_node_put()
+
+From: Yangtao Li <tiny.windzz@gmail.com>
+
+[ Upstream commit 29d867e97f7d781972ed542acfca3c2c0b512603 ]
+
+of_node_put() needs to be called once the device node obtained from
+of_parse_phandle() is no longer in use.
+
+Signed-off-by: Yangtao Li <tiny.windzz@gmail.com>
+Signed-off-by: Chanwoo Choi <cw00.choi@samsung.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/devfreq/rk3399_dmc.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
+index 2e65d7279d79e..2f1027c5b6475 100644
+--- a/drivers/devfreq/rk3399_dmc.c
++++ b/drivers/devfreq/rk3399_dmc.c
+@@ -372,6 +372,7 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
+ node = of_parse_phandle(np, "rockchip,pmu", 0);
+ if (node) {
+ data->regmap_pmu = syscon_node_to_regmap(node);
++ of_node_put(node);
+ if (IS_ERR(data->regmap_pmu))
+ return PTR_ERR(data->regmap_pmu);
+ }
+--
+2.25.1
+
--- /dev/null
+From 9baa07d7e933e1e4a9dd7548ffcb3dccdc9202df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 22 Dec 2019 17:41:31 +0000
+Subject: PM / devfreq: rk3399_dmc: Disable devfreq-event device when fails
+
+From: Yangtao Li <tiny.windzz@gmail.com>
+
+[ Upstream commit 39a6e4739c19d5334e552d71ceca544ed84f4b87 ]
+
+The probe process may fail, but the devfreq event device remains
+enabled. Call devfreq_event_disable_edev on the error return path.
+
+Signed-off-by: Yangtao Li <tiny.windzz@gmail.com>
+Signed-off-by: Chanwoo Choi <cw00.choi@samsung.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/devfreq/rk3399_dmc.c | 18 +++++++++++++-----
+ 1 file changed, 13 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
+index 2f1027c5b6475..24f04f78285b7 100644
+--- a/drivers/devfreq/rk3399_dmc.c
++++ b/drivers/devfreq/rk3399_dmc.c
+@@ -364,7 +364,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
+ if (res.a0) {
+ dev_err(dev, "Failed to set dram param: %ld\n",
+ res.a0);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto err_edev;
+ }
+ }
+ }
+@@ -373,8 +374,10 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
+ if (node) {
+ data->regmap_pmu = syscon_node_to_regmap(node);
+ of_node_put(node);
+- if (IS_ERR(data->regmap_pmu))
+- return PTR_ERR(data->regmap_pmu);
++ if (IS_ERR(data->regmap_pmu)) {
++ ret = PTR_ERR(data->regmap_pmu);
++ goto err_edev;
++ }
+ }
+
+ regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
+@@ -392,7 +395,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
+ data->odt_dis_freq = data->timing.lpddr4_odt_dis_freq;
+ break;
+ default:
+- return -EINVAL;
++ ret = -EINVAL;
++ goto err_edev;
+ };
+
+ arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
+@@ -426,7 +430,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
+ */
+ if (dev_pm_opp_of_add_table(dev)) {
+ dev_err(dev, "Invalid operating-points in device tree.\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto err_edev;
+ }
+
+ of_property_read_u32(np, "upthreshold",
+@@ -466,6 +471,9 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
+
+ err_free_opp:
+ dev_pm_opp_of_remove_table(&pdev->dev);
++err_edev:
++ devfreq_event_disable_edev(data->edev);
++
+ return ret;
+ }
+
+--
+2.25.1
+
--- /dev/null
+From f302be6076de545cb400c0255d6f492e28403ba8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Jun 2020 11:05:46 +0100
+Subject: PM / devfreq: rk3399_dmc: Fix kernel oops when rockchip,pmu is absent
+
+From: Marc Zyngier <maz@kernel.org>
+
+[ Upstream commit 63ef91f24f9bfc70b6446319f6cabfd094481372 ]
+
+Booting a recent kernel on a rk3399-based system (nanopc-t4),
+equipped with a recent u-boot and ATF results in an Oops due
+to a NULL pointer dereference.
+
+This turns out to be due to the rk3399-dmc driver looking for
+an *undocumented* property (rockchip,pmu), and happily using
+a NULL pointer when the property isn't there.
+
+Instead, make most of what was brought in with 9173c5ceb035
+("PM / devfreq: rk3399_dmc: Pass ODT and auto power down parameters
+to TF-A.") conditioned on finding this property in the device-tree,
+preventing the driver from exploding.
+
+Cc: stable@vger.kernel.org
+Fixes: 9173c5ceb035 ("PM / devfreq: rk3399_dmc: Pass ODT and auto power down parameters to TF-A.")
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Chanwoo Choi <cw00.choi@samsung.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/devfreq/rk3399_dmc.c | 42 ++++++++++++++++++++----------------
+ 1 file changed, 23 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
+index 24f04f78285b7..027769e39f9b8 100644
+--- a/drivers/devfreq/rk3399_dmc.c
++++ b/drivers/devfreq/rk3399_dmc.c
+@@ -95,18 +95,20 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
+
+ mutex_lock(&dmcfreq->lock);
+
+- if (target_rate >= dmcfreq->odt_dis_freq)
+- odt_enable = true;
+-
+- /*
+- * This makes a SMC call to the TF-A to set the DDR PD (power-down)
+- * timings and to enable or disable the ODT (on-die termination)
+- * resistors.
+- */
+- arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, dmcfreq->odt_pd_arg0,
+- dmcfreq->odt_pd_arg1,
+- ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD,
+- odt_enable, 0, 0, 0, &res);
++ if (dmcfreq->regmap_pmu) {
++ if (target_rate >= dmcfreq->odt_dis_freq)
++ odt_enable = true;
++
++ /*
++ * This makes a SMC call to the TF-A to set the DDR PD
++ * (power-down) timings and to enable or disable the
++ * ODT (on-die termination) resistors.
++ */
++ arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, dmcfreq->odt_pd_arg0,
++ dmcfreq->odt_pd_arg1,
++ ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD,
++ odt_enable, 0, 0, 0, &res);
++ }
+
+ /*
+ * If frequency scaling from low to high, adjust voltage first.
+@@ -371,13 +373,14 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
+ }
+
+ node = of_parse_phandle(np, "rockchip,pmu", 0);
+- if (node) {
+- data->regmap_pmu = syscon_node_to_regmap(node);
+- of_node_put(node);
+- if (IS_ERR(data->regmap_pmu)) {
+- ret = PTR_ERR(data->regmap_pmu);
+- goto err_edev;
+- }
++ if (!node)
++ goto no_pmu;
++
++ data->regmap_pmu = syscon_node_to_regmap(node);
++ of_node_put(node);
++ if (IS_ERR(data->regmap_pmu)) {
++ ret = PTR_ERR(data->regmap_pmu);
++ goto err_edev;
+ }
+
+ regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
+@@ -399,6 +402,7 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
+ goto err_edev;
+ };
+
++no_pmu:
+ arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
+ ROCKCHIP_SIP_CONFIG_DRAM_INIT,
+ 0, 0, 0, 0, &res);
+--
+2.25.1
+
--- /dev/null
+From 75f3d6cd874ed22f1bfdfd613ba99cfd907292a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Aug 2020 14:20:57 -0400
+Subject: s390/numa: set node distance to LOCAL_DISTANCE
+
+[ Upstream commit 535e4fc623fab2e09a0653fc3a3e17f382ad0251 ]
+
+The node distance is hardcoded to 0, which causes a trouble
+for some user-level applications. In particular, "libnuma"
+expects the distance of a node to itself as LOCAL_DISTANCE.
+This update removes the offending node distance override.
+
+Cc: <stable@vger.kernel.org> # 4.4
+Fixes: 3a368f742da1 ("s390/numa: add core infrastructure")
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/include/asm/numa.h | 1 -
+ arch/s390/include/asm/topology.h | 2 --
+ arch/s390/numa/numa.c | 6 ------
+ 3 files changed, 9 deletions(-)
+
+diff --git a/arch/s390/include/asm/numa.h b/arch/s390/include/asm/numa.h
+index 35f8cbe7e5bb0..c759dcffa9eaf 100644
+--- a/arch/s390/include/asm/numa.h
++++ b/arch/s390/include/asm/numa.h
+@@ -17,7 +17,6 @@
+
+ void numa_setup(void);
+ int numa_pfn_to_nid(unsigned long pfn);
+-int __node_distance(int a, int b);
+ void numa_update_cpu_topology(void);
+
+ extern cpumask_t node_to_cpumask_map[MAX_NUMNODES];
+diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
+index cca406fdbe51f..ef9dd253dfad0 100644
+--- a/arch/s390/include/asm/topology.h
++++ b/arch/s390/include/asm/topology.h
+@@ -83,8 +83,6 @@ static inline const struct cpumask *cpumask_of_node(int node)
+
+ #define pcibus_to_node(bus) __pcibus_to_node(bus)
+
+-#define node_distance(a, b) __node_distance(a, b)
+-
+ #else /* !CONFIG_NUMA */
+
+ #define numa_node_id numa_node_id
+diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
+index d2910fa834c8a..8386c58fdb3a0 100644
+--- a/arch/s390/numa/numa.c
++++ b/arch/s390/numa/numa.c
+@@ -49,12 +49,6 @@ void numa_update_cpu_topology(void)
+ mode->update_cpu_topology();
+ }
+
+-int __node_distance(int a, int b)
+-{
+- return mode->distance ? mode->distance(a, b) : 0;
+-}
+-EXPORT_SYMBOL(__node_distance);
+-
+ int numa_debug_enabled;
+
+ /*
+--
+2.25.1
+
revert-ath10k-fix-dma-related-firmware-crashes-on-mu.patch
sched-uclamp-protect-uclamp-fast-path-code-with-stat.patch
sched-uclamp-fix-a-deadlock-when-enabling-uclamp-sta.patch
+usb-cdns3-gadget-always-zeroed-trb-buffer-when-enabl.patch
+pm-devfreq-rk3399_dmc-add-missing-of_node_put.patch
+pm-devfreq-rk3399_dmc-disable-devfreq-event-device-w.patch
+pm-devfreq-rk3399_dmc-fix-kernel-oops-when-rockchip-.patch
+drm-xen-fix-passing-zero-to-ptr_err-warning.patch
+drm-xen-front-fix-misused-is_err_or_null-checks.patch
+s390-numa-set-node-distance-to-local_distance.patch
+btrfs-factor-out-inode-items-copy-loop-from-btrfs_lo.patch
+btrfs-only-commit-the-delayed-inode-when-doing-a-ful.patch
+btrfs-only-commit-delayed-items-at-fsync-if-we-are-l.patch
+mm-shuffle-don-t-move-pages-between-zones-and-don-t-.patch
+mm-fix-kthread_use_mm-vs-tlb-invalidate.patch
+mm-cma.c-switch-to-bitmap_zalloc-for-cma-bitmap-allo.patch
+cma-don-t-quit-at-first-error-when-activating-reserv.patch
+gpu-drm-ingenic-use-the-plane-s-src_-x-y-to-configur.patch
+drm-ingenic-fix-incorrect-assumption-about-plane-ind.patch
+drm-amd-display-trigger-modesets-on-mst-dsc-connecto.patch
+drm-amd-display-add-additional-config-guards-for-dcn.patch
+drm-amd-display-fix-dmesg-warning-from-setting-abm-l.patch
+mm-vunmap-add-cond_resched-in-vunmap_pmd_range.patch
+mm-memory.c-skip-spurious-tlb-flush-for-retried-page.patch
+edac-sb_edac-get-rid-of-unused-vars.patch
+edac-skx_common-get-rid-of-unused-type-var.patch
+edac-i7core-sb-pnd2-skx-fix-error-event-severity.patch
+pci-qcom-add-missing-ipq806x-clocks-in-pcie-driver.patch
+pci-qcom-change-duplicate-pci-reset-to-phy-reset.patch
+pci-qcom-add-missing-reset-for-ipq806x.patch
+cpufreq-intel_pstate-fix-epp-setting-via-sysfs-in-ac.patch
+alsa-usb-audio-add-capture-support-for-saffire-6-usb.patch
--- /dev/null
+From ae47e93a2f2404279d19ef92527de72032834546 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Aug 2020 14:01:48 -0400
+Subject: usb: cdns3: gadget: always zeroed TRB buffer when enable endpoint
+
+[ Upstream commit 95f5acfc4f58f01a22b66d8c9c0ffb72aa96271c ]
+
+During the endpoint dequeue operation, it changes dequeued TRB as link
+TRB, when the endpoint is disabled and re-enabled, the DMA fetches the
+TRB before the link TRB, after it handles current TRB, the DMA pointer
+will advance to the TRB after link TRB, but enqueue and dequene
+variables don't know it due to no hardware interrupt at the time, when
+the next TRB is added to link TRB position, the DMA will not handle
+this TRB due to its pointer is already at the next TRB. See the trace
+log like below:
+
+file-storage-675 [001] d..1 86.585657: usb_ep_queue: ep0: req 00000000df9b3a4f length 0/0 sgs 0/0 stream 0 zsI status 0 --> 0
+file-storage-675 [001] d..1 86.585663: cdns3_ep_queue: ep1out: req: 000000002ebce364, req buff 00000000f5bc96b4, length: 0/1024 zsi, status: -115, trb: [start:0, end:0: virt addr (null)], flags:0 SID: 0
+file-storage-675 [001] d..1 86.585671: cdns3_prepare_trb: ep1out: trb 000000007f770303, dma buf: 0xbd195800, size: 1024, burst: 128 ctrl: 0x00000425 (C=1, T=0, ISP, IOC, Normal) SID:0 LAST_SID:0
+file-storage-675 [001] d..1 86.585676: cdns3_ring:
+ Ring contents for ep1out:
+ Ring deq index: 0, trb: 000000007f770303 (virt), 0xc4003000 (dma)
+ Ring enq index: 1, trb: 0000000049c1ba21 (virt), 0xc400300c (dma)
+ free trbs: 38, CCS=1, PCS=1
+ @0x00000000c4003000 bd195800 80020400 00000425
+ @0x00000000c400300c c4003018 80020400 00001811
+ @0x00000000c4003018 bcfcc000 0000001f 00000426
+ @0x00000000c4003024 bcfce800 0000001f 00000426
+
+ ...
+
+ irq/144-5b13000-698 [000] d... 87.619286: usb_gadget_giveback_request: ep1in: req 0000000031b832eb length 13/13 sgs 0/0 stream 0 zsI status 0 --> 0
+ file-storage-675 [001] d..1 87.619287: cdns3_ep_queue: ep1out: req: 000000002ebce364, req buff 00000000f5bc96b4, length: 0/1024 zsi, status: -115, trb: [start:0, end:0: virt addr 0x80020400c400300c], flags:0 SID: 0
+ file-storage-675 [001] d..1 87.619294: cdns3_prepare_trb: ep1out: trb 0000000049c1ba21, dma buf: 0xbd198000, size: 1024, burst: 128 ctrl: 0x00000425 (C=1, T=0, ISP, IOC, Normal) SID:0 LAST_SID:0
+ file-storage-675 [001] d..1 87.619297: cdns3_ring:
+ Ring contents for ep1out:
+ Ring deq index: 1, trb: 0000000049c1ba21 (virt), 0xc400300c (dma)
+ Ring enq index: 2, trb: 0000000059b34b67 (virt), 0xc4003018 (dma)
+ free trbs: 38, CCS=1, PCS=1
+ @0x00000000c4003000 bd195800 0000001f 00000427
+ @0x00000000c400300c bd198000 80020400 00000425
+ @0x00000000c4003018 bcfcc000 0000001f 00000426
+ @0x00000000c4003024 bcfce800 0000001f 00000426
+ ...
+
+ file-storage-675 [001] d..1 87.619305: cdns3_doorbell_epx: ep1out, ep_trbaddr c4003018
+ file-storage-675 [001] .... 87.619308: usb_ep_queue: ep1out: req 000000002ebce364 length 0/1024 sgs 0/0 stream 0 zsI status -115 --> 0
+ irq/144-5b13000-698 [000] d..1 87.619315: cdns3_epx_irq: IRQ for ep1out: 01000c80 TRBERR , ep_traddr: c4003018 ep_last_sid: 00000000 use_streams: 0
+ irq/144-5b13000-698 [000] d..1 87.619395: cdns3_usb_irq: IRQ 00000008 = Hot Reset
+
+Fixes: f616c3bda47e ("usb: cdns3: Fix dequeue implementation")
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Peter Chen <peter.chen@nxp.com>
+Signed-off-by: Felipe Balbi <balbi@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/cdns3/gadget.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
+index 856c34010021b..9900888afbcd8 100644
+--- a/drivers/usb/cdns3/gadget.c
++++ b/drivers/usb/cdns3/gadget.c
+@@ -189,10 +189,10 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
+ GFP_DMA32 | GFP_ATOMIC);
+ if (!priv_ep->trb_pool)
+ return -ENOMEM;
+- } else {
+- memset(priv_ep->trb_pool, 0, ring_size);
+ }
+
++ memset(priv_ep->trb_pool, 0, ring_size);
++
+ if (!priv_ep->num)
+ return 0;
+
+--
+2.25.1
+