git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 29 Dec 2019 16:10:31 +0000 (17:10 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 29 Dec 2019 16:10:31 +0000 (17:10 +0100)
added patches:
clk-imx-clk-composite-8m-add-lock-to-gate-mux.patch
clk-imx-clk-imx7ulp-add-missing-sentinel-of-ulp_div_table.patch
clk-imx-pll14xx-fix-clk_pll14xx_wait_lock.patch
ext4-check-for-directory-entries-too-close-to-block-end.patch
ext4-fix-ext4_empty_dir-for-directories-with-holes.patch
ext4-unlock-on-error-in-ext4_expand_extra_isize.patch
ext4-validate-the-debug_want_extra_isize-mount-option-at-parse-time.patch
iocost-over-budget-forced-ios-should-schedule-async-delay.patch
iwlwifi-pcie-move-power-gating-workaround-earlier-in-the-flow.patch
kvm-arm-arm64-properly-handle-faulting-of-device-mappings.patch
kvm-arm64-ensure-params-is-initialised-when-looking-up-sys-register.patch
kvm-ppc-book3s-hv-fix-regression-on-big-endian-hosts.patch
kvm-x86-host-feature-ssbd-doesn-t-imply-guest-feature-amd_ssbd.patch
kvm-x86-host-feature-ssbd-doesn-t-imply-guest-feature-spec_ctrl_ssbd.patch
mmc-sdhci-add-a-quirk-for-broken-command-queuing.patch
mmc-sdhci-msm-correct-the-offset-and-value-for-ddr_config-register.patch
mmc-sdhci-of-esdhc-fix-p2020-errata-handling.patch
mmc-sdhci-of-esdhc-revert-mmc-sdhci-of-esdhc-add-erratum-a-009204-support.patch
mmc-sdhci-update-the-tuning-failed-messages-to-pr_debug-level.patch
mmc-sdhci-workaround-broken-command-queuing-on-intel-glk.patch
nbd-fix-shutdown-and-recv-work-deadlock-v2.patch
ocxl-fix-concurrent-afu-open-and-device-removal.patch
pinctrl-baytrail-really-serialize-all-register-accesses.patch
powerpc-irq-fix-stack-overflow-verification.patch
powerpc-vcpu-assume-dedicated-processors-as-non-preempt.patch
serial-sprd-add-clearing-break-interrupt-operation.patch
tty-serial-atmel-fix-out-of-range-clock-divider-handling.patch
x86-intel-disable-hpet-on-intel-coffee-lake-h-platforms.patch
x86-mce-amd-allow-reserved-types-to-be-overwritten-in-smca_banks.patch
x86-mce-amd-do-not-use-rdmsr_safe_on_cpu-in-smca_configure.patch
x86-mce-fix-possibly-incorrect-severity-calculation-on-amd.patch

32 files changed:
queue-5.4/clk-imx-clk-composite-8m-add-lock-to-gate-mux.patch [new file with mode: 0644]
queue-5.4/clk-imx-clk-imx7ulp-add-missing-sentinel-of-ulp_div_table.patch [new file with mode: 0644]
queue-5.4/clk-imx-pll14xx-fix-clk_pll14xx_wait_lock.patch [new file with mode: 0644]
queue-5.4/ext4-check-for-directory-entries-too-close-to-block-end.patch [new file with mode: 0644]
queue-5.4/ext4-fix-ext4_empty_dir-for-directories-with-holes.patch [new file with mode: 0644]
queue-5.4/ext4-unlock-on-error-in-ext4_expand_extra_isize.patch [new file with mode: 0644]
queue-5.4/ext4-validate-the-debug_want_extra_isize-mount-option-at-parse-time.patch [new file with mode: 0644]
queue-5.4/iocost-over-budget-forced-ios-should-schedule-async-delay.patch [new file with mode: 0644]
queue-5.4/iwlwifi-pcie-move-power-gating-workaround-earlier-in-the-flow.patch [new file with mode: 0644]
queue-5.4/kvm-arm-arm64-properly-handle-faulting-of-device-mappings.patch [new file with mode: 0644]
queue-5.4/kvm-arm64-ensure-params-is-initialised-when-looking-up-sys-register.patch [new file with mode: 0644]
queue-5.4/kvm-ppc-book3s-hv-fix-regression-on-big-endian-hosts.patch [new file with mode: 0644]
queue-5.4/kvm-x86-host-feature-ssbd-doesn-t-imply-guest-feature-amd_ssbd.patch [new file with mode: 0644]
queue-5.4/kvm-x86-host-feature-ssbd-doesn-t-imply-guest-feature-spec_ctrl_ssbd.patch [new file with mode: 0644]
queue-5.4/mmc-sdhci-add-a-quirk-for-broken-command-queuing.patch [new file with mode: 0644]
queue-5.4/mmc-sdhci-msm-correct-the-offset-and-value-for-ddr_config-register.patch [new file with mode: 0644]
queue-5.4/mmc-sdhci-of-esdhc-fix-p2020-errata-handling.patch [new file with mode: 0644]
queue-5.4/mmc-sdhci-of-esdhc-revert-mmc-sdhci-of-esdhc-add-erratum-a-009204-support.patch [new file with mode: 0644]
queue-5.4/mmc-sdhci-update-the-tuning-failed-messages-to-pr_debug-level.patch [new file with mode: 0644]
queue-5.4/mmc-sdhci-workaround-broken-command-queuing-on-intel-glk.patch [new file with mode: 0644]
queue-5.4/nbd-fix-shutdown-and-recv-work-deadlock-v2.patch [new file with mode: 0644]
queue-5.4/ocxl-fix-concurrent-afu-open-and-device-removal.patch [new file with mode: 0644]
queue-5.4/pinctrl-baytrail-really-serialize-all-register-accesses.patch [new file with mode: 0644]
queue-5.4/powerpc-irq-fix-stack-overflow-verification.patch [new file with mode: 0644]
queue-5.4/powerpc-vcpu-assume-dedicated-processors-as-non-preempt.patch [new file with mode: 0644]
queue-5.4/serial-sprd-add-clearing-break-interrupt-operation.patch [new file with mode: 0644]
queue-5.4/series
queue-5.4/tty-serial-atmel-fix-out-of-range-clock-divider-handling.patch [new file with mode: 0644]
queue-5.4/x86-intel-disable-hpet-on-intel-coffee-lake-h-platforms.patch [new file with mode: 0644]
queue-5.4/x86-mce-amd-allow-reserved-types-to-be-overwritten-in-smca_banks.patch [new file with mode: 0644]
queue-5.4/x86-mce-amd-do-not-use-rdmsr_safe_on_cpu-in-smca_configure.patch [new file with mode: 0644]
queue-5.4/x86-mce-fix-possibly-incorrect-severity-calculation-on-amd.patch [new file with mode: 0644]

diff --git a/queue-5.4/clk-imx-clk-composite-8m-add-lock-to-gate-mux.patch b/queue-5.4/clk-imx-clk-composite-8m-add-lock-to-gate-mux.patch
new file mode 100644 (file)
index 0000000..0beb5f7
--- /dev/null
@@ -0,0 +1,41 @@
+From 073a01e8d7c23b3efb59a3d4c20aa546f9ec29a9 Mon Sep 17 00:00:00 2001
+From: Peng Fan <peng.fan@nxp.com>
+Date: Fri, 1 Nov 2019 10:16:19 +0000
+Subject: clk: imx: clk-composite-8m: add lock to gate/mux
+
+From: Peng Fan <peng.fan@nxp.com>
+
+commit 073a01e8d7c23b3efb59a3d4c20aa546f9ec29a9 upstream.
+
+There is a lock for the divider in the composite driver, but that's not
+enough. Locks for the gate and mux are also needed to provide exclusive
+access to the register.
+
+Fixes: d3ff9728134e ("clk: imx: Add imx composite clock")
+Signed-off-by: Peng Fan <peng.fan@nxp.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/clk/imx/clk-composite-8m.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/clk/imx/clk-composite-8m.c
++++ b/drivers/clk/imx/clk-composite-8m.c
+@@ -142,6 +142,7 @@ struct clk *imx8m_clk_composite_flags(co
+       mux->reg = reg;
+       mux->shift = PCG_PCS_SHIFT;
+       mux->mask = PCG_PCS_MASK;
++      mux->lock = &imx_ccm_lock;
+       div = kzalloc(sizeof(*div), GFP_KERNEL);
+       if (!div)
+@@ -161,6 +162,7 @@ struct clk *imx8m_clk_composite_flags(co
+       gate_hw = &gate->hw;
+       gate->reg = reg;
+       gate->bit_idx = PCG_CGC_SHIFT;
++      gate->lock = &imx_ccm_lock;
+       hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
+                       mux_hw, &clk_mux_ops, div_hw,
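
To illustrate the point of the fix above, a minimal userspace C sketch (hypothetical structure and names, not the kernel clk API): the gate, mux and divider components all read-modify-write the same control register, so they must share one lock, as the patch does with imx_ccm_lock.

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    struct component {
        uint32_t *reg;          /* shared clock control register */
        pthread_mutex_t *lock;  /* must be the SAME lock for all components */
        uint32_t mask;
        int shift;
    };

    static void component_set(struct component *c, uint32_t val)
    {
        pthread_mutex_lock(c->lock);   /* exclusive access to *reg */
        *c->reg = (*c->reg & ~(c->mask << c->shift)) | (val << c->shift);
        pthread_mutex_unlock(c->lock);
    }

    int main(void)
    {
        uint32_t pcg_reg = 0;
        pthread_mutex_t ccm_lock = PTHREAD_MUTEX_INITIALIZER;
        struct component mux  = { &pcg_reg, &ccm_lock, 0x7, 24 };
        struct component gate = { &pcg_reg, &ccm_lock, 0x1, 28 };

        component_set(&mux, 2);
        component_set(&gate, 1);
        printf("reg = 0x%08x\n", (unsigned)pcg_reg);
        return 0;
    }
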
diff --git a/queue-5.4/clk-imx-clk-imx7ulp-add-missing-sentinel-of-ulp_div_table.patch b/queue-5.4/clk-imx-clk-imx7ulp-add-missing-sentinel-of-ulp_div_table.patch
new file mode 100644 (file)
index 0000000..a4a7785
--- /dev/null
@@ -0,0 +1,32 @@
+From ed11e31709d7ddb19d4dc451d5bbfb15129f4cad Mon Sep 17 00:00:00 2001
+From: Peng Fan <peng.fan@nxp.com>
+Date: Fri, 22 Nov 2019 06:11:42 +0000
+Subject: clk: imx: clk-imx7ulp: Add missing sentinel of ulp_div_table
+
+From: Peng Fan <peng.fan@nxp.com>
+
+commit ed11e31709d7ddb19d4dc451d5bbfb15129f4cad upstream.
+
+There should be a sentinel of ulp_div_table, otherwise _get_table_div
+may access data out of the array.
+
+Fixes: b1260067ac3d ("clk: imx: add imx7ulp clk driver")
+Signed-off-by: Peng Fan <peng.fan@nxp.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/clk/imx/clk-imx7ulp.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/clk/imx/clk-imx7ulp.c
++++ b/drivers/clk/imx/clk-imx7ulp.c
+@@ -40,6 +40,7 @@ static const struct clk_div_table ulp_di
+       { .val = 5, .div = 16, },
+       { .val = 6, .div = 32, },
+       { .val = 7, .div = 64, },
++      { /* sentinel */ },
+ };
+ static const int pcc2_uart_clk_ids[] __initconst = {
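
A small userspace C sketch of the sentinel issue described above (hypothetical table values, not the imx7ulp driver): the walker stops at the zero entry, and without it would read past the end of the array.

    #include <stdio.h>

    struct div_entry { unsigned int val; unsigned int div; };

    static const struct div_entry ulp_div_table[] = {
        { .val = 1, .div = 1, },
        { .val = 2, .div = 2, },
        { .val = 7, .div = 64, },
        { /* sentinel: .val = 0, .div = 0 */ },
    };

    /* Walks until it hits the sentinel; without it, 'e' would run off the array. */
    static unsigned int get_table_val(const struct div_entry *table, unsigned int div)
    {
        for (const struct div_entry *e = table; e->div; e++)
            if (e->div == div)
                return e->val;
        return 0; /* not found */
    }

    int main(void)
    {
        printf("val for div 64: %u\n", get_table_val(ulp_div_table, 64));
        printf("val for div 99: %u\n", get_table_val(ulp_div_table, 99)); /* safe: stops at sentinel */
        return 0;
    }
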
diff --git a/queue-5.4/clk-imx-pll14xx-fix-clk_pll14xx_wait_lock.patch b/queue-5.4/clk-imx-pll14xx-fix-clk_pll14xx_wait_lock.patch
new file mode 100644 (file)
index 0000000..379484a
--- /dev/null
@@ -0,0 +1,36 @@
+From c3a5fd15ed0c1494435e4e35fbee734ae46b5073 Mon Sep 17 00:00:00 2001
+From: Peng Fan <peng.fan@nxp.com>
+Date: Mon, 9 Dec 2019 08:19:55 +0000
+Subject: clk: imx: pll14xx: fix clk_pll14xx_wait_lock
+
+From: Peng Fan <peng.fan@nxp.com>
+
+commit c3a5fd15ed0c1494435e4e35fbee734ae46b5073 upstream.
+
+The usage of readl_poll_timeout is wrong: the 3rd parameter (cond)
+should be "val & LOCK_STATUS", not "val & LOCK_TIMEOUT_US".
+The current code does not check whether the PLL has locked; LOCK_STATUS
+is the status mask for that, not LOCK_TIMEOUT_US.
+
+Fixes: 8646d4dcc7fb ("clk: imx: Add PLLs driver for imx8mm soc")
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Abel Vesa <abel.vesa@nxp.com>
+Signed-off-by: Peng Fan <peng.fan@nxp.com>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/clk/imx/clk-pll14xx.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/clk/imx/clk-pll14xx.c
++++ b/drivers/clk/imx/clk-pll14xx.c
+@@ -153,7 +153,7 @@ static int clk_pll14xx_wait_lock(struct
+ {
+       u32 val;
+-      return readl_poll_timeout(pll->base, val, val & LOCK_TIMEOUT_US, 0,
++      return readl_poll_timeout(pll->base, val, val & LOCK_STATUS, 0,
+                       LOCK_TIMEOUT_US);
+ }
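
A simplified userspace model of the polling logic (not the kernel readl_poll_timeout helper): the loop condition must test the status mask, while the timeout constant only bounds how long to poll, which is exactly the confusion the patch corrects.

    #include <stdint.h>
    #include <stdio.h>

    #define LOCK_STATUS      (1u << 31)   /* "PLL locked" status bit */
    #define LOCK_TIMEOUT_US  10000        /* how long to poll, NOT a register mask */

    static uint32_t read_pll_status(int lock_after)
    {
        static int polls;                 /* pretend the PLL locks after a few polls */
        return (++polls >= lock_after) ? LOCK_STATUS : 0;
    }

    static int wait_lock(void)
    {
        for (unsigned int waited = 0; waited < LOCK_TIMEOUT_US; waited += 10) {
            uint32_t val = read_pll_status(3);
            if (val & LOCK_STATUS)        /* correct condition: test the status bit */
                return 0;
            /* usleep(10) in real code */
        }
        return -1;                        /* timed out */
    }

    int main(void)
    {
        printf("wait_lock() = %d\n", wait_lock());
        return 0;
    }
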
diff --git a/queue-5.4/ext4-check-for-directory-entries-too-close-to-block-end.patch b/queue-5.4/ext4-check-for-directory-entries-too-close-to-block-end.patch
new file mode 100644 (file)
index 0000000..75b3858
--- /dev/null
@@ -0,0 +1,39 @@
+From 109ba779d6cca2d519c5dd624a3276d03e21948e Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Mon, 2 Dec 2019 18:02:13 +0100
+Subject: ext4: check for directory entries too close to block end
+
+From: Jan Kara <jack@suse.cz>
+
+commit 109ba779d6cca2d519c5dd624a3276d03e21948e upstream.
+
+ext4_check_dir_entry() currently does not catch a case when a directory
+entry ends so close to the block end that the header of the next
+directory entry would not fit in the remaining space. This can lead to
+the directory iteration code trying to access an address beyond the end
+of the current buffer head, leading to an oops.
+
+CC: stable@vger.kernel.org
+Signed-off-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20191202170213.4761-3-jack@suse.cz
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/dir.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/fs/ext4/dir.c
++++ b/fs/ext4/dir.c
+@@ -81,6 +81,11 @@ int __ext4_check_dir_entry(const char *f
+               error_msg = "rec_len is too small for name_len";
+       else if (unlikely(((char *) de - buf) + rlen > size))
+               error_msg = "directory entry overrun";
++      else if (unlikely(((char *) de - buf) + rlen >
++                        size - EXT4_DIR_REC_LEN(1) &&
++                        ((char *) de - buf) + rlen != size)) {
++              error_msg = "directory entry too close to block end";
++      }
+       else if (unlikely(le32_to_cpu(de->inode) >
+                       le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
+               error_msg = "inode out of bounds";
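
A standalone C sketch of the arithmetic behind the new check (hypothetical block and header sizes): an entry may end exactly at the block boundary, but not inside the last EXT4_DIR_REC_LEN(1) bytes, because the next entry's header would no longer fit.

    #include <stdbool.h>
    #include <stdio.h>

    #define BLOCK_SIZE   4096u
    #define MIN_REC_LEN  12u   /* stand-in for EXT4_DIR_REC_LEN(1) */

    static bool entry_too_close_to_block_end(unsigned int offset, unsigned int rec_len)
    {
        unsigned int end = offset + rec_len;
        /* ends in the "dead zone" before the block end, but not exactly at it */
        return end > BLOCK_SIZE - MIN_REC_LEN && end != BLOCK_SIZE;
    }

    int main(void)
    {
        printf("%d\n", entry_too_close_to_block_end(4000, 88));  /* ends at 4088: rejected */
        printf("%d\n", entry_too_close_to_block_end(4000, 96));  /* ends at 4096: ok, last entry */
        return 0;
    }
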
diff --git a/queue-5.4/ext4-fix-ext4_empty_dir-for-directories-with-holes.patch b/queue-5.4/ext4-fix-ext4_empty_dir-for-directories-with-holes.patch
new file mode 100644 (file)
index 0000000..7270918
--- /dev/null
@@ -0,0 +1,97 @@
+From 64d4ce892383b2ad6d782e080d25502f91bf2a38 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Mon, 2 Dec 2019 18:02:12 +0100
+Subject: ext4: fix ext4_empty_dir() for directories with holes
+
+From: Jan Kara <jack@suse.cz>
+
+commit 64d4ce892383b2ad6d782e080d25502f91bf2a38 upstream.
+
+Function ext4_empty_dir() doesn't correctly handle directories with
+holes and crashes on bh->b_data dereference when bh is NULL. Reorganize
+the loop to use 'offset' variable all the times instead of comparing
+pointers to current direntry with bh->b_data pointer. Also add more
+strict checking of '.' and '..' directory entries to avoid entering loop
+in possibly invalid state on corrupted filesystems.
+
+References: CVE-2019-19037
+CC: stable@vger.kernel.org
+Fixes: 4e19d6b65fb4 ("ext4: allow directory holes")
+Signed-off-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20191202170213.4761-2-jack@suse.cz
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/namei.c |   32 ++++++++++++++++++--------------
+ 1 file changed, 18 insertions(+), 14 deletions(-)
+
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2808,7 +2808,7 @@ bool ext4_empty_dir(struct inode *inode)
+ {
+       unsigned int offset;
+       struct buffer_head *bh;
+-      struct ext4_dir_entry_2 *de, *de1;
++      struct ext4_dir_entry_2 *de;
+       struct super_block *sb;
+       if (ext4_has_inline_data(inode)) {
+@@ -2833,19 +2833,25 @@ bool ext4_empty_dir(struct inode *inode)
+               return true;
+       de = (struct ext4_dir_entry_2 *) bh->b_data;
+-      de1 = ext4_next_entry(de, sb->s_blocksize);
+-      if (le32_to_cpu(de->inode) != inode->i_ino ||
+-                      le32_to_cpu(de1->inode) == 0 ||
+-                      strcmp(".", de->name) || strcmp("..", de1->name)) {
+-              ext4_warning_inode(inode, "directory missing '.' and/or '..'");
++      if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
++                               0) ||
++          le32_to_cpu(de->inode) != inode->i_ino || strcmp(".", de->name)) {
++              ext4_warning_inode(inode, "directory missing '.'");
+               brelse(bh);
+               return true;
+       }
+-      offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) +
+-               ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize);
+-      de = ext4_next_entry(de1, sb->s_blocksize);
++      offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
++      de = ext4_next_entry(de, sb->s_blocksize);
++      if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
++                               offset) ||
++          le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) {
++              ext4_warning_inode(inode, "directory missing '..'");
++              brelse(bh);
++              return true;
++      }
++      offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
+       while (offset < inode->i_size) {
+-              if ((void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
++              if (!(offset & (sb->s_blocksize - 1))) {
+                       unsigned int lblock;
+                       brelse(bh);
+                       lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
+@@ -2856,12 +2862,11 @@ bool ext4_empty_dir(struct inode *inode)
+                       }
+                       if (IS_ERR(bh))
+                               return true;
+-                      de = (struct ext4_dir_entry_2 *) bh->b_data;
+               }
++              de = (struct ext4_dir_entry_2 *) (bh->b_data +
++                                      (offset & (sb->s_blocksize - 1)));
+               if (ext4_check_dir_entry(inode, NULL, de, bh,
+                                        bh->b_data, bh->b_size, offset)) {
+-                      de = (struct ext4_dir_entry_2 *)(bh->b_data +
+-                                                       sb->s_blocksize);
+                       offset = (offset | (sb->s_blocksize - 1)) + 1;
+                       continue;
+               }
+@@ -2870,7 +2875,6 @@ bool ext4_empty_dir(struct inode *inode)
+                       return false;
+               }
+               offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
+-              de = ext4_next_entry(de, sb->s_blocksize);
+       }
+       brelse(bh);
+       return true;
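
A simplified userspace model of the reworked loop (hypothetical sizes, not the ext4 code): iterate purely by byte offset and fetch a block, which may be absent, whenever the offset crosses a block boundary, instead of comparing direntry pointers against bh->b_data.

    #include <stdbool.h>
    #include <stdio.h>

    #define BLOCK_SIZE 4096u

    static bool block_present(unsigned int lblock)
    {
        return lblock != 1;              /* pretend block 1 is a hole */
    }

    static unsigned int entry_rec_len(unsigned int offset)
    {
        (void)offset;
        return 512;                      /* pretend every entry spans 512 bytes */
    }

    int main(void)
    {
        unsigned int size = 3 * BLOCK_SIZE;

        for (unsigned int offset = 0; offset < size; ) {
            if (!(offset & (BLOCK_SIZE - 1))) {       /* crossed a block boundary */
                unsigned int lblock = offset / BLOCK_SIZE;
                if (!block_present(lblock)) {         /* hole: skip the whole block */
                    offset += BLOCK_SIZE;
                    continue;
                }
            }
            /* in the kernel: de = bh->b_data + (offset & (blocksize - 1)) */
            offset += entry_rec_len(offset);
        }
        printf("walked %u bytes\n", size);
        return 0;
    }
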
diff --git a/queue-5.4/ext4-unlock-on-error-in-ext4_expand_extra_isize.patch b/queue-5.4/ext4-unlock-on-error-in-ext4_expand_extra_isize.patch
new file mode 100644 (file)
index 0000000..a58e4bf
--- /dev/null
@@ -0,0 +1,43 @@
+From 7f420d64a08c1dcd65b27be82a27cf2bdb2e7847 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Fri, 13 Dec 2019 21:50:11 +0300
+Subject: ext4: unlock on error in ext4_expand_extra_isize()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 7f420d64a08c1dcd65b27be82a27cf2bdb2e7847 upstream.
+
+We need to unlock the xattr before returning on this error path.
+
+Cc: stable@kernel.org # 4.13
+Fixes: c03b45b853f5 ("ext4, project: expand inode extra size if possible")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Link: https://lore.kernel.org/r/20191213185010.6k7yl2tck3wlsdkt@kili.mountain
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/inode.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -6035,7 +6035,7 @@ int ext4_expand_extra_isize(struct inode
+       error = ext4_journal_get_write_access(handle, iloc->bh);
+       if (error) {
+               brelse(iloc->bh);
+-              goto out_stop;
++              goto out_unlock;
+       }
+       error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
+@@ -6045,8 +6045,8 @@ int ext4_expand_extra_isize(struct inode
+       if (!error)
+               error = rc;
++out_unlock:
+       ext4_write_unlock_xattr(inode, &no_expand);
+-out_stop:
+       ext4_journal_stop(handle);
+       return error;
+ }
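
A minimal C sketch of the error-path ordering the fix restores (illustrative only, kernel calls left as comments): resources are released in reverse order of acquisition, so a failure after taking the xattr lock has to jump to a label that still performs the unlock.

    #include <stdio.h>

    static int expand_extra_isize(int fail_write_access)
    {
        int error = 0;
        /* handle = ext4_journal_start(...); */
        /* ext4_write_lock_xattr(...);       */
        if (fail_write_access) {
            error = -5;            /* -EIO */
            goto out_unlock;       /* was "goto out_stop", which skipped the unlock */
        }
        /* ... do the expansion ... */
    out_unlock:
        /* ext4_write_unlock_xattr(...);  */
        /* ext4_journal_stop(handle);     */
        return error;
    }

    int main(void)
    {
        printf("%d\n", expand_extra_isize(1));
        return 0;
    }
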
diff --git a/queue-5.4/ext4-validate-the-debug_want_extra_isize-mount-option-at-parse-time.patch b/queue-5.4/ext4-validate-the-debug_want_extra_isize-mount-option-at-parse-time.patch
new file mode 100644 (file)
index 0000000..8dc347a
--- /dev/null
@@ -0,0 +1,210 @@
+From 9803387c55f7d2ce69aa64340c5fdc6b3027dbc8 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Sun, 15 Dec 2019 01:09:03 -0500
+Subject: ext4: validate the debug_want_extra_isize mount option at parse time
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 9803387c55f7d2ce69aa64340c5fdc6b3027dbc8 upstream.
+
+Instead of setting s_want_extra_isize and then making sure that it is a
+valid value afterwards, validate the field before we set it.  This
+avoids races and other problems when remounting the file system.
+
+Link: https://lore.kernel.org/r/20191215063020.GA11512@mit.edu
+Cc: stable@kernel.org
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Reported-and-tested-by: syzbot+4a39a025912b265cacef@syzkaller.appspotmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/super.c |  143 +++++++++++++++++++++++++++-----------------------------
+ 1 file changed, 69 insertions(+), 74 deletions(-)
+
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1887,6 +1887,13 @@ static int handle_mount_opt(struct super
+               }
+               sbi->s_commit_interval = HZ * arg;
+       } else if (token == Opt_debug_want_extra_isize) {
++              if ((arg & 1) ||
++                  (arg < 4) ||
++                  (arg > (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE))) {
++                      ext4_msg(sb, KERN_ERR,
++                               "Invalid want_extra_isize %d", arg);
++                      return -1;
++              }
+               sbi->s_want_extra_isize = arg;
+       } else if (token == Opt_max_batch_time) {
+               sbi->s_max_batch_time = arg;
+@@ -3551,40 +3558,6 @@ int ext4_calculate_overhead(struct super
+       return 0;
+ }
+-static void ext4_clamp_want_extra_isize(struct super_block *sb)
+-{
+-      struct ext4_sb_info *sbi = EXT4_SB(sb);
+-      struct ext4_super_block *es = sbi->s_es;
+-      unsigned def_extra_isize = sizeof(struct ext4_inode) -
+-                                              EXT4_GOOD_OLD_INODE_SIZE;
+-
+-      if (sbi->s_inode_size == EXT4_GOOD_OLD_INODE_SIZE) {
+-              sbi->s_want_extra_isize = 0;
+-              return;
+-      }
+-      if (sbi->s_want_extra_isize < 4) {
+-              sbi->s_want_extra_isize = def_extra_isize;
+-              if (ext4_has_feature_extra_isize(sb)) {
+-                      if (sbi->s_want_extra_isize <
+-                          le16_to_cpu(es->s_want_extra_isize))
+-                              sbi->s_want_extra_isize =
+-                                      le16_to_cpu(es->s_want_extra_isize);
+-                      if (sbi->s_want_extra_isize <
+-                          le16_to_cpu(es->s_min_extra_isize))
+-                              sbi->s_want_extra_isize =
+-                                      le16_to_cpu(es->s_min_extra_isize);
+-              }
+-      }
+-      /* Check if enough inode space is available */
+-      if ((sbi->s_want_extra_isize > sbi->s_inode_size) ||
+-          (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
+-                                                      sbi->s_inode_size)) {
+-              sbi->s_want_extra_isize = def_extra_isize;
+-              ext4_msg(sb, KERN_INFO,
+-                       "required extra inode space not available");
+-      }
+-}
+-
+ static void ext4_set_resv_clusters(struct super_block *sb)
+ {
+       ext4_fsblk_t resv_clusters;
+@@ -3792,6 +3765,68 @@ static int ext4_fill_super(struct super_
+        */
+       sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
++      if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
++              sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
++              sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
++      } else {
++              sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
++              sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
++              if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
++                      ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
++                               sbi->s_first_ino);
++                      goto failed_mount;
++              }
++              if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
++                  (!is_power_of_2(sbi->s_inode_size)) ||
++                  (sbi->s_inode_size > blocksize)) {
++                      ext4_msg(sb, KERN_ERR,
++                             "unsupported inode size: %d",
++                             sbi->s_inode_size);
++                      goto failed_mount;
++              }
++              /*
++               * i_atime_extra is the last extra field available for
++               * [acm]times in struct ext4_inode. Checking for that
++               * field should suffice to ensure we have extra space
++               * for all three.
++               */
++              if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) +
++                      sizeof(((struct ext4_inode *)0)->i_atime_extra)) {
++                      sb->s_time_gran = 1;
++                      sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX;
++              } else {
++                      sb->s_time_gran = NSEC_PER_SEC;
++                      sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX;
++              }
++              sb->s_time_min = EXT4_TIMESTAMP_MIN;
++      }
++      if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
++              sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
++                      EXT4_GOOD_OLD_INODE_SIZE;
++              if (ext4_has_feature_extra_isize(sb)) {
++                      unsigned v, max = (sbi->s_inode_size -
++                                         EXT4_GOOD_OLD_INODE_SIZE);
++
++                      v = le16_to_cpu(es->s_want_extra_isize);
++                      if (v > max) {
++                              ext4_msg(sb, KERN_ERR,
++                                       "bad s_want_extra_isize: %d", v);
++                              goto failed_mount;
++                      }
++                      if (sbi->s_want_extra_isize < v)
++                              sbi->s_want_extra_isize = v;
++
++                      v = le16_to_cpu(es->s_min_extra_isize);
++                      if (v > max) {
++                              ext4_msg(sb, KERN_ERR,
++                                       "bad s_min_extra_isize: %d", v);
++                              goto failed_mount;
++                      }
++                      if (sbi->s_want_extra_isize < v)
++                              sbi->s_want_extra_isize = v;
++              }
++      }
++
+       if (sbi->s_es->s_mount_opts[0]) {
+               char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
+                                             sizeof(sbi->s_es->s_mount_opts),
+@@ -4030,42 +4065,6 @@ static int ext4_fill_super(struct super_
+                                                     has_huge_files);
+       sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
+-      if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
+-              sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
+-              sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
+-      } else {
+-              sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
+-              sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
+-              if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
+-                      ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
+-                               sbi->s_first_ino);
+-                      goto failed_mount;
+-              }
+-              if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
+-                  (!is_power_of_2(sbi->s_inode_size)) ||
+-                  (sbi->s_inode_size > blocksize)) {
+-                      ext4_msg(sb, KERN_ERR,
+-                             "unsupported inode size: %d",
+-                             sbi->s_inode_size);
+-                      goto failed_mount;
+-              }
+-              /*
+-               * i_atime_extra is the last extra field available for [acm]times in
+-               * struct ext4_inode. Checking for that field should suffice to ensure
+-               * we have extra space for all three.
+-               */
+-              if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) +
+-                      sizeof(((struct ext4_inode *)0)->i_atime_extra)) {
+-                      sb->s_time_gran = 1;
+-                      sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX;
+-              } else {
+-                      sb->s_time_gran = NSEC_PER_SEC;
+-                      sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX;
+-              }
+-
+-              sb->s_time_min = EXT4_TIMESTAMP_MIN;
+-      }
+-
+       sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
+       if (ext4_has_feature_64bit(sb)) {
+               if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
+@@ -4521,8 +4520,6 @@ no_journal:
+       } else if (ret)
+               goto failed_mount4a;
+-      ext4_clamp_want_extra_isize(sb);
+-
+       ext4_set_resv_clusters(sb);
+       err = ext4_setup_system_zone(sb);
+@@ -5310,8 +5307,6 @@ static int ext4_remount(struct super_blo
+               goto restore_opts;
+       }
+-      ext4_clamp_want_extra_isize(sb);
+-
+       if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
+           test_opt(sb, JOURNAL_CHECKSUM)) {
+               ext4_msg(sb, KERN_ERR, "changing journal_checksum "
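
A small C sketch of the parse-time validation pattern (hypothetical limits mirroring the hunk above): reject odd values, values below 4 and values that do not fit in the inode before storing them, rather than clamping a bad value later.

    #include <stdio.h>

    #define GOOD_OLD_INODE_SIZE 128

    static int parse_want_extra_isize(int arg, int inode_size, int *out)
    {
        if ((arg & 1) || arg < 4 || arg > inode_size - GOOD_OLD_INODE_SIZE)
            return -1;                 /* invalid: refuse the mount option */
        *out = arg;                    /* only store values already known to be valid */
        return 0;
    }

    int main(void)
    {
        int v;
        printf("%d\n", parse_want_extra_isize(32, 256, &v));   /* ok */
        printf("%d\n", parse_want_extra_isize(33, 256, &v));   /* odd: rejected */
        printf("%d\n", parse_want_extra_isize(200, 256, &v));  /* too large: rejected */
        return 0;
    }
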
diff --git a/queue-5.4/iocost-over-budget-forced-ios-should-schedule-async-delay.patch b/queue-5.4/iocost-over-budget-forced-ios-should-schedule-async-delay.patch
new file mode 100644 (file)
index 0000000..24d88be
--- /dev/null
@@ -0,0 +1,80 @@
+From d7bd15a138aef3be227818aad9c501e43c89c8c5 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 16 Dec 2019 13:34:00 -0800
+Subject: iocost: over-budget forced IOs should schedule async delay
+
+From: Tejun Heo <tj@kernel.org>
+
+commit d7bd15a138aef3be227818aad9c501e43c89c8c5 upstream.
+
+When over-budget IOs are force-issued through root cgroup,
+iocg_kick_delay() adjusts the async delay accordingly but doesn't
+actually schedule async throttle for the issuing task.  This bug is
+pretty well masked because sooner or later the offending threads are
+gonna get directly throttled on regular IOs or have async delay
+scheduled by mem_cgroup_throttle_swaprate().
+
+However, it can affect control quality on filesystem metadata heavy
+operations.  Let's fix it by invoking blkcg_schedule_throttle() when
+iocg_kick_delay() says async delay is needed.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Fixes: 7caa47151ab2 ("blkcg: implement blk-iocost")
+Cc: stable@vger.kernel.org
+Reported-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-iocost.c |   13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -1212,7 +1212,7 @@ static enum hrtimer_restart iocg_waitq_t
+       return HRTIMER_NORESTART;
+ }
+-static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
++static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
+ {
+       struct ioc *ioc = iocg->ioc;
+       struct blkcg_gq *blkg = iocg_to_blkg(iocg);
+@@ -1229,11 +1229,11 @@ static void iocg_kick_delay(struct ioc_g
+       /* clear or maintain depending on the overage */
+       if (time_before_eq64(vtime, now->vnow)) {
+               blkcg_clear_delay(blkg);
+-              return;
++              return false;
+       }
+       if (!atomic_read(&blkg->use_delay) &&
+           time_before_eq64(vtime, now->vnow + vmargin))
+-              return;
++              return false;
+       /* use delay */
+       if (cost) {
+@@ -1250,10 +1250,11 @@ static void iocg_kick_delay(struct ioc_g
+       oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer));
+       if (hrtimer_is_queued(&iocg->delay_timer) &&
+           abs(oexpires - expires) <= margin_ns / 4)
+-              return;
++              return true;
+       hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires),
+                              margin_ns / 4, HRTIMER_MODE_ABS);
++      return true;
+ }
+ static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)
+@@ -1739,7 +1740,9 @@ static void ioc_rqos_throttle(struct rq_
+        */
+       if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
+               atomic64_add(abs_cost, &iocg->abs_vdebt);
+-              iocg_kick_delay(iocg, &now, cost);
++              if (iocg_kick_delay(iocg, &now, cost))
++                      blkcg_schedule_throttle(rqos->q,
++                                      (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
+               return;
+       }
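
A compact C sketch of the control-flow change (illustrative, not the blk-iocost code): the delay helper now reports whether a delay is in effect so the caller can schedule the async throttle for the issuing task.

    #include <stdbool.h>
    #include <stdio.h>

    static bool kick_delay(long vtime, long vnow)
    {
        if (vtime <= vnow)
            return false;        /* not over budget: no delay needed */
        /* arm or extend the delay timer here */
        return true;
    }

    static void throttle_forced_io(long vtime, long vnow)
    {
        if (kick_delay(vtime, vnow))
            printf("schedule async throttle for current task\n");
        else
            printf("no throttle needed\n");
    }

    int main(void)
    {
        throttle_forced_io(150, 100);  /* over budget */
        throttle_forced_io(80, 100);   /* within budget */
        return 0;
    }
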
diff --git a/queue-5.4/iwlwifi-pcie-move-power-gating-workaround-earlier-in-the-flow.patch b/queue-5.4/iwlwifi-pcie-move-power-gating-workaround-earlier-in-the-flow.patch
new file mode 100644 (file)
index 0000000..13e5533
--- /dev/null
@@ -0,0 +1,118 @@
+From 0df36b90c47d93295b7e393da2d961b2f3b6cde4 Mon Sep 17 00:00:00 2001
+From: Luca Coelho <luciano.coelho@intel.com>
+Date: Thu, 5 Dec 2019 09:03:54 +0200
+Subject: iwlwifi: pcie: move power gating workaround earlier in the flow
+
+From: Luca Coelho <luciano.coelho@intel.com>
+
+commit 0df36b90c47d93295b7e393da2d961b2f3b6cde4 upstream.
+
+We need to reset the NIC after setting the bits to enable power
+gating and that cannot be done too late in the flow otherwise it
+cleans other registers and things that were already configured,
+causing initialization to fail.
+
+In order to fix this, move the function to the common code in trans.c
+so it can be called directly from there at an earlier point, just
+after the reset we already do during initialization.
+
+Fixes: 9a47cb988338 ("iwlwifi: pcie: add workaround for power gating in integrated 22000")
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=205719
+Cc: stable@vger.kernel.org # 5.4+
+Reported-by: Anders Kaseorg <andersk@mit.edu>
+Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c |   25 ---------------
+ drivers/net/wireless/intel/iwlwifi/pcie/trans.c      |   30 +++++++++++++++++++
+ 2 files changed, 30 insertions(+), 25 deletions(-)
+
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+@@ -57,24 +57,6 @@
+ #include "internal.h"
+ #include "fw/dbg.h"
+-static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
+-{
+-      iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+-                        HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+-      udelay(20);
+-      iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+-                        HPM_HIPM_GEN_CFG_CR_PG_EN |
+-                        HPM_HIPM_GEN_CFG_CR_SLP_EN);
+-      udelay(20);
+-      iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
+-                          HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+-
+-      iwl_trans_sw_reset(trans);
+-      iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+-
+-      return 0;
+-}
+-
+ /*
+  * Start up NIC's basic functionality after it has been reset
+  * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
+@@ -110,13 +92,6 @@ int iwl_pcie_gen2_apm_init(struct iwl_tr
+       iwl_pcie_apm_config(trans);
+-      if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
+-          trans->cfg->integrated) {
+-              ret = iwl_pcie_gen2_force_power_gating(trans);
+-              if (ret)
+-                      return ret;
+-      }
+-
+       ret = iwl_finish_nic_init(trans, trans->trans_cfg);
+       if (ret)
+               return ret;
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -1783,6 +1783,29 @@ static int iwl_trans_pcie_clear_persiste
+       return 0;
+ }
++static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
++{
++      int ret;
++
++      ret = iwl_finish_nic_init(trans, trans->trans_cfg);
++      if (ret < 0)
++              return ret;
++
++      iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
++                        HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
++      udelay(20);
++      iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
++                        HPM_HIPM_GEN_CFG_CR_PG_EN |
++                        HPM_HIPM_GEN_CFG_CR_SLP_EN);
++      udelay(20);
++      iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
++                          HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
++
++      iwl_trans_pcie_sw_reset(trans);
++
++      return 0;
++}
++
+ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
+ {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+@@ -1802,6 +1825,13 @@ static int _iwl_trans_pcie_start_hw(stru
+       iwl_trans_pcie_sw_reset(trans);
++      if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
++          trans->cfg->integrated) {
++              err = iwl_pcie_gen2_force_power_gating(trans);
++              if (err)
++                      return err;
++      }
++
+       err = iwl_pcie_apm_init(trans);
+       if (err)
+               return err;
diff --git a/queue-5.4/kvm-arm-arm64-properly-handle-faulting-of-device-mappings.patch b/queue-5.4/kvm-arm-arm64-properly-handle-faulting-of-device-mappings.patch
new file mode 100644 (file)
index 0000000..4b41469
--- /dev/null
@@ -0,0 +1,99 @@
+From 6d674e28f642e3ff676fbae2d8d1b872814d32b6 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Wed, 11 Dec 2019 16:56:48 +0000
+Subject: KVM: arm/arm64: Properly handle faulting of device mappings
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit 6d674e28f642e3ff676fbae2d8d1b872814d32b6 upstream.
+
+A device mapping is normally always mapped at Stage-2, since there
+is very little gain in having it faulted in.
+
+Nonetheless, it is possible to end up in a situation where the device
+mapping has been removed from Stage-2 (userspace munmap'd the VFIO
+region, and the MMU notifier did its job), but is still present in a
+userspace mapping (userspace has mapped it back at the same address). In such
+a situation, the device mapping will be demand-paged as the guest
+performs memory accesses.
+
+This requires us to be careful when dealing with mapping size and cache
+management, and to handle potential execution of a device mapping.
+
+Reported-by: Alexandru Elisei <alexandru.elisei@arm.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Tested-by: Alexandru Elisei <alexandru.elisei@arm.com>
+Reviewed-by: James Morse <james.morse@arm.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20191211165651.7889-2-maz@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/arm/mmu.c |   21 +++++++++++++++++----
+ 1 file changed, 17 insertions(+), 4 deletions(-)
+
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -38,6 +38,11 @@ static unsigned long io_map_base;
+ #define KVM_S2PTE_FLAG_IS_IOMAP               (1UL << 0)
+ #define KVM_S2_FLAG_LOGGING_ACTIVE    (1UL << 1)
++static bool is_iomap(unsigned long flags)
++{
++      return flags & KVM_S2PTE_FLAG_IS_IOMAP;
++}
++
+ static bool memslot_is_logging(struct kvm_memory_slot *memslot)
+ {
+       return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
+@@ -1698,6 +1703,7 @@ static int user_mem_abort(struct kvm_vcp
+       vma_pagesize = vma_kernel_pagesize(vma);
+       if (logging_active ||
++          (vma->vm_flags & VM_PFNMAP) ||
+           !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
+               force_pte = true;
+               vma_pagesize = PAGE_SIZE;
+@@ -1760,6 +1766,9 @@ static int user_mem_abort(struct kvm_vcp
+                       writable = false;
+       }
++      if (exec_fault && is_iomap(flags))
++              return -ENOEXEC;
++
+       spin_lock(&kvm->mmu_lock);
+       if (mmu_notifier_retry(kvm, mmu_seq))
+               goto out_unlock;
+@@ -1781,7 +1790,7 @@ static int user_mem_abort(struct kvm_vcp
+       if (writable)
+               kvm_set_pfn_dirty(pfn);
+-      if (fault_status != FSC_PERM)
++      if (fault_status != FSC_PERM && !is_iomap(flags))
+               clean_dcache_guest_page(pfn, vma_pagesize);
+       if (exec_fault)
+@@ -1948,9 +1957,8 @@ int kvm_handle_guest_abort(struct kvm_vc
+       if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
+               if (is_iabt) {
+                       /* Prefetch Abort on I/O address */
+-                      kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+-                      ret = 1;
+-                      goto out_unlock;
++                      ret = -ENOEXEC;
++                      goto out;
+               }
+               /*
+@@ -1992,6 +2000,11 @@ int kvm_handle_guest_abort(struct kvm_vc
+       ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
+       if (ret == 0)
+               ret = 1;
++out:
++      if (ret == -ENOEXEC) {
++              kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
++              ret = 1;
++      }
+ out_unlock:
+       srcu_read_unlock(&vcpu->kvm->srcu, idx);
+       return ret;
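
A simplified decision sketch in C (illustrative values, not the KVM code) of how the fault path treats a VM_PFNMAP device region after the patch: force page-sized mappings, refuse execution and skip D-cache maintenance.

    #include <stdbool.h>
    #include <stdio.h>

    struct fault { bool vma_is_pfnmap; bool exec_fault; };

    static int handle_abort(struct fault f)
    {
        bool iomap = f.vma_is_pfnmap;    /* region mapped as device memory */

        if (iomap && f.exec_fault)
            return -8;                   /* -ENOEXEC: inject a prefetch abort instead */
        if (!iomap) {
            /* clean_dcache_guest_page(...) only for normal memory */
        }
        /* for iomap, map at PAGE_SIZE granularity (force_pte) */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", handle_abort((struct fault){ true, true }));
        printf("%d\n", handle_abort((struct fault){ true, false }));
        return 0;
    }
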
diff --git a/queue-5.4/kvm-arm64-ensure-params-is-initialised-when-looking-up-sys-register.patch b/queue-5.4/kvm-arm64-ensure-params-is-initialised-when-looking-up-sys-register.patch
new file mode 100644 (file)
index 0000000..ff3c865
--- /dev/null
@@ -0,0 +1,58 @@
+From 1ce74e96c2407df2b5867e5d45a70aacb8923c14 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will@kernel.org>
+Date: Thu, 12 Dec 2019 09:40:49 +0000
+Subject: KVM: arm64: Ensure 'params' is initialised when looking up sys register
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Will Deacon <will@kernel.org>
+
+commit 1ce74e96c2407df2b5867e5d45a70aacb8923c14 upstream.
+
+Commit 4b927b94d5df ("KVM: arm/arm64: vgic: Introduce find_reg_by_id()")
+introduced 'find_reg_by_id()', which looks up a system register only if
+the 'id' index parameter identifies a valid system register. As part of
+the patch, existing callers of 'find_reg()' were ported over to the new
+interface, but this breaks 'index_to_sys_reg_desc()' in the case that the
+initial lookup in the vCPU target table fails because we will then call
+into 'find_reg()' for the system register table with an uninitialised
+'param' as the key to the lookup.
+
+GCC 10 is bright enough to spot this (amongst a tonne of false positives,
+but hey!):
+
+  | arch/arm64/kvm/sys_regs.c: In function ‘index_to_sys_reg_desc.part.0.isra’:
+  | arch/arm64/kvm/sys_regs.c:983:33: warning: ‘params.Op2’ may be used uninitialized in this function [-Wmaybe-uninitialized]
+  |   983 |   (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2);
+  | [...]
+
+Revert the hunk of 4b927b94d5df which breaks 'index_to_sys_reg_desc()' so
+that the old behaviour of checking the index upfront is restored.
+
+Fixes: 4b927b94d5df ("KVM: arm/arm64: vgic: Introduce find_reg_by_id()")
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20191212094049.12437-1-will@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kvm/sys_regs.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -2360,8 +2360,11 @@ static const struct sys_reg_desc *index_
+       if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
+               return NULL;
++      if (!index_to_params(id, &params))
++              return NULL;
++
+       table = get_target_table(vcpu->arch.target, true, &num);
+-      r = find_reg_by_id(id, &params, table, num);
++      r = find_reg(&params, table, num);
+       if (!r)
+               r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
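
A small userspace C sketch of the underlying bug (hypothetical encoding, not the real arm64 sysreg layout): a struct used as a lookup key must be filled in before the lookup, which is what restoring the index_to_params() call guarantees.

    #include <stdbool.h>
    #include <stdio.h>

    struct params { unsigned op0, op1, crn, crm, op2; };

    static bool index_to_params(unsigned long id, struct params *p)
    {
        p->op0 = (id >> 20) & 0x3;
        p->op1 = (id >> 16) & 0xf;
        p->crn = (id >> 12) & 0xf;
        p->crm = (id >> 8) & 0xf;
        p->op2 = id & 0x7;
        return true;   /* real code also validates the encoding */
    }

    static void lookup(unsigned long id)
    {
        struct params params;               /* indeterminate until parsed */
        if (!index_to_params(id, &params))  /* parse BEFORE any find_reg(&params, ...) */
            return;
        printf("lookup Op2=%u\n", params.op2);
    }

    int main(void)
    {
        lookup(0x30007);
        return 0;
    }
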
diff --git a/queue-5.4/kvm-ppc-book3s-hv-fix-regression-on-big-endian-hosts.patch b/queue-5.4/kvm-ppc-book3s-hv-fix-regression-on-big-endian-hosts.patch
new file mode 100644 (file)
index 0000000..132beaf
--- /dev/null
@@ -0,0 +1,48 @@
+From 228b607d8ea1b7d4561945058d5692709099d432 Mon Sep 17 00:00:00 2001
+From: Marcus Comstedt <marcus@mc.pp.se>
+Date: Sun, 15 Dec 2019 10:49:00 +0100
+Subject: KVM: PPC: Book3S HV: Fix regression on big endian hosts
+
+From: Marcus Comstedt <marcus@mc.pp.se>
+
+commit 228b607d8ea1b7d4561945058d5692709099d432 upstream.
+
+VCPU_CR is the offset of arch.regs.ccr in kvm_vcpu.
+arch/powerpc/include/asm/kvm_host.h defines arch.regs as a struct
+pt_regs, and arch/powerpc/include/asm/ptrace.h defines the ccr field
+of pt_regs as "unsigned long ccr".  Since unsigned long is 64 bits, a
+64-bit load needs to be used to load it, unless an endianness specific
+correction offset is added to access the desired subpart.  In this
+case there is no reason to _not_ use a 64 bit load though.
+
+Fixes: 6c85b7bc637b ("powerpc/kvm: Use UV_RETURN ucall to return to ultravisor")
+Cc: stable@vger.kernel.org # v5.4+
+Signed-off-by: Marcus Comstedt <marcus@mc.pp.se>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20191215094900.46740-1-marcus@mc.pp.se
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -1117,7 +1117,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+       ld      r7, VCPU_GPR(R7)(r4)
+       bne     ret_to_ultra
+-      lwz     r0, VCPU_CR(r4)
++      ld      r0, VCPU_CR(r4)
+       mtcr    r0
+       ld      r0, VCPU_GPR(R0)(r4)
+@@ -1137,7 +1137,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+  *   R3 = UV_RETURN
+  */
+ ret_to_ultra:
+-      lwz     r0, VCPU_CR(r4)
++      ld      r0, VCPU_CR(r4)
+       mtcr    r0
+       ld      r0, VCPU_GPR(R3)(r4)
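
A userspace C demonstration of the underlying issue (host-independent sketch): reading a 64-bit field with a 32-bit load at offset 0 returns the high word on a big-endian host, which is why the asm must use ld rather than lwz for the 64-bit ccr field.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint64_t ccr = 0x00000000deadbeefULL;       /* value lives in the low 32 bits */
        uint32_t via_32bit_load;

        /* what a 32-bit load at offset 0 sees */
        memcpy(&via_32bit_load, &ccr, sizeof(via_32bit_load));

        /* little-endian: 0xdeadbeef; big-endian: 0x00000000 (the high word) */
        printf("64-bit load:            0x%016llx\n", (unsigned long long)ccr);
        printf("32-bit load at offset 0: 0x%08x\n", (unsigned)via_32bit_load);
        return 0;
    }
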
diff --git a/queue-5.4/kvm-x86-host-feature-ssbd-doesn-t-imply-guest-feature-amd_ssbd.patch b/queue-5.4/kvm-x86-host-feature-ssbd-doesn-t-imply-guest-feature-amd_ssbd.patch
new file mode 100644 (file)
index 0000000..bf40428
--- /dev/null
@@ -0,0 +1,47 @@
+From 8715f05269bfbc6453e25e80825d781a82902f8e Mon Sep 17 00:00:00 2001
+From: Jim Mattson <jmattson@google.com>
+Date: Fri, 13 Dec 2019 16:15:16 -0800
+Subject: kvm: x86: Host feature SSBD doesn't imply guest feature AMD_SSBD
+
+From: Jim Mattson <jmattson@google.com>
+
+commit 8715f05269bfbc6453e25e80825d781a82902f8e upstream.
+
+The host reports support for the synthetic feature X86_FEATURE_SSBD
+when any of the three following hardware features are set:
+  CPUID.(EAX=7,ECX=0):EDX.SSBD[bit 31]
+  CPUID.80000008H:EBX.AMD_SSBD[bit 24]
+  CPUID.80000008H:EBX.VIRT_SSBD[bit 25]
+
+Either of the first two hardware features implies the existence of the
+IA32_SPEC_CTRL MSR, but CPUID.80000008H:EBX.VIRT_SSBD[bit 25] does
+not. Therefore, CPUID.80000008H:EBX.AMD_SSBD[bit 24] should only be
+set in the guest if CPUID.(EAX=7,ECX=0):EDX.SSBD[bit 31] or
+CPUID.80000008H:EBX.AMD_SSBD[bit 24] is set on the host.
+
+Fixes: 4c6903a0f9d76 ("KVM: x86: fix reporting of AMD speculation bug CPUID leaf")
+Signed-off-by: Jim Mattson <jmattson@google.com>
+Reviewed-by: Jacob Xu <jacobhxu@google.com>
+Reviewed-by: Peter Shier <pshier@google.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: stable@vger.kernel.org
+Reported-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/cpuid.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -760,7 +760,8 @@ static inline int __do_cpuid_func(struct
+                       entry->ebx |= F(AMD_IBRS);
+               if (boot_cpu_has(X86_FEATURE_STIBP))
+                       entry->ebx |= F(AMD_STIBP);
+-              if (boot_cpu_has(X86_FEATURE_SSBD))
++              if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
++                  boot_cpu_has(X86_FEATURE_AMD_SSBD))
+                       entry->ebx |= F(AMD_SSBD);
+               if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
+                       entry->ebx |= F(AMD_SSB_NO);
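
A bitmask sketch in C (illustrative bit assignments) of the corrected derivation: only host features that imply the IA32_SPEC_CTRL MSR may set the guest's AMD_SSBD bit; the synthetic SSBD flag alone is not enough, because it can come from VIRT_SSBD.

    #include <stdbool.h>
    #include <stdio.h>

    #define HOST_SPEC_CTRL_SSBD (1u << 0)  /* CPUID.(EAX=7,ECX=0):EDX.SSBD */
    #define HOST_AMD_SSBD       (1u << 1)  /* CPUID.80000008H:EBX.AMD_SSBD */
    #define HOST_VIRT_SSBD      (1u << 2)  /* CPUID.80000008H:EBX.VIRT_SSBD */

    static bool guest_gets_amd_ssbd(unsigned int host_features)
    {
        /* only these two imply the IA32_SPEC_CTRL MSR exists */
        return host_features & (HOST_SPEC_CTRL_SSBD | HOST_AMD_SSBD);
    }

    int main(void)
    {
        printf("%d\n", guest_gets_amd_ssbd(HOST_AMD_SSBD));   /* 1 */
        printf("%d\n", guest_gets_amd_ssbd(HOST_VIRT_SSBD));  /* 0: no SPEC_CTRL MSR */
        return 0;
    }
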
diff --git a/queue-5.4/kvm-x86-host-feature-ssbd-doesn-t-imply-guest-feature-spec_ctrl_ssbd.patch b/queue-5.4/kvm-x86-host-feature-ssbd-doesn-t-imply-guest-feature-spec_ctrl_ssbd.patch
new file mode 100644 (file)
index 0000000..ab6dd57
--- /dev/null
@@ -0,0 +1,47 @@
+From 396d2e878f92ec108e4293f1c77ea3bc90b414ff Mon Sep 17 00:00:00 2001
+From: Jim Mattson <jmattson@google.com>
+Date: Fri, 13 Dec 2019 16:15:15 -0800
+Subject: kvm: x86: Host feature SSBD doesn't imply guest feature SPEC_CTRL_SSBD
+
+From: Jim Mattson <jmattson@google.com>
+
+commit 396d2e878f92ec108e4293f1c77ea3bc90b414ff upstream.
+
+The host reports support for the synthetic feature X86_FEATURE_SSBD
+when any of the three following hardware features are set:
+  CPUID.(EAX=7,ECX=0):EDX.SSBD[bit 31]
+  CPUID.80000008H:EBX.AMD_SSBD[bit 24]
+  CPUID.80000008H:EBX.VIRT_SSBD[bit 25]
+
+Either of the first two hardware features implies the existence of the
+IA32_SPEC_CTRL MSR, but CPUID.80000008H:EBX.VIRT_SSBD[bit 25] does
+not. Therefore, CPUID.(EAX=7,ECX=0):EDX.SSBD[bit 31] should only be
+set in the guest if CPUID.(EAX=7,ECX=0):EDX.SSBD[bit 31] or
+CPUID.80000008H:EBX.AMD_SSBD[bit 24] is set on the host.
+
+Fixes: 0c54914d0c52a ("KVM: x86: use Intel speculation bugs and features as derived in generic x86 code")
+Signed-off-by: Jim Mattson <jmattson@google.com>
+Reviewed-by: Jacob Xu <jacobhxu@google.com>
+Reviewed-by: Peter Shier <pshier@google.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: stable@vger.kernel.org
+Reported-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/cpuid.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -402,7 +402,8 @@ static inline void do_cpuid_7_mask(struc
+                       entry->edx |= F(SPEC_CTRL);
+               if (boot_cpu_has(X86_FEATURE_STIBP))
+                       entry->edx |= F(INTEL_STIBP);
+-              if (boot_cpu_has(X86_FEATURE_SSBD))
++              if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
++                  boot_cpu_has(X86_FEATURE_AMD_SSBD))
+                       entry->edx |= F(SPEC_CTRL_SSBD);
+               /*
+                * We emulate ARCH_CAPABILITIES in software even
diff --git a/queue-5.4/mmc-sdhci-add-a-quirk-for-broken-command-queuing.patch b/queue-5.4/mmc-sdhci-add-a-quirk-for-broken-command-queuing.patch
new file mode 100644 (file)
index 0000000..e62ca49
--- /dev/null
@@ -0,0 +1,51 @@
+From 75d27ea1abf7af3cc2cdec3513e74f52191605c8 Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Tue, 17 Dec 2019 11:53:49 +0200
+Subject: mmc: sdhci: Add a quirk for broken command queuing
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+commit 75d27ea1abf7af3cc2cdec3513e74f52191605c8 upstream.
+
+Command queuing has been reported broken on some systems based on Intel
+GLK. A separate patch disables command queuing in some cases.
+
+This patch adds a quirk for broken command queuing, which enables users
+with problems to disable command queuing using sdhci module parameters for
+quirks.
+
+Fixes: 8ee82bda230f ("mmc: sdhci-pci: Add CQHCI support for Intel GLK")
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20191217095349.14592-2-adrian.hunter@intel.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci.c |    3 +++
+ drivers/mmc/host/sdhci.h |    2 ++
+ 2 files changed, 5 insertions(+)
+
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -3756,6 +3756,9 @@ int sdhci_setup_host(struct sdhci_host *
+                      mmc_hostname(mmc), host->version);
+       }
++      if (host->quirks & SDHCI_QUIRK_BROKEN_CQE)
++              mmc->caps2 &= ~MMC_CAP2_CQE;
++
+       if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
+               host->flags |= SDHCI_USE_SDMA;
+       else if (!(host->caps & SDHCI_CAN_DO_SDMA))
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -409,6 +409,8 @@ struct sdhci_host {
+ #define SDHCI_QUIRK_BROKEN_CARD_DETECTION             (1<<15)
+ /* Controller reports inverted write-protect state */
+ #define SDHCI_QUIRK_INVERTED_WRITE_PROTECT            (1<<16)
++/* Controller has unusable command queue engine */
++#define SDHCI_QUIRK_BROKEN_CQE                                (1<<17)
+ /* Controller does not like fast PIO transfers */
+ #define SDHCI_QUIRK_PIO_NEEDS_DELAY                   (1<<18)
+ /* Controller does not have a LED */
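
A tiny C sketch (illustrative capability bit) of how such a quirk bit takes effect at setup time: when the quirk is set, the command-queue capability is simply stripped before the host is registered.

    #include <stdio.h>

    #define QUIRK_BROKEN_CQE (1u << 17)   /* matches the new sdhci quirk above */
    #define CAP2_CQE         (1u << 5)    /* illustrative stand-in for MMC_CAP2_CQE */

    int main(void)
    {
        unsigned int quirks = QUIRK_BROKEN_CQE;   /* e.g. set via a module parameter */
        unsigned int caps2  = CAP2_CQE;

        if (quirks & QUIRK_BROKEN_CQE)
            caps2 &= ~CAP2_CQE;                   /* command queuing disabled */

        printf("caps2 = 0x%x\n", caps2);
        return 0;
    }
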
diff --git a/queue-5.4/mmc-sdhci-msm-correct-the-offset-and-value-for-ddr_config-register.patch b/queue-5.4/mmc-sdhci-msm-correct-the-offset-and-value-for-ddr_config-register.patch
new file mode 100644 (file)
index 0000000..8ea1f4a
--- /dev/null
@@ -0,0 +1,116 @@
+From fa56ac9792265354b565f28def7164e7d7db2b1e Mon Sep 17 00:00:00 2001
+From: Veerabhadrarao Badiganti <vbadigan@codeaurora.org>
+Date: Tue, 26 Nov 2019 10:19:16 +0000
+Subject: mmc: sdhci-msm: Correct the offset and value for DDR_CONFIG register
+
+From: Veerabhadrarao Badiganti <vbadigan@codeaurora.org>
+
+commit fa56ac9792265354b565f28def7164e7d7db2b1e upstream.
+
+The DDR_CONFIG register offset got updated after a specific
+minor version of sdcc V4. This offset change has not been properly
+taken care of while updating register changes for sdcc V5.
+
+Correct the offset for this register accordingly.
+Also update the register value to reflect the recommended RCLK
+delay.
+
+Signed-off-by: Veerabhadrarao Badiganti <vbadigan@codeaurora.org>
+Link: https://lore.kernel.org/r/0101016ea738ec72-fa0f852d-20f8-474a-80b2-4b0ef63b132c-000000@us-west-2.amazonses.com
+Fixes: f15358885dda ("mmc: sdhci-msm: Define new Register address map")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci-msm.c |   28 +++++++++++++++++++---------
+ 1 file changed, 19 insertions(+), 9 deletions(-)
+
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -99,7 +99,7 @@
+ #define CORE_PWRSAVE_DLL      BIT(3)
+-#define DDR_CONFIG_POR_VAL    0x80040853
++#define DDR_CONFIG_POR_VAL    0x80040873
+ #define INVALID_TUNING_PHASE  -1
+@@ -148,8 +148,9 @@ struct sdhci_msm_offset {
+       u32 core_ddr_200_cfg;
+       u32 core_vendor_spec3;
+       u32 core_dll_config_2;
++      u32 core_dll_config_3;
++      u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */
+       u32 core_ddr_config;
+-      u32 core_ddr_config_2;
+ };
+ static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
+@@ -177,8 +178,8 @@ static const struct sdhci_msm_offset sdh
+       .core_ddr_200_cfg = 0x224,
+       .core_vendor_spec3 = 0x250,
+       .core_dll_config_2 = 0x254,
+-      .core_ddr_config = 0x258,
+-      .core_ddr_config_2 = 0x25c,
++      .core_dll_config_3 = 0x258,
++      .core_ddr_config = 0x25c,
+ };
+ static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
+@@ -207,8 +208,8 @@ static const struct sdhci_msm_offset sdh
+       .core_ddr_200_cfg = 0x184,
+       .core_vendor_spec3 = 0x1b0,
+       .core_dll_config_2 = 0x1b4,
+-      .core_ddr_config = 0x1b8,
+-      .core_ddr_config_2 = 0x1bc,
++      .core_ddr_config_old = 0x1b8,
++      .core_ddr_config = 0x1bc,
+ };
+ struct sdhci_msm_variant_ops {
+@@ -253,6 +254,7 @@ struct sdhci_msm_host {
+       const struct sdhci_msm_offset *offset;
+       bool use_cdr;
+       u32 transfer_mode;
++      bool updated_ddr_cfg;
+ };
+ static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
+@@ -924,8 +926,10 @@ out:
+ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
+ {
+       struct mmc_host *mmc = host->mmc;
+-      u32 dll_status, config;
++      u32 dll_status, config, ddr_cfg_offset;
+       int ret;
++      struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++      struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+       const struct sdhci_msm_offset *msm_offset =
+                                       sdhci_priv_msm_offset(host);
+@@ -938,8 +942,11 @@ static int sdhci_msm_cm_dll_sdc4_calibra
+        * bootloaders. In the future, if this changes, then the desired
+        * values will need to be programmed appropriately.
+        */
+-      writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr +
+-                      msm_offset->core_ddr_config);
++      if (msm_host->updated_ddr_cfg)
++              ddr_cfg_offset = msm_offset->core_ddr_config;
++      else
++              ddr_cfg_offset = msm_offset->core_ddr_config_old;
++      writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + ddr_cfg_offset);
+       if (mmc->ios.enhanced_strobe) {
+               config = readl_relaxed(host->ioaddr +
+@@ -1899,6 +1906,9 @@ static int sdhci_msm_probe(struct platfo
+                               msm_offset->core_vendor_spec_capabilities0);
+       }
++      if (core_major == 1 && core_minor >= 0x49)
++              msm_host->updated_ddr_cfg = true;
++
+       /*
+        * Power on reset state may trigger power irq if previous status of
+        * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq
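
A short C sketch of the version-dependent offset selection the patch introduces (offsets illustrative, based on the hunks above): cores with minor version >= 0x49 use the updated DDR_CONFIG offset, older ones keep the old location.

    #include <stdint.h>
    #include <stdio.h>

    struct offsets { uint32_t ddr_config_old; uint32_t ddr_config; };

    static uint32_t ddr_config_offset(const struct offsets *o, int major, int minor)
    {
        int updated_ddr_cfg = (major == 1 && minor >= 0x49);
        return updated_ddr_cfg ? o->ddr_config : o->ddr_config_old;
    }

    int main(void)
    {
        struct offsets v5 = { .ddr_config_old = 0x258, .ddr_config = 0x25c };

        printf("0x%x\n", ddr_config_offset(&v5, 1, 0x49));  /* new offset */
        printf("0x%x\n", ddr_config_offset(&v5, 1, 0x42));  /* old offset */
        return 0;
    }
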
diff --git a/queue-5.4/mmc-sdhci-of-esdhc-fix-p2020-errata-handling.patch b/queue-5.4/mmc-sdhci-of-esdhc-fix-p2020-errata-handling.patch
new file mode 100644 (file)
index 0000000..27bab90
--- /dev/null
@@ -0,0 +1,47 @@
+From fe0acab448f68c3146235afe03fb932e242ec94c Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 16 Dec 2019 11:18:42 +0800
+Subject: mmc: sdhci-of-esdhc: fix P2020 errata handling
+
+From: Yangbo Lu <yangbo.lu@nxp.com>
+
+commit fe0acab448f68c3146235afe03fb932e242ec94c upstream.
+
+Two previous patches introduced the quirks below for P2020 platforms.
+- SDHCI_QUIRK_RESET_AFTER_REQUEST
+- SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
+
+The patches mistakenly added them to the quirks2 field of the sdhci_host
+structure, while they were defined for quirks.
+       host->quirks2 |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
+       host->quirks2 |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+
+This patch fixes them:
+       host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
+       host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+
+Fixes: 05cb6b2a66fa ("mmc: sdhci-of-esdhc: add erratum eSDHC-A001 and A-008358 support")
+Fixes: a46e42712596 ("mmc: sdhci-of-esdhc: add erratum eSDHC5 support")
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20191216031842.40068-1-yangbo.lu@nxp.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci-of-esdhc.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/mmc/host/sdhci-of-esdhc.c
++++ b/drivers/mmc/host/sdhci-of-esdhc.c
+@@ -1123,8 +1123,8 @@ static int sdhci_esdhc_probe(struct plat
+               host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
+       if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
+-              host->quirks2 |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
+-              host->quirks2 |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
++              host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
++              host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+       }
+       if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
diff --git a/queue-5.4/mmc-sdhci-of-esdhc-revert-mmc-sdhci-of-esdhc-add-erratum-a-009204-support.patch b/queue-5.4/mmc-sdhci-of-esdhc-revert-mmc-sdhci-of-esdhc-add-erratum-a-009204-support.patch
new file mode 100644 (file)
index 0000000..4faa194
--- /dev/null
@@ -0,0 +1,50 @@
+From 8b6dc6b2d60221e90703babbc141f063b8a07e72 Mon Sep 17 00:00:00 2001
+From: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+Date: Wed, 4 Dec 2019 09:54:46 +0100
+Subject: mmc: sdhci-of-esdhc: Revert "mmc: sdhci-of-esdhc: add erratum A-009204 support"
+
+From: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+
+commit 8b6dc6b2d60221e90703babbc141f063b8a07e72 upstream.
+
+This reverts commit 5dd195522562542bc6ebe6e7bd47890d8b7ca93c.
+
+First, the fix seems to be plain wrong, since the erratum suggests
+waiting 5ms before setting SYSCTL[RSTD], but this msleep()
+happens after the call to sdhci_reset(), which is where that bit gets
+set (if SDHCI_RESET_DATA is in the mask).
+
+Second, walking the whole device tree to figure out if some node has a
+"fsl,p2020-esdhc" compatible string is hugely expensive - about 70 to
+100 us on our mpc8309 board. Walking the device tree is done under a
+raw_spin_lock, so this is obviously really bad on an -rt system, and a
+waste of time on all systems.
+
+In fact, since esdhc_reset() seems to get called around 100 times per
+second, that mpc8309 now spends 0.8% of its time determining that
+it is not a p2020. Whether those 100 calls/s are normal or due to some
+other bug or misconfiguration, regularly hitting a 100 us
+non-preemptible window is unacceptable.
+
+Signed-off-by: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20191204085447.27491-1-linux@rasmusvillemoes.dk
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci-of-esdhc.c |    3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/drivers/mmc/host/sdhci-of-esdhc.c
++++ b/drivers/mmc/host/sdhci-of-esdhc.c
+@@ -710,9 +710,6 @@ static void esdhc_reset(struct sdhci_hos
+       sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+       sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+-      if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc"))
+-              mdelay(5);
+-
+       if (mask & SDHCI_RESET_ALL) {
+               val = sdhci_readl(host, ESDHC_TBCTL);
+               val &= ~ESDHC_TB_EN;
diff --git a/queue-5.4/mmc-sdhci-update-the-tuning-failed-messages-to-pr_debug-level.patch b/queue-5.4/mmc-sdhci-update-the-tuning-failed-messages-to-pr_debug-level.patch
new file mode 100644 (file)
index 0000000..58570ac
--- /dev/null
@@ -0,0 +1,43 @@
+From 2c92dd20304f505b6ef43d206fff21bda8f1f0ae Mon Sep 17 00:00:00 2001
+From: Faiz Abbas <faiz_abbas@ti.com>
+Date: Fri, 6 Dec 2019 17:13:26 +0530
+Subject: mmc: sdhci: Update the tuning failed messages to pr_debug level
+
+From: Faiz Abbas <faiz_abbas@ti.com>
+
+commit 2c92dd20304f505b6ef43d206fff21bda8f1f0ae upstream.
+
+Tuning support in DDR50 speed mode was added in SD Specifications Part1
+Physical Layer Specification v3.01. It's not possible to distinguish
+between v3.00 and v3.01 from the SCR and that is why since
+commit 4324f6de6d2e ("mmc: core: enable CMD19 tuning for DDR50 mode")
+tuning failures are ignored in DDR50 speed mode.
+
+Cards compatible with v3.00 don't respond to CMD19 in DDR50 and this
+error gets printed during enumeration and also if retune is triggered at
+any time during operation. Update the printk level to pr_debug so that
+these errors don't lead to false error reports.
+
+Signed-off-by: Faiz Abbas <faiz_abbas@ti.com>
+Cc: stable@vger.kernel.org # v4.4+
+Link: https://lore.kernel.org/r/20191206114326.15856-1-faiz_abbas@ti.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2406,8 +2406,8 @@ static int __sdhci_execute_tuning(struct
+               sdhci_send_tuning(host, opcode);
+               if (!host->tuning_done) {
+-                      pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
+-                              mmc_hostname(host->mmc));
++                      pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
++                               mmc_hostname(host->mmc));
+                       sdhci_abort_tuning(host, opcode);
+                       return -ETIMEDOUT;
+               }
diff --git a/queue-5.4/mmc-sdhci-workaround-broken-command-queuing-on-intel-glk.patch b/queue-5.4/mmc-sdhci-workaround-broken-command-queuing-on-intel-glk.patch
new file mode 100644 (file)
index 0000000..f8caf38
--- /dev/null
@@ -0,0 +1,54 @@
+From bedf9fc01ff1f40cfd1a79ccacedd9f3cd8e652a Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Tue, 17 Dec 2019 11:53:48 +0200
+Subject: mmc: sdhci: Workaround broken command queuing on Intel GLK
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+commit bedf9fc01ff1f40cfd1a79ccacedd9f3cd8e652a upstream.
+
+Command queuing has been reported broken on some Lenovo systems based on
+Intel GLK. This is likely a BIOS issue, so disable command queuing for
+Intel GLK if the BIOS vendor string is "LENOVO".
+
+Fixes: 8ee82bda230f ("mmc: sdhci-pci: Add CQHCI support for Intel GLK")
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20191217095349.14592-1-adrian.hunter@intel.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci-pci-core.c |   10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -26,6 +26,7 @@
+ #include <linux/mmc/slot-gpio.h>
+ #include <linux/mmc/sdhci-pci-data.h>
+ #include <linux/acpi.h>
++#include <linux/dmi.h>
+ #ifdef CONFIG_X86
+ #include <asm/iosf_mbi.h>
+@@ -782,11 +783,18 @@ static int byt_emmc_probe_slot(struct sd
+       return 0;
+ }
++static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
++{
++      return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
++             dmi_match(DMI_BIOS_VENDOR, "LENOVO");
++}
++
+ static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
+ {
+       int ret = byt_emmc_probe_slot(slot);
+-      slot->host->mmc->caps2 |= MMC_CAP2_CQE;
++      if (!glk_broken_cqhci(slot))
++              slot->host->mmc->caps2 |= MMC_CAP2_CQE;
+       if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
+               slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES,
diff --git a/queue-5.4/nbd-fix-shutdown-and-recv-work-deadlock-v2.patch b/queue-5.4/nbd-fix-shutdown-and-recv-work-deadlock-v2.patch
new file mode 100644 (file)
index 0000000..07abc95
--- /dev/null
@@ -0,0 +1,55 @@
+From 1c05839aa973cfae8c3db964a21f9c0eef8fcc21 Mon Sep 17 00:00:00 2001
+From: Mike Christie <mchristi@redhat.com>
+Date: Sun, 8 Dec 2019 16:51:50 -0600
+Subject: nbd: fix shutdown and recv work deadlock v2
+
+From: Mike Christie <mchristi@redhat.com>
+
+commit 1c05839aa973cfae8c3db964a21f9c0eef8fcc21 upstream.
+
+This fixes a regression added with:
+
+commit e9e006f5fcf2bab59149cb38a48a4817c1b538b4
+Author: Mike Christie <mchristi@redhat.com>
+Date:   Sun Aug 4 14:10:06 2019 -0500
+
+    nbd: fix max number of supported devs
+
+where we can deadlock during device shutdown. The problem occurs if
+the recv_work's nbd_config_put occurs after nbd_start_device_ioctl has
+returned and the userspace app has dropped its reference via closing
+the device and running nbd_release. The recv_work nbd_config_put call
+would then drop the refcount to zero and try to destroy the config, which
+would try to do destroy_workqueue from the recv work.
+
+This patch just has nbd_start_device_ioctl do a flush_workqueue when it
+wakes, so we know running works have exited by the time the ioctl returns. This
+also fixes a possible race where we could try to reuse the device while
+old recv_works are still running.
+
+Cc: stable@vger.kernel.org
+Fixes: e9e006f5fcf2 ("nbd: fix max number of supported devs")
+Signed-off-by: Mike Christie <mchristi@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/nbd.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -1296,10 +1296,10 @@ static int nbd_start_device_ioctl(struct
+       mutex_unlock(&nbd->config_lock);
+       ret = wait_event_interruptible(config->recv_wq,
+                                        atomic_read(&config->recv_threads) == 0);
+-      if (ret) {
++      if (ret)
+               sock_shutdown(nbd);
+-              flush_workqueue(nbd->recv_workq);
+-      }
++      flush_workqueue(nbd->recv_workq);
++
+       mutex_lock(&nbd->config_lock);
+       nbd_bdev_reset(bdev);
+       /* user requested, ignore socket errors */
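A userspace analogy of the ordering change above, as a rough sketch (pthreads stand in for the recv workqueue; none of these names are the nbd API): the teardown path always waits for the receive worker before it proceeds, instead of only on the error path.

#include <pthread.h>
#include <stdio.h>

/* The "config" refcount: the ioctl path and the receive worker each hold one. */
static int config_refs = 2;

static void *recv_work(void *arg)
{
	(void)arg;
	/* ... receive loop ends ... */
	__sync_fetch_and_sub(&config_refs, 1);	/* analogous to nbd_config_put() */
	return NULL;
}

int main(void)
{
	pthread_t worker;

	if (pthread_create(&worker, NULL, recv_work, NULL))
		return 1;

	/* The fix: wait for ("flush") the worker unconditionally, success or
	 * error, so its final put cannot run after teardown has started. */
	pthread_join(worker, NULL);

	__sync_fetch_and_sub(&config_refs, 1);	/* ioctl path drops its ref last */
	printf("remaining refs: %d\n", config_refs);
	return 0;
}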
diff --git a/queue-5.4/ocxl-fix-concurrent-afu-open-and-device-removal.patch b/queue-5.4/ocxl-fix-concurrent-afu-open-and-device-removal.patch
new file mode 100644 (file)
index 0000000..3f44fee
--- /dev/null
@@ -0,0 +1,102 @@
+From a58d37bce0d21cf7fbd589384c619e465ef2f927 Mon Sep 17 00:00:00 2001
+From: Frederic Barrat <fbarrat@linux.ibm.com>
+Date: Mon, 24 Jun 2019 16:41:48 +0200
+Subject: ocxl: Fix concurrent AFU open and device removal
+
+From: Frederic Barrat <fbarrat@linux.ibm.com>
+
+commit a58d37bce0d21cf7fbd589384c619e465ef2f927 upstream.
+
+If an ocxl device is unbound through sysfs at the same time its AFU is
+being opened by a user process, the open code may dereference freed
+stuctures, which can lead to kernel oops messages. You'd have to hit a
+tiny time window, but it's possible. It's fairly easy to test by
+making the time window bigger artificially.
+
+Fix it with a combination of 2 changes:
+  - when an AFU device is found in the IDR by looking for the device
+    minor number, we should hold a reference on the device until after
+    the context is allocated. A reference on the AFU structure is kept
+    when the context is allocated, so we can release the reference on
+    the device after the context allocation.
+  - with the fix above, there's still another even tinier window,
+    between the time the AFU device is found in the IDR and the
+    reference on the device is taken. We can fix this one by removing
+    the IDR entry earlier, when the device setup is removed, instead
+    of waiting for the 'release' device callback, with proper locking
+    around the IDR.
+
+Fixes: 75ca758adbaf ("ocxl: Create a clear delineation between ocxl backend & frontend")
+Cc: stable@vger.kernel.org # v5.2+
+Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com>
+Reviewed-by: Greg Kurz <groug@kaod.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20190624144148.32022-1-fbarrat@linux.ibm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/misc/ocxl/file.c |   23 +++++++++++------------
+ 1 file changed, 11 insertions(+), 12 deletions(-)
+
+--- a/drivers/misc/ocxl/file.c
++++ b/drivers/misc/ocxl/file.c
+@@ -18,18 +18,15 @@ static struct class *ocxl_class;
+ static struct mutex minors_idr_lock;
+ static struct idr minors_idr;
+-static struct ocxl_file_info *find_file_info(dev_t devno)
++static struct ocxl_file_info *find_and_get_file_info(dev_t devno)
+ {
+       struct ocxl_file_info *info;
+-      /*
+-       * We don't declare an RCU critical section here, as our AFU
+-       * is protected by a reference counter on the device. By the time the
+-       * info reference is removed from the idr, the ref count of
+-       * the device is already at 0, so no user API will access that AFU and
+-       * this function can't return it.
+-       */
++      mutex_lock(&minors_idr_lock);
+       info = idr_find(&minors_idr, MINOR(devno));
++      if (info)
++              get_device(&info->dev);
++      mutex_unlock(&minors_idr_lock);
+       return info;
+ }
+@@ -58,14 +55,16 @@ static int afu_open(struct inode *inode,
+       pr_debug("%s for device %x\n", __func__, inode->i_rdev);
+-      info = find_file_info(inode->i_rdev);
++      info = find_and_get_file_info(inode->i_rdev);
+       if (!info)
+               return -ENODEV;
+       rc = ocxl_context_alloc(&ctx, info->afu, inode->i_mapping);
+-      if (rc)
++      if (rc) {
++              put_device(&info->dev);
+               return rc;
+-
++      }
++      put_device(&info->dev);
+       file->private_data = ctx;
+       return 0;
+ }
+@@ -487,7 +486,6 @@ static void info_release(struct device *
+ {
+       struct ocxl_file_info *info = container_of(dev, struct ocxl_file_info, dev);
+-      free_minor(info);
+       ocxl_afu_put(info->afu);
+       kfree(info);
+ }
+@@ -577,6 +575,7 @@ void ocxl_file_unregister_afu(struct ocx
+       ocxl_file_make_invisible(info);
+       ocxl_sysfs_unregister_afu(info);
++      free_minor(info);
+       device_unregister(&info->dev);
+ }
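The first half of the fix is a classic lookup-and-get pattern: take the reference while still holding the lock that protects the lookup table, so the object cannot go away between lookup and use. A small userspace sketch under that assumption (a mutex and a plain counter stand in for the IDR lock and get_device()/put_device(); none of this is the ocxl API):

#include <pthread.h>
#include <stdio.h>

struct file_info {
	int refcount;
};

static pthread_mutex_t minors_lock = PTHREAD_MUTEX_INITIALIZER;
static struct file_info *minors_table[16];

/* Take the reference while the lookup lock is still held. */
static struct file_info *find_and_get_file_info(int minor)
{
	struct file_info *info;

	pthread_mutex_lock(&minors_lock);
	info = minors_table[minor];
	if (info)
		info->refcount++;	/* analogous to get_device() */
	pthread_mutex_unlock(&minors_lock);
	return info;
}

static void put_file_info(struct file_info *info)
{
	pthread_mutex_lock(&minors_lock);
	info->refcount--;		/* analogous to put_device() */
	pthread_mutex_unlock(&minors_lock);
}

int main(void)
{
	struct file_info afu = { .refcount = 1 };
	struct file_info *info;

	minors_table[0] = &afu;

	info = find_and_get_file_info(0);
	if (info) {
		/* ... allocate a context while the reference pins the object ... */
		put_file_info(info);
	}
	printf("refcount back to %d\n", afu.refcount);
	return 0;
}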
diff --git a/queue-5.4/pinctrl-baytrail-really-serialize-all-register-accesses.patch b/queue-5.4/pinctrl-baytrail-really-serialize-all-register-accesses.patch
new file mode 100644 (file)
index 0000000..dc4bbf2
--- /dev/null
@@ -0,0 +1,387 @@
+From 40ecab551232972a39cdd8b6f17ede54a3fdb296 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Tue, 19 Nov 2019 16:46:41 +0100
+Subject: pinctrl: baytrail: Really serialize all register accesses
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 40ecab551232972a39cdd8b6f17ede54a3fdb296 upstream.
+
+Commit 39ce8150a079 ("pinctrl: baytrail: Serialize all register access")
+added a spinlock around all register accesses because:
+
+"There is a hardware issue in Intel Baytrail where concurrent GPIO register
+ access might result reads of 0xffffffff and writes might get dropped
+ completely."
+
+Testing has shown that this does not catch all cases; there are still
+2 problems remaining:
+
+1) The original fix uses a spinlock per byt_gpio device / struct;
+additional testing has shown that this is not sufficient: concurrent
+accesses to 2 different GPIO banks also suffer from the same problem.
+
+This commit fixes this by moving to a single global lock.
+
+2) The original fix did not add a lock around the register accesses in
+the suspend/resume handling.
+
+Since pinctrl-baytrail.c is using normal suspend/resume handlers,
+interrupts are still enabled during suspend/resume handling. Nothing
+should be using the GPIOs when they are being taken down, _but_ the
+GPIOs themselves may still cause interrupts, which are likely to
+use (read) the triggering GPIO. So we need to protect against
+concurrent GPIO register accesses in the suspend/resume handlers too.
+
+This commit fixes this by adding the missing spin_lock / unlock calls.
+
+The 2 fixes together fix the Acer Switch 10 SW5-012 getting completely
+confused after a suspend/resume. The DSDT for this device has a bug
+in its _LID method which reprograms the home and power button trigger-
+flags requesting both high and low _level_ interrupts so the IRQs for
+these 2 GPIOs continuously fire. This, combined with the saving of
+registers during suspend, triggers concurrent GPIO register accesses
+resulting in saving 0xffffffff as pconf0 value during suspend and then
+when restoring this on resume the pinmux settings get all messed up,
+resulting in various I2C busses being stuck, the wifi no longer working
+and often the tablet simply not coming out of suspend at all.
+
+Cc: stable@vger.kernel.org
+Fixes: 39ce8150a079 ("pinctrl: baytrail: Serialize all register access")
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pinctrl/intel/pinctrl-baytrail.c |   81 ++++++++++++++++---------------
+ 1 file changed, 44 insertions(+), 37 deletions(-)
+
+--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
+@@ -110,7 +110,6 @@ struct byt_gpio {
+       struct platform_device *pdev;
+       struct pinctrl_dev *pctl_dev;
+       struct pinctrl_desc pctl_desc;
+-      raw_spinlock_t lock;
+       const struct intel_pinctrl_soc_data *soc_data;
+       struct intel_community *communities_copy;
+       struct byt_gpio_pin_context *saved_context;
+@@ -549,6 +548,8 @@ static const struct intel_pinctrl_soc_da
+       NULL
+ };
++static DEFINE_RAW_SPINLOCK(byt_lock);
++
+ static struct intel_community *byt_get_community(struct byt_gpio *vg,
+                                                unsigned int pin)
+ {
+@@ -658,7 +659,7 @@ static void byt_set_group_simple_mux(str
+       unsigned long flags;
+       int i;
+-      raw_spin_lock_irqsave(&vg->lock, flags);
++      raw_spin_lock_irqsave(&byt_lock, flags);
+       for (i = 0; i < group.npins; i++) {
+               void __iomem *padcfg0;
+@@ -678,7 +679,7 @@ static void byt_set_group_simple_mux(str
+               writel(value, padcfg0);
+       }
+-      raw_spin_unlock_irqrestore(&vg->lock, flags);
++      raw_spin_unlock_irqrestore(&byt_lock, flags);
+ }
+ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
+@@ -688,7 +689,7 @@ static void byt_set_group_mixed_mux(stru
+       unsigned long flags;
+       int i;
+-      raw_spin_lock_irqsave(&vg->lock, flags);
++      raw_spin_lock_irqsave(&byt_lock, flags);
+       for (i = 0; i < group.npins; i++) {
+               void __iomem *padcfg0;
+@@ -708,7 +709,7 @@ static void byt_set_group_mixed_mux(stru
+               writel(value, padcfg0);
+       }
+-      raw_spin_unlock_irqrestore(&vg->lock, flags);
++      raw_spin_unlock_irqrestore(&byt_lock, flags);
+ }
+ static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
+@@ -749,11 +750,11 @@ static void byt_gpio_clear_triggering(st
+       unsigned long flags;
+       u32 value;
+-      raw_spin_lock_irqsave(&vg->lock, flags);
++      raw_spin_lock_irqsave(&byt_lock, flags);
+       value = readl(reg);
+       value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
+       writel(value, reg);
+-      raw_spin_unlock_irqrestore(&vg->lock, flags);
++      raw_spin_unlock_irqrestore(&byt_lock, flags);
+ }
+ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
+@@ -765,7 +766,7 @@ static int byt_gpio_request_enable(struc
+       u32 value, gpio_mux;
+       unsigned long flags;
+-      raw_spin_lock_irqsave(&vg->lock, flags);
++      raw_spin_lock_irqsave(&byt_lock, flags);
+       /*
+        * In most cases, func pin mux 000 means GPIO function.
+@@ -787,7 +788,7 @@ static int byt_gpio_request_enable(struc
+                        "pin %u forcibly re-configured as GPIO\n", offset);
+       }
+-      raw_spin_unlock_irqrestore(&vg->lock, flags);
++      raw_spin_unlock_irqrestore(&byt_lock, flags);
+       pm_runtime_get(&vg->pdev->dev);
+@@ -815,7 +816,7 @@ static int byt_gpio_set_direction(struct
+       unsigned long flags;
+       u32 value;
+-      raw_spin_lock_irqsave(&vg->lock, flags);
++      raw_spin_lock_irqsave(&byt_lock, flags);
+       value = readl(val_reg);
+       value &= ~BYT_DIR_MASK;
+@@ -832,7 +833,7 @@ static int byt_gpio_set_direction(struct
+                    "Potential Error: Setting GPIO with direct_irq_en to output");
+       writel(value, val_reg);
+-      raw_spin_unlock_irqrestore(&vg->lock, flags);
++      raw_spin_unlock_irqrestore(&byt_lock, flags);
+       return 0;
+ }
+@@ -901,11 +902,11 @@ static int byt_pin_config_get(struct pin
+       u32 conf, pull, val, debounce;
+       u16 arg = 0;
+-      raw_spin_lock_irqsave(&vg->lock, flags);
++      raw_spin_lock_irqsave(&byt_lock, flags);
+       conf = readl(conf_reg);
+       pull = conf & BYT_PULL_ASSIGN_MASK;
+       val = readl(val_reg);
+-      raw_spin_unlock_irqrestore(&vg->lock, flags);
++      raw_spin_unlock_irqrestore(&byt_lock, flags);
+       switch (param) {
+       case PIN_CONFIG_BIAS_DISABLE:
+@@ -932,9 +933,9 @@ static int byt_pin_config_get(struct pin
+               if (!(conf & BYT_DEBOUNCE_EN))
+                       return -EINVAL;
+-              raw_spin_lock_irqsave(&vg->lock, flags);
++              raw_spin_lock_irqsave(&byt_lock, flags);
+               debounce = readl(db_reg);
+-              raw_spin_unlock_irqrestore(&vg->lock, flags);
++              raw_spin_unlock_irqrestore(&byt_lock, flags);
+               switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
+               case BYT_DEBOUNCE_PULSE_375US:
+@@ -986,7 +987,7 @@ static int byt_pin_config_set(struct pin
+       u32 conf, val, debounce;
+       int i, ret = 0;
+-      raw_spin_lock_irqsave(&vg->lock, flags);
++      raw_spin_lock_irqsave(&byt_lock, flags);
+       conf = readl(conf_reg);
+       val = readl(val_reg);
+@@ -1094,7 +1095,7 @@ static int byt_pin_config_set(struct pin
+       if (!ret)
+               writel(conf, conf_reg);
+-      raw_spin_unlock_irqrestore(&vg->lock, flags);
++      raw_spin_unlock_irqrestore(&byt_lock, flags);
+       return ret;
+ }
+@@ -1119,9 +1120,9 @@ static int byt_gpio_get(struct gpio_chip
+       unsigned long flags;
+       u32 val;
+-      raw_spin_lock_irqsave(&vg->lock, flags);
++      raw_spin_lock_irqsave(&byt_lock, flags);
+       val = readl(reg);
+-      raw_spin_unlock_irqrestore(&vg->lock, flags);
++      raw_spin_unlock_irqrestore(&byt_lock, flags);
+       return !!(val & BYT_LEVEL);
+ }
+@@ -1136,13 +1137,13 @@ static void byt_gpio_set(struct gpio_chi
+       if (!reg)
+               return;
+-      raw_spin_lock_irqsave(&vg->lock, flags);
++      raw_spin_lock_irqsave(&byt_lock, flags);
+       old_val = readl(reg);
+       if (value)
+               writel(old_val | BYT_LEVEL, reg);
+       else
+               writel(old_val & ~BYT_LEVEL, reg);
+-      raw_spin_unlock_irqrestore(&vg->lock, flags);
++      raw_spin_unlock_irqrestore(&byt_lock, flags);
+ }
+ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+@@ -1155,9 +1156,9 @@ static int byt_gpio_get_direction(struct
+       if (!reg)
+               return -EINVAL;
+-      raw_spin_lock_irqsave(&vg->lock, flags);
++      raw_spin_lock_irqsave(&byt_lock, flags);
+       value = readl(reg);
+-      raw_spin_unlock_irqrestore(&vg->lock, flags);
++      raw_spin_unlock_irqrestore(&byt_lock, flags);
+       if (!(value & BYT_OUTPUT_EN))
+               return 0;
+@@ -1200,14 +1201,14 @@ static void byt_gpio_dbg_show(struct seq
+               const char *label;
+               unsigned int pin;
+-              raw_spin_lock_irqsave(&vg->lock, flags);
++              raw_spin_lock_irqsave(&byt_lock, flags);
+               pin = vg->soc_data->pins[i].number;
+               reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
+               if (!reg) {
+                       seq_printf(s,
+                                  "Could not retrieve pin %i conf0 reg\n",
+                                  pin);
+-                      raw_spin_unlock_irqrestore(&vg->lock, flags);
++                      raw_spin_unlock_irqrestore(&byt_lock, flags);
+                       continue;
+               }
+               conf0 = readl(reg);
+@@ -1216,11 +1217,11 @@ static void byt_gpio_dbg_show(struct seq
+               if (!reg) {
+                       seq_printf(s,
+                                  "Could not retrieve pin %i val reg\n", pin);
+-                      raw_spin_unlock_irqrestore(&vg->lock, flags);
++                      raw_spin_unlock_irqrestore(&byt_lock, flags);
+                       continue;
+               }
+               val = readl(reg);
+-              raw_spin_unlock_irqrestore(&vg->lock, flags);
++              raw_spin_unlock_irqrestore(&byt_lock, flags);
+               comm = byt_get_community(vg, pin);
+               if (!comm) {
+@@ -1304,9 +1305,9 @@ static void byt_irq_ack(struct irq_data
+       if (!reg)
+               return;
+-      raw_spin_lock(&vg->lock);
++      raw_spin_lock(&byt_lock);
+       writel(BIT(offset % 32), reg);
+-      raw_spin_unlock(&vg->lock);
++      raw_spin_unlock(&byt_lock);
+ }
+ static void byt_irq_mask(struct irq_data *d)
+@@ -1330,7 +1331,7 @@ static void byt_irq_unmask(struct irq_da
+       if (!reg)
+               return;
+-      raw_spin_lock_irqsave(&vg->lock, flags);
++      raw_spin_lock_irqsave(&byt_lock, flags);
+       value = readl(reg);
+       switch (irqd_get_trigger_type(d)) {
+@@ -1353,7 +1354,7 @@ static void byt_irq_unmask(struct irq_da
+       writel(value, reg);
+-      raw_spin_unlock_irqrestore(&vg->lock, flags);
++      raw_spin_unlock_irqrestore(&byt_lock, flags);
+ }
+ static int byt_irq_type(struct irq_data *d, unsigned int type)
+@@ -1367,7 +1368,7 @@ static int byt_irq_type(struct irq_data
+       if (!reg || offset >= vg->chip.ngpio)
+               return -EINVAL;
+-      raw_spin_lock_irqsave(&vg->lock, flags);
++      raw_spin_lock_irqsave(&byt_lock, flags);
+       value = readl(reg);
+       WARN(value & BYT_DIRECT_IRQ_EN,
+@@ -1389,7 +1390,7 @@ static int byt_irq_type(struct irq_data
+       else if (type & IRQ_TYPE_LEVEL_MASK)
+               irq_set_handler_locked(d, handle_level_irq);
+-      raw_spin_unlock_irqrestore(&vg->lock, flags);
++      raw_spin_unlock_irqrestore(&byt_lock, flags);
+       return 0;
+ }
+@@ -1425,9 +1426,9 @@ static void byt_gpio_irq_handler(struct
+                       continue;
+               }
+-              raw_spin_lock(&vg->lock);
++              raw_spin_lock(&byt_lock);
+               pending = readl(reg);
+-              raw_spin_unlock(&vg->lock);
++              raw_spin_unlock(&byt_lock);
+               for_each_set_bit(pin, &pending, 32) {
+                       virq = irq_find_mapping(vg->chip.irq.domain, base + pin);
+                       generic_handle_irq(virq);
+@@ -1638,8 +1639,6 @@ static int byt_pinctrl_probe(struct plat
+               return PTR_ERR(vg->pctl_dev);
+       }
+-      raw_spin_lock_init(&vg->lock);
+-
+       ret = byt_gpio_probe(vg);
+       if (ret)
+               return ret;
+@@ -1654,8 +1653,11 @@ static int byt_pinctrl_probe(struct plat
+ static int byt_gpio_suspend(struct device *dev)
+ {
+       struct byt_gpio *vg = dev_get_drvdata(dev);
++      unsigned long flags;
+       int i;
++      raw_spin_lock_irqsave(&byt_lock, flags);
++
+       for (i = 0; i < vg->soc_data->npins; i++) {
+               void __iomem *reg;
+               u32 value;
+@@ -1676,14 +1678,18 @@ static int byt_gpio_suspend(struct devic
+               vg->saved_context[i].val = value;
+       }
++      raw_spin_unlock_irqrestore(&byt_lock, flags);
+       return 0;
+ }
+ static int byt_gpio_resume(struct device *dev)
+ {
+       struct byt_gpio *vg = dev_get_drvdata(dev);
++      unsigned long flags;
+       int i;
++      raw_spin_lock_irqsave(&byt_lock, flags);
++
+       for (i = 0; i < vg->soc_data->npins; i++) {
+               void __iomem *reg;
+               u32 value;
+@@ -1721,6 +1727,7 @@ static int byt_gpio_resume(struct device
+               }
+       }
++      raw_spin_unlock_irqrestore(&byt_lock, flags);
+       return 0;
+ }
+ #endif
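The locking change itself is simple: one global lock now serializes register access across all GPIO banks, instead of one lock per byt_gpio instance. A userspace sketch of that shape (a pthread mutex and plain memory stand in for the raw spinlock and the memory-mapped registers):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* One global lock for every bank, matching the move away from vg->lock. */
static pthread_mutex_t byt_lock = PTHREAD_MUTEX_INITIALIZER;

struct byt_gpio {
	uint32_t regs[8];	/* stand-in for the memory-mapped registers */
};

static uint32_t byt_read(struct byt_gpio *vg, int reg)
{
	uint32_t val;

	pthread_mutex_lock(&byt_lock);
	val = vg->regs[reg];
	pthread_mutex_unlock(&byt_lock);
	return val;
}

static void byt_write(struct byt_gpio *vg, int reg, uint32_t val)
{
	pthread_mutex_lock(&byt_lock);
	vg->regs[reg] = val;
	pthread_mutex_unlock(&byt_lock);
}

int main(void)
{
	struct byt_gpio bank0 = { { 0 } }, bank1 = { { 0 } };

	/* Accesses to different banks are now serialized by the same lock. */
	byt_write(&bank0, 0, 0x1);
	byt_write(&bank1, 0, 0x2);
	printf("%u %u\n", (unsigned)byt_read(&bank0, 0),
	       (unsigned)byt_read(&bank1, 0));
	return 0;
}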
diff --git a/queue-5.4/powerpc-irq-fix-stack-overflow-verification.patch b/queue-5.4/powerpc-irq-fix-stack-overflow-verification.patch
new file mode 100644 (file)
index 0000000..b41e3a4
--- /dev/null
@@ -0,0 +1,50 @@
+From 099bc4812f09155da77eeb960a983470249c9ce1 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+Date: Mon, 9 Dec 2019 06:19:08 +0000
+Subject: powerpc/irq: fix stack overflow verification
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+commit 099bc4812f09155da77eeb960a983470249c9ce1 upstream.
+
+Before commit 0366a1c70b89 ("powerpc/irq: Run softirqs off the top of
+the irq stack"), check_stack_overflow() was called by do_IRQ(), before
+switching to the irq stack.
+In that commit, do_IRQ() was renamed __do_irq(), and is now executing
+on the irq stack, so check_stack_overflow() has just become almost
+useless.
+
+Move check_stack_overflow() call in do_IRQ() to do the check while
+still on the current stack.
+
+Fixes: 0366a1c70b89 ("powerpc/irq: Run softirqs off the top of the irq stack")
+Cc: stable@vger.kernel.org
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/e033aa8116ab12b7ca9a9c75189ad0741e3b9b5f.1575872340.git.christophe.leroy@c-s.fr
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/irq.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -619,8 +619,6 @@ void __do_irq(struct pt_regs *regs)
+       trace_irq_entry(regs);
+-      check_stack_overflow();
+-
+       /*
+        * Query the platform PIC for the interrupt & ack it.
+        *
+@@ -652,6 +650,8 @@ void do_IRQ(struct pt_regs *regs)
+       irqsp = hardirq_ctx[raw_smp_processor_id()];
+       sirqsp = softirq_ctx[raw_smp_processor_id()];
++      check_stack_overflow();
++
+       /* Already there ? */
+       if (unlikely(cursp == irqsp || cursp == sirqsp)) {
+               __do_irq(regs);
diff --git a/queue-5.4/powerpc-vcpu-assume-dedicated-processors-as-non-preempt.patch b/queue-5.4/powerpc-vcpu-assume-dedicated-processors-as-non-preempt.patch
new file mode 100644 (file)
index 0000000..00099c8
--- /dev/null
@@ -0,0 +1,160 @@
+From 14c73bd344da60abaf7da3ea2e7733ddda35bbac Mon Sep 17 00:00:00 2001
+From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Date: Thu, 5 Dec 2019 14:02:17 +0530
+Subject: powerpc/vcpu: Assume dedicated processors as non-preempt
+
+From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+
+commit 14c73bd344da60abaf7da3ea2e7733ddda35bbac upstream.
+
+With commit 247f2f6f3c70 ("sched/core: Don't schedule threads on
+pre-empted vCPUs"), the scheduler avoids preempted vCPUs to schedule
+tasks on wakeup. This leads to wrong choice of CPU, which in-turn
+leads to larger wakeup latencies. Eventually, it leads to performance
+regression in latency sensitive benchmarks like soltp, schbench etc.
+
+On Powerpc, vcpu_is_preempted() only looks at yield_count. If the
+yield_count is odd, the vCPU is assumed to be preempted. However
+yield_count is increased whenever the LPAR enters CEDE state (idle).
+So any CPU that has entered CEDE state is assumed to be preempted.
+
+Even if the vCPU of a dedicated LPAR is preempted/donated, it should have
+right of first use since the LPAR is supposed to own the vCPU.
+
+On a Power9 System with 32 cores:
+  # lscpu
+  Architecture:        ppc64le
+  Byte Order:          Little Endian
+  CPU(s):              128
+  On-line CPU(s) list: 0-127
+  Thread(s) per core:  8
+  Core(s) per socket:  1
+  Socket(s):           16
+  NUMA node(s):        2
+  Model:               2.2 (pvr 004e 0202)
+  Model name:          POWER9 (architected), altivec supported
+  Hypervisor vendor:   pHyp
+  Virtualization type: para
+  L1d cache:           32K
+  L1i cache:           32K
+  L2 cache:            512K
+  L3 cache:            10240K
+  NUMA node0 CPU(s):   0-63
+  NUMA node1 CPU(s):   64-127
+
+  # perf stat -a -r 5 ./schbench
+  v5.4                               v5.4 + patch
+  Latency percentiles (usec)         Latency percentiles (usec)
+        50.0000th: 45                      50.0th: 45
+        75.0000th: 62                      75.0th: 63
+        90.0000th: 71                      90.0th: 74
+        95.0000th: 77                      95.0th: 78
+        *99.0000th: 91                     *99.0th: 82
+        99.5000th: 707                     99.5th: 83
+        99.9000th: 6920                    99.9th: 86
+        min=0, max=10048                   min=0, max=96
+  Latency percentiles (usec)         Latency percentiles (usec)
+        50.0000th: 45                      50.0th: 46
+        75.0000th: 61                      75.0th: 64
+        90.0000th: 72                      90.0th: 75
+        95.0000th: 79                      95.0th: 79
+        *99.0000th: 691                    *99.0th: 83
+        99.5000th: 3972                    99.5th: 85
+        99.9000th: 8368                    99.9th: 91
+        min=0, max=16606                   min=0, max=117
+  Latency percentiles (usec)         Latency percentiles (usec)
+        50.0000th: 45                      50.0th: 46
+        75.0000th: 61                      75.0th: 64
+        90.0000th: 71                      90.0th: 75
+        95.0000th: 77                      95.0th: 79
+        *99.0000th: 106                    *99.0th: 83
+        99.5000th: 2364                    99.5th: 84
+        99.9000th: 7480                    99.9th: 90
+        min=0, max=10001                   min=0, max=95
+  Latency percentiles (usec)         Latency percentiles (usec)
+        50.0000th: 45                      50.0th: 47
+        75.0000th: 62                      75.0th: 65
+        90.0000th: 72                      90.0th: 75
+        95.0000th: 78                      95.0th: 79
+        *99.0000th: 93                     *99.0th: 84
+        99.5000th: 108                     99.5th: 85
+        99.9000th: 6792                    99.9th: 90
+        min=0, max=17681                   min=0, max=117
+  Latency percentiles (usec)         Latency percentiles (usec)
+        50.0000th: 46                      50.0th: 45
+        75.0000th: 62                      75.0th: 64
+        90.0000th: 73                      90.0th: 75
+        95.0000th: 79                      95.0th: 79
+        *99.0000th: 113                    *99.0th: 82
+        99.5000th: 2724                    99.5th: 83
+        99.9000th: 6184                    99.9th: 93
+        min=0, max=9887                    min=0, max=111
+
+   Performance counter stats for 'system wide' (5 runs):
+
+  context-switches    43,373  ( +-  0.40% )   44,597 ( +-  0.55% )
+  cpu-migrations       1,211  ( +-  5.04% )      220 ( +-  6.23% )
+  page-faults         15,983  ( +-  5.21% )   15,360 ( +-  3.38% )
+
+Waiman Long suggested using static_keys.
+
+Fixes: 247f2f6f3c70 ("sched/core: Don't schedule threads on pre-empted vCPUs")
+Cc: stable@vger.kernel.org # v4.18+
+Reported-by: Parth Shah <parth@linux.ibm.com>
+Reported-by: Ihor Pasichnyk <Ihor.Pasichnyk@ibm.com>
+Tested-by: Juri Lelli <juri.lelli@redhat.com>
+Acked-by: Waiman Long <longman@redhat.com>
+Reviewed-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
+Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Acked-by: Phil Auld <pauld@redhat.com>
+Reviewed-by: Vaidyanathan Srinivasan <svaidy@linux.ibm.com>
+Tested-by: Parth Shah <parth@linux.ibm.com>
+[mpe: Move the key and setting of the key to pseries/setup.c]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20191213035036.6913-1-mpe@ellerman.id.au
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/spinlock.h    |    4 +++-
+ arch/powerpc/platforms/pseries/setup.c |    7 +++++++
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/include/asm/spinlock.h
++++ b/arch/powerpc/include/asm/spinlock.h
+@@ -36,10 +36,12 @@
+ #endif
+ #ifdef CONFIG_PPC_PSERIES
++DECLARE_STATIC_KEY_FALSE(shared_processor);
++
+ #define vcpu_is_preempted vcpu_is_preempted
+ static inline bool vcpu_is_preempted(int cpu)
+ {
+-      if (!firmware_has_feature(FW_FEATURE_SPLPAR))
++      if (!static_branch_unlikely(&shared_processor))
+               return false;
+       return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
+ }
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -74,6 +74,9 @@
+ #include "pseries.h"
+ #include "../../../../drivers/pci/pci.h"
++DEFINE_STATIC_KEY_FALSE(shared_processor);
++EXPORT_SYMBOL_GPL(shared_processor);
++
+ int CMO_PrPSP = -1;
+ int CMO_SecPSP = -1;
+ unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
+@@ -758,6 +761,10 @@ static void __init pSeries_setup_arch(vo
+       if (firmware_has_feature(FW_FEATURE_LPAR)) {
+               vpa_init(boot_cpuid);
++
++              if (lppaca_shared_proc(get_lppaca()))
++                      static_branch_enable(&shared_processor);
++
+               ppc_md.power_save = pseries_lpar_idle;
+               ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
+ #ifdef CONFIG_PCI_IOV
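The fix gates the yield_count check behind a flag that is set once during platform setup, so dedicated-processor LPARs always report "not preempted". A standalone C sketch of that logic (a plain bool stands in for the static key; the yield_count values are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool shared_processor;	/* set once during platform setup */

static bool vcpu_is_preempted(unsigned int yield_count)
{
	if (!shared_processor)
		return false;		/* dedicated processors: never preempted */
	return yield_count & 1;		/* odd yield_count => vCPU preempted */
}

int main(void)
{
	shared_processor = false;	/* dedicated LPAR */
	printf("dedicated: %d\n", vcpu_is_preempted(3));

	shared_processor = true;	/* shared-processor LPAR */
	printf("shared:    %d\n", vcpu_is_preempted(3));
	return 0;
}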
diff --git a/queue-5.4/serial-sprd-add-clearing-break-interrupt-operation.patch b/queue-5.4/serial-sprd-add-clearing-break-interrupt-operation.patch
new file mode 100644 (file)
index 0000000..bd1ab00
--- /dev/null
@@ -0,0 +1,37 @@
+From abeb2e9414d7e3a0d8417bc3b13d7172513ea8a0 Mon Sep 17 00:00:00 2001
+From: Yonghan Ye <yonghan.ye@unisoc.com>
+Date: Wed, 4 Dec 2019 20:00:07 +0800
+Subject: serial: sprd: Add clearing break interrupt operation
+
+From: Yonghan Ye <yonghan.ye@unisoc.com>
+
+commit abeb2e9414d7e3a0d8417bc3b13d7172513ea8a0 upstream.
+
+A break interrupt will be generated if the RX line was pulled low, which
+means some abnormal behavior occurred on the UART. In this case, we still
+need to clear this break interrupt status, otherwise it will cause an irq
+storm that crashes the whole system.
+
+Fixes: b7396a38fb28 ("tty/serial: Add Spreadtrum sc9836-uart driver support")
+Signed-off-by: Yonghan Ye <yonghan.ye@unisoc.com>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Baolin Wang <baolin.wang7@gmail.com>
+Link: https://lore.kernel.org/r/925e51b73099c90158e080b8f5bed9b3b38c4548.1575460601.git.baolin.wang7@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/serial/sprd_serial.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/tty/serial/sprd_serial.c
++++ b/drivers/tty/serial/sprd_serial.c
+@@ -679,6 +679,9 @@ static irqreturn_t sprd_handle_irq(int i
+       if (ims & SPRD_IMSR_TIMEOUT)
+               serial_out(port, SPRD_ICLR, SPRD_ICLR_TIMEOUT);
++      if (ims & SPRD_IMSR_BREAK_DETECT)
++              serial_out(port, SPRD_ICLR, SPRD_IMSR_BREAK_DETECT);
++
+       if (ims & (SPRD_IMSR_RX_FIFO_FULL | SPRD_IMSR_BREAK_DETECT |
+                  SPRD_IMSR_TIMEOUT))
+               sprd_rx(port);
index 353f252786b72922fa8c124a45deae45d53f8e76..e19bd06a13a0a468b16c11d623d54df68a4ef175 100644 (file)
@@ -402,3 +402,34 @@ intel_th-fix-freeing-irqs.patch
 intel_th-msu-fix-window-switching-without-windows.patch
 platform-x86-hp-wmi-make-buffer-for-hpwmi_feature2_query-128-bytes.patch
 staging-comedi-gsc_hpdi-check-dma_alloc_coherent-return-value.patch
+tty-serial-atmel-fix-out-of-range-clock-divider-handling.patch
+serial-sprd-add-clearing-break-interrupt-operation.patch
+pinctrl-baytrail-really-serialize-all-register-accesses.patch
+clk-imx-clk-imx7ulp-add-missing-sentinel-of-ulp_div_table.patch
+clk-imx-clk-composite-8m-add-lock-to-gate-mux.patch
+clk-imx-pll14xx-fix-clk_pll14xx_wait_lock.patch
+ext4-fix-ext4_empty_dir-for-directories-with-holes.patch
+ext4-check-for-directory-entries-too-close-to-block-end.patch
+ext4-unlock-on-error-in-ext4_expand_extra_isize.patch
+ext4-validate-the-debug_want_extra_isize-mount-option-at-parse-time.patch
+iocost-over-budget-forced-ios-should-schedule-async-delay.patch
+kvm-ppc-book3s-hv-fix-regression-on-big-endian-hosts.patch
+kvm-x86-host-feature-ssbd-doesn-t-imply-guest-feature-spec_ctrl_ssbd.patch
+kvm-x86-host-feature-ssbd-doesn-t-imply-guest-feature-amd_ssbd.patch
+kvm-arm-arm64-properly-handle-faulting-of-device-mappings.patch
+kvm-arm64-ensure-params-is-initialised-when-looking-up-sys-register.patch
+x86-intel-disable-hpet-on-intel-coffee-lake-h-platforms.patch
+x86-mce-amd-do-not-use-rdmsr_safe_on_cpu-in-smca_configure.patch
+x86-mce-amd-allow-reserved-types-to-be-overwritten-in-smca_banks.patch
+x86-mce-fix-possibly-incorrect-severity-calculation-on-amd.patch
+powerpc-vcpu-assume-dedicated-processors-as-non-preempt.patch
+powerpc-irq-fix-stack-overflow-verification.patch
+ocxl-fix-concurrent-afu-open-and-device-removal.patch
+mmc-sdhci-msm-correct-the-offset-and-value-for-ddr_config-register.patch
+mmc-sdhci-of-esdhc-revert-mmc-sdhci-of-esdhc-add-erratum-a-009204-support.patch
+mmc-sdhci-update-the-tuning-failed-messages-to-pr_debug-level.patch
+mmc-sdhci-of-esdhc-fix-p2020-errata-handling.patch
+mmc-sdhci-workaround-broken-command-queuing-on-intel-glk.patch
+mmc-sdhci-add-a-quirk-for-broken-command-queuing.patch
+nbd-fix-shutdown-and-recv-work-deadlock-v2.patch
+iwlwifi-pcie-move-power-gating-workaround-earlier-in-the-flow.patch
diff --git a/queue-5.4/tty-serial-atmel-fix-out-of-range-clock-divider-handling.patch b/queue-5.4/tty-serial-atmel-fix-out-of-range-clock-divider-handling.patch
new file mode 100644 (file)
index 0000000..a28da55
--- /dev/null
@@ -0,0 +1,85 @@
+From cb47b9f8630ae3fa3f5fbd0c7003faba7abdf711 Mon Sep 17 00:00:00 2001
+From: David Engraf <david.engraf@sysgo.com>
+Date: Mon, 16 Dec 2019 09:54:03 +0100
+Subject: tty/serial: atmel: fix out of range clock divider handling
+
+From: David Engraf <david.engraf@sysgo.com>
+
+commit cb47b9f8630ae3fa3f5fbd0c7003faba7abdf711 upstream.
+
+Use MCK_DIV8 when the clock divider is > 65535. Unfortunately, the mode
+register was already written at that point, so the clock selection was ignored.
+
+Fix this by doing the baud rate calculation before setting the mode.
+
+Fixes: 5bf5635ac170 ("tty/serial: atmel: add fractional baud rate support")
+Signed-off-by: David Engraf <david.engraf@sysgo.com>
+Acked-by: Ludovic Desroches <ludovic.desroches@microchip.com>
+Acked-by: Richard Genoud <richard.genoud@gmail.com>
+Cc: stable <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20191216085403.17050-1-david.engraf@sysgo.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/serial/atmel_serial.c |   43 +++++++++++++++++++-------------------
+ 1 file changed, 22 insertions(+), 21 deletions(-)
+
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -2270,27 +2270,6 @@ static void atmel_set_termios(struct uar
+               mode |= ATMEL_US_USMODE_NORMAL;
+       }
+-      /* set the mode, clock divisor, parity, stop bits and data size */
+-      atmel_uart_writel(port, ATMEL_US_MR, mode);
+-
+-      /*
+-       * when switching the mode, set the RTS line state according to the
+-       * new mode, otherwise keep the former state
+-       */
+-      if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
+-              unsigned int rts_state;
+-
+-              if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
+-                      /* let the hardware control the RTS line */
+-                      rts_state = ATMEL_US_RTSDIS;
+-              } else {
+-                      /* force RTS line to low level */
+-                      rts_state = ATMEL_US_RTSEN;
+-              }
+-
+-              atmel_uart_writel(port, ATMEL_US_CR, rts_state);
+-      }
+-
+       /*
+        * Set the baud rate:
+        * Fractional baudrate allows to setup output frequency more
+@@ -2317,6 +2296,28 @@ static void atmel_set_termios(struct uar
+       if (!(port->iso7816.flags & SER_ISO7816_ENABLED))
+               atmel_uart_writel(port, ATMEL_US_BRGR, quot);
++
++      /* set the mode, clock divisor, parity, stop bits and data size */
++      atmel_uart_writel(port, ATMEL_US_MR, mode);
++
++      /*
++       * when switching the mode, set the RTS line state according to the
++       * new mode, otherwise keep the former state
++       */
++      if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
++              unsigned int rts_state;
++
++              if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
++                      /* let the hardware control the RTS line */
++                      rts_state = ATMEL_US_RTSDIS;
++              } else {
++                      /* force RTS line to low level */
++                      rts_state = ATMEL_US_RTSEN;
++              }
++
++              atmel_uart_writel(port, ATMEL_US_CR, rts_state);
++      }
++
+       atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
+       atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
+       atmel_port->tx_stopped = false;
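The essence of the reordering is that the clock-divider calculation, including the fall-back to the MCK/8 prescaler when the divider would exceed 65535, must happen before the mode register value is committed. A simplified standalone C sketch of that ordering (register writes are modeled as assignments; the DIV8 bit value and the integer-only divider math are illustrative assumptions, and the real driver also supports fractional baud rates):

#include <stdint.h>
#include <stdio.h>

#define MCK_DIV8_SEL	0x1000u	/* placeholder for the USCLKS MCK/8 selection bit */

static uint32_t mode_reg, brgr_reg;

static void set_termios(uint32_t mck_hz, uint32_t baud)
{
	uint32_t mode = 0;
	uint32_t div = mck_hz / (16 * baud);

	if (div > 65535) {		/* divider out of range: switch to MCK/8 */
		div = mck_hz / (8 * 16 * baud);
		mode |= MCK_DIV8_SEL;
	}

	brgr_reg = div;
	mode_reg = mode;	/* the mode is committed only after the clock choice */
}

int main(void)
{
	set_termios(132000000, 110);	/* a slow baud rate forces the MCK/8 path */
	printf("BRGR=%u MR=0x%x\n", (unsigned)brgr_reg, (unsigned)mode_reg);
	return 0;
}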
diff --git a/queue-5.4/x86-intel-disable-hpet-on-intel-coffee-lake-h-platforms.patch b/queue-5.4/x86-intel-disable-hpet-on-intel-coffee-lake-h-platforms.patch
new file mode 100644 (file)
index 0000000..0abc442
--- /dev/null
@@ -0,0 +1,41 @@
+From f8edbde885bbcab6a2b4a1b5ca614e6ccb807577 Mon Sep 17 00:00:00 2001
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Date: Fri, 29 Nov 2019 14:23:02 +0800
+Subject: x86/intel: Disable HPET on Intel Coffee Lake H platforms
+
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+
+commit f8edbde885bbcab6a2b4a1b5ca614e6ccb807577 upstream.
+
+The Coffee Lake H SoC has similar behavior to Coffee Lake: the HPET timer
+is skewed once the SoC has entered PC10.
+
+So let's disable HPET on CFL-H platforms.
+
+Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: bp@alien8.de
+Cc: feng.tang@intel.com
+Cc: harry.pan@intel.com
+Cc: hpa@zytor.com
+Link: https://lkml.kernel.org/r/20191129062303.18982-1-kai.heng.feng@canonical.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/early-quirks.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kernel/early-quirks.c
++++ b/arch/x86/kernel/early-quirks.c
+@@ -710,6 +710,8 @@ static struct chipset early_qrk[] __init
+        */
+       { PCI_VENDOR_ID_INTEL, 0x0f00,
+               PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
++      { PCI_VENDOR_ID_INTEL, 0x3e20,
++              PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+       { PCI_VENDOR_ID_INTEL, 0x3ec4,
+               PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+       { PCI_VENDOR_ID_BROADCOM, 0x4331,
diff --git a/queue-5.4/x86-mce-amd-allow-reserved-types-to-be-overwritten-in-smca_banks.patch b/queue-5.4/x86-mce-amd-allow-reserved-types-to-be-overwritten-in-smca_banks.patch
new file mode 100644 (file)
index 0000000..a8052a6
--- /dev/null
@@ -0,0 +1,96 @@
+From 966af20929ac24360ba3fac5533eb2ab003747da Mon Sep 17 00:00:00 2001
+From: Yazen Ghannam <yazen.ghannam@amd.com>
+Date: Thu, 21 Nov 2019 08:15:08 -0600
+Subject: x86/MCE/AMD: Allow Reserved types to be overwritten in smca_banks[]
+
+From: Yazen Ghannam <yazen.ghannam@amd.com>
+
+commit 966af20929ac24360ba3fac5533eb2ab003747da upstream.
+
+Each logical CPU in Scalable MCA systems controls a unique set of MCA
+banks in the system. These banks are not shared between CPUs. The bank
+types and ordering will be the same across CPUs on currently available
+systems.
+
+However, some CPUs may see a bank as Reserved/Read-as-Zero (RAZ) while
+other CPUs do not. In this case, the bank seen as Reserved on one CPU is
+assumed to be the same type as the bank seen as a known type on another
+CPU.
+
+In general, this occurs when the hardware represented by the MCA bank
+is disabled, e.g. disabled memory controllers on certain models, etc.
+The MCA bank is disabled in the hardware, so there is no possibility of
+getting an MCA/MCE from it even if it is assumed to have a known type.
+
+For example:
+
+Full system:
+       Bank  |  Type seen on CPU0  |  Type seen on CPU1
+       ------------------------------------------------
+        0    |         LS          |          LS
+        1    |         UMC         |          UMC
+        2    |         CS          |          CS
+
+System with hardware disabled:
+       Bank  |  Type seen on CPU0  |  Type seen on CPU1
+       ------------------------------------------------
+        0    |         LS          |          LS
+        1    |         UMC         |          RAZ
+        2    |         CS          |          CS
+
+For this reason, there is a single, global struct smca_banks[] that is
+initialized at boot time. This array is initialized on each CPU as it
+comes online. However, the array will not be updated if an entry already
+exists.
+
+This works as expected when the first CPU (usually CPU0) has all
+possible MCA banks enabled. But if the first CPU has a subset, then it
+will save a "Reserved" type in smca_banks[]. Successive CPUs will then
+not be able to update smca_banks[] even if they encounter a known bank
+type.
+
+This may result in unexpected behavior. Depending on the system
+configuration, a user may observe issues enumerating the MCA
+thresholding sysfs interface. The issues may be as trivial as sysfs
+entries not being available, or as severe as system hangs.
+
+For example:
+
+       Bank  |  Type seen on CPU0  |  Type seen on CPU1
+       ------------------------------------------------
+        0    |         LS          |          LS
+        1    |         RAZ         |          UMC
+        2    |         CS          |          CS
+
+Extend the smca_banks[] entry check to return if the entry is a
+non-reserved type. Otherwise, continue so that CPUs that encounter a
+known bank type can update smca_banks[].
+
+Fixes: 68627a697c19 ("x86/mce/AMD, EDAC/mce_amd: Enumerate Reserved SMCA bank type")
+Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: linux-edac <linux-edac@vger.kernel.org>
+Cc: <stable@vger.kernel.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: x86-ml <x86@kernel.org>
+Link: https://lkml.kernel.org/r/20191121141508.141273-1-Yazen.Ghannam@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/mce/amd.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/mce/amd.c
++++ b/arch/x86/kernel/cpu/mce/amd.c
+@@ -266,7 +266,7 @@ static void smca_configure(unsigned int
+       smca_set_misc_banks_map(bank, cpu);
+       /* Return early if this bank was already initialized. */
+-      if (smca_banks[bank].hwid)
++      if (smca_banks[bank].hwid && smca_banks[bank].hwid->hwid_mcatype != 0)
+               return;
+       if (rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
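The behavioral change is confined to the early-return test: a slot recorded with the Reserved type (hwid_mcatype == 0) may now be overwritten by a CPU that sees a real bank type. A standalone C sketch of that check (structures are trimmed to the relevant fields, and the non-zero type value is a placeholder):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct smca_hwid {
	uint32_t hwid_mcatype;	/* 0 means the Reserved/RAZ type */
};

struct smca_bank {
	const struct smca_hwid *hwid;
};

/* Old test: hwid != NULL. New test also requires a known (non-zero) type. */
static bool bank_already_initialized(const struct smca_bank *b)
{
	return b->hwid && b->hwid->hwid_mcatype != 0;
}

int main(void)
{
	static const struct smca_hwid reserved = { .hwid_mcatype = 0 };
	static const struct smca_hwid known = { .hwid_mcatype = 0x1 };	/* placeholder type */
	struct smca_bank bank = { .hwid = &reserved };

	/* A CPU that sees the real type can now overwrite the Reserved entry. */
	if (!bank_already_initialized(&bank))
		bank.hwid = &known;

	printf("bank type 0x%x\n", (unsigned)bank.hwid->hwid_mcatype);
	return 0;
}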
diff --git a/queue-5.4/x86-mce-amd-do-not-use-rdmsr_safe_on_cpu-in-smca_configure.patch b/queue-5.4/x86-mce-amd-do-not-use-rdmsr_safe_on_cpu-in-smca_configure.patch
new file mode 100644 (file)
index 0000000..88ec6af
--- /dev/null
@@ -0,0 +1,73 @@
+From 246ff09f89e54fdf740a8d496176c86743db3ec7 Mon Sep 17 00:00:00 2001
+From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Date: Thu, 31 Oct 2019 16:04:48 +0300
+Subject: x86/MCE/AMD: Do not use rdmsr_safe_on_cpu() in smca_configure()
+
+From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+
+commit 246ff09f89e54fdf740a8d496176c86743db3ec7 upstream.
+
+... because interrupts are disabled that early and sending IPIs can
+deadlock:
+
+  BUG: sleeping function called from invalid context at kernel/sched/completion.c:99
+  in_atomic(): 1, irqs_disabled(): 1, non_block: 0, pid: 0, name: swapper/1
+  no locks held by swapper/1/0.
+  irq event stamp: 0
+  hardirqs last  enabled at (0): [<0000000000000000>] 0x0
+  hardirqs last disabled at (0): [<ffffffff8106dda9>] copy_process+0x8b9/0x1ca0
+  softirqs last  enabled at (0): [<ffffffff8106dda9>] copy_process+0x8b9/0x1ca0
+  softirqs last disabled at (0): [<0000000000000000>] 0x0
+  Preemption disabled at:
+  [<ffffffff8104703b>] start_secondary+0x3b/0x190
+  CPU: 1 PID: 0 Comm: swapper/1 Not tainted 5.5.0-rc2+ #1
+  Hardware name: GIGABYTE MZ01-CE1-00/MZ01-CE1-00, BIOS F02 08/29/2018
+  Call Trace:
+   dump_stack
+   ___might_sleep.cold.92
+   wait_for_completion
+   ? generic_exec_single
+   rdmsr_safe_on_cpu
+   ? wrmsr_on_cpus
+   mce_amd_feature_init
+   mcheck_cpu_init
+   identify_cpu
+   identify_secondary_cpu
+   smp_store_cpu_info
+   start_secondary
+   secondary_startup_64
+
+The function smca_configure() is called only on the current CPU anyway,
+therefore replace rdmsr_safe_on_cpu() with atomic rdmsr_safe() and avoid
+the IPI.
+
+ [ bp: Update commit message. ]
+
+Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Yazen Ghannam <yazen.ghannam@amd.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: linux-edac <linux-edac@vger.kernel.org>
+Cc: <stable@vger.kernel.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: x86-ml <x86@kernel.org>
+Link: https://lkml.kernel.org/r/157252708836.3876.4604398213417262402.stgit@buzz
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/mce/amd.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/mce/amd.c
++++ b/arch/x86/kernel/cpu/mce/amd.c
+@@ -269,7 +269,7 @@ static void smca_configure(unsigned int
+       if (smca_banks[bank].hwid)
+               return;
+-      if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
++      if (rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
+               pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
+               return;
+       }
diff --git a/queue-5.4/x86-mce-fix-possibly-incorrect-severity-calculation-on-amd.patch b/queue-5.4/x86-mce-fix-possibly-incorrect-severity-calculation-on-amd.patch
new file mode 100644 (file)
index 0000000..636190c
--- /dev/null
@@ -0,0 +1,47 @@
+From a3a57ddad061acc90bef39635caf2b2330ce8f21 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jan=20H=2E=20Sch=C3=B6nherr?= <jschoenh@amazon.de>
+Date: Tue, 10 Dec 2019 01:07:30 +0100
+Subject: x86/mce: Fix possibly incorrect severity calculation on AMD
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jan H. Schönherr <jschoenh@amazon.de>
+
+commit a3a57ddad061acc90bef39635caf2b2330ce8f21 upstream.
+
+The function mce_severity_amd_smca() requires m->bank to be initialized
+for correct operation. Fix the one case, where mce_severity() is called
+without doing so.
+
+Fixes: 6bda529ec42e ("x86/mce: Grade uncorrected errors for SMCA-enabled systems")
+Fixes: d28af26faa0b ("x86/MCE: Initialize mce.bank in the case of a fatal error in mce_no_way_out()")
+Signed-off-by: Jan H. Schönherr <jschoenh@amazon.de>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Tony Luck <tony.luck@intel.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: linux-edac <linux-edac@vger.kernel.org>
+Cc: <stable@vger.kernel.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: x86-ml <x86@kernel.org>
+Cc: Yazen Ghannam <Yazen.Ghannam@amd.com>
+Link: https://lkml.kernel.org/r/20191210000733.17979-4-jschoenh@amazon.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/mce/core.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -814,8 +814,8 @@ static int mce_no_way_out(struct mce *m,
+               if (quirk_no_way_out)
+                       quirk_no_way_out(i, m, regs);
++              m->bank = i;
+               if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+-                      m->bank = i;
+                       mce_read_aux(m, i);
+                       *msg = tmp;
+                       return 1;