Fixes for 5.4
author     Sasha Levin <sashal@kernel.org>
           Thu, 31 Dec 2020 04:14:49 +0000 (23:14 -0500)
committer  Sasha Levin <sashal@kernel.org>
           Thu, 31 Dec 2020 04:14:49 +0000 (23:14 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.4/btrfs-fix-race-when-defragmenting-leads-to-unnecessa.patch [new file with mode: 0644]
queue-5.4/ext4-don-t-remount-read-only-with-errors-continue-on.patch [new file with mode: 0644]
queue-5.4/jffs2-allow-setting-rp_size-to-zero-during-remountin.patch [new file with mode: 0644]
queue-5.4/jffs2-fix-null-pointer-dereference-in-rp_size-fs-opt.patch [new file with mode: 0644]
queue-5.4/kvm-svm-relax-conditions-for-allowing-msr_ia32_spec_.patch [new file with mode: 0644]
queue-5.4/kvm-x86-avoid-incorrect-writes-to-host-msr_ia32_spec.patch [new file with mode: 0644]
queue-5.4/kvm-x86-reinstate-vendor-agnostic-check-on-spec_ctrl.patch [new file with mode: 0644]
queue-5.4/powerpc-bitops-fix-possible-undefined-behaviour-with.patch [new file with mode: 0644]
queue-5.4/series
queue-5.4/vfio-pci-move-dummy_resources_list-init-in-vfio_pci_.patch [new file with mode: 0644]

diff --git a/queue-5.4/btrfs-fix-race-when-defragmenting-leads-to-unnecessa.patch b/queue-5.4/btrfs-fix-race-when-defragmenting-leads-to-unnecessa.patch
new file mode 100644 (file)
index 0000000..48e8f3b
--- /dev/null
@@ -0,0 +1,109 @@
+From c9bb8839c49c804b723f483343650aed46657f04 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Nov 2020 11:07:33 +0000
+Subject: btrfs: fix race when defragmenting leads to unnecessary IO
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 7f458a3873ae94efe1f37c8b96c97e7298769e98 ]
+
+When defragmenting we skip ranges that have holes or inline extents, so that
+we don't do unnecessary IO and waste space. We do this check when calling
+should_defrag_range() at btrfs_defrag_file(). However we do it without
+holding the inode's lock. We do it this way to avoid blocking for too
+long other tasks that may want to operate on other file ranges, since
+after the call to should_defrag_range() and before locking the inode we
+trigger a synchronous page cache readahead. However,
+before we were able to lock the inode, some other task might have punched
+a hole in our range, or we may now have an inline extent there, in which
+case we should not set the range for defrag anymore since that would cause
+unnecessary IO and make us waste space (i.e. allocating extents to contain
+zeros for a hole).
+
+So after we locked the inode and the range in the iotree, check again if
+we have holes or an inline extent, and if we do, just skip the range.
+
+I hit this while testing my next patch that fixes races when updating an
+inode's number of bytes (subject "btrfs: update the number of bytes used
+by an inode atomically"), and it depends on this change in order to work
+correctly. Alternatively I could rework that other patch to detect holes
+and flag their range with the 'new delalloc' bit, but this itself fixes
+an efficiency problem due to a race that, from a functional point of view, is
+not harmful (it could be triggered with btrfs/062 from fstests).
+
+CC: stable@vger.kernel.org # 5.4+
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/ioctl.c | 39 +++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 39 insertions(+)
+
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index f58e03d1775d8..8ed71b3b25466 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1256,6 +1256,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
+       u64 page_end;
+       u64 page_cnt;
+       u64 start = (u64)start_index << PAGE_SHIFT;
++      u64 search_start;
+       int ret;
+       int i;
+       int i_done;
+@@ -1352,6 +1353,40 @@ static int cluster_pages_for_defrag(struct inode *inode,
+       lock_extent_bits(&BTRFS_I(inode)->io_tree,
+                        page_start, page_end - 1, &cached_state);
++
++      /*
++       * When defragmenting we skip ranges that have holes or inline extents,
++       * (check should_defrag_range()), to avoid unnecessary IO and wasting
++       * space. At btrfs_defrag_file(), we check if a range should be defragged
++       * before locking the inode and then, if it should, we trigger a sync
++       * page cache readahead - we lock the inode only after that to avoid
++       * blocking for too long other tasks that possibly want to operate on
++       * other file ranges. But before we were able to get the inode lock,
++       * some other task may have punched a hole in the range, or we may have
++       * now an inline extent, in which case we should not defrag. So check
++       * for that here, where we have the inode and the range locked, and bail
++       * out if that happened.
++       */
++      search_start = page_start;
++      while (search_start < page_end) {
++              struct extent_map *em;
++
++              em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, search_start,
++                                    page_end - search_start, 0);
++              if (IS_ERR(em)) {
++                      ret = PTR_ERR(em);
++                      goto out_unlock_range;
++              }
++              if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
++                      free_extent_map(em);
++                      /* Ok, 0 means we did not defrag anything */
++                      ret = 0;
++                      goto out_unlock_range;
++              }
++              search_start = extent_map_end(em);
++              free_extent_map(em);
++      }
++
+       clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
+                         page_end - 1, EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
+                         EXTENT_DEFRAG, 0, 0, &cached_state);
+@@ -1382,6 +1417,10 @@ static int cluster_pages_for_defrag(struct inode *inode,
+       btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
+       extent_changeset_free(data_reserved);
+       return i_done;
++
++out_unlock_range:
++      unlock_extent_cached(&BTRFS_I(inode)->io_tree,
++                           page_start, page_end - 1, &cached_state);
+ out:
+       for (i = 0; i < i_done; i++) {
+               unlock_page(pages[i]);
+-- 
+2.27.0
+
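For readers unfamiliar with the check added above: in btrfs, extent maps that do
not describe a regular on-disk extent carry special block_start values rather
than real disk offsets, so a single comparison against EXTENT_MAP_LAST_BYTE
catches holes and inline extents. A rough sketch of the relevant definitions
(recalled from fs/btrfs/extent_map.h of this era, shown only for illustration):

        /* Special block_start values; LAST_BYTE is the comparison threshold. */
        #define EXTENT_MAP_LAST_BYTE    ((u64)-4)
        #define EXTENT_MAP_HOLE         ((u64)-3)
        #define EXTENT_MAP_INLINE       ((u64)-2)
        #define EXTENT_MAP_DELALLOC     ((u64)-1)

        /* Hence "em->block_start >= EXTENT_MAP_LAST_BYTE" is true for holes,
         * inline extents and delalloc ranges, which defrag should skip. */
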
diff --git a/queue-5.4/ext4-don-t-remount-read-only-with-errors-continue-on.patch b/queue-5.4/ext4-don-t-remount-read-only-with-errors-continue-on.patch
new file mode 100644 (file)
index 0000000..83f7f86
--- /dev/null
@@ -0,0 +1,56 @@
+From 03353b279b987b3a47465ee2367895f14a3283c4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Nov 2020 12:33:54 +0100
+Subject: ext4: don't remount read-only with errors=continue on reboot
+
+From: Jan Kara <jack@suse.cz>
+
+[ Upstream commit b08070eca9e247f60ab39d79b2c25d274750441f ]
+
+ext4_handle_error() with errors=continue mount option can accidentally
+remount the filesystem read-only when the system is rebooting. Fix that.
+
+Fixes: 1dc1097ff60e ("ext4: avoid panic during forced reboot")
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Andreas Dilger <adilger@dilger.ca>
+Cc: stable@kernel.org
+Link: https://lore.kernel.org/r/20201127113405.26867-2-jack@suse.cz
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/super.c | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 920658ca8777d..06568467b0c27 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -455,19 +455,17 @@ static bool system_going_down(void)
+ static void ext4_handle_error(struct super_block *sb)
+ {
++      journal_t *journal = EXT4_SB(sb)->s_journal;
++
+       if (test_opt(sb, WARN_ON_ERROR))
+               WARN_ON_ONCE(1);
+-      if (sb_rdonly(sb))
++      if (sb_rdonly(sb) || test_opt(sb, ERRORS_CONT))
+               return;
+-      if (!test_opt(sb, ERRORS_CONT)) {
+-              journal_t *journal = EXT4_SB(sb)->s_journal;
+-
+-              EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
+-              if (journal)
+-                      jbd2_journal_abort(journal, -EIO);
+-      }
++      EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
++      if (journal)
++              jbd2_journal_abort(journal, -EIO);
+       /*
+        * We force ERRORS_RO behavior when system is rebooting. Otherwise we
+        * could panic during 'reboot -f' as the underlying device got already
+-- 
+2.27.0
+
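For context on the policy being fixed: ext4's errors= mount option selects what
happens on metadata errors (continue, remount-ro, or panic). The hunk above makes
ext4_handle_error() bail out early for errors=continue, before the reboot-time
ERRORS_RO forcing mentioned in the trailing comment, so an errors=continue mount
is no longer flipped read-only while the system goes down. A condensed paraphrase
of the resulting flow (not the literal function body, which continues past the
quoted hunk):

        static void ext4_handle_error(struct super_block *sb)
        {
                journal_t *journal = EXT4_SB(sb)->s_journal;

                if (test_opt(sb, WARN_ON_ERROR))
                        WARN_ON_ONCE(1);
                if (sb_rdonly(sb) || test_opt(sb, ERRORS_CONT))
                        return;                 /* errors=continue: leave the fs alone */
                EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
                if (journal)
                        jbd2_journal_abort(journal, -EIO);
                /* ... reboot / ERRORS_RO / panic handling follows ... */
        }
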
diff --git a/queue-5.4/jffs2-allow-setting-rp_size-to-zero-during-remountin.patch b/queue-5.4/jffs2-allow-setting-rp_size-to-zero-during-remountin.patch
new file mode 100644 (file)
index 0000000..67b3b20
--- /dev/null
@@ -0,0 +1,74 @@
+From e7e1691dc3e2f013bcfb67bbb28394757f4bf273 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Oct 2020 14:54:43 +0800
+Subject: jffs2: Allow setting rp_size to zero during remounting
+
+From: lizhe <lizhe67@huawei.com>
+
+[ Upstream commit cd3ed3c73ac671ff6b0230ccb72b8300292d3643 ]
+
+Setting rp_size to zero is silently ignored during remounting.
+
+The way we detect whether an rp_size option was given on remount is to
+check whether the parsed rp_size is non-zero, which cannot work when the
+user passes "rp_size=0".
+
+This patch adds a bool variable "set_rp_size" to fix this problem.
+
+Reported-by: Jubin Zhong <zhongjubin@huawei.com>
+Signed-off-by: lizhe <lizhe67@huawei.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/jffs2/jffs2_fs_sb.h | 1 +
+ fs/jffs2/super.c       | 7 +++++--
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
+index 778275f48a879..5a7091746f68b 100644
+--- a/fs/jffs2/jffs2_fs_sb.h
++++ b/fs/jffs2/jffs2_fs_sb.h
+@@ -38,6 +38,7 @@ struct jffs2_mount_opts {
+        * users. This is implemented simply by means of not allowing the
+        * latter users to write to the file system if the amount if the
+        * available space is less then 'rp_size'. */
++      bool set_rp_size;
+       unsigned int rp_size;
+ };
+diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
+index 60636b2e35ea4..68ce77cbeed3b 100644
+--- a/fs/jffs2/super.c
++++ b/fs/jffs2/super.c
+@@ -88,7 +88,7 @@ static int jffs2_show_options(struct seq_file *s, struct dentry *root)
+       if (opts->override_compr)
+               seq_printf(s, ",compr=%s", jffs2_compr_name(opts->compr));
+-      if (opts->rp_size)
++      if (opts->set_rp_size)
+               seq_printf(s, ",rp_size=%u", opts->rp_size / 1024);
+       return 0;
+@@ -212,6 +212,7 @@ static int jffs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
+               if (opt > c->mtd->size)
+                       return invalf(fc, "jffs2: Too large reserve pool specified, max is %llu KB",
+                                     c->mtd->size / 1024);
++              c->mount_opts.set_rp_size = true;
+               c->mount_opts.rp_size = opt;
+               break;
+       default:
+@@ -231,8 +232,10 @@ static inline void jffs2_update_mount_opts(struct fs_context *fc)
+               c->mount_opts.override_compr = new_c->mount_opts.override_compr;
+               c->mount_opts.compr = new_c->mount_opts.compr;
+       }
+-      if (new_c->mount_opts.rp_size)
++      if (new_c->mount_opts.set_rp_size) {
++              c->mount_opts.set_rp_size = new_c->mount_opts.set_rp_size;
+               c->mount_opts.rp_size = new_c->mount_opts.rp_size;
++      }
+       mutex_unlock(&c->alloc_sem);
+ }
+-- 
+2.27.0
+
diff --git a/queue-5.4/jffs2-fix-null-pointer-dereference-in-rp_size-fs-opt.patch b/queue-5.4/jffs2-fix-null-pointer-dereference-in-rp_size-fs-opt.patch
new file mode 100644 (file)
index 0000000..85412ca
--- /dev/null
@@ -0,0 +1,114 @@
+From 04a4dd99883146f92b9d55708ceb8931a1a05c29 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Oct 2020 14:12:04 +0100
+Subject: jffs2: Fix NULL pointer dereference in rp_size fs option parsing
+
+From: Jamie Iles <jamie@nuviainc.com>
+
+[ Upstream commit a61df3c413e49b0042f9caf774c58512d1cc71b7 ]
+
+syzkaller found the following JFFS2 splat:
+
+  Unable to handle kernel paging request at virtual address dfffa00000000001
+  Mem abort info:
+    ESR = 0x96000004
+    EC = 0x25: DABT (current EL), IL = 32 bits
+    SET = 0, FnV = 0
+    EA = 0, S1PTW = 0
+  Data abort info:
+    ISV = 0, ISS = 0x00000004
+    CM = 0, WnR = 0
+  [dfffa00000000001] address between user and kernel address ranges
+  Internal error: Oops: 96000004 [#1] SMP
+  Dumping ftrace buffer:
+     (ftrace buffer empty)
+  Modules linked in:
+  CPU: 0 PID: 12745 Comm: syz-executor.5 Tainted: G S                5.9.0-rc8+ #98
+  Hardware name: linux,dummy-virt (DT)
+  pstate: 20400005 (nzCv daif +PAN -UAO BTYPE=--)
+  pc : jffs2_parse_param+0x138/0x308 fs/jffs2/super.c:206
+  lr : jffs2_parse_param+0x108/0x308 fs/jffs2/super.c:205
+  sp : ffff000022a57910
+  x29: ffff000022a57910 x28: 0000000000000000
+  x27: ffff000057634008 x26: 000000000000d800
+  x25: 000000000000d800 x24: ffff0000271a9000
+  x23: ffffa0001adb5dc0 x22: ffff000023fdcf00
+  x21: 1fffe0000454af2c x20: ffff000024cc9400
+  x19: 0000000000000000 x18: 0000000000000000
+  x17: 0000000000000000 x16: ffffa000102dbdd0
+  x15: 0000000000000000 x14: ffffa000109e44bc
+  x13: ffffa00010a3a26c x12: ffff80000476e0b3
+  x11: 1fffe0000476e0b2 x10: ffff80000476e0b2
+  x9 : ffffa00010a3ad60 x8 : ffff000023b70593
+  x7 : 0000000000000003 x6 : 00000000f1f1f1f1
+  x5 : ffff000023fdcf00 x4 : 0000000000000002
+  x3 : ffffa00010000000 x2 : 0000000000000001
+  x1 : dfffa00000000000 x0 : 0000000000000008
+  Call trace:
+   jffs2_parse_param+0x138/0x308 fs/jffs2/super.c:206
+   vfs_parse_fs_param+0x234/0x4e8 fs/fs_context.c:117
+   vfs_parse_fs_string+0xe8/0x148 fs/fs_context.c:161
+   generic_parse_monolithic+0x17c/0x208 fs/fs_context.c:201
+   parse_monolithic_mount_data+0x7c/0xa8 fs/fs_context.c:649
+   do_new_mount fs/namespace.c:2871 [inline]
+   path_mount+0x548/0x1da8 fs/namespace.c:3192
+   do_mount+0x124/0x138 fs/namespace.c:3205
+   __do_sys_mount fs/namespace.c:3413 [inline]
+   __se_sys_mount fs/namespace.c:3390 [inline]
+   __arm64_sys_mount+0x164/0x238 fs/namespace.c:3390
+   __invoke_syscall arch/arm64/kernel/syscall.c:36 [inline]
+   invoke_syscall arch/arm64/kernel/syscall.c:48 [inline]
+   el0_svc_common.constprop.0+0x15c/0x598 arch/arm64/kernel/syscall.c:149
+   do_el0_svc+0x60/0x150 arch/arm64/kernel/syscall.c:195
+   el0_svc+0x34/0xb0 arch/arm64/kernel/entry-common.c:226
+   el0_sync_handler+0xc8/0x5b4 arch/arm64/kernel/entry-common.c:236
+   el0_sync+0x15c/0x180 arch/arm64/kernel/entry.S:663
+  Code: d2d40001 f2fbffe1 91002260 d343fc02 (38e16841)
+  ---[ end trace 4edf690313deda44 ]---
+
+This happens because, since ec10a24f10c8, option parsing happens before
+fill_super and so the MTD device isn't yet associated with the
+filesystem. Defer the size check until there is a valid association.
+
+Fixes: ec10a24f10c8 ("vfs: Convert jffs2 to use the new mount API")
+Cc: <stable@vger.kernel.org>
+Cc: David Howells <dhowells@redhat.com>
+Signed-off-by: Jamie Iles <jamie@nuviainc.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/jffs2/super.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
+index 68ce77cbeed3b..6839a61e8ff1e 100644
+--- a/fs/jffs2/super.c
++++ b/fs/jffs2/super.c
+@@ -208,12 +208,8 @@ static int jffs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
+       case Opt_rp_size:
+               if (result.uint_32 > UINT_MAX / 1024)
+                       return invalf(fc, "jffs2: rp_size unrepresentable");
+-              opt = result.uint_32 * 1024;
+-              if (opt > c->mtd->size)
+-                      return invalf(fc, "jffs2: Too large reserve pool specified, max is %llu KB",
+-                                    c->mtd->size / 1024);
++              c->mount_opts.rp_size = result.uint_32 * 1024;
+               c->mount_opts.set_rp_size = true;
+-              c->mount_opts.rp_size = opt;
+               break;
+       default:
+               return -EINVAL;
+@@ -275,6 +271,10 @@ static int jffs2_fill_super(struct super_block *sb, struct fs_context *fc)
+       c->mtd = sb->s_mtd;
+       c->os_priv = sb;
++      if (c->mount_opts.rp_size > c->mtd->size)
++              return invalf(fc, "jffs2: Too large reserve pool specified, max is %llu KB",
++                            c->mtd->size / 1024);
++
+       /* Initialize JFFS2 superblock locks, the further initialization will
+        * be done later */
+       mutex_init(&c->alloc_sem);
+-- 
+2.27.0
+
diff --git a/queue-5.4/kvm-svm-relax-conditions-for-allowing-msr_ia32_spec_.patch b/queue-5.4/kvm-svm-relax-conditions-for-allowing-msr_ia32_spec_.patch
new file mode 100644 (file)
index 0000000..6ecc89d
--- /dev/null
@@ -0,0 +1,51 @@
+From 9f6e8e8efcadf97016b235ee3b90c48c4cb4d677 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Feb 2020 16:10:52 +0100
+Subject: KVM: SVM: relax conditions for allowing MSR_IA32_SPEC_CTRL accesses
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+[ Upstream commit df7e8818926eb4712b67421442acf7d568fe2645 ]
+
+Userspace that does not know about the AMD_IBRS bit might still
+allow the guest to protect itself with MSR_IA32_SPEC_CTRL using
+the Intel SPEC_CTRL bit.  However, svm.c disallows this and will
+cause a #GP in the guest when writing to the MSR.  Fix this by
+loosening the test and allowing the Intel CPUID bit, and in fact
+allow the AMD_STIBP bit as well since it allows writing to
+MSR_IA32_SPEC_CTRL too.
+
+Reported-by: Zhiyi Guo <zhguo@redhat.com>
+Analyzed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
+Analyzed-by: Laszlo Ersek <lersek@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/svm.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 72bf1d8175ac2..ca746006ac040 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -4233,6 +4233,8 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+               break;
+       case MSR_IA32_SPEC_CTRL:
+               if (!msr_info->host_initiated &&
++                  !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
++                  !guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
+                       return 1;
+@@ -4318,6 +4320,8 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+               break;
+       case MSR_IA32_SPEC_CTRL:
+               if (!msr->host_initiated &&
++                  !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
++                  !guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
+                       return 1;
+-- 
+2.27.0
+
diff --git a/queue-5.4/kvm-x86-avoid-incorrect-writes-to-host-msr_ia32_spec.patch b/queue-5.4/kvm-x86-avoid-incorrect-writes-to-host-msr_ia32_spec.patch
new file mode 100644 (file)
index 0000000..4bcbb97
--- /dev/null
@@ -0,0 +1,141 @@
+From 0ee8d5f4f8698b52676e386360b643f0a7785542 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Jan 2020 16:33:06 +0100
+Subject: KVM: x86: avoid incorrect writes to host MSR_IA32_SPEC_CTRL
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+[ Upstream commit 6441fa6178f5456d1d4b512c08798888f99db185 ]
+
+If the guest is configured to have SPEC_CTRL but the host does not
+(which is a nonsensical configuration but these are not explicitly
+forbidden) then a host-initiated MSR write can write vmx->spec_ctrl
+(respectively svm->spec_ctrl) and trigger a #GP when KVM tries to
+restore the host value of the MSR.  Add a more comprehensive check
+for valid bits of SPEC_CTRL, covering host CPUID flags and,
+since we are at it and it is more correct that way, guest CPUID
+flags too.
+
+For AMD, remove the unnecessary is_guest_mode check around setting
+the MSR interception bitmap, so that the code looks the same as
+for Intel.
+
+Cc: Jim Mattson <jmattson@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/svm.c     |  9 +++------
+ arch/x86/kvm/vmx/vmx.c |  7 +++----
+ arch/x86/kvm/x86.c     | 22 ++++++++++++++++++++++
+ arch/x86/kvm/x86.h     |  1 +
+ 4 files changed, 29 insertions(+), 10 deletions(-)
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index c79c1a07f44b9..72bf1d8175ac2 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -4322,12 +4322,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+                   !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
+                       return 1;
+-              /* The STIBP bit doesn't fault even if it's not advertised */
+-              if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
++              if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
+                       return 1;
+               svm->spec_ctrl = data;
+-
+               if (!data)
+                       break;
+@@ -4351,13 +4349,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+               if (data & ~PRED_CMD_IBPB)
+                       return 1;
+-
++              if (!boot_cpu_has(X86_FEATURE_AMD_IBPB))
++                      return 1;
+               if (!data)
+                       break;
+               wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
+-              if (is_guest_mode(vcpu))
+-                      break;
+               set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
+               break;
+       case MSR_AMD64_VIRT_SPEC_CTRL:
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 2a1ed3aae100e..8450fce70bd96 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1974,12 +1974,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+                   !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
+                       return 1;
+-              /* The STIBP bit doesn't fault even if it's not advertised */
+-              if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
++              if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
+                       return 1;
+               vmx->spec_ctrl = data;
+-
+               if (!data)
+                       break;
+@@ -2006,7 +2004,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+               if (data & ~PRED_CMD_IBPB)
+                       return 1;
+-
++              if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL))
++                      return 1;
+               if (!data)
+                       break;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index b7f86acb8c911..72990c3c6faf7 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -10369,6 +10369,28 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
+ }
+ EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
++u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu)
++{
++      uint64_t bits = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD;
++
++      /* The STIBP bit doesn't fault even if it's not advertised */
++      if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
++          !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
++              bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
++      if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL) &&
++          !boot_cpu_has(X86_FEATURE_AMD_IBRS))
++              bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
++
++      if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL_SSBD) &&
++          !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
++              bits &= ~SPEC_CTRL_SSBD;
++      if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
++          !boot_cpu_has(X86_FEATURE_AMD_SSBD))
++              bits &= ~SPEC_CTRL_SSBD;
++
++      return bits;
++}
++EXPORT_SYMBOL_GPL(kvm_spec_ctrl_valid_bits);
+ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
+ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
+diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
+index de6b55484876a..301286d924320 100644
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -368,5 +368,6 @@ static inline bool kvm_pat_valid(u64 data)
+ void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
+ void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
++u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
+ #endif
+-- 
+2.27.0
+
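A brief usage note, condensed from the svm.c and vmx.c hunks in this same patch
(details elided, not a verbatim copy): guest writes to the MSR are now checked in
two steps, first whether the guest's CPUID exposes the MSR at all, then whether
the written value contains only bits valid for both guest and host:

        case MSR_IA32_SPEC_CTRL:
                if (!msr_info->host_initiated &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
                        return 1;       /* MSR not exposed to this guest -> #GP */
                if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
                        return 1;       /* reserved or unsupported bit set -> #GP */
                vmx->spec_ctrl = data;  /* svm.c updates svm->spec_ctrl the same way */
                break;
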
diff --git a/queue-5.4/kvm-x86-reinstate-vendor-agnostic-check-on-spec_ctrl.patch b/queue-5.4/kvm-x86-reinstate-vendor-agnostic-check-on-spec_ctrl.patch
new file mode 100644 (file)
index 0000000..2080566
--- /dev/null
@@ -0,0 +1,139 @@
+From 4e19bb4c16631b522e259fc7b2506aed0b3a346e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 3 Dec 2020 09:40:15 -0500
+Subject: KVM: x86: reinstate vendor-agnostic check on SPEC_CTRL cpuid bits
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+[ Upstream commit 39485ed95d6b83b62fa75c06c2c4d33992e0d971 ]
+
+Until commit e7c587da1252 ("x86/speculation: Use synthetic bits for
+IBRS/IBPB/STIBP"), KVM was testing both Intel and AMD CPUID bits before
+allowing the guest to write MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD.
+Testing only Intel bits on VMX processors, or only AMD bits on SVM
+processors, fails if the guests are created with the "opposite" vendor
+as the host.
+
+While at it, also tweak the host CPU check to use the vendor-agnostic
+feature bit X86_FEATURE_IBPB, since we only care about the availability
+of the MSR on the host here and not about specific CPUID bits.
+
+Fixes: e7c587da1252 ("x86/speculation: Use synthetic bits for IBRS/IBPB/STIBP")
+Cc: stable@vger.kernel.org
+Reported-by: Denis V. Lunev <den@openvz.org>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/cpuid.h   | 14 ++++++++++++++
+ arch/x86/kvm/svm.c     | 14 ++++----------
+ arch/x86/kvm/vmx/vmx.c |  8 ++++----
+ 3 files changed, 22 insertions(+), 14 deletions(-)
+
+diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
+index d78a61408243f..7dec43b2c4205 100644
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -154,6 +154,20 @@ static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
+       return x86_stepping(best->eax);
+ }
++static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
++{
++      return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
++              guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
++              guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
++              guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
++}
++
++static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
++{
++      return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
++              guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB));
++}
++
+ static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
+ {
+       return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index ca746006ac040..2b506904be024 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -4233,10 +4233,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+               break;
+       case MSR_IA32_SPEC_CTRL:
+               if (!msr_info->host_initiated &&
+-                  !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
+-                  !guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) &&
+-                  !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
+-                  !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
++                  !guest_has_spec_ctrl_msr(vcpu))
+                       return 1;
+               msr_info->data = svm->spec_ctrl;
+@@ -4320,10 +4317,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+               break;
+       case MSR_IA32_SPEC_CTRL:
+               if (!msr->host_initiated &&
+-                  !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
+-                  !guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) &&
+-                  !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
+-                  !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
++                  !guest_has_spec_ctrl_msr(vcpu))
+                       return 1;
+               if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
+@@ -4348,12 +4342,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+               break;
+       case MSR_IA32_PRED_CMD:
+               if (!msr->host_initiated &&
+-                  !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
++                  !guest_has_pred_cmd_msr(vcpu))
+                       return 1;
+               if (data & ~PRED_CMD_IBPB)
+                       return 1;
+-              if (!boot_cpu_has(X86_FEATURE_AMD_IBPB))
++              if (!boot_cpu_has(X86_FEATURE_IBPB))
+                       return 1;
+               if (!data)
+                       break;
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 8450fce70bd96..e7fd2f00edc11 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1788,7 +1788,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+               break;
+       case MSR_IA32_SPEC_CTRL:
+               if (!msr_info->host_initiated &&
+-                  !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
++                  !guest_has_spec_ctrl_msr(vcpu))
+                       return 1;
+               msr_info->data = to_vmx(vcpu)->spec_ctrl;
+@@ -1971,7 +1971,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+               break;
+       case MSR_IA32_SPEC_CTRL:
+               if (!msr_info->host_initiated &&
+-                  !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
++                  !guest_has_spec_ctrl_msr(vcpu))
+                       return 1;
+               if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
+@@ -1999,12 +1999,12 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+               break;
+       case MSR_IA32_PRED_CMD:
+               if (!msr_info->host_initiated &&
+-                  !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
++                  !guest_has_pred_cmd_msr(vcpu))
+                       return 1;
+               if (data & ~PRED_CMD_IBPB)
+                       return 1;
+-              if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL))
++              if (!boot_cpu_has(X86_FEATURE_IBPB))
+                       return 1;
+               if (!data)
+                       break;
+-- 
+2.27.0
+
diff --git a/queue-5.4/powerpc-bitops-fix-possible-undefined-behaviour-with.patch b/queue-5.4/powerpc-bitops-fix-possible-undefined-behaviour-with.patch
new file mode 100644 (file)
index 0000000..8ddc3c5
--- /dev/null
@@ -0,0 +1,122 @@
+From c80bdfe58c3a53725892bb1bfc8d6be5d6170c3f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Oct 2020 14:05:46 +0000
+Subject: powerpc/bitops: Fix possible undefined behaviour with fls() and
+ fls64()
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+[ Upstream commit 1891ef21d92c4801ea082ee8ed478e304ddc6749 ]
+
+fls() and fls64() are using __builtin_clz() and __builtin_clzll().
+On powerpc, those builtins trivially use the cntlzw and cntlzd power
+instructions.
+
+Although those instructions provide the expected result with
+input argument 0, __builtin_clz() and __builtin_clzll() are
+documented as undefined for value 0.
+
+The easiest fix would be to use fls() and fls64() functions
+defined in include/asm-generic/bitops/builtin-fls.h and
+include/asm-generic/bitops/fls64.h, but GCC output is not optimal:
+
+00000388 <testfls>:
+ 388:   2c 03 00 00     cmpwi   r3,0
+ 38c:   41 82 00 10     beq     39c <testfls+0x14>
+ 390:   7c 63 00 34     cntlzw  r3,r3
+ 394:   20 63 00 20     subfic  r3,r3,32
+ 398:   4e 80 00 20     blr
+ 39c:   38 60 00 00     li      r3,0
+ 3a0:   4e 80 00 20     blr
+
+000003b0 <testfls64>:
+ 3b0:   2c 03 00 00     cmpwi   r3,0
+ 3b4:   40 82 00 1c     bne     3d0 <testfls64+0x20>
+ 3b8:   2f 84 00 00     cmpwi   cr7,r4,0
+ 3bc:   38 60 00 00     li      r3,0
+ 3c0:   4d 9e 00 20     beqlr   cr7
+ 3c4:   7c 83 00 34     cntlzw  r3,r4
+ 3c8:   20 63 00 20     subfic  r3,r3,32
+ 3cc:   4e 80 00 20     blr
+ 3d0:   7c 63 00 34     cntlzw  r3,r3
+ 3d4:   20 63 00 40     subfic  r3,r3,64
+ 3d8:   4e 80 00 20     blr
+
+When the input of fls(x) is a constant, just check x for nullity and
+return either 0 or __builtin_clz(x). Otherwise, use cntlzw instruction
+directly.
+
+For fls64() on PPC64, do the same but with __builtin_clzll() and
+the cntlzd instruction. On PPC32, let's take the generic fls64(), which
+will use our fls(). The result is as expected:
+
+00000388 <testfls>:
+ 388:   7c 63 00 34     cntlzw  r3,r3
+ 38c:   20 63 00 20     subfic  r3,r3,32
+ 390:   4e 80 00 20     blr
+
+000003a0 <testfls64>:
+ 3a0:   2c 03 00 00     cmpwi   r3,0
+ 3a4:   40 82 00 10     bne     3b4 <testfls64+0x14>
+ 3a8:   7c 83 00 34     cntlzw  r3,r4
+ 3ac:   20 63 00 20     subfic  r3,r3,32
+ 3b0:   4e 80 00 20     blr
+ 3b4:   7c 63 00 34     cntlzw  r3,r3
+ 3b8:   20 63 00 40     subfic  r3,r3,64
+ 3bc:   4e 80 00 20     blr
+
+Fixes: 2fcff790dcb4 ("powerpc: Use builtin functions for fls()/__fls()/fls64()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Acked-by: Segher Boessenkool <segher@kernel.crashing.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/348c2d3f19ffcff8abe50d52513f989c4581d000.1603375524.git.christophe.leroy@csgroup.eu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/bitops.h | 23 +++++++++++++++++++++--
+ 1 file changed, 21 insertions(+), 2 deletions(-)
+
+diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
+index 603aed229af78..46338f2360046 100644
+--- a/arch/powerpc/include/asm/bitops.h
++++ b/arch/powerpc/include/asm/bitops.h
+@@ -217,15 +217,34 @@ static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
+  */
+ static __inline__ int fls(unsigned int x)
+ {
+-      return 32 - __builtin_clz(x);
++      int lz;
++
++      if (__builtin_constant_p(x))
++              return x ? 32 - __builtin_clz(x) : 0;
++      asm("cntlzw %0,%1" : "=r" (lz) : "r" (x));
++      return 32 - lz;
+ }
+ #include <asm-generic/bitops/builtin-__fls.h>
++/*
++ * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
++ * instruction; for 32-bit we use the generic version, which does two
++ * 32-bit fls calls.
++ */
++#ifdef CONFIG_PPC64
+ static __inline__ int fls64(__u64 x)
+ {
+-      return 64 - __builtin_clzll(x);
++      int lz;
++
++      if (__builtin_constant_p(x))
++              return x ? 64 - __builtin_clzll(x) : 0;
++      asm("cntlzd %0,%1" : "=r" (lz) : "r" (x));
++      return 64 - lz;
+ }
++#else
++#include <asm-generic/bitops/fls64.h>
++#endif
+ #ifdef CONFIG_PPC64
+ unsigned int __arch_hweight8(unsigned int w);
+-- 
+2.27.0
+
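For comparison, the generic fallback the changelog refers to guards the zero case
explicitly, because GCC documents __builtin_clz(0) as undefined even though the
powerpc cntlzw instruction itself returns 32 for a zero input. A sketch along the
lines of include/asm-generic/bitops/builtin-fls.h (quoted from memory, for
illustration only):

        static __always_inline int fls(unsigned int x)
        {
                return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
        }

This is exactly the constant-input branch the patch keeps, while the non-constant
case is handled by emitting cntlzw/cntlzd directly.
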
diff --git a/queue-5.4/series b/queue-5.4/series
index d55191dc9cd77ac9c776166aa29c0262a531559e..019fc59e7affe17857d3708cf925a27d3be37474 100644 (file)
--- a/queue-5.4/series
@@ -6,3 +6,12 @@ ubifs-prevent-creating-duplicate-encrypted-filenames.patch
 f2fs-prevent-creating-duplicate-encrypted-filenames.patch
 fscrypt-add-fscrypt_is_nokey_name.patch
 fscrypt-remove-kernel-internal-constants-from-uapi-header.patch
+vfio-pci-move-dummy_resources_list-init-in-vfio_pci_.patch
+btrfs-fix-race-when-defragmenting-leads-to-unnecessa.patch
+ext4-don-t-remount-read-only-with-errors-continue-on.patch
+kvm-x86-avoid-incorrect-writes-to-host-msr_ia32_spec.patch
+kvm-svm-relax-conditions-for-allowing-msr_ia32_spec_.patch
+kvm-x86-reinstate-vendor-agnostic-check-on-spec_ctrl.patch
+powerpc-bitops-fix-possible-undefined-behaviour-with.patch
+jffs2-allow-setting-rp_size-to-zero-during-remountin.patch
+jffs2-fix-null-pointer-dereference-in-rp_size-fs-opt.patch
diff --git a/queue-5.4/vfio-pci-move-dummy_resources_list-init-in-vfio_pci_.patch b/queue-5.4/vfio-pci-move-dummy_resources_list-init-in-vfio_pci_.patch
new file mode 100644 (file)
index 0000000..5fa32f2
--- /dev/null
@@ -0,0 +1,49 @@
+From 33062e0b9dbad3eda4ce2145e1157220d0e7c154 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Nov 2020 18:52:02 +0100
+Subject: vfio/pci: Move dummy_resources_list init in vfio_pci_probe()
+
+From: Eric Auger <eric.auger@redhat.com>
+
+[ Upstream commit 16b8fe4caf499ae8e12d2ab1b1324497e36a7b83 ]
+
+In case an error occurs in vfio_pci_enable() before the call to
+vfio_pci_probe_mmaps(), vfio_pci_disable() will try to iterate
+on an uninitialized list and cause a kernel panic.
+
+Let's move the initialization to vfio_pci_probe() to fix the
+issue.
+
+Signed-off-by: Eric Auger <eric.auger@redhat.com>
+Fixes: 05f0c03fbac1 ("vfio-pci: Allow to mmap sub-page MMIO BARs if the mmio page is exclusive")
+CC: Stable <stable@vger.kernel.org> # v4.7+
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vfio/pci/vfio_pci.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 632653cd70e3b..2372e161cd5e8 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -114,8 +114,6 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
+       int bar;
+       struct vfio_pci_dummy_resource *dummy_res;
+-      INIT_LIST_HEAD(&vdev->dummy_resources_list);
+-
+       for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
+               res = vdev->pdev->resource + bar;
+@@ -1606,6 +1604,7 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+       mutex_init(&vdev->igate);
+       spin_lock_init(&vdev->irqlock);
+       mutex_init(&vdev->ioeventfds_lock);
++      INIT_LIST_HEAD(&vdev->dummy_resources_list);
+       INIT_LIST_HEAD(&vdev->ioeventfds_list);
+       mutex_init(&vdev->vma_lock);
+       INIT_LIST_HEAD(&vdev->vma_list);
+-- 
+2.27.0
+
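The failure mode described in the changelog is the classic uninitialized
list_head problem: if vfio_pci_enable() errors out before vfio_pci_probe_mmaps()
ever ran, vfio_pci_disable() iterates over vdev->dummy_resources_list whose
next/prev pointers were never set up, and the traversal dereferences garbage. A
minimal, self-contained illustration with hypothetical names (not the driver
code):

        #include <linux/list.h>
        #include <linux/slab.h>

        struct dummy_res {
                struct list_head res_next;
        };

        static void broken_teardown(struct list_head *head)
        {
                struct dummy_res *cur, *tmp;

                /* If *head was never INIT_LIST_HEAD()'d, head->next is garbage
                 * and this loop faults on the first iteration. */
                list_for_each_entry_safe(cur, tmp, head, res_next) {
                        list_del(&cur->res_next);
                        kfree(cur);
                }
        }

Initializing the list head in vfio_pci_probe() guarantees it is valid no matter
how early enable fails.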