git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.10-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 1 Feb 2021 12:52:39 +0000 (13:52 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 1 Feb 2021 12:52:39 +0000 (13:52 +0100)
added patches:
btrfs-fix-lockdep-warning-due-to-seqcount_mutex-on-32bit-arch.patch
btrfs-fix-possible-free-space-tree-corruption-with-online-conversion.patch
crypto-marvel-cesa-fix-tdma-descriptor-on-64-bit.patch
drivers-soc-atmel-add-null-entry-at-the-end-of-at91_soc_allowed_list.patch
drivers-soc-atmel-avoid-calling-at91_soc_init-on-non-at91-socs.patch
efi-apple-properties-reinstate-support-for-boolean-properties.patch
kvm-arm64-filter-out-v8.1-events-on-v8.0-hw.patch
kvm-forbid-the-use-of-tagged-userspace-addresses-for-memslots.patch
kvm-nsvm-cancel-kvm_req_get_nested_state_pages-on-nested-vmexit.patch
kvm-nvmx-sync-unsync-d-vmcs02-state-to-vmcs12-on-migration.patch
kvm-x86-allow-kvm_req_get_nested_state_pages-outside-guest-mode-for-vmx.patch
kvm-x86-get-smi-pending-status-correctly.patch
kvm-x86-pmu-fix-hw_ref_cpu_cycles-event-pseudo-encoding-in-intel_arch_events.patch
kvm-x86-pmu-fix-ubsan-shift-out-of-bounds-warning-in-intel_pmu_refresh.patch

15 files changed:
queue-5.10/btrfs-fix-lockdep-warning-due-to-seqcount_mutex-on-32bit-arch.patch [new file with mode: 0644]
queue-5.10/btrfs-fix-possible-free-space-tree-corruption-with-online-conversion.patch [new file with mode: 0644]
queue-5.10/crypto-marvel-cesa-fix-tdma-descriptor-on-64-bit.patch [new file with mode: 0644]
queue-5.10/drivers-soc-atmel-add-null-entry-at-the-end-of-at91_soc_allowed_list.patch [new file with mode: 0644]
queue-5.10/drivers-soc-atmel-avoid-calling-at91_soc_init-on-non-at91-socs.patch [new file with mode: 0644]
queue-5.10/efi-apple-properties-reinstate-support-for-boolean-properties.patch [new file with mode: 0644]
queue-5.10/kvm-arm64-filter-out-v8.1-events-on-v8.0-hw.patch [new file with mode: 0644]
queue-5.10/kvm-forbid-the-use-of-tagged-userspace-addresses-for-memslots.patch [new file with mode: 0644]
queue-5.10/kvm-nsvm-cancel-kvm_req_get_nested_state_pages-on-nested-vmexit.patch [new file with mode: 0644]
queue-5.10/kvm-nvmx-sync-unsync-d-vmcs02-state-to-vmcs12-on-migration.patch [new file with mode: 0644]
queue-5.10/kvm-x86-allow-kvm_req_get_nested_state_pages-outside-guest-mode-for-vmx.patch [new file with mode: 0644]
queue-5.10/kvm-x86-get-smi-pending-status-correctly.patch [new file with mode: 0644]
queue-5.10/kvm-x86-pmu-fix-hw_ref_cpu_cycles-event-pseudo-encoding-in-intel_arch_events.patch [new file with mode: 0644]
queue-5.10/kvm-x86-pmu-fix-ubsan-shift-out-of-bounds-warning-in-intel_pmu_refresh.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/btrfs-fix-lockdep-warning-due-to-seqcount_mutex-on-32bit-arch.patch b/queue-5.10/btrfs-fix-lockdep-warning-due-to-seqcount_mutex-on-32bit-arch.patch
new file mode 100644 (file)
index 0000000..41cd6e5
--- /dev/null
@@ -0,0 +1,154 @@
+From c41ec4529d3448df8998950d7bada757a1b321cf Mon Sep 17 00:00:00 2001
+From: Su Yue <l@damenly.su>
+Date: Thu, 21 Jan 2021 19:39:10 +0800
+Subject: btrfs: fix lockdep warning due to seqcount_mutex on 32bit arch
+
+From: Su Yue <l@damenly.su>
+
+commit c41ec4529d3448df8998950d7bada757a1b321cf upstream.
+
+This effectively reverts commit d5c8238849e7 ("btrfs: convert
+data_seqcount to seqcount_mutex_t").
+
+While running fstests on 32 bits test box, many tests failed because of
+warnings in dmesg. One of those warnings (btrfs/003):
+
+  [66.441317] WARNING: CPU: 6 PID: 9251 at include/linux/seqlock.h:279 btrfs_remove_chunk+0x58b/0x7b0 [btrfs]
+  [66.441446] CPU: 6 PID: 9251 Comm: btrfs Tainted: G           O      5.11.0-rc4-custom+ #5
+  [66.441449] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS ArchLinux 1.14.0-1 04/01/2014
+  [66.441451] EIP: btrfs_remove_chunk+0x58b/0x7b0 [btrfs]
+  [66.441472] EAX: 00000000 EBX: 00000001 ECX: c576070c EDX: c6b15803
+  [66.441475] ESI: 10000000 EDI: 00000000 EBP: c56fbcfc ESP: c56fbc70
+  [66.441477] DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068 EFLAGS: 00010246
+  [66.441481] CR0: 80050033 CR2: 05c8da20 CR3: 04b20000 CR4: 00350ed0
+  [66.441485] Call Trace:
+  [66.441510]  btrfs_relocate_chunk+0xb1/0x100 [btrfs]
+  [66.441529]  ? btrfs_lookup_block_group+0x17/0x20 [btrfs]
+  [66.441562]  btrfs_balance+0x8ed/0x13b0 [btrfs]
+  [66.441586]  ? btrfs_ioctl_balance+0x333/0x3c0 [btrfs]
+  [66.441619]  ? __this_cpu_preempt_check+0xf/0x11
+  [66.441643]  btrfs_ioctl_balance+0x333/0x3c0 [btrfs]
+  [66.441664]  ? btrfs_ioctl_get_supported_features+0x30/0x30 [btrfs]
+  [66.441683]  btrfs_ioctl+0x414/0x2ae0 [btrfs]
+  [66.441700]  ? __lock_acquire+0x35f/0x2650
+  [66.441717]  ? lockdep_hardirqs_on+0x87/0x120
+  [66.441720]  ? lockdep_hardirqs_on_prepare+0xd0/0x1e0
+  [66.441724]  ? call_rcu+0x2d3/0x530
+  [66.441731]  ? __might_fault+0x41/0x90
+  [66.441736]  ? kvm_sched_clock_read+0x15/0x50
+  [66.441740]  ? sched_clock+0x8/0x10
+  [66.441745]  ? sched_clock_cpu+0x13/0x180
+  [66.441750]  ? btrfs_ioctl_get_supported_features+0x30/0x30 [btrfs]
+  [66.441750]  ? btrfs_ioctl_get_supported_features+0x30/0x30 [btrfs]
+  [66.441768]  __ia32_sys_ioctl+0x165/0x8a0
+  [66.441773]  ? __this_cpu_preempt_check+0xf/0x11
+  [66.441785]  ? __might_fault+0x89/0x90
+  [66.441791]  __do_fast_syscall_32+0x54/0x80
+  [66.441796]  do_fast_syscall_32+0x32/0x70
+  [66.441801]  do_SYSENTER_32+0x15/0x20
+  [66.441805]  entry_SYSENTER_32+0x9f/0xf2
+  [66.441808] EIP: 0xab7b5549
+  [66.441814] EAX: ffffffda EBX: 00000003 ECX: c4009420 EDX: bfa91f5c
+  [66.441816] ESI: 00000003 EDI: 00000001 EBP: 00000000 ESP: bfa91e98
+  [66.441818] DS: 007b ES: 007b FS: 0000 GS: 0033 SS: 007b EFLAGS: 00000292
+  [66.441833] irq event stamp: 42579
+  [66.441835] hardirqs last  enabled at (42585): [<c60eb065>] console_unlock+0x495/0x590
+  [66.441838] hardirqs last disabled at (42590): [<c60eafd5>] console_unlock+0x405/0x590
+  [66.441840] softirqs last  enabled at (41698): [<c601b76c>] call_on_stack+0x1c/0x60
+  [66.441843] softirqs last disabled at (41681): [<c601b76c>] call_on_stack+0x1c/0x60
+
+  ========================================================================
+  btrfs_remove_chunk+0x58b/0x7b0:
+  __seqprop_mutex_assert at linux/./include/linux/seqlock.h:279
+  (inlined by) btrfs_device_set_bytes_used at linux/fs/btrfs/volumes.h:212
+  (inlined by) btrfs_remove_chunk at linux/fs/btrfs/volumes.c:2994
+  ========================================================================
+
+The warning is produced by lockdep_assert_held() in
+__seqprop_mutex_assert() if CONFIG_LOCKDEP is enabled.
+And volumes.c:2994 is btrfs_device_set_bytes_used() with mutex lock
+fs_info->chunk_mutex held already.
+
+After adding some debug prints, the cause was found that many
+__alloc_device() are called with NULL @fs_info (during scanning ioctl).
+Inside the function, btrfs_device_data_ordered_init() is expanded to
+seqcount_mutex_init().  In this scenario, its second
+parameter info->chunk_mutex  is &NULL->chunk_mutex which equals
+to offsetof(struct btrfs_fs_info, chunk_mutex) unexpectedly. Thus,
+seqcount_mutex_init() is called in wrong way. And later
+btrfs_device_get/set helpers trigger lockdep warnings.
+
+The device and filesystem object lifetimes are different and we'd have
+to synchronize initialization of the btrfs_device::data_seqcount with
+the fs_info, possibly using some additional synchronization. It would
+still not prevent concurrent access to the seqcount lock when it's used
+for read and initialization.
+
+Commit d5c8238849e7 ("btrfs: convert data_seqcount to seqcount_mutex_t")
+does not mention a particular problem being fixed so revert should not
+cause any harm and we'll get the lockdep warning fixed.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=210139
+Reported-by: Erhard F <erhard_f@mailbox.org>
+Fixes: d5c8238849e7 ("btrfs: convert data_seqcount to seqcount_mutex_t")
+CC: stable@vger.kernel.org # 5.10
+CC: Davidlohr Bueso <dbueso@suse.de>
+Signed-off-by: Su Yue <l@damenly.su>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/volumes.c |    2 +-
+ fs/btrfs/volumes.h |   11 ++++++-----
+ 2 files changed, 7 insertions(+), 6 deletions(-)
+
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -431,7 +431,7 @@ static struct btrfs_device *__alloc_devi
+       atomic_set(&dev->reada_in_flight, 0);
+       atomic_set(&dev->dev_stats_ccnt, 0);
+-      btrfs_device_data_ordered_init(dev, fs_info);
++      btrfs_device_data_ordered_init(dev);
+       INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
+       INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
+       extent_io_tree_init(fs_info, &dev->alloc_state,
+--- a/fs/btrfs/volumes.h
++++ b/fs/btrfs/volumes.h
+@@ -39,10 +39,10 @@ struct btrfs_io_geometry {
+ #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ #include <linux/seqlock.h>
+ #define __BTRFS_NEED_DEVICE_DATA_ORDERED
+-#define btrfs_device_data_ordered_init(device, info)                          \
+-      seqcount_mutex_init(&device->data_seqcount, &info->chunk_mutex)
++#define btrfs_device_data_ordered_init(device)        \
++      seqcount_init(&device->data_seqcount)
+ #else
+-#define btrfs_device_data_ordered_init(device, info) do { } while (0)
++#define btrfs_device_data_ordered_init(device) do { } while (0)
+ #endif
+ #define BTRFS_DEV_STATE_WRITEABLE     (0)
+@@ -72,8 +72,7 @@ struct btrfs_device {
+       blk_status_t last_flush_error;
+ #ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
+-      /* A seqcount_t with associated chunk_mutex (for lockdep) */
+-      seqcount_mutex_t data_seqcount;
++      seqcount_t data_seqcount;
+ #endif
+       /* the internal btrfs device id */
+@@ -164,9 +163,11 @@ btrfs_device_get_##name(const struct btr
+ static inline void                                                    \
+ btrfs_device_set_##name(struct btrfs_device *dev, u64 size)           \
+ {                                                                     \
++      preempt_disable();                                              \
+       write_seqcount_begin(&dev->data_seqcount);                      \
+       dev->name = size;                                               \
+       write_seqcount_end(&dev->data_seqcount);                        \
++      preempt_enable();                                               \
+ }
+ #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
+ #define BTRFS_DEVICE_GETSET_FUNCS(name)                                       \
diff --git a/queue-5.10/btrfs-fix-possible-free-space-tree-corruption-with-online-conversion.patch b/queue-5.10/btrfs-fix-possible-free-space-tree-corruption-with-online-conversion.patch
new file mode 100644 (file)
index 0000000..df554c9
--- /dev/null
@@ -0,0 +1,107 @@
+From 2f96e40212d435b328459ba6b3956395eed8fa9f Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Fri, 15 Jan 2021 16:26:17 -0500
+Subject: btrfs: fix possible free space tree corruption with online conversion
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+commit 2f96e40212d435b328459ba6b3956395eed8fa9f upstream.
+
+While running btrfs/011 in a loop I would often ASSERT() while trying to
+add a new free space entry that already existed, or get an EEXIST while
+adding a new block to the extent tree, which is another indication of
+double allocation.
+
+This occurs because when we do the free space tree population, we create
+the new root and then populate the tree and commit the transaction.
+The problem is when you create a new root, the root node and commit root
+node are the same.  During this initial transaction commit we will run
+all of the delayed refs that were paused during the free space tree
+generation, and thus begin to cache block groups.  While caching block
+groups the caching thread will be reading from the main root for the
+free space tree, so as we make allocations we'll be changing the free
+space tree, which can cause us to add the same range twice which results
+in either the ASSERT(ret != -EEXIST); in __btrfs_add_free_space, or in a
+variety of different errors when running delayed refs because of a
+double allocation.
+
+Fix this by marking the fs_info as unsafe to load the free space tree,
+and fall back on the old slow method.  We could be smarter than this,
+for example caching the block group while we're populating the free
+space tree, but since this is a serious problem I've opted for the
+simplest solution.
+
+CC: stable@vger.kernel.org # 4.9+
+Fixes: a5ed91828518 ("Btrfs: implement the free space B-tree")
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/block-group.c     |   10 +++++++++-
+ fs/btrfs/ctree.h           |    3 +++
+ fs/btrfs/free-space-tree.c |   10 +++++++++-
+ 3 files changed, 21 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -639,7 +639,15 @@ static noinline void caching_thread(stru
+       mutex_lock(&caching_ctl->mutex);
+       down_read(&fs_info->commit_root_sem);
+-      if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
++      /*
++       * If we are in the transaction that populated the free space tree we
++       * can't actually cache from the free space tree as our commit root and
++       * real root are the same, so we could change the contents of the blocks
++       * while caching.  Instead do the slow caching in this case, and after
++       * the transaction has committed we will be safe.
++       */
++      if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
++          !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
+               ret = load_free_space_tree(caching_ctl);
+       else
+               ret = load_extent_tree_free(caching_ctl);
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -146,6 +146,9 @@ enum {
+       BTRFS_FS_STATE_DEV_REPLACING,
+       /* The btrfs_fs_info created for self-tests */
+       BTRFS_FS_STATE_DUMMY_FS_INFO,
++
++      /* Indicate that we can't trust the free space tree for caching yet */
++      BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED,
+ };
+ #define BTRFS_BACKREF_REV_MAX         256
+--- a/fs/btrfs/free-space-tree.c
++++ b/fs/btrfs/free-space-tree.c
+@@ -1152,6 +1152,7 @@ int btrfs_create_free_space_tree(struct
+               return PTR_ERR(trans);
+       set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
++      set_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
+       free_space_root = btrfs_create_tree(trans,
+                                           BTRFS_FREE_SPACE_TREE_OBJECTID);
+       if (IS_ERR(free_space_root)) {
+@@ -1173,11 +1174,18 @@ int btrfs_create_free_space_tree(struct
+       btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
+       btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
+       clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
++      ret = btrfs_commit_transaction(trans);
+-      return btrfs_commit_transaction(trans);
++      /*
++       * Now that we've committed the transaction any reading of our commit
++       * root will be safe, so we can cache from the free space tree now.
++       */
++      clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
++      return ret;
+ abort:
+       clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
++      clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
+       btrfs_abort_transaction(trans, ret);
+       btrfs_end_transaction(trans);
+       return ret;
diff --git a/queue-5.10/crypto-marvel-cesa-fix-tdma-descriptor-on-64-bit.patch b/queue-5.10/crypto-marvel-cesa-fix-tdma-descriptor-on-64-bit.patch
new file mode 100644 (file)
index 0000000..cb7ce88
--- /dev/null
@@ -0,0 +1,39 @@
+From 4f6543f28bb05433d87b6de6c21e9c14c35ecf33 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Wed, 20 Jan 2021 16:40:45 +1100
+Subject: crypto: marvel/cesa - Fix tdma descriptor on 64-bit
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit 4f6543f28bb05433d87b6de6c21e9c14c35ecf33 upstream.
+
+The patch that added src_dma/dst_dma to struct mv_cesa_tdma_desc
+is broken on 64-bit systems as the size of the descriptor has been
+changed.  This patch fixes it by using u32 instead of dma_addr_t.
+
+Fixes: e62291c1d9f4 ("crypto: marvell/cesa - Fix sparse warnings")
+Cc: <stable@vger.kernel.org>
+Reported-by: Sven Auhagen <sven.auhagen@voleatech.de>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/marvell/cesa/cesa.h |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/marvell/cesa/cesa.h
++++ b/drivers/crypto/marvell/cesa/cesa.h
+@@ -300,11 +300,11 @@ struct mv_cesa_tdma_desc {
+       __le32 byte_cnt;
+       union {
+               __le32 src;
+-              dma_addr_t src_dma;
++              u32 src_dma;
+       };
+       union {
+               __le32 dst;
+-              dma_addr_t dst_dma;
++              u32 dst_dma;
+       };
+       __le32 next_dma;
diff --git a/queue-5.10/drivers-soc-atmel-add-null-entry-at-the-end-of-at91_soc_allowed_list.patch b/queue-5.10/drivers-soc-atmel-add-null-entry-at-the-end-of-at91_soc_allowed_list.patch
new file mode 100644 (file)
index 0000000..8fb9522
--- /dev/null
@@ -0,0 +1,37 @@
+From 680896556805d3ad3fa47f6002b87b3041a45ac2 Mon Sep 17 00:00:00 2001
+From: Claudiu Beznea <claudiu.beznea@microchip.com>
+Date: Fri, 22 Jan 2021 14:21:34 +0200
+Subject: drivers: soc: atmel: add null entry at the end of at91_soc_allowed_list[]
+
+From: Claudiu Beznea <claudiu.beznea@microchip.com>
+
+commit 680896556805d3ad3fa47f6002b87b3041a45ac2 upstream.
+
+of_match_node() calls __of_match_node() which loops though the entries of
+matches array. It stops when condition:
+(matches->name[0] || matches->type[0] || matches->compatible[0]) is
+false. Thus, add a null entry at the end of at91_soc_allowed_list[]
+array.
+
+Fixes: caab13b49604 ("drivers: soc: atmel: Avoid calling at91_soc_init on non AT91 SoCs")
+Cc: stable@vger.kernel.org #4.12+
+Signed-off-by: Claudiu Beznea <claudiu.beznea@microchip.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/soc/atmel/soc.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/soc/atmel/soc.c
++++ b/drivers/soc/atmel/soc.c
+@@ -269,7 +269,8 @@ static const struct of_device_id at91_so
+       { .compatible = "atmel,at91rm9200", },
+       { .compatible = "atmel,at91sam9", },
+       { .compatible = "atmel,sama5", },
+-      { .compatible = "atmel,samv7", }
++      { .compatible = "atmel,samv7", },
++      { }
+ };
+ static int __init atmel_soc_device_init(void)
diff --git a/queue-5.10/drivers-soc-atmel-avoid-calling-at91_soc_init-on-non-at91-socs.patch b/queue-5.10/drivers-soc-atmel-avoid-calling-at91_soc_init-on-non-at91-socs.patch
new file mode 100644 (file)
index 0000000..2addb1d
--- /dev/null
@@ -0,0 +1,52 @@
+From caab13b4960416b9fee83169a758eb0f31e65109 Mon Sep 17 00:00:00 2001
+From: Sudeep Holla <sudeep.holla@arm.com>
+Date: Fri, 11 Dec 2020 13:58:46 +0000
+Subject: drivers: soc: atmel: Avoid calling at91_soc_init on non AT91 SoCs
+
+From: Sudeep Holla <sudeep.holla@arm.com>
+
+commit caab13b4960416b9fee83169a758eb0f31e65109 upstream.
+
+Since at91_soc_init is called unconditionally from atmel_soc_device_init,
+we get the following warning on all non AT91 SoCs:
+       " AT91: Could not find identification node"
+
+Fix the same by filtering with allowed AT91 SoC list.
+
+Cc: Nicolas Ferre <nicolas.ferre@microchip.com>
+Cc: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Cc: Ludovic Desroches <ludovic.desroches@microchip.com>
+Cc: stable@vger.kernel.org #4.12+
+Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Link: https://lore.kernel.org/r/20201211135846.1334322-1-sudeep.holla@arm.com
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/soc/atmel/soc.c |   12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/soc/atmel/soc.c
++++ b/drivers/soc/atmel/soc.c
+@@ -265,8 +265,20 @@ struct soc_device * __init at91_soc_init
+       return soc_dev;
+ }
++static const struct of_device_id at91_soc_allowed_list[] __initconst = {
++      { .compatible = "atmel,at91rm9200", },
++      { .compatible = "atmel,at91sam9", },
++      { .compatible = "atmel,sama5", },
++      { .compatible = "atmel,samv7", }
++};
++
+ static int __init atmel_soc_device_init(void)
+ {
++      struct device_node *np = of_find_node_by_path("/");
++
++      if (!of_match_node(at91_soc_allowed_list, np))
++              return 0;
++
+       at91_soc_init(socs);
+       return 0;
diff --git a/queue-5.10/efi-apple-properties-reinstate-support-for-boolean-properties.patch b/queue-5.10/efi-apple-properties-reinstate-support-for-boolean-properties.patch
new file mode 100644 (file)
index 0000000..2331883
--- /dev/null
@@ -0,0 +1,73 @@
+From 355845b738e76445c8522802552146d96cb4afa7 Mon Sep 17 00:00:00 2001
+From: Lukas Wunner <lukas@wunner.de>
+Date: Thu, 31 Dec 2020 06:10:32 +0100
+Subject: efi/apple-properties: Reinstate support for boolean properties
+
+From: Lukas Wunner <lukas@wunner.de>
+
+commit 355845b738e76445c8522802552146d96cb4afa7 upstream.
+
+Since commit 4466bf82821b ("efi/apple-properties: use
+PROPERTY_ENTRY_U8_ARRAY_LEN"), my MacBook Pro issues a -ENODATA error
+when trying to assign EFI properties to the discrete GPU:
+
+pci 0000:01:00.0: assigning 56 device properties
+pci 0000:01:00.0: error -61 assigning properties
+
+That's because some of the properties have no value.  They're booleans
+whose presence can be checked by drivers, e.g. "use-backlight-blanking".
+
+Commit 6e98503dba64 ("efi/apple-properties: Remove redundant attribute
+initialization from unmarshal_key_value_pairs()") employed a trick to
+store such booleans as u8 arrays (which is the data type used for all
+other EFI properties on Macs):  It cleared the property_entry's
+"is_array" flag, thereby denoting that the value is stored inline in the
+property_entry.
+
+Commit 4466bf82821b erroneously removed that trick.  It was probably a
+little fragile to begin with.
+
+Reinstate support for boolean properties by explicitly invoking the
+PROPERTY_ENTRY_BOOL() initializer for properties with zero-length value.
+
+Fixes: 4466bf82821b ("efi/apple-properties: use PROPERTY_ENTRY_U8_ARRAY_LEN")
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Lukas Wunner <lukas@wunner.de>
+Link: https://lore.kernel.org/r/be958bda75331a011d53c696d1deec8dccd06fd2.1609388549.git.lukas@wunner.de
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/firmware/efi/apple-properties.c |   13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/drivers/firmware/efi/apple-properties.c
++++ b/drivers/firmware/efi/apple-properties.c
+@@ -3,8 +3,9 @@
+  * apple-properties.c - EFI device properties on Macs
+  * Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
+  *
+- * Note, all properties are considered as u8 arrays.
+- * To get a value of any of them the caller must use device_property_read_u8_array().
++ * Properties are stored either as:
++ * u8 arrays which can be retrieved with device_property_read_u8_array() or
++ * booleans which can be queried with device_property_present().
+  */
+ #define pr_fmt(fmt) "apple-properties: " fmt
+@@ -88,8 +89,12 @@ static void __init unmarshal_key_value_p
+               entry_data = ptr + key_len + sizeof(val_len);
+               entry_len = val_len - sizeof(val_len);
+-              entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
+-                                                     entry_len);
++              if (entry_len)
++                      entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
++                                                             entry_len);
++              else
++                      entry[i] = PROPERTY_ENTRY_BOOL(key);
++
+               if (dump_properties) {
+                       dev_info(dev, "property: %s\n", key);
+                       print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET,
diff --git a/queue-5.10/kvm-arm64-filter-out-v8.1-events-on-v8.0-hw.patch b/queue-5.10/kvm-arm64-filter-out-v8.1-events-on-v8.0-hw.patch
new file mode 100644 (file)
index 0000000..f4cac2c
--- /dev/null
@@ -0,0 +1,53 @@
+From 9529aaa056edc76b3a41df616c71117ebe11e049 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Thu, 21 Jan 2021 10:56:36 +0000
+Subject: KVM: arm64: Filter out v8.1+ events on v8.0 HW
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit 9529aaa056edc76b3a41df616c71117ebe11e049 upstream.
+
+When running on v8.0 HW, make sure we don't try to advertise
+events in the 0x4000-0x403f range.
+
+Cc: stable@vger.kernel.org
+Fixes: 88865beca9062 ("KVM: arm64: Mask out filtered events in PCMEID{0,1}_EL1")
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20210121105636.1478491-1-maz@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kvm/pmu-emul.c |   10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/kvm/pmu-emul.c
++++ b/arch/arm64/kvm/pmu-emul.c
+@@ -788,7 +788,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *
+ {
+       unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
+       u64 val, mask = 0;
+-      int base, i;
++      int base, i, nr_events;
+       if (!pmceid1) {
+               val = read_sysreg(pmceid0_el0);
+@@ -801,13 +801,17 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *
+       if (!bmap)
+               return val;
++      nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
++
+       for (i = 0; i < 32; i += 8) {
+               u64 byte;
+               byte = bitmap_get_value8(bmap, base + i);
+               mask |= byte << i;
+-              byte = bitmap_get_value8(bmap, 0x4000 + base + i);
+-              mask |= byte << (32 + i);
++              if (nr_events >= (0x4000 + base + 32)) {
++                      byte = bitmap_get_value8(bmap, 0x4000 + base + i);
++                      mask |= byte << (32 + i);
++              }
+       }
+       return val & mask;
diff --git a/queue-5.10/kvm-forbid-the-use-of-tagged-userspace-addresses-for-memslots.patch b/queue-5.10/kvm-forbid-the-use-of-tagged-userspace-addresses-for-memslots.patch
new file mode 100644 (file)
index 0000000..8afd5cf
--- /dev/null
@@ -0,0 +1,47 @@
+From 139bc8a6146d92822c866cf2fd410159c56b3648 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Thu, 21 Jan 2021 12:08:15 +0000
+Subject: KVM: Forbid the use of tagged userspace addresses for memslots
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit 139bc8a6146d92822c866cf2fd410159c56b3648 upstream.
+
+The use of a tagged address could be pretty confusing for the
+whole memslot infrastructure as well as the MMU notifiers.
+
+Forbid it altogether, as it never quite worked the first place.
+
+Cc: stable@vger.kernel.org
+Reported-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/virt/kvm/api.rst |    3 +++
+ virt/kvm/kvm_main.c            |    1 +
+ 2 files changed, 4 insertions(+)
+
+--- a/Documentation/virt/kvm/api.rst
++++ b/Documentation/virt/kvm/api.rst
+@@ -1264,6 +1264,9 @@ field userspace_addr, which must point a
+ the entire memory slot size.  Any object may back this memory, including
+ anonymous memory, ordinary files, and hugetlbfs.
++On architectures that support a form of address tagging, userspace_addr must
++be an untagged address.
++
+ It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr
+ be identical.  This allows large pages in the guest to be backed by large
+ pages in the host.
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1289,6 +1289,7 @@ int __kvm_set_memory_region(struct kvm *
+               return -EINVAL;
+       /* We can read the guest memory with __xxx_user() later on. */
+       if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
++          (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
+            !access_ok((void __user *)(unsigned long)mem->userspace_addr,
+                       mem->memory_size))
+               return -EINVAL;
diff --git a/queue-5.10/kvm-nsvm-cancel-kvm_req_get_nested_state_pages-on-nested-vmexit.patch b/queue-5.10/kvm-nsvm-cancel-kvm_req_get_nested_state_pages-on-nested-vmexit.patch
new file mode 100644 (file)
index 0000000..f3c341d
--- /dev/null
@@ -0,0 +1,72 @@
+From f2c7ef3ba9556d62a7e2bb23b563c6510007d55c Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Thu, 7 Jan 2021 11:38:51 +0200
+Subject: KVM: nSVM: cancel KVM_REQ_GET_NESTED_STATE_PAGES on nested vmexit
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+commit f2c7ef3ba9556d62a7e2bb23b563c6510007d55c upstream.
+
+It is possible to exit the nested guest mode, entered by
+svm_set_nested_state prior to first vm entry to it (e.g due to pending event)
+if the nested run was not pending during the migration.
+
+In this case we must not switch to the nested msr permission bitmap.
+Also add a warning to catch similar cases in the future.
+
+Fixes: a7d5c7ce41ac1 ("KVM: nSVM: delay MSR permission processing to first nested VM run")
+
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Message-Id: <20210107093854.882483-2-mlevitsk@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/svm/nested.c |    3 +++
+ arch/x86/kvm/vmx/nested.c |    2 ++
+ arch/x86/kvm/x86.c        |    4 +++-
+ 3 files changed, 8 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -199,6 +199,7 @@ static bool nested_svm_vmrun_msrpm(struc
+ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
+ {
+       struct vcpu_svm *svm = to_svm(vcpu);
++
+       if (!nested_svm_vmrun_msrpm(svm)) {
+               vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               vcpu->run->internal.suberror =
+@@ -595,6 +596,8 @@ int nested_svm_vmexit(struct vcpu_svm *s
+       svm->nested.vmcb12_gpa = 0;
+       WARN_ON_ONCE(svm->nested.nested_run_pending);
++      kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
++
+       /* in case we halted in L2 */
+       svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -4416,6 +4416,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *
+       /* trying to cancel vmlaunch/vmresume is a bug */
+       WARN_ON_ONCE(vmx->nested.nested_run_pending);
++      kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
++
+       /* Service the TLB flush request for L2 before switching to L1. */
+       if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
+               kvm_vcpu_flush_tlb_current(vcpu);
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8750,7 +8750,9 @@ static int vcpu_enter_guest(struct kvm_v
+       if (kvm_request_pending(vcpu)) {
+               if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
+-                      if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
++                      if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
++                              ;
++                      else if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
+                               r = 0;
+                               goto out;
+                       }
diff --git a/queue-5.10/kvm-nvmx-sync-unsync-d-vmcs02-state-to-vmcs12-on-migration.patch b/queue-5.10/kvm-nvmx-sync-unsync-d-vmcs02-state-to-vmcs12-on-migration.patch
new file mode 100644 (file)
index 0000000..475e702
--- /dev/null
@@ -0,0 +1,53 @@
+From d51e1d3f6b4236e0352407d8a63f5c5f71ce193d Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Thu, 14 Jan 2021 22:54:47 +0200
+Subject: KVM: nVMX: Sync unsync'd vmcs02 state to vmcs12 on migration
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+commit d51e1d3f6b4236e0352407d8a63f5c5f71ce193d upstream.
+
+Even when we are outside the nested guest, some vmcs02 fields
+may not be in sync vs vmcs12.  This is intentional, even across
+nested VM-exit, because the sync can be delayed until the nested
+hypervisor performs a VMCLEAR or a VMREAD/VMWRITE that affects those
+rarely accessed fields.
+
+However, during KVM_GET_NESTED_STATE, the vmcs12 has to be up to date to
+be able to restore it.  To fix that, call copy_vmcs02_to_vmcs12_rare()
+before the vmcs12 contents are copied to userspace.
+
+Fixes: 7952d769c29ca ("KVM: nVMX: Sync rarely accessed guest fields only when needed")
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Message-Id: <20210114205449.8715-2-mlevitsk@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx/nested.c |   13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -6070,11 +6070,14 @@ static int vmx_get_nested_state(struct k
+       if (is_guest_mode(vcpu)) {
+               sync_vmcs02_to_vmcs12(vcpu, vmcs12);
+               sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
+-      } else if (!vmx->nested.need_vmcs12_to_shadow_sync) {
+-              if (vmx->nested.hv_evmcs)
+-                      copy_enlightened_to_vmcs12(vmx);
+-              else if (enable_shadow_vmcs)
+-                      copy_shadow_to_vmcs12(vmx);
++      } else  {
++              copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
++              if (!vmx->nested.need_vmcs12_to_shadow_sync) {
++                      if (vmx->nested.hv_evmcs)
++                              copy_enlightened_to_vmcs12(vmx);
++                      else if (enable_shadow_vmcs)
++                              copy_shadow_to_vmcs12(vmx);
++              }
+       }
+       BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
diff --git a/queue-5.10/kvm-x86-allow-kvm_req_get_nested_state_pages-outside-guest-mode-for-vmx.patch b/queue-5.10/kvm-x86-allow-kvm_req_get_nested_state_pages-outside-guest-mode-for-vmx.patch
new file mode 100644 (file)
index 0000000..92a12f7
--- /dev/null
@@ -0,0 +1,117 @@
+From 9a78e15802a87de2b08dfd1bd88e855201d2c8fa Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Fri, 8 Jan 2021 11:43:08 -0500
+Subject: KVM: x86: allow KVM_REQ_GET_NESTED_STATE_PAGES outside guest mode for VMX
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 9a78e15802a87de2b08dfd1bd88e855201d2c8fa upstream.
+
+VMX also uses KVM_REQ_GET_NESTED_STATE_PAGES for the Hyper-V eVMCS,
+which may need to be loaded outside guest mode.  Therefore we cannot
+WARN in that case.
+
+However, that part of nested_get_vmcs12_pages is _not_ needed at
+vmentry time.  Split it out of KVM_REQ_GET_NESTED_STATE_PAGES handling,
+so that both vmentry and migration (and in the latter case, independent
+of is_guest_mode) do the parts that are needed.
+
+Cc: <stable@vger.kernel.org> # 5.10.x: f2c7ef3ba: KVM: nSVM: cancel KVM_REQ_GET_NESTED_STATE_PAGES
+Cc: <stable@vger.kernel.org> # 5.10.x
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/svm/nested.c |    3 +++
+ arch/x86/kvm/vmx/nested.c |   31 +++++++++++++++++++++++++------
+ arch/x86/kvm/x86.c        |    4 +---
+ 3 files changed, 29 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -200,6 +200,9 @@ static bool svm_get_nested_state_pages(s
+ {
+       struct vcpu_svm *svm = to_svm(vcpu);
++      if (WARN_ON(!is_guest_mode(vcpu)))
++              return true;
++
+       if (!nested_svm_vmrun_msrpm(svm)) {
+               vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               vcpu->run->internal.suberror =
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -3123,13 +3123,9 @@ static int nested_vmx_check_vmentry_hw(s
+       return 0;
+ }
+-static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
++static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
+ {
+-      struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+-      struct kvm_host_map *map;
+-      struct page *page;
+-      u64 hpa;
+       /*
+        * hv_evmcs may end up being not mapped after migration (when
+@@ -3152,6 +3148,17 @@ static bool nested_get_vmcs12_pages(stru
+               }
+       }
++      return true;
++}
++
++static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
++{
++      struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
++      struct vcpu_vmx *vmx = to_vmx(vcpu);
++      struct kvm_host_map *map;
++      struct page *page;
++      u64 hpa;
++
+       if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+               /*
+                * Translate L1 physical address to host physical
+@@ -3220,6 +3227,18 @@ static bool nested_get_vmcs12_pages(stru
+               exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
+       else
+               exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
++
++      return true;
++}
++
++static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
++{
++      if (!nested_get_evmcs_page(vcpu))
++              return false;
++
++      if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
++              return false;
++
+       return true;
+ }
+@@ -6575,7 +6594,7 @@ struct kvm_x86_nested_ops vmx_nested_ops
+       .hv_timer_pending = nested_vmx_preemption_timer_pending,
+       .get_state = vmx_get_nested_state,
+       .set_state = vmx_set_nested_state,
+-      .get_nested_state_pages = nested_get_vmcs12_pages,
++      .get_nested_state_pages = vmx_get_nested_state_pages,
+       .write_log_dirty = nested_vmx_write_pml_buffer,
+       .enable_evmcs = nested_enable_evmcs,
+       .get_evmcs_version = nested_get_evmcs_version,
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8750,9 +8750,7 @@ static int vcpu_enter_guest(struct kvm_v
+       if (kvm_request_pending(vcpu)) {
+               if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
+-                      if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
+-                              ;
+-                      else if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
++                      if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
+                               r = 0;
+                               goto out;
+                       }
diff --git a/queue-5.10/kvm-x86-get-smi-pending-status-correctly.patch b/queue-5.10/kvm-x86-get-smi-pending-status-correctly.patch
new file mode 100644 (file)
index 0000000..c7abfae
--- /dev/null
@@ -0,0 +1,65 @@
+From 1f7becf1b7e21794fc9d460765fe09679bc9b9e0 Mon Sep 17 00:00:00 2001
+From: Jay Zhou <jianjay.zhou@huawei.com>
+Date: Mon, 18 Jan 2021 16:47:20 +0800
+Subject: KVM: x86: get smi pending status correctly
+
+From: Jay Zhou <jianjay.zhou@huawei.com>
+
+commit 1f7becf1b7e21794fc9d460765fe09679bc9b9e0 upstream.
+
+The injection process of smi has two steps:
+
+    Qemu                        KVM
+Step1:
+    cpu->interrupt_request &= \
+        ~CPU_INTERRUPT_SMI;
+    kvm_vcpu_ioctl(cpu, KVM_SMI)
+
+                                call kvm_vcpu_ioctl_smi() and
+                                kvm_make_request(KVM_REQ_SMI, vcpu);
+
+Step2:
+    kvm_vcpu_ioctl(cpu, KVM_RUN, 0)
+
+                                call process_smi() if
+                                kvm_check_request(KVM_REQ_SMI, vcpu) is
+                                true, mark vcpu->arch.smi_pending = true;
+
+The vcpu->arch.smi_pending will be set true in step2, unfortunately if
+vcpu paused between step1 and step2, the kvm_run->immediate_exit will be
+set and vcpu has to exit to Qemu immediately during step2 before mark
+vcpu->arch.smi_pending true.
+During VM migration, Qemu will get the smi pending status from KVM using
+KVM_GET_VCPU_EVENTS ioctl at the downtime, then the smi pending status
+will be lost.
+
+Signed-off-by: Jay Zhou <jianjay.zhou@huawei.com>
+Signed-off-by: Shengen Zhuang <zhuangshengen@huawei.com>
+Message-Id: <20210118084720.1585-1-jianjay.zhou@huawei.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -105,6 +105,7 @@ static u64 __read_mostly cr4_reserved_bi
+ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
+ static void process_nmi(struct kvm_vcpu *vcpu);
++static void process_smi(struct kvm_vcpu *vcpu);
+ static void enter_smm(struct kvm_vcpu *vcpu);
+ static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
+ static void store_regs(struct kvm_vcpu *vcpu);
+@@ -4199,6 +4200,9 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_
+ {
+       process_nmi(vcpu);
++      if (kvm_check_request(KVM_REQ_SMI, vcpu))
++              process_smi(vcpu);
++
+       /*
+        * In guest mode, payload delivery should be deferred,
+        * so that the L1 hypervisor can intercept #PF before
diff --git a/queue-5.10/kvm-x86-pmu-fix-hw_ref_cpu_cycles-event-pseudo-encoding-in-intel_arch_events.patch b/queue-5.10/kvm-x86-pmu-fix-hw_ref_cpu_cycles-event-pseudo-encoding-in-intel_arch_events.patch
new file mode 100644 (file)
index 0000000..d1f90b3
--- /dev/null
@@ -0,0 +1,35 @@
+From 98dd2f108e448988d91e296173e773b06fb978b8 Mon Sep 17 00:00:00 2001
+From: Like Xu <like.xu@linux.intel.com>
+Date: Wed, 30 Dec 2020 16:19:16 +0800
+Subject: KVM: x86/pmu: Fix HW_REF_CPU_CYCLES event pseudo-encoding in intel_arch_events[]
+
+From: Like Xu <like.xu@linux.intel.com>
+
+commit 98dd2f108e448988d91e296173e773b06fb978b8 upstream.
+
+The HW_REF_CPU_CYCLES event on the fixed counter 2 is pseudo-encoded as
+0x0300 in the intel_perfmon_event_map[]. Correct its usage.
+
+Fixes: 62079d8a4312 ("KVM: PMU: add proper support for fixed counter 2")
+Signed-off-by: Like Xu <like.xu@linux.intel.com>
+Message-Id: <20201230081916.63417-1-like.xu@linux.intel.com>
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx/pmu_intel.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/vmx/pmu_intel.c
++++ b/arch/x86/kvm/vmx/pmu_intel.c
+@@ -29,7 +29,7 @@ static struct kvm_event_hw_type_mapping
+       [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
+       [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+       [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
+-      [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
++      [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
+ };
+ /* mapping between fixed pmc index and intel_arch_events array */
diff --git a/queue-5.10/kvm-x86-pmu-fix-ubsan-shift-out-of-bounds-warning-in-intel_pmu_refresh.patch b/queue-5.10/kvm-x86-pmu-fix-ubsan-shift-out-of-bounds-warning-in-intel_pmu_refresh.patch
new file mode 100644 (file)
index 0000000..f6c9016
--- /dev/null
@@ -0,0 +1,66 @@
+From e61ab2a320c3dfd6209efe18a575979e07470597 Mon Sep 17 00:00:00 2001
+From: Like Xu <like.xu@linux.intel.com>
+Date: Mon, 18 Jan 2021 10:58:00 +0800
+Subject: KVM: x86/pmu: Fix UBSAN shift-out-of-bounds warning in intel_pmu_refresh()
+
+From: Like Xu <like.xu@linux.intel.com>
+
+commit e61ab2a320c3dfd6209efe18a575979e07470597 upstream.
+
+Since we know vPMU will not work properly when (1) the guest bit_width(s)
+of the [gp|fixed] counters are greater than the host ones, or (2) guest
+requested architectural events exceeds the range supported by the host, so
+we can setup a smaller left shift value and refresh the guest cpuid entry,
+thus fixing the following UBSAN shift-out-of-bounds warning:
+
+shift exponent 197 is too large for 64-bit type 'long long unsigned int'
+
+Call Trace:
+ __dump_stack lib/dump_stack.c:79 [inline]
+ dump_stack+0x107/0x163 lib/dump_stack.c:120
+ ubsan_epilogue+0xb/0x5a lib/ubsan.c:148
+ __ubsan_handle_shift_out_of_bounds.cold+0xb1/0x181 lib/ubsan.c:395
+ intel_pmu_refresh.cold+0x75/0x99 arch/x86/kvm/vmx/pmu_intel.c:348
+ kvm_vcpu_after_set_cpuid+0x65a/0xf80 arch/x86/kvm/cpuid.c:177
+ kvm_vcpu_ioctl_set_cpuid2+0x160/0x440 arch/x86/kvm/cpuid.c:308
+ kvm_arch_vcpu_ioctl+0x11b6/0x2d70 arch/x86/kvm/x86.c:4709
+ kvm_vcpu_ioctl+0x7b9/0xdb0 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3386
+ vfs_ioctl fs/ioctl.c:48 [inline]
+ __do_sys_ioctl fs/ioctl.c:753 [inline]
+ __se_sys_ioctl fs/ioctl.c:739 [inline]
+ __x64_sys_ioctl+0x193/0x200 fs/ioctl.c:739
+ do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Reported-by: syzbot+ae488dc136a4cc6ba32b@syzkaller.appspotmail.com
+Signed-off-by: Like Xu <like.xu@linux.intel.com>
+Message-Id: <20210118025800.34620-1-like.xu@linux.intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx/pmu_intel.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/kvm/vmx/pmu_intel.c
++++ b/arch/x86/kvm/vmx/pmu_intel.c
+@@ -345,7 +345,9 @@ static void intel_pmu_refresh(struct kvm
+       pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
+                                        x86_pmu.num_counters_gp);
++      eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
+       pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
++      eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
+       pmu->available_event_types = ~entry->ebx &
+                                       ((1ull << eax.split.mask_length) - 1);
+@@ -355,6 +357,8 @@ static void intel_pmu_refresh(struct kvm
+               pmu->nr_arch_fixed_counters =
+                       min_t(int, edx.split.num_counters_fixed,
+                             x86_pmu.num_counters_fixed);
++              edx.split.bit_width_fixed = min_t(int,
++                      edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
+               pmu->counter_bitmask[KVM_PMC_FIXED] =
+                       ((u64)1 << edx.split.bit_width_fixed) - 1;
+       }
index 4dd9a5de65a0db2a6aeb23067ccf2dbb6014db51..c9dbe881254f153b9417ec9e69b76f5e068e2097 100644 (file)
@@ -30,3 +30,17 @@ s390-uv-fix-sysfs-max-number-of-vcpus-reporting.patch
 s390-vfio-ap-no-need-to-disable-irq-after-queue-reset.patch
 pm-hibernate-flush-swap-writer-after-marking.patch
 x86-entry-emit-a-symbol-for-register-restoring-thunk.patch
+efi-apple-properties-reinstate-support-for-boolean-properties.patch
+crypto-marvel-cesa-fix-tdma-descriptor-on-64-bit.patch
+drivers-soc-atmel-avoid-calling-at91_soc_init-on-non-at91-socs.patch
+drivers-soc-atmel-add-null-entry-at-the-end-of-at91_soc_allowed_list.patch
+btrfs-fix-lockdep-warning-due-to-seqcount_mutex-on-32bit-arch.patch
+btrfs-fix-possible-free-space-tree-corruption-with-online-conversion.patch
+kvm-x86-pmu-fix-hw_ref_cpu_cycles-event-pseudo-encoding-in-intel_arch_events.patch
+kvm-x86-pmu-fix-ubsan-shift-out-of-bounds-warning-in-intel_pmu_refresh.patch
+kvm-arm64-filter-out-v8.1-events-on-v8.0-hw.patch
+kvm-nsvm-cancel-kvm_req_get_nested_state_pages-on-nested-vmexit.patch
+kvm-x86-allow-kvm_req_get_nested_state_pages-outside-guest-mode-for-vmx.patch
+kvm-nvmx-sync-unsync-d-vmcs02-state-to-vmcs12-on-migration.patch
+kvm-x86-get-smi-pending-status-correctly.patch
+kvm-forbid-the-use-of-tagged-userspace-addresses-for-memslots.patch