git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.15
author Sasha Levin <sashal@kernel.org>
Sat, 6 Aug 2022 15:31:10 +0000 (11:31 -0400)
committer Sasha Levin <sashal@kernel.org>
Sat, 6 Aug 2022 15:31:10 +0000 (11:31 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.15/kvm-selftests-make-hyperv_clock-selftest-more-stable.patch [new file with mode: 0644]
queue-5.15/kvm-x86-do-not-report-a-vcpu-as-preempted-outside-in.patch [new file with mode: 0644]
queue-5.15/kvm-x86-do-not-set-st-preempted-when-going-back-to-u.patch [new file with mode: 0644]
queue-5.15/kvm-x86-svm-add-__gfp_account-to-__sev_dbg_-en-de-cr.patch [new file with mode: 0644]
queue-5.15/selftests-kvm-handle-compiler-optimizations-in-ucall.patch [new file with mode: 0644]
queue-5.15/series
queue-5.15/tools-kvm_stat-fix-display-of-error-when-multiple-pr.patch [new file with mode: 0644]

diff --git a/queue-5.15/kvm-selftests-make-hyperv_clock-selftest-more-stable.patch b/queue-5.15/kvm-selftests-make-hyperv_clock-selftest-more-stable.patch
new file mode 100644
index 0000000..8de6455
--- /dev/null
@@ -0,0 +1,75 @@
+From 751876953b7f9686f73d96d8686f1ae06bf84d54 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Jun 2022 16:43:22 +0200
+Subject: KVM: selftests: Make hyperv_clock selftest more stable
+
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+
+[ Upstream commit eae260be3a0111a28fe95923e117a55dddec0384 ]
+
+hyperv_clock doesn't always give a stable test result, especially with
+AMD CPUs. The test compares the Hyper-V MSR clocksource (acquired either
+with rdmsr() from within the guest or with KVM_GET_MSRS from the host)
+against rdtsc(). To increase the accuracy, increase the measured delay
+(done with a nop loop) by two orders of magnitude and take the mean of
+the rdtsc() values before and after each rdmsr()/KVM_GET_MSRS read.
+
+Reported-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
+Tested-by: Maxim Levitsky <mlevitsk@redhat.com>
+Message-Id: <20220601144322.1968742-1-vkuznets@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/kvm/x86_64/hyperv_clock.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
+index e0b2bb1339b1..3330fb183c68 100644
+--- a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
++++ b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
+@@ -44,7 +44,7 @@ static inline void nop_loop(void)
+ {
+       int i;
+-      for (i = 0; i < 1000000; i++)
++      for (i = 0; i < 100000000; i++)
+               asm volatile("nop");
+ }
+@@ -56,12 +56,14 @@ static inline void check_tsc_msr_rdtsc(void)
+       tsc_freq = rdmsr(HV_X64_MSR_TSC_FREQUENCY);
+       GUEST_ASSERT(tsc_freq > 0);
+-      /* First, check MSR-based clocksource */
++      /* For increased accuracy, take mean rdtsc() before and after rdmsr() */
+       r1 = rdtsc();
+       t1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
++      r1 = (r1 + rdtsc()) / 2;
+       nop_loop();
+       r2 = rdtsc();
+       t2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
++      r2 = (r2 + rdtsc()) / 2;
+       GUEST_ASSERT(r2 > r1 && t2 > t1);
+@@ -181,12 +183,14 @@ static void host_check_tsc_msr_rdtsc(struct kvm_vm *vm)
+       tsc_freq = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TSC_FREQUENCY);
+       TEST_ASSERT(tsc_freq > 0, "TSC frequency must be nonzero");
+-      /* First, check MSR-based clocksource */
++      /* For increased accuracy, take mean rdtsc() before and after ioctl */
+       r1 = rdtsc();
+       t1 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT);
++      r1 = (r1 + rdtsc()) / 2;
+       nop_loop();
+       r2 = rdtsc();
+       t2 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT);
++      r2 = (r2 + rdtsc()) / 2;
+       TEST_ASSERT(t2 > t1, "Time reference MSR is not monotonic (%ld <= %ld)", t1, t2);
+-- 
+2.35.1
+
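
The bracketing trick in the patch above generalizes beyond the Hyper-V selftest. Below is a minimal user-space C sketch of the same technique, for illustration only: averaging rdtsc() samples taken immediately before and after a slower clock read attributes the reading to the midpoint of the read. clock_gettime() and the loop count are assumed stand-ins, not the selftest's actual clocksource or code.

/*
 * Sketch: attribute a slow clock reading to the midpoint of the
 * surrounding rdtsc() samples, as the patch above does for the
 * Hyper-V MSR reads.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <x86intrin.h>

static uint64_t read_slow_clock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

int main(void)
{
	uint64_t r1, r2, t1, t2;

	r1 = __rdtsc();
	t1 = read_slow_clock();
	r1 = (r1 + __rdtsc()) / 2;	/* midpoint of the t1 read */

	for (volatile int i = 0; i < 100000000; i++)
		;			/* measured delay, as in the patch */

	r2 = __rdtsc();
	t2 = read_slow_clock();
	r2 = (r2 + __rdtsc()) / 2;	/* midpoint of the t2 read */

	printf("tsc delta %llu, clock delta %llu ns\n",
	       (unsigned long long)(r2 - r1), (unsigned long long)(t2 - t1));
	return 0;
}
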
diff --git a/queue-5.15/kvm-x86-do-not-report-a-vcpu-as-preempted-outside-in.patch b/queue-5.15/kvm-x86-do-not-report-a-vcpu-as-preempted-outside-in.patch
new file mode 100644
index 0000000..2a1b971
--- /dev/null
@@ -0,0 +1,136 @@
+From 3e704ddf65ff462fdb8dcfb79bf0ef3f4568a8df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Jun 2022 10:09:03 -0400
+Subject: KVM: x86: do not report a vCPU as preempted outside instruction
+ boundaries
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+[ Upstream commit 6cd88243c7e03845a450795e134b488fc2afb736 ]
+
+If a vCPU is outside guest mode and is scheduled out, it might be in the
+process of making a memory access.  A problem occurs if another vCPU uses
+the PV TLB flush feature during the period when the vCPU is scheduled
+out, and a virtual address has already been translated but has not yet
+been accessed, because this is equivalent to using a stale TLB entry.
+
+To avoid this, only report a vCPU as preempted if KVM is sure that the
+guest is at an instruction boundary.  A rescheduling request will be
+delivered to the host physical CPU as an external interrupt, so for
+simplicity consider any vmexit as *not* being on an instruction
+boundary, except for external interrupts.
+
+It would in principle be okay to report the vCPU as preempted also
+if it is sleeping in kvm_vcpu_block(): a TLB flush IPI will incur the
+vmentry/vmexit overhead unnecessarily, and optimistic spinning is
+also unlikely to succeed.  However, leave it for later because right
+now kvm_vcpu_check_block() is doing memory accesses.  Even
+though the TLB flush issue only applies to virtual memory addresses,
+it's very much preferable to be conservative.
+
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/include/asm/kvm_host.h |  3 +++
+ arch/x86/kvm/svm/svm.c          |  2 ++
+ arch/x86/kvm/vmx/vmx.c          |  1 +
+ arch/x86/kvm/x86.c              | 22 ++++++++++++++++++++++
+ 4 files changed, 28 insertions(+)
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 49d814b2a341..a35f5e23fc2a 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -642,6 +642,7 @@ struct kvm_vcpu_arch {
+       u64 ia32_misc_enable_msr;
+       u64 smbase;
+       u64 smi_count;
++      bool at_instruction_boundary;
+       bool tpr_access_reporting;
+       bool xsaves_enabled;
+       u64 ia32_xss;
+@@ -1271,6 +1272,8 @@ struct kvm_vcpu_stat {
+       u64 nested_run;
+       u64 directed_yield_attempted;
+       u64 directed_yield_successful;
++      u64 preemption_reported;
++      u64 preemption_other;
+       u64 guest_mode;
+ };
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 26f2da1590ed..5b51156712f7 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -4263,6 +4263,8 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
+ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
+ {
++      if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
++              vcpu->arch.at_instruction_boundary = true;
+ }
+ static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index a236104fc743..359292767e17 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6471,6 +6471,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
+               return;
+       handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
++      vcpu->arch.at_instruction_boundary = true;
+ }
+ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index bd410926fda5..b2436796e03c 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -277,6 +277,8 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+       STATS_DESC_COUNTER(VCPU, nested_run),
+       STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
+       STATS_DESC_COUNTER(VCPU, directed_yield_successful),
++      STATS_DESC_COUNTER(VCPU, preemption_reported),
++      STATS_DESC_COUNTER(VCPU, preemption_other),
+       STATS_DESC_ICOUNTER(VCPU, guest_mode)
+ };
+@@ -4371,6 +4373,19 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
+       struct kvm_memslots *slots;
+       static const u8 preempted = KVM_VCPU_PREEMPTED;
++      /*
++       * The vCPU can be marked preempted if and only if the VM-Exit was on
++       * an instruction boundary and will not trigger guest emulation of any
++       * kind (see vcpu_run).  Vendor specific code controls (conservatively)
++       * when this is true, for example allowing the vCPU to be marked
++       * preempted if and only if the VM-Exit was due to a host interrupt.
++       */
++      if (!vcpu->arch.at_instruction_boundary) {
++              vcpu->stat.preemption_other++;
++              return;
++      }
++
++      vcpu->stat.preemption_reported++;
+       if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+               return;
+@@ -9934,6 +9949,13 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
+       vcpu->arch.l1tf_flush_l1d = true;
+       for (;;) {
++              /*
++               * If another guest vCPU requests a PV TLB flush in the middle
++               * of instruction emulation, the rest of the emulation could
++               * use a stale page translation. Assume that any code after
++               * this point can start executing an instruction.
++               */
++              vcpu->arch.at_instruction_boundary = false;
+               if (kvm_vcpu_running(vcpu)) {
+                       r = vcpu_enter_guest(vcpu);
+               } else {
+-- 
+2.35.1
+
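
For readers following the control flow, here is a compact, self-contained toy model of the handshake this patch introduces; the struct and function names are illustrative stand-ins, not KVM's real API. The flag is cleared before every iteration of the run loop, set only on the one exit reason known to land on an instruction boundary, and consulted before preemption is advertised.

#include <stdbool.h>
#include <stdio.h>

enum exit_reason { EXIT_INTR, EXIT_MMIO, EXIT_HLT };

struct toy_vcpu {
	bool at_instruction_boundary;
	unsigned long preemption_reported;
	unsigned long preemption_other;
};

/* Mirrors svm_handle_exit_irqoff(): only a host interrupt guarantees
 * the guest stopped between instructions. */
static void handle_exit_irqoff(struct toy_vcpu *vcpu, enum exit_reason why)
{
	if (why == EXIT_INTR)
		vcpu->at_instruction_boundary = true;
}

/* Mirrors the check added to kvm_steal_time_set_preempted(). */
static void set_preempted(struct toy_vcpu *vcpu)
{
	if (!vcpu->at_instruction_boundary) {
		vcpu->preemption_other++;	/* stay quiet: stale-TLB hazard */
		return;
	}
	vcpu->preemption_reported++;
	/* ...the real code writes KVM_VCPU_PREEMPTED to steal time here... */
}

int main(void)
{
	struct toy_vcpu vcpu = { 0 };
	enum exit_reason exits[] = { EXIT_MMIO, EXIT_INTR, EXIT_HLT };

	for (int i = 0; i < 3; i++) {
		vcpu.at_instruction_boundary = false;	/* reset, as in vcpu_run() */
		handle_exit_irqoff(&vcpu, exits[i]);
		set_preempted(&vcpu);
	}
	printf("reported=%lu other=%lu\n",
	       vcpu.preemption_reported, vcpu.preemption_other);
	return 0;
}
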
diff --git a/queue-5.15/kvm-x86-do-not-set-st-preempted-when-going-back-to-u.patch b/queue-5.15/kvm-x86-do-not-set-st-preempted-when-going-back-to-u.patch
new file mode 100644
index 0000000..211ce32
--- /dev/null
@@ -0,0 +1,81 @@
+From 3a2baf1cc573dfdb45c2aa09215c96707819ca33 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Jun 2022 10:07:11 -0400
+Subject: KVM: x86: do not set st->preempted when going back to user space
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+[ Upstream commit 54aa83c90198e68eee8b0850c749bc70efb548da ]
+
+Similar to the Xen path, only change the vCPU's reported state if the vCPU
+was actually preempted.  The reason for KVM's old behavior is that, for
+example, optimistic spinning might not be a good idea if the guest is
+doing repeated exits to userspace; however, the old behavior is confusing
+and unlikely to make a difference, because well-tuned guests will hardly
+ever exit KVM_RUN in the first place.
+
+Suggested-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/x86.c | 26 ++++++++++++++------------
+ arch/x86/kvm/xen.h |  6 ++++--
+ 2 files changed, 18 insertions(+), 14 deletions(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index b2436796e03c..8a6ee5d8adc7 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4415,19 +4415,21 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+ {
+       int idx;
+-      if (vcpu->preempted && !vcpu->arch.guest_state_protected)
+-              vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
++      if (vcpu->preempted) {
++              if (!vcpu->arch.guest_state_protected)
++                      vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
+-      /*
+-       * Take the srcu lock as memslots will be accessed to check the gfn
+-       * cache generation against the memslots generation.
+-       */
+-      idx = srcu_read_lock(&vcpu->kvm->srcu);
+-      if (kvm_xen_msr_enabled(vcpu->kvm))
+-              kvm_xen_runstate_set_preempted(vcpu);
+-      else
+-              kvm_steal_time_set_preempted(vcpu);
+-      srcu_read_unlock(&vcpu->kvm->srcu, idx);
++              /*
++               * Take the srcu lock as memslots will be accessed to check the gfn
++               * cache generation against the memslots generation.
++               */
++              idx = srcu_read_lock(&vcpu->kvm->srcu);
++              if (kvm_xen_msr_enabled(vcpu->kvm))
++                      kvm_xen_runstate_set_preempted(vcpu);
++              else
++                      kvm_steal_time_set_preempted(vcpu);
++              srcu_read_unlock(&vcpu->kvm->srcu, idx);
++      }
+       static_call(kvm_x86_vcpu_put)(vcpu);
+       vcpu->arch.last_host_tsc = rdtsc();
+diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h
+index cc0cf5f37450..a7693a286e40 100644
+--- a/arch/x86/kvm/xen.h
++++ b/arch/x86/kvm/xen.h
+@@ -97,8 +97,10 @@ static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
+        * behalf of the vCPU. Only if the VMM does actually block
+        * does it need to enter RUNSTATE_blocked.
+        */
+-      if (vcpu->preempted)
+-              kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
++      if (WARN_ON_ONCE(!vcpu->preempted))
++              return;
++
++      kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
+ }
+ /* 32-bit compatibility definitions, also used natively in 32-bit build */
+-- 
+2.35.1
+
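
The shape of this change is easy to see in isolation. The sketch below is a toy model with made-up types, not KVM's code: the runstate/steal-time update is now reached only when the scheduler actually preempted the vCPU, so a voluntary exit back to userspace no longer marks the vCPU preempted.

#include <stdbool.h>
#include <stdio.h>

struct toy_vcpu {
	bool preempted;		/* set by the scheduler's preempt notifier */
};

static void record_preempted(struct toy_vcpu *vcpu)
{
	printf("marking vCPU %p as preempted\n", (void *)vcpu);
}

static void vcpu_put(struct toy_vcpu *vcpu)
{
	if (vcpu->preempted)	/* the gate this patch adds */
		record_preempted(vcpu);
	/* unconditional put work (saving the TSC, etc.) continues here */
}

int main(void)
{
	struct toy_vcpu involuntary = { .preempted = true };
	struct toy_vcpu voluntary = { .preempted = false };

	vcpu_put(&involuntary);	/* records preemption */
	vcpu_put(&voluntary);	/* now skips the runstate update */
	return 0;
}
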
diff --git a/queue-5.15/kvm-x86-svm-add-__gfp_account-to-__sev_dbg_-en-de-cr.patch b/queue-5.15/kvm-x86-svm-add-__gfp_account-to-__sev_dbg_-en-de-cr.patch
new file mode 100644
index 0000000..72347f2
--- /dev/null
@@ -0,0 +1,47 @@
+From 140a123e10e8c366edbe3b618e472d7a0b919528 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Jun 2022 17:18:58 +0000
+Subject: KVM: x86/svm: add __GFP_ACCOUNT to __sev_dbg_{en,de}crypt_user()
+
+From: Mingwei Zhang <mizhang@google.com>
+
+[ Upstream commit ebdec859faa8cfbfef9f6c1f83d79dd6c8f4ab8c ]
+
+Add the accounting flag when allocating pages within the SEV functions,
+since these memory pages should belong to the individual VM.
+
+No functional change intended.
+
+Signed-off-by: Mingwei Zhang <mizhang@google.com>
+Message-Id: <20220623171858.2083637-1-mizhang@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/svm/sev.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 4a4dc105552e..86f3096f042f 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -832,7 +832,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
+       /* If source buffer is not aligned then use an intermediate buffer */
+       if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
+-              src_tpage = alloc_page(GFP_KERNEL);
++              src_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
+               if (!src_tpage)
+                       return -ENOMEM;
+@@ -853,7 +853,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
+       if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
+               int dst_offset;
+-              dst_tpage = alloc_page(GFP_KERNEL);
++              dst_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
+               if (!dst_tpage) {
+                       ret = -ENOMEM;
+                       goto e_free;
+-- 
+2.35.1
+
diff --git a/queue-5.15/selftests-kvm-handle-compiler-optimizations-in-ucall.patch b/queue-5.15/selftests-kvm-handle-compiler-optimizations-in-ucall.patch
new file mode 100644
index 0000000..48b1e29
--- /dev/null
@@ -0,0 +1,61 @@
+From 0fb128efc5c97f240a528b8f256abc4c6432475f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Jun 2022 18:57:06 +0000
+Subject: selftests: KVM: Handle compiler optimizations in ucall
+
+From: Raghavendra Rao Ananta <rananta@google.com>
+
+[ Upstream commit 9e2f6498efbbc880d7caa7935839e682b64fe5a6 ]
+
+The selftests, when built with newer versions of clang, are found
+to have over-optimized the guests' ucall() function, eliminating
+the stores to uc.cmd (perhaps because there are no immediate
+readers). This results in the userspace side always reading a value
+of '0', causing multiple test failures.
+
+As a result, prevent the compiler from optimizing away the stores in
+ucall() by using WRITE_ONCE().
+
+Suggested-by: Ricardo Koller <ricarkol@google.com>
+Suggested-by: Reiji Watanabe <reijiw@google.com>
+Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
+Message-Id: <20220615185706.1099208-1-rananta@google.com>
+Reviewed-by: Andrew Jones <drjones@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/kvm/lib/aarch64/ucall.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/tools/testing/selftests/kvm/lib/aarch64/ucall.c b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
+index e0b0164e9af8..be1d9728c4ce 100644
+--- a/tools/testing/selftests/kvm/lib/aarch64/ucall.c
++++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
+@@ -73,20 +73,19 @@ void ucall_uninit(struct kvm_vm *vm)
+ void ucall(uint64_t cmd, int nargs, ...)
+ {
+-      struct ucall uc = {
+-              .cmd = cmd,
+-      };
++      struct ucall uc = {};
+       va_list va;
+       int i;
++      WRITE_ONCE(uc.cmd, cmd);
+       nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
+       va_start(va, nargs);
+       for (i = 0; i < nargs; ++i)
+-              uc.args[i] = va_arg(va, uint64_t);
++              WRITE_ONCE(uc.args[i], va_arg(va, uint64_t));
+       va_end(va);
+-      *ucall_exit_mmio_addr = (vm_vaddr_t)&uc;
++      WRITE_ONCE(*ucall_exit_mmio_addr, (vm_vaddr_t)&uc);
+ }
+ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
+-- 
+2.35.1
+
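
The failure mode is reproducible outside of KVM. Below is a user-space C sketch of the idea: a plain store the compiler can prove has no reader may be eliminated as dead code, while a volatile store (essentially what the selftests' WRITE_ONCE() expands to) must be emitted. The WRITE_ONCE macro and all names here are local stand-ins, not the selftest headers' definitions.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the selftests' WRITE_ONCE(): force the store
 * through a volatile lvalue so the compiler may not elide it. */
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

struct ucall_like {
	uint64_t cmd;
	uint64_t args[6];
};

static volatile uint64_t exit_mmio_addr;	/* stand-in for the MMIO slot */

static void report(uint64_t cmd)
{
	struct ucall_like uc = { 0 };

	/* No code the compiler can see ever reads uc.cmd, but an
	 * external observer (the host reading guest memory) does;
	 * the volatile store keeps it alive. */
	WRITE_ONCE(uc.cmd, cmd);

	/* Publish the address; the host would decode uc on this "exit". */
	exit_mmio_addr = (uint64_t)(uintptr_t)&uc;
}

int main(void)
{
	report(1);
	printf("published ucall at %#llx\n",
	       (unsigned long long)exit_mmio_addr);
	return 0;
}
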
diff --git a/queue-5.15/series b/queue-5.15/series
index a02c8cde89536287e1a768b7392b6be04dcca1f5..1bbdbad8721ad461a2d6b659d3c5d3f2e4ea7aa8 100644
@@ -7,3 +7,9 @@ acpi-video-force-backlight-native-for-some-tongfang-devices.patch
 acpi-video-shortening-quirk-list-by-identifying-clevo-by-board_name-only.patch
 acpi-apei-better-fix-to-avoid-spamming-the-console-with-old-error-logs.patch
 crypto-arm64-poly1305-fix-a-read-out-of-bound.patch
+kvm-x86-do-not-report-a-vcpu-as-preempted-outside-in.patch
+kvm-x86-do-not-set-st-preempted-when-going-back-to-u.patch
+kvm-selftests-make-hyperv_clock-selftest-more-stable.patch
+tools-kvm_stat-fix-display-of-error-when-multiple-pr.patch
+selftests-kvm-handle-compiler-optimizations-in-ucall.patch
+kvm-x86-svm-add-__gfp_account-to-__sev_dbg_-en-de-cr.patch
diff --git a/queue-5.15/tools-kvm_stat-fix-display-of-error-when-multiple-pr.patch b/queue-5.15/tools-kvm_stat-fix-display-of-error-when-multiple-pr.patch
new file mode 100644
index 0000000..4dc87d8
--- /dev/null
@@ -0,0 +1,64 @@
+From 47bfda9f243a2a84deee99ee56504a51f6c6cbd6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Jun 2022 15:11:41 +0300
+Subject: tools/kvm_stat: fix display of error when multiple processes are
+ found
+
+From: Dmitry Klochkov <kdmitry556@gmail.com>
+
+[ Upstream commit 933b5f9f98da29af646b51b36a0753692908ef64 ]
+
+Instead of printing an error message, the kvm_stat script fails when we
+restrict statistics to a guest by its name and there are multiple guests
+with that name:
+
+  # kvm_stat -g my_vm
+  Traceback (most recent call last):
+    File "/usr/bin/kvm_stat", line 1819, in <module>
+      main()
+    File "/usr/bin/kvm_stat", line 1779, in main
+      options = get_options()
+    File "/usr/bin/kvm_stat", line 1718, in get_options
+      options = argparser.parse_args()
+    File "/usr/lib64/python3.10/argparse.py", line 1825, in parse_args
+      args, argv = self.parse_known_args(args, namespace)
+    File "/usr/lib64/python3.10/argparse.py", line 1858, in parse_known_args
+      namespace, args = self._parse_known_args(args, namespace)
+    File "/usr/lib64/python3.10/argparse.py", line 2067, in _parse_known_args
+      start_index = consume_optional(start_index)
+    File "/usr/lib64/python3.10/argparse.py", line 2007, in consume_optional
+      take_action(action, args, option_string)
+    File "/usr/lib64/python3.10/argparse.py", line 1935, in take_action
+      action(self, namespace, argument_values, option_string)
+    File "/usr/bin/kvm_stat", line 1649, in __call__
+      ' to specify the desired pid'.format(" ".join(pids)))
+  TypeError: sequence item 0: expected str instance, int found
+
+To avoid this, the integer pid values need to be converted to strings
+before being passed to join().
+
+Signed-off-by: Dmitry Klochkov <kdmitry556@gmail.com>
+Message-Id: <20220614121141.160689-1-kdmitry556@gmail.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/kvm/kvm_stat/kvm_stat | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
+index 5a5bd74f55bd..9c366b3a676d 100755
+--- a/tools/kvm/kvm_stat/kvm_stat
++++ b/tools/kvm/kvm_stat/kvm_stat
+@@ -1646,7 +1646,8 @@ Press any other key to refresh statistics immediately.
+                          .format(values))
+             if len(pids) > 1:
+                 sys.exit('Error: Multiple processes found (pids: {}). Use "-p"'
+-                         ' to specify the desired pid'.format(" ".join(pids)))
++                         ' to specify the desired pid'
++                         .format(" ".join(map(str, pids))))
+             namespace.pid = pids[0]
+     argparser = argparse.ArgumentParser(description=description_text,
+-- 
+2.35.1
+
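
Since this last patch is to a Python tool, a short Python reproduction (with assumed pid values, not taken from a real run) shows both the failure and the fix: str.join() requires string items, so integer pids must be mapped through str() first.

pids = [1234, 5678]  # assumed example pids

try:
    " ".join(pids)  # raises TypeError: expected str instance, int found
except TypeError as err:
    print("broken:", err)

print("fixed:", " ".join(map(str, pids)))
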