From bc3c51cabd27911f6b6c25722ba71b02de162077 Mon Sep 17 00:00:00 2001
From: Paul Mackerras <paulus@ozlabs.org>
Date: Wed, 29 May 2019 11:54:00 +1000
Subject: KVM: PPC: Book3S: Use new mutex to synchronize access to rtas token
 list

[ Upstream commit 1659e27d2bc1ef47b6d031abe01b467f18cb72d9 ]

Currently the Book 3S KVM code uses kvm->lock to synchronize access
to the kvm->arch.rtas_tokens list. Because this list is scanned
inside kvmppc_rtas_hcall(), which is called with the vcpu mutex held,
taking kvm->lock causes a lock inversion problem, which could lead to
a deadlock.

To fix this, we add a new mutex, kvm->arch.rtas_token_lock, which nests
inside the vcpu mutexes, and use that instead of kvm->lock when
accessing the rtas token list.

This removes the lockdep_assert_held() in kvmppc_rtas_tokens_free().
At this point we don't hold the new mutex, but that is OK because
kvmppc_rtas_tokens_free() is only called when the whole VM is being
destroyed, and at that point nothing can be looking up a token in
the list.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
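
Lock-ordering sketch (illustrative only, not part of the applied diff):
on the vcpu ioctl path the vcpu mutex is already held, per the commit
message, and the new kvm->arch.rtas_token_lock nests inside it, so
kvm->lock is no longer taken there. kvmppc_rtas_hcall() is reduced
here to its locking skeleton; everything else is elided.

int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
{
	struct rtas_token_definition *d;
	int rc = -ENOENT;

	/* The caller already holds vcpu->mutex on this path. */

	/* New leaf lock, nesting inside vcpu->mutex.  Taking kvm->lock
	 * here instead (as before this patch) inverts against paths
	 * that acquire kvm->lock before a vcpu mutex.
	 */
	mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
	list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
		/* ... token lookup and handler dispatch elided ... */
	}
	mutex_unlock(&vcpu->kvm->arch.rtas_token_lock);

	return rc;
}

kvm_vm_ioctl_rtas_define_token() takes the same mutex without holding
any vcpu mutex, which is consistent with this nesting.
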
 arch/powerpc/include/asm/kvm_host.h |  1 +
 arch/powerpc/kvm/book3s.c           |  1 +
 arch/powerpc/kvm/book3s_rtas.c      | 14 ++++++--------
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index bccc5051249e..2b6049e83970 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -299,6 +299,7 @@ struct kvm_arch {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct list_head spapr_tce_tables;
 	struct list_head rtas_tokens;
+	struct mutex rtas_token_lock;
 	DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
 #endif
 #ifdef CONFIG_KVM_MPIC
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 87348e498c89..281f074581a3 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -840,6 +840,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 #ifdef CONFIG_PPC64
 	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
 	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
+	mutex_init(&kvm->arch.rtas_token_lock);
 #endif
 
 	return kvm->arch.kvm_ops->init_vm(kvm);
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index 2d3b2b1cc272..8f2355138f80 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -146,7 +146,7 @@ static int rtas_token_undefine(struct kvm *kvm, char *name)
 {
 	struct rtas_token_definition *d, *tmp;
 
-	lockdep_assert_held(&kvm->lock);
+	lockdep_assert_held(&kvm->arch.rtas_token_lock);
 
 	list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
 		if (rtas_name_matches(d->handler->name, name)) {
@@ -167,7 +167,7 @@ static int rtas_token_define(struct kvm *kvm, char *name, u64 token)
 	bool found;
 	int i;
 
-	lockdep_assert_held(&kvm->lock);
+	lockdep_assert_held(&kvm->arch.rtas_token_lock);
 
 	list_for_each_entry(d, &kvm->arch.rtas_tokens, list) {
 		if (d->token == token)
@@ -206,14 +206,14 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
 	if (copy_from_user(&args, argp, sizeof(args)))
 		return -EFAULT;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.rtas_token_lock);
 
 	if (args.token)
 		rc = rtas_token_define(kvm, args.name, args.token);
 	else
 		rc = rtas_token_undefine(kvm, args.name);
 
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.rtas_token_lock);
 
 	return rc;
 }
@@ -245,7 +245,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 	orig_rets = args.rets;
 	args.rets = &args.args[be32_to_cpu(args.nargs)];
 
-	mutex_lock(&vcpu->kvm->lock);
+	mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
 
 	rc = -ENOENT;
 	list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
@@ -256,7 +256,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	mutex_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&vcpu->kvm->arch.rtas_token_lock);
 
 	if (rc == 0) {
 		args.rets = orig_rets;
@@ -282,8 +282,6 @@ void kvmppc_rtas_tokens_free(struct kvm *kvm)
 {
 	struct rtas_token_definition *d, *tmp;
 
-	lockdep_assert_held(&kvm->lock);
-
 	list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
 		list_del(&d->list);
 		kfree(d);
-- 
2.20.1
