]> git.ipfire.org Git - thirdparty/linux.git/blob - arch/powerpc/kvm/trace_pr.h
License cleanup: add SPDX GPL-2.0 license identifier to files with no license
[thirdparty/linux.git] / arch / powerpc / kvm / trace_pr.h
1 /* SPDX-License-Identifier: GPL-2.0 */
2
3 #if !defined(_TRACE_KVM_PR_H) || defined(TRACE_HEADER_MULTI_READ)
4 #define _TRACE_KVM_PR_H
5
6 #include <linux/tracepoint.h>
7 #include "trace_book3s.h"
8
9 #undef TRACE_SYSTEM
10 #define TRACE_SYSTEM kvm_pr
11 #define TRACE_INCLUDE_PATH .
12 #define TRACE_INCLUDE_FILE trace_pr
13
14 TRACE_EVENT(kvm_book3s_reenter,
15 TP_PROTO(int r, struct kvm_vcpu *vcpu),
16 TP_ARGS(r, vcpu),
17
18 TP_STRUCT__entry(
19 __field( unsigned int, r )
20 __field( unsigned long, pc )
21 ),
22
23 TP_fast_assign(
24 __entry->r = r;
25 __entry->pc = kvmppc_get_pc(vcpu);
26 ),
27
28 TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
29 );
30
31 #ifdef CONFIG_PPC_BOOK3S_64
32
33 TRACE_EVENT(kvm_book3s_64_mmu_map,
34 TP_PROTO(int rflags, ulong hpteg, ulong va, kvm_pfn_t hpaddr,
35 struct kvmppc_pte *orig_pte),
36 TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),
37
38 TP_STRUCT__entry(
39 __field( unsigned char, flag_w )
40 __field( unsigned char, flag_x )
41 __field( unsigned long, eaddr )
42 __field( unsigned long, hpteg )
43 __field( unsigned long, va )
44 __field( unsigned long long, vpage )
45 __field( unsigned long, hpaddr )
46 ),
47
48 TP_fast_assign(
49 __entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
50 __entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x';
51 __entry->eaddr = orig_pte->eaddr;
52 __entry->hpteg = hpteg;
53 __entry->va = va;
54 __entry->vpage = orig_pte->vpage;
55 __entry->hpaddr = hpaddr;
56 ),
57
58 TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
59 __entry->flag_w, __entry->flag_x, __entry->eaddr,
60 __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
61 );
62
63 #endif /* CONFIG_PPC_BOOK3S_64 */
64
65 TRACE_EVENT(kvm_book3s_mmu_map,
66 TP_PROTO(struct hpte_cache *pte),
67 TP_ARGS(pte),
68
69 TP_STRUCT__entry(
70 __field( u64, host_vpn )
71 __field( u64, pfn )
72 __field( ulong, eaddr )
73 __field( u64, vpage )
74 __field( ulong, raddr )
75 __field( int, flags )
76 ),
77
78 TP_fast_assign(
79 __entry->host_vpn = pte->host_vpn;
80 __entry->pfn = pte->pfn;
81 __entry->eaddr = pte->pte.eaddr;
82 __entry->vpage = pte->pte.vpage;
83 __entry->raddr = pte->pte.raddr;
84 __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
85 (pte->pte.may_write ? 0x2 : 0) |
86 (pte->pte.may_execute ? 0x1 : 0);
87 ),
88
89 TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
90 __entry->host_vpn, __entry->pfn, __entry->eaddr,
91 __entry->vpage, __entry->raddr, __entry->flags)
92 );
93
94 TRACE_EVENT(kvm_book3s_mmu_invalidate,
95 TP_PROTO(struct hpte_cache *pte),
96 TP_ARGS(pte),
97
98 TP_STRUCT__entry(
99 __field( u64, host_vpn )
100 __field( u64, pfn )
101 __field( ulong, eaddr )
102 __field( u64, vpage )
103 __field( ulong, raddr )
104 __field( int, flags )
105 ),
106
107 TP_fast_assign(
108 __entry->host_vpn = pte->host_vpn;
109 __entry->pfn = pte->pfn;
110 __entry->eaddr = pte->pte.eaddr;
111 __entry->vpage = pte->pte.vpage;
112 __entry->raddr = pte->pte.raddr;
113 __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
114 (pte->pte.may_write ? 0x2 : 0) |
115 (pte->pte.may_execute ? 0x1 : 0);
116 ),
117
118 TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
119 __entry->host_vpn, __entry->pfn, __entry->eaddr,
120 __entry->vpage, __entry->raddr, __entry->flags)
121 );
122
123 TRACE_EVENT(kvm_book3s_mmu_flush,
124 TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
125 unsigned long long p2),
126 TP_ARGS(type, vcpu, p1, p2),
127
128 TP_STRUCT__entry(
129 __field( int, count )
130 __field( unsigned long long, p1 )
131 __field( unsigned long long, p2 )
132 __field( const char *, type )
133 ),
134
135 TP_fast_assign(
136 __entry->count = to_book3s(vcpu)->hpte_cache_count;
137 __entry->p1 = p1;
138 __entry->p2 = p2;
139 __entry->type = type;
140 ),
141
142 TP_printk("Flush %d %sPTEs: %llx - %llx",
143 __entry->count, __entry->type, __entry->p1, __entry->p2)
144 );
145
146 TRACE_EVENT(kvm_book3s_slb_found,
147 TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
148 TP_ARGS(gvsid, hvsid),
149
150 TP_STRUCT__entry(
151 __field( unsigned long long, gvsid )
152 __field( unsigned long long, hvsid )
153 ),
154
155 TP_fast_assign(
156 __entry->gvsid = gvsid;
157 __entry->hvsid = hvsid;
158 ),
159
160 TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
161 );
162
163 TRACE_EVENT(kvm_book3s_slb_fail,
164 TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
165 TP_ARGS(sid_map_mask, gvsid),
166
167 TP_STRUCT__entry(
168 __field( unsigned short, sid_map_mask )
169 __field( unsigned long long, gvsid )
170 ),
171
172 TP_fast_assign(
173 __entry->sid_map_mask = sid_map_mask;
174 __entry->gvsid = gvsid;
175 ),
176
177 TP_printk("%x/%x: %llx", __entry->sid_map_mask,
178 SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
179 );
180
181 TRACE_EVENT(kvm_book3s_slb_map,
182 TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
183 unsigned long long hvsid),
184 TP_ARGS(sid_map_mask, gvsid, hvsid),
185
186 TP_STRUCT__entry(
187 __field( unsigned short, sid_map_mask )
188 __field( unsigned long long, guest_vsid )
189 __field( unsigned long long, host_vsid )
190 ),
191
192 TP_fast_assign(
193 __entry->sid_map_mask = sid_map_mask;
194 __entry->guest_vsid = gvsid;
195 __entry->host_vsid = hvsid;
196 ),
197
198 TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
199 __entry->guest_vsid, __entry->host_vsid)
200 );
201
202 TRACE_EVENT(kvm_book3s_slbmte,
203 TP_PROTO(u64 slb_vsid, u64 slb_esid),
204 TP_ARGS(slb_vsid, slb_esid),
205
206 TP_STRUCT__entry(
207 __field( u64, slb_vsid )
208 __field( u64, slb_esid )
209 ),
210
211 TP_fast_assign(
212 __entry->slb_vsid = slb_vsid;
213 __entry->slb_esid = slb_esid;
214 ),
215
216 TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
217 );
218
219 TRACE_EVENT(kvm_exit,
220 TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
221 TP_ARGS(exit_nr, vcpu),
222
223 TP_STRUCT__entry(
224 __field( unsigned int, exit_nr )
225 __field( unsigned long, pc )
226 __field( unsigned long, msr )
227 __field( unsigned long, dar )
228 __field( unsigned long, srr1 )
229 __field( unsigned long, last_inst )
230 ),
231
232 TP_fast_assign(
233 __entry->exit_nr = exit_nr;
234 __entry->pc = kvmppc_get_pc(vcpu);
235 __entry->dar = kvmppc_get_fault_dar(vcpu);
236 __entry->msr = kvmppc_get_msr(vcpu);
237 __entry->srr1 = vcpu->arch.shadow_srr1;
238 __entry->last_inst = vcpu->arch.last_inst;
239 ),
240
241 TP_printk("exit=%s"
242 " | pc=0x%lx"
243 " | msr=0x%lx"
244 " | dar=0x%lx"
245 " | srr1=0x%lx"
246 " | last_inst=0x%lx"
247 ,
248 __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
249 __entry->pc,
250 __entry->msr,
251 __entry->dar,
252 __entry->srr1,
253 __entry->last_inst
254 )
255 );
256
257 TRACE_EVENT(kvm_unmap_hva,
258 TP_PROTO(unsigned long hva),
259 TP_ARGS(hva),
260
261 TP_STRUCT__entry(
262 __field( unsigned long, hva )
263 ),
264
265 TP_fast_assign(
266 __entry->hva = hva;
267 ),
268
269 TP_printk("unmap hva 0x%lx\n", __entry->hva)
270 );
271
272 #endif /* _TRACE_KVM_H */
273
274 /* This part must be outside protection */
275 #include <trace/define_trace.h>