/*
 * QEMU emulation of AMD IOMMU (AMD-Vi)
 *
 * Copyright (C) 2011 Eduard - Gabriel Munteanu
 * Copyright (C) 2015, 2016 David Kiarie Kahurani
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Cache implementation inspired by hw/i386/intel_iommu.c
 */

#include "qemu/osdep.h"
#include "hw/i386/pc.h"
#include "hw/pci/msi.h"
#include "hw/pci/pci_bus.h"
#include "migration/vmstate.h"
#include "amd_iommu.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/i386/apic_internal.h"
#include "trace.h"
#include "hw/i386/apic-msidef.h"

/* used AMD-Vi MMIO registers */
const char *amdvi_mmio_low[] = {
    "AMDVI_MMIO_DEVTAB_BASE",
    "AMDVI_MMIO_CMDBUF_BASE",
    "AMDVI_MMIO_EVTLOG_BASE",
    "AMDVI_MMIO_CONTROL",
    "AMDVI_MMIO_EXCL_BASE",
    "AMDVI_MMIO_EXCL_LIMIT",
    "AMDVI_MMIO_EXT_FEATURES",
    "AMDVI_MMIO_PPR_BASE",
    "UNHANDLED"
};
const char *amdvi_mmio_high[] = {
    "AMDVI_MMIO_COMMAND_HEAD",
    "AMDVI_MMIO_COMMAND_TAIL",
    "AMDVI_MMIO_EVTLOG_HEAD",
    "AMDVI_MMIO_EVTLOG_TAIL",
    "AMDVI_MMIO_STATUS",
    "AMDVI_MMIO_PPR_HEAD",
    "AMDVI_MMIO_PPR_TAIL",
    "UNHANDLED"
};

struct AMDVIAddressSpace {
    uint8_t bus_num;            /* bus number                           */
    uint8_t devfn;              /* device function                      */
    AMDVIState *iommu_state;    /* AMDVI - one per machine              */
    MemoryRegion root;          /* AMDVI Root memory map region         */
    IOMMUMemoryRegion iommu;    /* Device's address translation region  */
    MemoryRegion iommu_ir;      /* Device's interrupt remapping region  */
    AddressSpace as;            /* device's corresponding address space */
};

/* AMDVI cache entry */
typedef struct AMDVIIOTLBEntry {
    uint16_t domid;             /* assigned domain id  */
    uint16_t devid;             /* device owning entry */
    uint64_t perms;             /* access permissions  */
    uint64_t translated_addr;   /* translated address  */
    uint64_t page_mask;         /* physical page size  */
} AMDVIIOTLBEntry;

/* configure MMIO registers at startup/reset */
static void amdvi_set_quad(AMDVIState *s, hwaddr addr, uint64_t val,
                           uint64_t romask, uint64_t w1cmask)
{
    stq_le_p(&s->mmior[addr], val);
    stq_le_p(&s->romask[addr], romask);
    stq_le_p(&s->w1cmask[addr], w1cmask);
}

static uint16_t amdvi_readw(AMDVIState *s, hwaddr addr)
{
    return lduw_le_p(&s->mmior[addr]);
}

static uint32_t amdvi_readl(AMDVIState *s, hwaddr addr)
{
    return ldl_le_p(&s->mmior[addr]);
}

static uint64_t amdvi_readq(AMDVIState *s, hwaddr addr)
{
    return ldq_le_p(&s->mmior[addr]);
}

/* internal write */
static void amdvi_writeq_raw(AMDVIState *s, hwaddr addr, uint64_t val)
{
    stq_le_p(&s->mmior[addr], val);
}

/* external write */
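/*
 * Guest-visible writes are filtered through two per-register masks set
 * up by amdvi_set_quad(): bits set in romask are read-only and keep
 * their current value, while writing 1 to a bit set in w1cmask clears
 * that bit ("write 1 to clear", used by the guest to acknowledge
 * status conditions).
 */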
static void amdvi_writew(AMDVIState *s, hwaddr addr, uint16_t val)
{
    uint16_t romask = lduw_le_p(&s->romask[addr]);
    uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]);
    uint16_t oldval = lduw_le_p(&s->mmior[addr]);
    stw_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}

static void amdvi_writel(AMDVIState *s, hwaddr addr, uint32_t val)
{
    uint32_t romask = ldl_le_p(&s->romask[addr]);
    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
    uint32_t oldval = ldl_le_p(&s->mmior[addr]);
    stl_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}

static void amdvi_writeq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    uint64_t romask = ldq_le_p(&s->romask[addr]);
    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
    uint64_t oldval = ldq_le_p(&s->mmior[addr]);
    stq_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}

/* test whether any of the given bits are set in a 64-bit register */
static bool amdvi_test_mask(AMDVIState *s, hwaddr addr, uint64_t val)
{
    return amdvi_readq(s, addr) & val;
}

/* OR a 64-bit register with a 64-bit value storing result in the register */
static void amdvi_assign_orq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    amdvi_writeq_raw(s, addr, amdvi_readq(s, addr) | val);
}

/* AND a 64-bit register with a 64-bit value storing result in the register */
static void amdvi_assign_andq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    amdvi_writeq_raw(s, addr, amdvi_readq(s, addr) & val);
}

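/*
 * An MSI is delivered by storing the 32-bit message data to the
 * message address, so the interrupt is raised by replaying that store
 * on the system address space with this device's requester id in the
 * transaction attributes.
 */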
static void amdvi_generate_msi_interrupt(AMDVIState *s)
{
    MSIMessage msg = {};
    MemTxAttrs attrs = {
        .requester_id = pci_requester_id(&s->pci.dev)
    };

    if (msi_enabled(&s->pci.dev)) {
        msg = msi_get_message(&s->pci.dev, 0);
        address_space_stl_le(&address_space_memory, msg.address, msg.data,
                             attrs, NULL);
    }
}

static void amdvi_log_event(AMDVIState *s, uint64_t *evt)
{
    /* event logging not enabled */
    if (!s->evtlog_enabled || amdvi_test_mask(s, AMDVI_MMIO_STATUS,
        AMDVI_MMIO_STATUS_EVT_OVF)) {
        return;
    }

    /* event log buffer full */
    if (s->evtlog_tail >= s->evtlog_len) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_OVF);
        /* generate interrupt */
        amdvi_generate_msi_interrupt(s);
        return;
    }

    if (dma_memory_write(&address_space_memory, s->evtlog + s->evtlog_tail,
                         evt, AMDVI_EVENT_LEN, MEMTXATTRS_UNSPECIFIED)) {
        trace_amdvi_evntlog_fail(s->evtlog, s->evtlog_tail);
    }

    s->evtlog_tail += AMDVI_EVENT_LEN;
    amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
    amdvi_generate_msi_interrupt(s);
}

static void amdvi_setevent_bits(uint64_t *buffer, uint64_t value, int start,
                                int length)
{
    int index = start / 64, bitpos = start % 64;
    uint64_t mask = MAKE_64BIT_MASK(start, length);
    buffer[index] &= ~mask;
    buffer[index] |= (value << bitpos) & mask;
}
/*
 * AMD-Vi event structure
 *    0:15   -> DeviceID
 *    55:62  -> event type + miscellaneous info
 *    63:126 -> related address
 */
static void amdvi_encode_event(uint64_t *evt, uint16_t devid, uint64_t addr,
                               uint16_t info)
{
    amdvi_setevent_bits(evt, devid, 0, 16);
    amdvi_setevent_bits(evt, info, 55, 8);
    amdvi_setevent_bits(evt, addr, 63, 64);
}
/* log an error encountered during a page walk
 *
 * @addr: virtual address in translation request
 */
static void amdvi_page_fault(AMDVIState *s, uint16_t devid,
                             hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_IOPF_I | AMDVI_EVENT_IOPF;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
                               PCI_STATUS_SIG_TARGET_ABORT);
}
/*
 * log a master abort accessing device table
 *  @devtab : address of device table entry
 *  @info : error flags
 */
static void amdvi_log_devtab_error(AMDVIState *s, uint16_t devid,
                                   hwaddr devtab, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_DEV_TAB_HW_ERROR;

    amdvi_encode_event(evt, devid, devtab, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
                               PCI_STATUS_SIG_TARGET_ABORT);
}
/* log an event trying to access command buffer
 *   @addr : address that couldn't be accessed
 */
static void amdvi_log_command_error(AMDVIState *s, hwaddr addr)
{
    uint64_t evt[4], info = AMDVI_EVENT_COMMAND_HW_ERROR;

    amdvi_encode_event(evt, 0, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
                               PCI_STATUS_SIG_TARGET_ABORT);
}
/* log an illegal command event
 *   @addr : address of illegal command
 */
static void amdvi_log_illegalcom_error(AMDVIState *s, uint16_t info,
                                       hwaddr addr)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_ILLEGAL_COMMAND_ERROR;
    amdvi_encode_event(evt, 0, addr, info);
    amdvi_log_event(s, evt);
}
/* log an error accessing device table
 *
 *  @devid : device owning the table entry
 *  @devtab : address of device table entry
 *  @info : error flags
 */
static void amdvi_log_illegaldevtab_error(AMDVIState *s, uint16_t devid,
                                          hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_ILLEGAL_DEVTAB_ENTRY;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
}
/* log an error accessing a PTE entry
 * @addr : address that couldn't be accessed
 */
static void amdvi_log_pagetab_error(AMDVIState *s, uint16_t devid,
                                    hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_PAGE_TAB_HW_ERROR;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
                               PCI_STATUS_SIG_TARGET_ABORT);
}

static gboolean amdvi_uint64_equal(gconstpointer v1, gconstpointer v2)
{
    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
}

static guint amdvi_uint64_hash(gconstpointer v)
{
    return (guint)*(const uint64_t *)v;
}

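/*
 * IOTLB entries are keyed by a single uint64_t: the 4K-aligned guest
 * frame number in the low bits with the 16-bit device id shifted up by
 * AMDVI_DEVID_SHIFT above it.
 */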
static AMDVIIOTLBEntry *amdvi_iotlb_lookup(AMDVIState *s, hwaddr addr,
                                           uint64_t devid)
{
    uint64_t key = (addr >> AMDVI_PAGE_SHIFT_4K) |
                   ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
    return g_hash_table_lookup(s->iotlb, &key);
}

static void amdvi_iotlb_reset(AMDVIState *s)
{
    assert(s->iotlb);
    trace_amdvi_iotlb_reset();
    g_hash_table_remove_all(s->iotlb);
}

static gboolean amdvi_iotlb_remove_by_devid(gpointer key, gpointer value,
                                            gpointer user_data)
{
    AMDVIIOTLBEntry *entry = (AMDVIIOTLBEntry *)value;
    uint16_t devid = *(uint16_t *)user_data;
    return entry->devid == devid;
}

static void amdvi_iotlb_remove_page(AMDVIState *s, hwaddr addr,
                                    uint64_t devid)
{
    uint64_t key = (addr >> AMDVI_PAGE_SHIFT_4K) |
                   ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
    g_hash_table_remove(s->iotlb, &key);
}

static void amdvi_update_iotlb(AMDVIState *s, uint16_t devid,
                               uint64_t gpa, IOMMUTLBEntry to_cache,
                               uint16_t domid)
{
    /* don't cache erroneous translations */
    if (to_cache.perm != IOMMU_NONE) {
        AMDVIIOTLBEntry *entry = g_new(AMDVIIOTLBEntry, 1);
        uint64_t *key = g_new(uint64_t, 1);
        uint64_t gfn = gpa >> AMDVI_PAGE_SHIFT_4K;

        trace_amdvi_cache_update(domid, PCI_BUS_NUM(devid), PCI_SLOT(devid),
                                 PCI_FUNC(devid), gpa, to_cache.translated_addr);

        if (g_hash_table_size(s->iotlb) >= AMDVI_IOTLB_MAX_SIZE) {
            amdvi_iotlb_reset(s);
        }

        entry->domid = domid;
        entry->perms = to_cache.perm;
        entry->translated_addr = to_cache.translated_addr;
        entry->page_mask = to_cache.addr_mask;
        *key = gfn | ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
        g_hash_table_replace(s->iotlb, key, entry);
    }
}

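/*
 * Command buffer handlers. Commands are 16-byte (AMDVI_COMMAND_SIZE)
 * entries; bits 60:63 of the first quadword hold the opcode, and a set
 * reserved bit is reported back to the guest as an
 * ILLEGAL_COMMAND_ERROR event.
 */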
static void amdvi_completion_wait(AMDVIState *s, uint64_t *cmd)
{
    /* the store address is 8-byte aligned: bits 2:0 are implied zero */
    hwaddr addr = cpu_to_le64(extract64(cmd[0], 3, 49)) << 3;
    uint64_t data = cpu_to_le64(cmd[1]);

    if (extract64(cmd[0], 52, 8)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    if (extract64(cmd[0], 0, 1)) {
        if (dma_memory_write(&address_space_memory, addr, &data,
                             AMDVI_COMPLETION_DATA_SIZE,
                             MEMTXATTRS_UNSPECIFIED)) {
            trace_amdvi_completion_wait_fail(addr);
        }
    }
    /* set completion interrupt */
    if (extract64(cmd[0], 1, 1)) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
        /* generate interrupt */
        amdvi_generate_msi_interrupt(s);
    }
    trace_amdvi_completion_wait(addr, data);
}

/* log error without aborting since linux seems to be using reserved bits */
static void amdvi_inval_devtab_entry(AMDVIState *s, uint64_t *cmd)
{
    uint16_t devid = cpu_to_le16((uint16_t)extract64(cmd[0], 0, 16));

    /* This command should invalidate internal caches, of which there are none */
    if (extract64(cmd[0], 16, 44) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    trace_amdvi_devtab_inval(PCI_BUS_NUM(devid), PCI_SLOT(devid),
                             PCI_FUNC(devid));
}

static void amdvi_complete_ppr(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 16, 16) || extract64(cmd[0], 52, 8) ||
        extract64(cmd[1], 0, 2) || extract64(cmd[1], 3, 29) ||
        extract64(cmd[1], 48, 16)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    trace_amdvi_ppr_exec();
}

static void amdvi_inval_all(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 0, 60) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }

    amdvi_iotlb_reset(s);
    trace_amdvi_all_inval();
}

static gboolean amdvi_iotlb_remove_by_domid(gpointer key, gpointer value,
                                            gpointer user_data)
{
    AMDVIIOTLBEntry *entry = (AMDVIIOTLBEntry *)value;
    uint16_t domid = *(uint16_t *)user_data;
    return entry->domid == domid;
}

/*
 * The command carries a domain id but no devid, so we cannot remove
 * individual pages by address; flush every cached entry of the domain.
 */
static void amdvi_inval_pages(AMDVIState *s, uint64_t *cmd)
{
    uint16_t domid = cpu_to_le16((uint16_t)extract64(cmd[0], 32, 16));

    if (extract64(cmd[0], 20, 12) || extract64(cmd[0], 48, 12) ||
        extract64(cmd[1], 3, 9)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }

    g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_domid,
                                &domid);
    trace_amdvi_pages_inval(domid);
}

static void amdvi_prefetch_pages(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 16, 8) || extract64(cmd[0], 52, 8) ||
        extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 1) ||
        extract64(cmd[1], 5, 7)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }

    trace_amdvi_prefetch_pages();
}

static void amdvi_inval_inttable(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 16, 44) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
        return;
    }

    trace_amdvi_intr_inval();
}

/* FIXME: Try to work with the specified size instead of all the pages
 * when the S bit is on
 */
static void iommu_inval_iotlb(AMDVIState *s, uint64_t *cmd)
{
    uint16_t devid = extract64(cmd[0], 0, 16);

    if (extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 1) ||
        extract64(cmd[1], 6, 6)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
        return;
    }

    if (extract64(cmd[1], 0, 1)) {
        g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_devid,
                                    &devid);
    } else {
        amdvi_iotlb_remove_page(s, cpu_to_le64(extract64(cmd[1], 12, 52)) << 12,
                                cpu_to_le16(extract64(cmd[1], 0, 16)));
    }
    trace_amdvi_iotlb_inval();
}

/* not honouring reserved bits is regarded as an illegal command */
static void amdvi_cmdbuf_exec(AMDVIState *s)
{
    uint64_t cmd[2];

    if (dma_memory_read(&address_space_memory, s->cmdbuf + s->cmdbuf_head,
                        cmd, AMDVI_COMMAND_SIZE, MEMTXATTRS_UNSPECIFIED)) {
        trace_amdvi_command_read_fail(s->cmdbuf, s->cmdbuf_head);
        amdvi_log_command_error(s, s->cmdbuf + s->cmdbuf_head);
        return;
    }

    switch (extract64(cmd[0], 60, 4)) {
    case AMDVI_CMD_COMPLETION_WAIT:
        amdvi_completion_wait(s, cmd);
        break;
    case AMDVI_CMD_INVAL_DEVTAB_ENTRY:
        amdvi_inval_devtab_entry(s, cmd);
        break;
    case AMDVI_CMD_INVAL_AMDVI_PAGES:
        amdvi_inval_pages(s, cmd);
        break;
    case AMDVI_CMD_INVAL_IOTLB_PAGES:
        iommu_inval_iotlb(s, cmd);
        break;
    case AMDVI_CMD_INVAL_INTR_TABLE:
        amdvi_inval_inttable(s, cmd);
        break;
    case AMDVI_CMD_PREFETCH_AMDVI_PAGES:
        amdvi_prefetch_pages(s, cmd);
        break;
    case AMDVI_CMD_COMPLETE_PPR_REQUEST:
        amdvi_complete_ppr(s, cmd);
        break;
    case AMDVI_CMD_INVAL_AMDVI_ALL:
        amdvi_inval_all(s, cmd);
        break;
    default:
        /* the opcode lives in cmd[0], bits 60:63 */
        trace_amdvi_unhandled_command(extract64(cmd[0], 60, 4));
        /* log illegal command */
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
}

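/*
 * The command buffer is a guest-filled ring: the guest advances the
 * tail pointer through MMIO writes and the device consumes commands at
 * the head until head == tail, mirroring the head register back so the
 * guest can observe progress.
 */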
static void amdvi_cmdbuf_run(AMDVIState *s)
{
    if (!s->cmdbuf_enabled) {
        trace_amdvi_command_error(amdvi_readq(s, AMDVI_MMIO_CONTROL));
        return;
    }

    /* check if there is work to do. */
    while (s->cmdbuf_head != s->cmdbuf_tail) {
        trace_amdvi_command_exec(s->cmdbuf_head, s->cmdbuf_tail, s->cmdbuf);
        amdvi_cmdbuf_exec(s);
        s->cmdbuf_head += AMDVI_COMMAND_SIZE;
        amdvi_writeq_raw(s, AMDVI_MMIO_COMMAND_HEAD, s->cmdbuf_head);

        /* wrap head pointer */
        if (s->cmdbuf_head >= s->cmdbuf_len * AMDVI_COMMAND_SIZE) {
            s->cmdbuf_head = 0;
        }
    }
}

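/*
 * The register file is split in two halves: offsets with bit 13 clear
 * hold the "low" registers and offsets with bit 13 set hold the "high"
 * ones, so bit 13 of the address selects which name table to use when
 * tracing.
 */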
static void amdvi_mmio_trace(hwaddr addr, unsigned size)
{
    uint8_t index = (addr & ~0x2000) / 8;

    if ((addr & 0x2000)) {
        /* high table */
        index = index >= AMDVI_MMIO_REGS_HIGH ? AMDVI_MMIO_REGS_HIGH : index;
        trace_amdvi_mmio_read(amdvi_mmio_high[index], addr, size, addr & ~0x07);
    } else {
        index = index >= AMDVI_MMIO_REGS_LOW ? AMDVI_MMIO_REGS_LOW : index;
        trace_amdvi_mmio_read(amdvi_mmio_low[index], addr, size, addr & ~0x07);
    }
}

static uint64_t amdvi_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    AMDVIState *s = opaque;

    uint64_t val = -1;
    if (addr + size > AMDVI_MMIO_SIZE) {
        trace_amdvi_mmio_read_invalid(AMDVI_MMIO_SIZE, addr, size);
        return (uint64_t)-1;
    }

    if (size == 2) {
        val = amdvi_readw(s, addr);
    } else if (size == 4) {
        val = amdvi_readl(s, addr);
    } else if (size == 8) {
        val = amdvi_readq(s, addr);
    }
    amdvi_mmio_trace(addr, size);

    return val;
}

static void amdvi_handle_control_write(AMDVIState *s)
{
    unsigned long control = amdvi_readq(s, AMDVI_MMIO_CONTROL);
    s->enabled = !!(control & AMDVI_MMIO_CONTROL_AMDVIEN);

    s->ats_enabled = !!(control & AMDVI_MMIO_CONTROL_HTTUNEN);
    s->evtlog_enabled = s->enabled && !!(control &
                        AMDVI_MMIO_CONTROL_EVENTLOGEN);

    s->evtlog_intr = !!(control & AMDVI_MMIO_CONTROL_EVENTINTEN);
    s->completion_wait_intr = !!(control & AMDVI_MMIO_CONTROL_COMWAITINTEN);
    s->cmdbuf_enabled = s->enabled && !!(control &
                        AMDVI_MMIO_CONTROL_CMDBUFLEN);
    s->ga_enabled = !!(control & AMDVI_MMIO_CONTROL_GAEN);

    /* update the flags depending on the control register */
    if (s->cmdbuf_enabled) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_CMDBUF_RUN);
    } else {
        amdvi_assign_andq(s, AMDVI_MMIO_STATUS, ~AMDVI_MMIO_STATUS_CMDBUF_RUN);
    }
    if (s->evtlog_enabled) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_RUN);
    } else {
        amdvi_assign_andq(s, AMDVI_MMIO_STATUS, ~AMDVI_MMIO_STATUS_EVT_RUN);
    }

    trace_amdvi_control_status(control);
    amdvi_cmdbuf_run(s);
}

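/*
 * Per the AMD-Vi spec, the size field of the device table base
 * register holds the table length in 4K units minus one, so the entry
 * count is (size + 1) scaled by the number of entries per 4K unit.
 */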
static inline void amdvi_handle_devtab_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_DEVICE_TABLE);
    s->devtab = (val & AMDVI_MMIO_DEVTAB_BASE_MASK);

    /* set device table length */
    s->devtab_len = ((val & AMDVI_MMIO_DEVTAB_SIZE_MASK) + 1) *
                    (AMDVI_MMIO_DEVTAB_SIZE_UNIT /
                     AMDVI_MMIO_DEVTAB_ENTRY_SIZE);
}

static inline void amdvi_handle_cmdhead_write(AMDVIState *s)
{
    s->cmdbuf_head = amdvi_readq(s, AMDVI_MMIO_COMMAND_HEAD)
                     & AMDVI_MMIO_CMDBUF_HEAD_MASK;
    amdvi_cmdbuf_run(s);
}

static inline void amdvi_handle_cmdbase_write(AMDVIState *s)
{
    s->cmdbuf = amdvi_readq(s, AMDVI_MMIO_COMMAND_BASE)
                & AMDVI_MMIO_CMDBUF_BASE_MASK;
    s->cmdbuf_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_CMDBUF_SIZE_BYTE)
                    & AMDVI_MMIO_CMDBUF_SIZE_MASK);
    s->cmdbuf_head = s->cmdbuf_tail = 0;
}

static inline void amdvi_handle_cmdtail_write(AMDVIState *s)
{
    s->cmdbuf_tail = amdvi_readq(s, AMDVI_MMIO_COMMAND_TAIL)
                     & AMDVI_MMIO_CMDBUF_TAIL_MASK;
    amdvi_cmdbuf_run(s);
}

static inline void amdvi_handle_excllim_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EXCL_LIMIT);
    s->excl_limit = (val & AMDVI_MMIO_EXCL_LIMIT_MASK) |
                    AMDVI_MMIO_EXCL_LIMIT_LOW;
}

static inline void amdvi_handle_evtbase_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_BASE);
    s->evtlog = val & AMDVI_MMIO_EVTLOG_BASE_MASK;
    s->evtlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_EVTLOG_SIZE_BYTE)
                    & AMDVI_MMIO_EVTLOG_SIZE_MASK);
}

static inline void amdvi_handle_evttail_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_TAIL);
    s->evtlog_tail = val & AMDVI_MMIO_EVTLOG_TAIL_MASK;
}

static inline void amdvi_handle_evthead_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_HEAD);
    s->evtlog_head = val & AMDVI_MMIO_EVTLOG_HEAD_MASK;
}

static inline void amdvi_handle_pprbase_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_BASE);
    s->ppr_log = val & AMDVI_MMIO_PPRLOG_BASE_MASK;
    s->pprlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_PPRLOG_SIZE_BYTE)
                    & AMDVI_MMIO_PPRLOG_SIZE_MASK);
}

static inline void amdvi_handle_pprhead_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_HEAD);
    s->pprlog_head = val & AMDVI_MMIO_PPRLOG_HEAD_MASK;
}

static inline void amdvi_handle_pprtail_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_TAIL);
    s->pprlog_tail = val & AMDVI_MMIO_PPRLOG_TAIL_MASK;
}

/* FIXME: something might go wrong if System Software writes in chunks
 * of one byte; Linux writes in chunks of 4 bytes, so this currently
 * works correctly with Linux, but it will definitely be broken if
 * software reads/writes 8 bytes
 */
static void amdvi_mmio_reg_write(AMDVIState *s, unsigned size, uint64_t val,
                                 hwaddr addr)
{
    if (size == 2) {
        amdvi_writew(s, addr, val);
    } else if (size == 4) {
        amdvi_writel(s, addr, val);
    } else if (size == 8) {
        amdvi_writeq(s, addr, val);
    }
}

static void amdvi_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                             unsigned size)
{
    AMDVIState *s = opaque;
    unsigned long offset = addr & 0x07;

    if (addr + size > AMDVI_MMIO_SIZE) {
        trace_amdvi_mmio_write("error: addr outside region: max ",
                (uint64_t)AMDVI_MMIO_SIZE, size, val, offset);
        return;
    }

    amdvi_mmio_trace(addr, size);
    switch (addr & ~0x07) {
    case AMDVI_MMIO_CONTROL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_control_write(s);
        break;
    case AMDVI_MMIO_DEVICE_TABLE:
        amdvi_mmio_reg_write(s, size, val, addr);
        /* set device table address
         * This also suffers from inability to tell whether software
         * is done writing
         */
        if (offset || (size == 8)) {
            amdvi_handle_devtab_write(s);
        }
        break;
    case AMDVI_MMIO_COMMAND_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_cmdhead_write(s);
        break;
    case AMDVI_MMIO_COMMAND_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        /* FIXME - make sure System Software has finished writing, in
         * case it writes in chunks of less than 8 bytes, in a robust
         * way. For now, this hack works for the linux driver
         */
        if (offset || (size == 8)) {
            amdvi_handle_cmdbase_write(s);
        }
        break;
    case AMDVI_MMIO_COMMAND_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_cmdtail_write(s);
        break;
    case AMDVI_MMIO_EVENT_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evtbase_write(s);
        break;
    case AMDVI_MMIO_EVENT_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evthead_write(s);
        break;
    case AMDVI_MMIO_EVENT_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evttail_write(s);
        break;
    case AMDVI_MMIO_EXCL_LIMIT:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_excllim_write(s);
        break;
    /* PPR log base - unused for now */
    case AMDVI_MMIO_PPR_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprbase_write(s);
        break;
    /* PPR log head - also unused for now */
    case AMDVI_MMIO_PPR_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprhead_write(s);
        break;
    /* PPR log tail - unused for now */
    case AMDVI_MMIO_PPR_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprtail_write(s);
        break;
    }
}

static inline uint64_t amdvi_get_perms(uint64_t entry)
{
    return (entry & (AMDVI_DEV_PERM_READ | AMDVI_DEV_PERM_WRITE)) >>
           AMDVI_DEV_PERM_SHIFT;
}

/* validate that reserved bits are honoured */
static bool amdvi_validate_dte(AMDVIState *s, uint16_t devid,
                               uint64_t *dte)
{
    if ((dte[0] & AMDVI_DTE_LOWER_QUAD_RESERVED)
        || (dte[1] & AMDVI_DTE_MIDDLE_QUAD_RESERVED)
        || (dte[2] & AMDVI_DTE_UPPER_QUAD_RESERVED) || dte[3]) {
        amdvi_log_illegaldevtab_error(s, devid,
                                      s->devtab +
                                      devid * AMDVI_DEVTAB_ENTRY_SIZE, 0);
        return false;
    }

    return true;
}

/* get a device table entry given the devid */
static bool amdvi_get_dte(AMDVIState *s, int devid, uint64_t *entry)
{
    uint32_t offset = devid * AMDVI_DEVTAB_ENTRY_SIZE;
    int i;

    if (dma_memory_read(&address_space_memory, s->devtab + offset, entry,
                        AMDVI_DEVTAB_ENTRY_SIZE, MEMTXATTRS_UNSPECIFIED)) {
        trace_amdvi_dte_get_fail(s->devtab, offset);
        /* log error accessing dte */
        amdvi_log_devtab_error(s, devid, s->devtab + offset, 0);
        return false;
    }

    /* the DTE is four little-endian quadwords */
    for (i = 0; i < 4; i++) {
        entry[i] = le64_to_cpu(entry[i]);
    }
    if (!amdvi_validate_dte(s, devid, entry)) {
        trace_amdvi_invalid_dte(entry[0]);
        return false;
    }

    return true;
}

/* get pte translation mode */
static inline uint8_t get_pte_translation_mode(uint64_t pte)
{
    return (pte >> AMDVI_DEV_MODE_RSHIFT) & AMDVI_DEV_MODE_MASK;
}

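/*
 * A PTE with next-level == 7 encodes its page size in the address
 * field: scanning up from bit 12, the position of the first zero bit
 * sets the size (first zero at bit 12 -> 8K, at bit 13 -> 16K, and so
 * on).
 */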
static inline uint64_t pte_override_page_mask(uint64_t pte)
{
    uint8_t page_mask = 13;
    uint64_t addr = (pte & AMDVI_DEV_PT_ROOT_MASK) >> 12;
    /* find the first zero bit */
    while (addr & 1) {
        page_mask++;
        addr = addr >> 1;
    }

    return ~((1ULL << page_mask) - 1);
}

static inline uint64_t pte_get_page_mask(uint64_t oldlevel)
{
    return ~((1ULL << ((oldlevel * 9) + 3)) - 1);
}

static inline uint64_t amdvi_get_pte_entry(AMDVIState *s, uint64_t pte_addr,
                                           uint16_t devid)
{
    uint64_t pte;

    if (dma_memory_read(&address_space_memory, pte_addr,
                        &pte, sizeof(pte), MEMTXATTRS_UNSPECIFIED)) {
        trace_amdvi_get_pte_hwerror(pte_addr);
        amdvi_log_pagetab_error(s, devid, pte_addr, 0);
        pte = 0;
        return pte;
    }

    pte = le64_to_cpu(pte);
    return pte;
}

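/*
 * Walk the I/O page table. Each level translates 9 bits of the IOVA,
 * so the index at 'level' is taken from address bits starting at
 * (3 + 9 * level) and scaled by the 8-byte PTE size. A next-level
 * field of 7 denotes a PTE that encodes its own page size, while 0
 * means the PTE is final.
 */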
static void amdvi_page_walk(AMDVIAddressSpace *as, uint64_t *dte,
                            IOMMUTLBEntry *ret, unsigned perms,
                            hwaddr addr)
{
    unsigned level, present, pte_perms, oldlevel;
    uint64_t pte = dte[0], pte_addr, page_mask;

    /* make sure the DTE has TV = 1 */
    if (pte & AMDVI_DEV_TRANSLATION_VALID) {
        level = get_pte_translation_mode(pte);
        if (level >= 7) {
            trace_amdvi_mode_invalid(level, addr);
            return;
        }
        if (level == 0) {
            goto no_remap;
        }

        /* we are at the leaf page table or page table encodes a huge page */
        while (level > 0) {
            pte_perms = amdvi_get_perms(pte);
            present = pte & 1;
            if (!present || perms != (perms & pte_perms)) {
                amdvi_page_fault(as->iommu_state, as->devfn, addr, perms);
                trace_amdvi_page_fault(addr);
                return;
            }

            /* go to the next lower level */
            pte_addr = pte & AMDVI_DEV_PT_ROOT_MASK;
            /* add offset and load pte */
            pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3;
            pte = amdvi_get_pte_entry(as->iommu_state, pte_addr, as->devfn);
            if (!pte) {
                return;
            }
            oldlevel = level;
            level = get_pte_translation_mode(pte);
            if (level == 0x7) {
                break;
            }
        }

        if (level == 0x7) {
            page_mask = pte_override_page_mask(pte);
        } else {
            page_mask = pte_get_page_mask(oldlevel);
        }

        /* get access permissions from pte */
        ret->iova = addr & page_mask;
        ret->translated_addr = (pte & AMDVI_DEV_PT_ROOT_MASK) & page_mask;
        ret->addr_mask = ~page_mask;
        ret->perm = amdvi_get_perms(pte);
        return;
    }
no_remap:
    ret->iova = addr & AMDVI_PAGE_MASK_4K;
    ret->translated_addr = addr & AMDVI_PAGE_MASK_4K;
    ret->addr_mask = ~AMDVI_PAGE_MASK_4K;
    ret->perm = amdvi_get_perms(pte);
}

static void amdvi_do_translate(AMDVIAddressSpace *as, hwaddr addr,
                               bool is_write, IOMMUTLBEntry *ret)
{
    AMDVIState *s = as->iommu_state;
    uint16_t devid = PCI_BUILD_BDF(as->bus_num, as->devfn);
    AMDVIIOTLBEntry *iotlb_entry = amdvi_iotlb_lookup(s, addr, devid);
    uint64_t entry[4];

    if (iotlb_entry) {
        trace_amdvi_iotlb_hit(PCI_BUS_NUM(devid), PCI_SLOT(devid),
                              PCI_FUNC(devid), addr, iotlb_entry->translated_addr);
        ret->iova = addr & ~iotlb_entry->page_mask;
        ret->translated_addr = iotlb_entry->translated_addr;
        ret->addr_mask = iotlb_entry->page_mask;
        ret->perm = iotlb_entry->perms;
        return;
    }

    if (!amdvi_get_dte(s, devid, entry)) {
        return;
    }

    /* devices with V = 0 are not translated */
    if (!(entry[0] & AMDVI_DEV_VALID)) {
        goto out;
    }

    amdvi_page_walk(as, entry, ret,
                    is_write ? AMDVI_PERM_WRITE : AMDVI_PERM_READ, addr);

    amdvi_update_iotlb(s, devid, addr, *ret,
                       entry[1] & AMDVI_DEV_DOMID_ID_MASK);
    return;

out:
    ret->iova = addr & AMDVI_PAGE_MASK_4K;
    ret->translated_addr = addr & AMDVI_PAGE_MASK_4K;
    ret->addr_mask = ~AMDVI_PAGE_MASK_4K;
    ret->perm = IOMMU_RW;
}

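/*
 * DMA writes to the fee00000-feefffff window are MSI doorbells rather
 * than memory accesses; they bypass the page walker and are handled by
 * the interrupt-remapping code below.
 */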
static inline bool amdvi_is_interrupt_addr(hwaddr addr)
{
    return addr >= AMDVI_INT_ADDR_FIRST && addr <= AMDVI_INT_ADDR_LAST;
}

static IOMMUTLBEntry amdvi_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
                                     IOMMUAccessFlags flag, int iommu_idx)
{
    AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu);
    AMDVIState *s = as->iommu_state;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE
    };

    if (!s->enabled) {
        /* AMDVI disabled - corresponds to iommu=off not
         * failure to provide any parameter
         */
        ret.iova = addr & AMDVI_PAGE_MASK_4K;
        ret.translated_addr = addr & AMDVI_PAGE_MASK_4K;
        ret.addr_mask = ~AMDVI_PAGE_MASK_4K;
        ret.perm = IOMMU_RW;
        return ret;
    } else if (amdvi_is_interrupt_addr(addr)) {
        ret.iova = addr & AMDVI_PAGE_MASK_4K;
        ret.translated_addr = addr & AMDVI_PAGE_MASK_4K;
        ret.addr_mask = ~AMDVI_PAGE_MASK_4K;
        ret.perm = IOMMU_WO;
        return ret;
    }

    amdvi_do_translate(as, addr, flag & IOMMU_WO, &ret);
    trace_amdvi_translation_result(as->bus_num, PCI_SLOT(as->devfn),
                                   PCI_FUNC(as->devfn), addr, ret.translated_addr);
    return ret;
}
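/*
 * In legacy (non guest-APIC) mode each IRTE is 4 bytes, so the offset
 * taken from the MSI data is scaled by 4 (<< 2); the guest-APIC tables
 * read by amdvi_get_irte_ga() below use 16-byte entries and scale by
 * 16 (<< 4).
 */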
static int amdvi_get_irte(AMDVIState *s, MSIMessage *origin, uint64_t *dte,
                          union irte *irte, uint16_t devid)
{
    uint64_t irte_root, offset;

    irte_root = dte[2] & AMDVI_IR_PHYS_ADDR_MASK;
    offset = (origin->data & AMDVI_IRTE_OFFSET) << 2;

    trace_amdvi_ir_irte(irte_root, offset);

    if (dma_memory_read(&address_space_memory, irte_root + offset,
                        irte, sizeof(*irte), MEMTXATTRS_UNSPECIFIED)) {
        trace_amdvi_ir_err("failed to get irte");
        return -AMDVI_IR_GET_IRTE;
    }

    trace_amdvi_ir_irte_val(irte->val);

    return 0;
}

static int amdvi_int_remap_legacy(AMDVIState *iommu,
                                  MSIMessage *origin,
                                  MSIMessage *translated,
                                  uint64_t *dte,
                                  X86IOMMUIrq *irq,
                                  uint16_t sid)
{
    int ret;
    union irte irte;

    /* get interrupt remapping table */
    ret = amdvi_get_irte(iommu, origin, dte, &irte, sid);
    if (ret < 0) {
        return ret;
    }

    if (!irte.fields.valid) {
        trace_amdvi_ir_target_abort("RemapEn is disabled");
        return -AMDVI_IR_TARGET_ABORT;
    }

    if (irte.fields.guest_mode) {
        error_report_once("guest mode is not zero");
        return -AMDVI_IR_ERR;
    }

    if (irte.fields.int_type > AMDVI_IOAPIC_INT_TYPE_ARBITRATED) {
        error_report_once("reserved int_type");
        return -AMDVI_IR_ERR;
    }

    irq->delivery_mode = irte.fields.int_type;
    irq->vector = irte.fields.vector;
    irq->dest_mode = irte.fields.dm;
    irq->redir_hint = irte.fields.rq_eoi;
    irq->dest = irte.fields.destination;

    return 0;
}

static int amdvi_get_irte_ga(AMDVIState *s, MSIMessage *origin, uint64_t *dte,
                             struct irte_ga *irte, uint16_t devid)
{
    uint64_t irte_root, offset;

    irte_root = dte[2] & AMDVI_IR_PHYS_ADDR_MASK;
    offset = (origin->data & AMDVI_IRTE_OFFSET) << 4;
    trace_amdvi_ir_irte(irte_root, offset);

    if (dma_memory_read(&address_space_memory, irte_root + offset,
                        irte, sizeof(*irte), MEMTXATTRS_UNSPECIFIED)) {
        trace_amdvi_ir_err("failed to get irte_ga");
        return -AMDVI_IR_GET_IRTE;
    }

    trace_amdvi_ir_irte_ga_val(irte->hi.val, irte->lo.val);
    return 0;
}

static int amdvi_int_remap_ga(AMDVIState *iommu,
                              MSIMessage *origin,
                              MSIMessage *translated,
                              uint64_t *dte,
                              X86IOMMUIrq *irq,
                              uint16_t sid)
{
    int ret;
    struct irte_ga irte;

    /* get interrupt remapping table */
    ret = amdvi_get_irte_ga(iommu, origin, dte, &irte, sid);
    if (ret < 0) {
        return ret;
    }

    if (!irte.lo.fields_remap.valid) {
        trace_amdvi_ir_target_abort("RemapEn is disabled");
        return -AMDVI_IR_TARGET_ABORT;
    }

    if (irte.lo.fields_remap.guest_mode) {
        error_report_once("guest mode is not zero");
        return -AMDVI_IR_ERR;
    }

    if (irte.lo.fields_remap.int_type > AMDVI_IOAPIC_INT_TYPE_ARBITRATED) {
        error_report_once("reserved int_type is set");
        return -AMDVI_IR_ERR;
    }

    irq->delivery_mode = irte.lo.fields_remap.int_type;
    irq->vector = irte.hi.fields.vector;
    irq->dest_mode = irte.lo.fields_remap.dm;
    irq->redir_hint = irte.lo.fields_remap.rq_eoi;
    irq->dest = irte.lo.fields_remap.destination;

    return 0;
}
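/*
 * The IntCtl field of the DTE selects how fixed and arbitrated
 * interrupts are handled: passed through unchanged, remapped through
 * the interrupt table, or target-aborted.
 */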
static int __amdvi_int_remap_msi(AMDVIState *iommu,
                                 MSIMessage *origin,
                                 MSIMessage *translated,
                                 uint64_t *dte,
                                 X86IOMMUIrq *irq,
                                 uint16_t sid)
{
    int ret;
    uint8_t int_ctl;

    int_ctl = (dte[2] >> AMDVI_IR_INTCTL_SHIFT) & 3;
    trace_amdvi_ir_intctl(int_ctl);

    switch (int_ctl) {
    case AMDVI_IR_INTCTL_PASS:
        memcpy(translated, origin, sizeof(*origin));
        return 0;
    case AMDVI_IR_INTCTL_REMAP:
        break;
    case AMDVI_IR_INTCTL_ABORT:
        trace_amdvi_ir_target_abort("int_ctl abort");
        return -AMDVI_IR_TARGET_ABORT;
    default:
        trace_amdvi_ir_err("int_ctl reserved");
        return -AMDVI_IR_ERR;
    }

    if (iommu->ga_enabled) {
        ret = amdvi_int_remap_ga(iommu, origin, translated, dte, irq, sid);
    } else {
        ret = amdvi_int_remap_legacy(iommu, origin, translated, dte, irq, sid);
    }

    return ret;
}

/* Interrupt remapping for MSI/MSI-X entry */
static int amdvi_int_remap_msi(AMDVIState *iommu,
                               MSIMessage *origin,
                               MSIMessage *translated,
                               uint16_t sid)
{
    int ret = 0;
    uint64_t pass = 0;
    uint64_t dte[4] = { 0 };
    X86IOMMUIrq irq = { 0 };
    uint8_t dest_mode, delivery_mode;

    assert(origin && translated);

    /*
     * When the IOMMU is enabled, an interrupt remap request will come
     * either from an IO-APIC or a PCI device. If the interrupt is from
     * a PCI device then it will have a valid requester id, but if it is
     * from an IO-APIC the requester id will be invalid.
     */
    if (sid == X86_IOMMU_SID_INVALID) {
        sid = AMDVI_IOAPIC_SB_DEVID;
    }

    trace_amdvi_ir_remap_msi_req(origin->address, origin->data, sid);

    /* check if device table entry is set before we go further. */
    if (!iommu || !iommu->devtab_len) {
        memcpy(translated, origin, sizeof(*origin));
        goto out;
    }

    if (!amdvi_get_dte(iommu, sid, dte)) {
        return -AMDVI_IR_ERR;
    }

    /* Check if IR is enabled in DTE */
    if (!(dte[2] & AMDVI_IR_REMAP_ENABLE)) {
        memcpy(translated, origin, sizeof(*origin));
        goto out;
    }

    /* validate that we are configured with intremap=on */
    if (!x86_iommu_ir_supported(X86_IOMMU_DEVICE(iommu))) {
        trace_amdvi_err("Interrupt remapping is enabled in the guest but "
                        "not in the host. Use intremap=on to enable interrupt "
                        "remapping in amd-iommu.");
        return -AMDVI_IR_ERR;
    }

    if (origin->address & AMDVI_MSI_ADDR_HI_MASK) {
        trace_amdvi_err("MSI address high 32 bits non-zero when "
                        "Interrupt Remapping enabled.");
        return -AMDVI_IR_ERR;
    }

    if ((origin->address & AMDVI_MSI_ADDR_LO_MASK) != APIC_DEFAULT_ADDRESS) {
        trace_amdvi_err("MSI is not from IOAPIC.");
        return -AMDVI_IR_ERR;
    }

    /*
     * The MSI data register [10:8] is used to get the upstream interrupt type.
     *
     * See MSI/MSI-X format:
     * https://pdfs.semanticscholar.org/presentation/9420/c279e942eca568157711ef5c92b800c40a79.pdf
     * (page 5)
     */
    delivery_mode = (origin->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 7;

    switch (delivery_mode) {
    case AMDVI_IOAPIC_INT_TYPE_FIXED:
    case AMDVI_IOAPIC_INT_TYPE_ARBITRATED:
        trace_amdvi_ir_delivery_mode("fixed/arbitrated");
        ret = __amdvi_int_remap_msi(iommu, origin, translated, dte, &irq, sid);
        if (ret < 0) {
            goto remap_fail;
        } else {
            /* Translate IRQ to MSI messages */
            x86_iommu_irq_to_msi_message(&irq, translated);
            goto out;
        }
        break;
    case AMDVI_IOAPIC_INT_TYPE_SMI:
        error_report("SMI is not supported!");
        ret = -AMDVI_IR_ERR;
        break;
    case AMDVI_IOAPIC_INT_TYPE_NMI:
        pass = dte[3] & AMDVI_DEV_NMI_PASS_MASK;
        trace_amdvi_ir_delivery_mode("nmi");
        break;
    case AMDVI_IOAPIC_INT_TYPE_INIT:
        pass = dte[3] & AMDVI_DEV_INT_PASS_MASK;
        trace_amdvi_ir_delivery_mode("init");
        break;
    case AMDVI_IOAPIC_INT_TYPE_EINT:
        pass = dte[3] & AMDVI_DEV_EINT_PASS_MASK;
        trace_amdvi_ir_delivery_mode("eint");
        break;
    default:
        trace_amdvi_ir_delivery_mode("unsupported delivery_mode");
        ret = -AMDVI_IR_ERR;
        break;
    }

    if (ret < 0) {
        goto remap_fail;
    }

    /*
     * The MSI address register bit[2] is used to get the destination
     * mode. The dest_mode 1 is valid for fixed and arbitrated interrupts
     * only.
     */
    dest_mode = (origin->address >> MSI_ADDR_DEST_MODE_SHIFT) & 1;
    if (dest_mode) {
        trace_amdvi_ir_err("invalid dest_mode");
        ret = -AMDVI_IR_ERR;
        goto remap_fail;
    }

    if (pass) {
        memcpy(translated, origin, sizeof(*origin));
    } else {
        trace_amdvi_ir_err("passthrough is not enabled");
        ret = -AMDVI_IR_ERR;
        goto remap_fail;
    }

out:
    trace_amdvi_ir_remap_msi(origin->address, origin->data,
                             translated->address, translated->data);
    return 0;

remap_fail:
    return ret;
}

static int amdvi_int_remap(X86IOMMUState *iommu,
                           MSIMessage *origin,
                           MSIMessage *translated,
                           uint16_t sid)
{
    return amdvi_int_remap_msi(AMD_IOMMU_DEVICE(iommu), origin,
                               translated, sid);
}

static MemTxResult amdvi_mem_ir_write(void *opaque, hwaddr addr,
                                      uint64_t value, unsigned size,
                                      MemTxAttrs attrs)
{
    int ret;
    MSIMessage from = { 0, 0 }, to = { 0, 0 };
    uint16_t sid = AMDVI_IOAPIC_SB_DEVID;

    from.address = (uint64_t) addr + AMDVI_INT_ADDR_FIRST;
    from.data = (uint32_t) value;

    trace_amdvi_mem_ir_write_req(addr, value, size);

    if (!attrs.unspecified) {
        /* We have explicit Source ID */
        sid = attrs.requester_id;
    }

    ret = amdvi_int_remap_msi(opaque, &from, &to, sid);
    if (ret < 0) {
        /* TODO: log the event using IOMMU log event interface */
        error_report_once("failed to remap interrupt from devid 0x%x", sid);
        return MEMTX_ERROR;
    }

    apic_get_class()->send_msi(&to);

    trace_amdvi_mem_ir_write(to.address, to.data);
    return MEMTX_OK;
}

static MemTxResult amdvi_mem_ir_read(void *opaque, hwaddr addr,
                                     uint64_t *data, unsigned size,
                                     MemTxAttrs attrs)
{
    return MEMTX_OK;
}

static const MemoryRegionOps amdvi_ir_ops = {
    .read_with_attrs = amdvi_mem_ir_read,
    .write_with_attrs = amdvi_mem_ir_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    }
};

static AddressSpace *amdvi_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    char name[128];
    AMDVIState *s = opaque;
    AMDVIAddressSpace **iommu_as, *amdvi_dev_as;
    int bus_num = pci_bus_num(bus);

    iommu_as = s->address_spaces[bus_num];

    /* allocate memory during the first run */
    if (!iommu_as) {
        iommu_as = g_malloc0(sizeof(AMDVIAddressSpace *) * PCI_DEVFN_MAX);
        s->address_spaces[bus_num] = iommu_as;
    }

    /* set up AMD-Vi region */
    if (!iommu_as[devfn]) {
        snprintf(name, sizeof(name), "amd_iommu_devfn_%d", devfn);

        iommu_as[devfn] = g_malloc0(sizeof(AMDVIAddressSpace));
        iommu_as[devfn]->bus_num = (uint8_t)bus_num;
        iommu_as[devfn]->devfn = (uint8_t)devfn;
        iommu_as[devfn]->iommu_state = s;

        amdvi_dev_as = iommu_as[devfn];

        /*
         * The memory region relationship looks like this (address range
         * shows only the lower 32 bits to keep it short):
         *
         * |-----------------+-------------------+----------|
         * | Name            | Address range     | Priority |
         * |-----------------+-------------------+----------+
         * | amdvi_root      | 00000000-ffffffff |        0 |
         * | amdvi_iommu     | 00000000-ffffffff |        1 |
         * | amdvi_iommu_ir  | fee00000-feefffff |       64 |
         * |-----------------+-------------------+----------|
         */
        memory_region_init_iommu(&amdvi_dev_as->iommu,
                                 sizeof(amdvi_dev_as->iommu),
                                 TYPE_AMD_IOMMU_MEMORY_REGION,
                                 OBJECT(s),
                                 "amd_iommu", UINT64_MAX);
        memory_region_init(&amdvi_dev_as->root, OBJECT(s),
                           "amdvi_root", UINT64_MAX);
        address_space_init(&amdvi_dev_as->as, &amdvi_dev_as->root, name);
        memory_region_init_io(&amdvi_dev_as->iommu_ir, OBJECT(s),
                              &amdvi_ir_ops, s, "amd_iommu_ir",
                              AMDVI_INT_ADDR_SIZE);
        memory_region_add_subregion_overlap(&amdvi_dev_as->root,
                                            AMDVI_INT_ADDR_FIRST,
                                            &amdvi_dev_as->iommu_ir,
                                            64);
        memory_region_add_subregion_overlap(&amdvi_dev_as->root, 0,
                                            MEMORY_REGION(&amdvi_dev_as->iommu),
                                            1);
    }
    return &iommu_as[devfn]->as;
}

static const MemoryRegionOps mmio_mem_ops = {
    .read = amdvi_mmio_read,
    .write = amdvi_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    }
};

static int amdvi_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                           IOMMUNotifierFlag old,
                                           IOMMUNotifierFlag new,
                                           Error **errp)
{
    AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu);

    if (new & IOMMU_NOTIFIER_MAP) {
        error_setg(errp,
                   "device %02x.%02x.%x requires iommu notifier which is not "
                   "currently supported", as->bus_num, PCI_SLOT(as->devfn),
                   PCI_FUNC(as->devfn));
        return -EINVAL;
    }
    return 0;
}

static void amdvi_init(AMDVIState *s)
{
    amdvi_iotlb_reset(s);

    s->devtab_len = 0;
    s->cmdbuf_len = 0;
    s->cmdbuf_head = 0;
    s->cmdbuf_tail = 0;
    s->evtlog_head = 0;
    s->evtlog_tail = 0;
    s->excl_enabled = false;
    s->excl_allow = false;
    s->mmio_enabled = false;
    s->enabled = false;
    s->ats_enabled = false;
    s->cmdbuf_enabled = false;

    /* reset MMIO */
    memset(s->mmior, 0, AMDVI_MMIO_SIZE);
    amdvi_set_quad(s, AMDVI_MMIO_EXT_FEATURES, AMDVI_EXT_FEATURES,
                   0xffffffffffffffef, 0);
    amdvi_set_quad(s, AMDVI_MMIO_STATUS, 0, 0x98, 0x67);

    /* reset device ident */
    pci_config_set_vendor_id(s->pci.dev.config, PCI_VENDOR_ID_AMD);
    pci_config_set_prog_interface(s->pci.dev.config, 00);
    pci_config_set_device_id(s->pci.dev.config, s->devid);
    pci_config_set_class(s->pci.dev.config, 0x0806);

    /* reset AMDVI specific capabilities, all r/o */
    pci_set_long(s->pci.dev.config + s->capab_offset, AMDVI_CAPAB_FEATURES);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_LOW,
                 s->mmio.addr & ~(0xffff0000));
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_HIGH,
                 (s->mmio.addr & ~(0xffff)) >> 16);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_RANGE,
                 0xff000000);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC, 0);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC,
                 AMDVI_MAX_PH_ADDR | AMDVI_MAX_GVA_ADDR | AMDVI_MAX_VA_ADDR);
}

static void amdvi_sysbus_reset(DeviceState *dev)
{
    AMDVIState *s = AMD_IOMMU_DEVICE(dev);

    msi_reset(&s->pci.dev);
    amdvi_init(s);
}

static void amdvi_sysbus_realize(DeviceState *dev, Error **errp)
{
    int ret = 0;
    AMDVIState *s = AMD_IOMMU_DEVICE(dev);
    MachineState *ms = MACHINE(qdev_get_machine());
    PCMachineState *pcms = PC_MACHINE(ms);
    X86MachineState *x86ms = X86_MACHINE(ms);
    PCIBus *bus = pcms->bus;

    s->iotlb = g_hash_table_new_full(amdvi_uint64_hash,
                                     amdvi_uint64_equal, g_free, g_free);

    /* This device should take care of IOMMU PCI properties */
    if (!qdev_realize(DEVICE(&s->pci), &bus->qbus, errp)) {
        return;
    }
    ret = pci_add_capability(&s->pci.dev, AMDVI_CAPAB_ID_SEC, 0,
                             AMDVI_CAPAB_SIZE, errp);
    if (ret < 0) {
        return;
    }
    s->capab_offset = ret;

    ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_MSI, 0,
                             AMDVI_CAPAB_REG_SIZE, errp);
    if (ret < 0) {
        return;
    }
    ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_HT, 0,
                             AMDVI_CAPAB_REG_SIZE, errp);
    if (ret < 0) {
        return;
    }

    /* Pseudo address space under root PCI bus. */
    x86ms->ioapic_as = amdvi_host_dma_iommu(bus, s, AMDVI_IOAPIC_SB_DEVID);

    /* set up MMIO */
    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "amdvi-mmio",
                          AMDVI_MMIO_SIZE);

    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio);
    sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, AMDVI_BASE_ADDR);
    pci_setup_iommu(bus, amdvi_host_dma_iommu, s);
    s->devid = object_property_get_int(OBJECT(&s->pci), "addr", &error_abort);
    msi_init(&s->pci.dev, 0, 1, true, false, errp);
    amdvi_init(s);
}

static const VMStateDescription vmstate_amdvi_sysbus = {
    .name = "amd-iommu",
    .unmigratable = 1
};

static void amdvi_sysbus_instance_init(Object *klass)
{
    AMDVIState *s = AMD_IOMMU_DEVICE(klass);

    object_initialize(&s->pci, sizeof(s->pci), TYPE_AMD_IOMMU_PCI);
}

static void amdvi_sysbus_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    X86IOMMUClass *dc_class = X86_IOMMU_DEVICE_CLASS(klass);

    dc->reset = amdvi_sysbus_reset;
    dc->vmsd = &vmstate_amdvi_sysbus;
    dc->hotpluggable = false;
    dc_class->realize = amdvi_sysbus_realize;
    dc_class->int_remap = amdvi_int_remap;
    /* Supported by the pc-q35-* machine types */
    dc->user_creatable = true;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->desc = "AMD IOMMU (AMD-Vi) DMA Remapping device";
}

static const TypeInfo amdvi_sysbus = {
    .name = TYPE_AMD_IOMMU_DEVICE,
    .parent = TYPE_X86_IOMMU_DEVICE,
    .instance_size = sizeof(AMDVIState),
    .instance_init = amdvi_sysbus_instance_init,
    .class_init = amdvi_sysbus_class_init
};

static void amdvi_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->desc = "AMD IOMMU (AMD-Vi) DMA Remapping device";
}

static const TypeInfo amdvi_pci = {
    .name = TYPE_AMD_IOMMU_PCI,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(AMDVIPCIState),
    .class_init = amdvi_pci_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};

static void amdvi_iommu_memory_region_class_init(ObjectClass *klass, void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = amdvi_translate;
    imrc->notify_flag_changed = amdvi_iommu_notify_flag_changed;
}

static const TypeInfo amdvi_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_AMD_IOMMU_MEMORY_REGION,
    .class_init = amdvi_iommu_memory_region_class_init,
};

static void amdvi_register_types(void)
{
    type_register_static(&amdvi_pci);
    type_register_static(&amdvi_sysbus);
    type_register_static(&amdvi_iommu_memory_region_info);
}

type_init(amdvi_register_types);