/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace-root.h"

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
#include <fcntl.h>
#include <linux/falloc.h>
#endif

#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

#ifdef TARGET_PAGE_BITS_VARY
int target_page_bits;
bool target_page_bits_decided;
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

bool set_preferred_target_page_bits(int bits)
{
    /* The target page size is the lowest common denominator for all
     * the CPUs in the system, so we can only make it smaller, never
     * larger.  And we can't make it smaller once we've committed to
     * a particular size.
     */
#ifdef TARGET_PAGE_BITS_VARY
    assert(bits >= TARGET_PAGE_BITS_MIN);
    if (target_page_bits == 0 || target_page_bits > bits) {
        if (target_page_bits_decided) {
            return false;
        }
        target_page_bits = bits;
    }
#endif
    return true;
}

#if !defined(CONFIG_USER_ONLY)

static void finalize_target_page_bits(void)
{
#ifdef TARGET_PAGE_BITS_VARY
    if (target_page_bits == 0) {
        target_page_bits = TARGET_PAGE_BITS_MIN;
    }
    target_page_bits_decided = true;
#endif
}

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
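
/* A worked example of the sizing above (assuming a target with 12-bit
 * pages; the real TARGET_PAGE_BITS varies by target): with
 * ADDR_SPACE_BITS = 64 and P_L2_BITS = 9, P_L2_LEVELS is
 * ((64 - 12 - 1) / 9) + 1 = 6, i.e. the radix tree resolves the 52-bit
 * physical page number nine bits at a time over six levels, and each
 * Node is an array of 512 PhysPageEntry slots.
 */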

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[];
} subpage_t;
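
/* sub_section[] is a flexible array of phys_sections indices, indexed by
 * the byte offset within the page that SUBPAGE_IDX() extracts, so a
 * single target page can be carved up among several
 * MemoryRegionSections at byte granularity.
 */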

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry.  Simply detect that the entry has a
 * single child, and update our entry so we can skip it and go directly
 * to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}
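
/* After the compaction above, a chain of single-child interior nodes
 * collapses into one entry whose skip field tells phys_page_find() how
 * many radix levels to jump in a single step, which keeps lookups short
 * on sparsely populated address spaces.
 */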

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
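
/* The walk above starts at the root with i == P_L2_LEVELS and subtracts
 * each entry's skip count, so a compacted chain of levels is crossed in
 * one loop iteration; once an entry with skip == 0 is reached it is a
 * leaf and lp.ptr indexes the sections[] table.
 */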

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write)
{
    IOMMUTLBEntry iotlb = {0};
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_lookup_region(d, addr, false);
        addr = addr - section->offset_within_address_space
               + section->offset_within_region;
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        if (!(iotlb.perm & (1 << is_write))) {
            iotlb.target_as = NULL;
            break;
        }

        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        as = iotlb.target_as;
    }

    return iotlb;
}
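
/* The loop above follows chained IOMMUs: each translate() hop rewrites
 * the address and redirects the walk into iotlb.target_as, until a
 * region without iommu_ops is reached or the requested permission is
 * refused (in which case target_as is cleared to signal failure).
 */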

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
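
/* A sketch of the expected calling convention for the translation API
 * above (per the "Called from RCU critical section" contract):
 *
 *     rcu_read_lock();
 *     mr = address_space_translate(as, addr, &xlat, &l, is_write);
 *     ...access at most l bytes of mr starting at offset xlat...
 *     rcu_read_unlock();
 */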

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT.  This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

void cpu_exec_unrealizefn(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_remove(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

void cpu_exec_initfn(CPUState *cpu)
{
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif
}

void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);

    cpu_list_add(cpu);

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    /* Flush the whole TB as this will not have race conditions
     * even if we don't have proper locking yet.
     * Ideally we would just invalidate the TBs for the
     * specified PC.
     */
    tb_flush(cpu);
}

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (i.e. the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
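
/* Worked example of the wraparound guard above: for a 64-bit vaddr, an
 * access at addr = 0xfffffffffffff000 with len = 0x1000 ends exactly at
 * the top of the address space, so addr + len wraps to 0; the inclusive
 * end addr + len - 1 = 0xffffffffffffffff stays representable and the
 * overlap test remains correct.
 */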

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log_lock();
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_unlock();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
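
/* The dirty bitmap consulted above is sharded into blocks of
 * DIRTY_MEMORY_BLOCK_SIZE page bits: idx selects the block and offset
 * the first bit inside it, so the loop clears at most one block per
 * iteration and a range that straddles block boundaries is handled in
 * several atomic steps rather than one.
 */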

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
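
/* mem_add() above carves a section into page-aligned runs, which are
 * registered whole via register_multipage(), and sub-page leftovers at
 * the edges, which go through register_subpage() so several sections
 * can share a single target page.
 */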

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__
static int64_t get_file_size(int fd)
{
    int64_t size = lseek(fd, 0, SEEK_END);
    if (size < 0) {
        return -errno;
    }
    return size;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    bool unlink_on_error = false;
    char *filename;
    char *sanitized_name;
    char *c;
    void *area = MAP_FAILED;
    int fd = -1;
    int64_t file_size;

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        return NULL;
    }

    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                unlink_on_error = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(memory_region_name(block->mr));
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                unlink(filename);
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
                             path);
            goto error;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    block->page_size = qemu_fd_getpagesize(fd);
    block->mr->align = block->page_size;
#if defined(__s390x__)
    if (kvm_enabled()) {
        block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
    }
#endif

    file_size = get_file_size(fd);

    if (memory < block->page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%zx",
                   memory, block->page_size);
        goto error;
    }

    if (file_size > 0 && file_size < memory) {
        error_setg(errp, "backing store %s size 0x%" PRIx64
                   " does not match 'size' option 0x" RAM_ADDR_FMT,
                   path, file_size, memory);
        goto error;
    }

    memory = ROUND_UP(memory, block->page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     *
     * Do not truncate the non-empty backend file to avoid corrupting
     * the existing data in the file. Disabling shrinking is not
     * enough. For example, the current vNVDIMM implementation stores
     * the guest NVDIMM labels at the end of the backend file. If the
     * backend file is later extended, QEMU will not be able to find
     * those labels. Therefore, extending the non-empty backend file
     * is disabled as well.
     */
    if (!file_size && ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, block->mr->align,
                         block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory, errp);
        if (errp && *errp) {
            goto error;
        }
    }

    block->fd = fd;
    return area;

error:
    if (area != MAP_FAILED) {
        qemu_ram_munmap(area, memory);
    }
    if (unlink_on_error) {
        unlink(path);
    }
    if (fd != -1) {
        close(fd);
    }
    return NULL;
}
#endif

/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
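
/* find_ram_offset() above is a best-fit search: for every block it
 * computes the gap up to the nearest following block, then keeps the
 * smallest gap that still fits the request (mingap), which helps keep
 * the ram_addr_t space compact as blocks come and go.
 */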

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last;
}
ddb97f1d JB |
1461 | static void qemu_ram_setup_dump(void *addr, ram_addr_t size) |
1462 | { | |
1463 | int ret; | |
ddb97f1d JB |
1464 | |
1465 | /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core */
47c8ca53 | 1466 | if (!machine_dump_guest_core(current_machine)) { |
ddb97f1d JB |
1467 | ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP); |
1468 | if (ret) { | |
1469 | perror("qemu_madvise"); | |
1470 | fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, " | |
1471 | "but dump_guest_core=off specified\n"); | |
1472 | } | |
1473 | } | |
1474 | } | |
1475 | ||
422148d3 DDAG |
1476 | const char *qemu_ram_get_idstr(RAMBlock *rb) |
1477 | { | |
1478 | return rb->idstr; | |
1479 | } | |
1480 | ||
ae3a7047 | 1481 | /* Called with iothread lock held. */ |
fa53a0e5 | 1482 | void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev) |
20cfe881 | 1483 | { |
fa53a0e5 | 1484 | RAMBlock *block; |
20cfe881 | 1485 | |
c5705a77 AK |
1486 | assert(new_block); |
1487 | assert(!new_block->idstr[0]); | |
84b89d78 | 1488 | |
09e5ab63 AL |
1489 | if (dev) { |
1490 | char *id = qdev_get_dev_path(dev); | |
84b89d78 CM |
1491 | if (id) { |
1492 | snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id); | |
7267c094 | 1493 | g_free(id); |
84b89d78 CM |
1494 | } |
1495 | } | |
1496 | pstrcat(new_block->idstr, sizeof(new_block->idstr), name); | |
1497 | ||
ab0a9956 | 1498 | rcu_read_lock(); |
0dc3f44a | 1499 | QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { |
fa53a0e5 GA |
1500 | if (block != new_block && |
1501 | !strcmp(block->idstr, new_block->idstr)) { | |
84b89d78 CM |
1502 | fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n", |
1503 | new_block->idstr); | |
1504 | abort(); | |
1505 | } | |
1506 | } | |
0dc3f44a | 1507 | rcu_read_unlock(); |
c5705a77 AK |
1508 | } |
1509 | ||
ae3a7047 | 1510 | /* Called with iothread lock held. */ |
fa53a0e5 | 1511 | void qemu_ram_unset_idstr(RAMBlock *block) |
20cfe881 | 1512 | { |
ae3a7047 MD |
1513 | /* FIXME: arch_init.c assumes that this is not called throughout |
1514 | * migration. Ignore the problem since hot-unplug during migration | |
1515 | * does not work anyway. | |
1516 | */ | |
20cfe881 HT |
1517 | if (block) { |
1518 | memset(block->idstr, 0, sizeof(block->idstr)); | |
1519 | } | |
1520 | } | |
1521 | ||
863e9621 DDAG |
1522 | size_t qemu_ram_pagesize(RAMBlock *rb) |
1523 | { | |
1524 | return rb->page_size; | |
1525 | } | |
1526 | ||
67f11b5c DDAG |
1527 | /* Returns the largest page size in use */
1528 | size_t qemu_ram_pagesize_largest(void) | |
1529 | { | |
1530 | RAMBlock *block; | |
1531 | size_t largest = 0; | |
1532 | ||
1533 | QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { | |
1534 | largest = MAX(largest, qemu_ram_pagesize(block)); | |
1535 | } | |
1536 | ||
1537 | return largest; | |
1538 | } | |
1539 | ||
8490fc78 LC |
1540 | static int memory_try_enable_merging(void *addr, size_t len) |
1541 | { | |
75cc7f01 | 1542 | if (!machine_mem_merge(current_machine)) { |
8490fc78 LC |
1543 | /* disabled by the user */ |
1544 | return 0; | |
1545 | } | |
1546 | ||
1547 | return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE); | |
1548 | } | |
1549 | ||
62be4e3a MT |
1550 | /* Only legal before the guest might have detected the memory size: e.g. on
1551 | * incoming migration, or right after reset.
1552 | *
1553 | * As the memory core doesn't know how the memory is accessed, it is up to
1554 | * the resize callback to update device state and/or add assertions to
1555 | * detect misuse, if necessary. (A hedged usage sketch follows the function.)
1556 | */ | |
fa53a0e5 | 1557 | int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp) |
62be4e3a | 1558 | { |
62be4e3a MT |
1559 | assert(block); |
1560 | ||
4ed023ce | 1561 | newsize = HOST_PAGE_ALIGN(newsize); |
129ddaf3 | 1562 | |
62be4e3a MT |
1563 | if (block->used_length == newsize) { |
1564 | return 0; | |
1565 | } | |
1566 | ||
1567 | if (!(block->flags & RAM_RESIZEABLE)) { | |
1568 | error_setg_errno(errp, EINVAL, | |
1569 | "Length mismatch: %s: 0x" RAM_ADDR_FMT | |
1570 | " in != 0x" RAM_ADDR_FMT, block->idstr, | |
1571 | newsize, block->used_length); | |
1572 | return -EINVAL; | |
1573 | } | |
1574 | ||
1575 | if (block->max_length < newsize) { | |
1576 | error_setg_errno(errp, EINVAL, | |
1577 | "Length too large: %s: 0x" RAM_ADDR_FMT | |
1578 | " > 0x" RAM_ADDR_FMT, block->idstr, | |
1579 | newsize, block->max_length); | |
1580 | return -EINVAL; | |
1581 | } | |
1582 | ||
1583 | cpu_physical_memory_clear_dirty_range(block->offset, block->used_length); | |
1584 | block->used_length = newsize; | |
58d2707e PB |
1585 | cpu_physical_memory_set_dirty_range(block->offset, block->used_length, |
1586 | DIRTY_CLIENTS_ALL); | |
62be4e3a MT |
1587 | memory_region_set_size(block->mr, newsize); |
1588 | if (block->resized) { | |
1589 | block->resized(block->idstr, newsize, block->host); | |
1590 | } | |
1591 | return 0; | |
1592 | } | |
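/* A hedged usage sketch (block/remote_size are illustrative names, not from
 * this file): a migration path resizing a RAM_RESIZEABLE block to the used
 * length reported by the source.
 */
#if 0
Error *local_err = NULL;

if (qemu_ram_resize(block, remote_size, &local_err) < 0) {
    /* fails when the block is fixed-size or remote_size > max_length */
    error_report_err(local_err);
}
#endif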
1593 | ||
5b82b703 SH |
1594 | /* Called with ram_list.mutex held */ |
1595 | static void dirty_memory_extend(ram_addr_t old_ram_size, | |
1596 | ram_addr_t new_ram_size) | |
1597 | { | |
1598 | ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size, | |
1599 | DIRTY_MEMORY_BLOCK_SIZE); | |
1600 | ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size, | |
1601 | DIRTY_MEMORY_BLOCK_SIZE); | |
1602 | int i; | |
1603 | ||
1604 | /* Only need to extend if block count increased */ | |
1605 | if (new_num_blocks <= old_num_blocks) { | |
1606 | return; | |
1607 | } | |
1608 | ||
1609 | for (i = 0; i < DIRTY_MEMORY_NUM; i++) { | |
1610 | DirtyMemoryBlocks *old_blocks; | |
1611 | DirtyMemoryBlocks *new_blocks; | |
1612 | int j; | |
1613 | ||
1614 | old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]); | |
1615 | new_blocks = g_malloc(sizeof(*new_blocks) + | |
1616 | sizeof(new_blocks->blocks[0]) * new_num_blocks); | |
1617 | ||
1618 | if (old_num_blocks) { | |
1619 | memcpy(new_blocks->blocks, old_blocks->blocks, | |
1620 | old_num_blocks * sizeof(old_blocks->blocks[0])); | |
1621 | } | |
1622 | ||
1623 | for (j = old_num_blocks; j < new_num_blocks; j++) { | |
1624 | new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE); | |
1625 | } | |
1626 | ||
1627 | atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks); | |
1628 | ||
1629 | if (old_blocks) { | |
1630 | g_free_rcu(old_blocks, rcu); | |
1631 | } | |
1632 | } | |
1633 | } | |
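/* The function above is the classic RCU "copy, then publish" shape: readers
 * may still be iterating the old DirtyMemoryBlocks while the new one is
 * built, so the existing bitmap pointers are copied (never resized in
 * place), the new container is published with a single atomic store, and
 * the old one is freed only after a grace period. Schematically:
 *
 *   new = alloc(bigger);
 *   memcpy(new->slots, old->slots, old_n);   // share existing payload
 *   atomic_rcu_set(&shared, new);            // publish to readers
 *   g_free_rcu(old, rcu);                    // reclaim after grace period
 */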
1634 | ||
528f46af | 1635 | static void ram_block_add(RAMBlock *new_block, Error **errp) |
c5705a77 | 1636 | { |
e1c57ab8 | 1637 | RAMBlock *block; |
0d53d9fe | 1638 | RAMBlock *last_block = NULL; |
2152f5ca | 1639 | ram_addr_t old_ram_size, new_ram_size; |
37aa7a0e | 1640 | Error *err = NULL; |
2152f5ca JQ |
1641 | |
1642 | old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS; | |
c5705a77 | 1643 | |
b2a8658e | 1644 | qemu_mutex_lock_ramlist(); |
9b8424d5 | 1645 | new_block->offset = find_ram_offset(new_block->max_length); |
e1c57ab8 PB |
1646 | |
1647 | if (!new_block->host) { | |
1648 | if (xen_enabled()) { | |
9b8424d5 | 1649 | xen_ram_alloc(new_block->offset, new_block->max_length, |
37aa7a0e MA |
1650 | new_block->mr, &err); |
1651 | if (err) { | |
1652 | error_propagate(errp, err); | |
1653 | qemu_mutex_unlock_ramlist(); | |
39c350ee | 1654 | return; |
37aa7a0e | 1655 | } |
e1c57ab8 | 1656 | } else { |
9b8424d5 | 1657 | new_block->host = phys_mem_alloc(new_block->max_length, |
a2b257d6 | 1658 | &new_block->mr->align); |
39228250 | 1659 | if (!new_block->host) { |
ef701d7b HT |
1660 | error_setg_errno(errp, errno, |
1661 | "cannot set up guest memory '%s'", | |
1662 | memory_region_name(new_block->mr)); | |
1663 | qemu_mutex_unlock_ramlist(); | |
39c350ee | 1664 | return; |
39228250 | 1665 | } |
9b8424d5 | 1666 | memory_try_enable_merging(new_block->host, new_block->max_length); |
6977dfe6 | 1667 | } |
c902760f | 1668 | } |
94a6b54f | 1669 | |
dd631697 LZ |
1670 | new_ram_size = MAX(old_ram_size, |
1671 | (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS); | |
1672 | if (new_ram_size > old_ram_size) { | |
1673 | migration_bitmap_extend(old_ram_size, new_ram_size); | |
5b82b703 | 1674 | dirty_memory_extend(old_ram_size, new_ram_size); |
dd631697 | 1675 | } |
0d53d9fe MD |
1676 | /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1677 | * QLIST (which has an RCU-friendly variant) does not have insertion at
1678 | * tail, so save the last element in last_block. (A standalone sketch
1679 | * of this size-ordered insert follows the function.) */
0dc3f44a | 1680 | QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { |
0d53d9fe | 1681 | last_block = block; |
9b8424d5 | 1682 | if (block->max_length < new_block->max_length) { |
abb26d63 PB |
1683 | break; |
1684 | } | |
1685 | } | |
1686 | if (block) { | |
0dc3f44a | 1687 | QLIST_INSERT_BEFORE_RCU(block, new_block, next); |
0d53d9fe | 1688 | } else if (last_block) { |
0dc3f44a | 1689 | QLIST_INSERT_AFTER_RCU(last_block, new_block, next); |
0d53d9fe | 1690 | } else { /* list is empty */ |
0dc3f44a | 1691 | QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next); |
abb26d63 | 1692 | } |
0d6d3c87 | 1693 | ram_list.mru_block = NULL; |
94a6b54f | 1694 | |
0dc3f44a MD |
1695 | /* Write list before version */ |
1696 | smp_wmb(); | |
f798b07f | 1697 | ram_list.version++; |
b2a8658e | 1698 | qemu_mutex_unlock_ramlist(); |
f798b07f | 1699 | |
9b8424d5 | 1700 | cpu_physical_memory_set_dirty_range(new_block->offset, |
58d2707e PB |
1701 | new_block->used_length, |
1702 | DIRTY_CLIENTS_ALL); | |
94a6b54f | 1703 | |
a904c911 PB |
1704 | if (new_block->host) { |
1705 | qemu_ram_setup_dump(new_block->host, new_block->max_length); | |
1706 | qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE); | |
c2cd627d | 1707 | /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */ |
a904c911 | 1708 | qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK); |
0987d735 | 1709 | ram_block_notify_add(new_block->host, new_block->max_length); |
e1c57ab8 | 1710 | } |
94a6b54f | 1711 | } |
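/* A standalone sketch of the size-ordered insert used in ram_block_add(),
 * over a plain singly linked list (illustrative, without the RCU variants):
 */
#include <stddef.h>

typedef struct Node { unsigned long max_length; struct Node *next; } Node;

/* Keep the list sorted from biggest to smallest max_length. */
static void insert_sorted(Node **head, Node *n)
{
    Node *cur = *head, *prev = NULL;

    while (cur && cur->max_length >= n->max_length) {
        prev = cur;              /* remember the tail position */
        cur = cur->next;
    }
    n->next = cur;
    if (prev) {
        prev->next = n;          /* insert before the first smaller block */
    } else {
        *head = n;               /* new biggest block, or empty list */
    }
}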
e9a1ab19 | 1712 | |
0b183fc8 | 1713 | #ifdef __linux__ |
528f46af FZ |
1714 | RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr, |
1715 | bool share, const char *mem_path, | |
1716 | Error **errp) | |
e1c57ab8 PB |
1717 | { |
1718 | RAMBlock *new_block; | |
ef701d7b | 1719 | Error *local_err = NULL; |
e1c57ab8 PB |
1720 | |
1721 | if (xen_enabled()) { | |
7f56e740 | 1722 | error_setg(errp, "-mem-path not supported with Xen"); |
528f46af | 1723 | return NULL; |
e1c57ab8 PB |
1724 | } |
1725 | ||
1726 | if (phys_mem_alloc != qemu_anon_ram_alloc) { | |
1727 | /* | |
1728 | * file_ram_alloc() needs to allocate just like | |
1729 | * phys_mem_alloc, but we haven't bothered to provide | |
1730 | * a hook there. | |
1731 | */ | |
7f56e740 PB |
1732 | error_setg(errp, |
1733 | "-mem-path not supported with this accelerator"); | |
528f46af | 1734 | return NULL; |
e1c57ab8 PB |
1735 | } |
1736 | ||
4ed023ce | 1737 | size = HOST_PAGE_ALIGN(size); |
e1c57ab8 PB |
1738 | new_block = g_malloc0(sizeof(*new_block)); |
1739 | new_block->mr = mr; | |
9b8424d5 MT |
1740 | new_block->used_length = size; |
1741 | new_block->max_length = size; | |
dbcb8981 | 1742 | new_block->flags = share ? RAM_SHARED : 0; |
7f56e740 PB |
1743 | new_block->host = file_ram_alloc(new_block, size, |
1744 | mem_path, errp); | |
1745 | if (!new_block->host) { | |
1746 | g_free(new_block); | |
528f46af | 1747 | return NULL; |
7f56e740 PB |
1748 | } |
1749 | ||
528f46af | 1750 | ram_block_add(new_block, &local_err); |
ef701d7b HT |
1751 | if (local_err) { |
1752 | g_free(new_block); | |
1753 | error_propagate(errp, local_err); | |
528f46af | 1754 | return NULL; |
ef701d7b | 1755 | } |
528f46af | 1756 | return new_block; |
e1c57ab8 | 1757 | } |
0b183fc8 | 1758 | #endif |
e1c57ab8 | 1759 | |
62be4e3a | 1760 | static |
528f46af FZ |
1761 | RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size, |
1762 | void (*resized)(const char*, | |
1763 | uint64_t length, | |
1764 | void *host), | |
1765 | void *host, bool resizeable, | |
1766 | MemoryRegion *mr, Error **errp) | |
e1c57ab8 PB |
1767 | { |
1768 | RAMBlock *new_block; | |
ef701d7b | 1769 | Error *local_err = NULL; |
e1c57ab8 | 1770 | |
4ed023ce DDAG |
1771 | size = HOST_PAGE_ALIGN(size); |
1772 | max_size = HOST_PAGE_ALIGN(max_size); | |
e1c57ab8 PB |
1773 | new_block = g_malloc0(sizeof(*new_block)); |
1774 | new_block->mr = mr; | |
62be4e3a | 1775 | new_block->resized = resized; |
9b8424d5 MT |
1776 | new_block->used_length = size; |
1777 | new_block->max_length = max_size; | |
62be4e3a | 1778 | assert(max_size >= size); |
e1c57ab8 | 1779 | new_block->fd = -1; |
863e9621 | 1780 | new_block->page_size = getpagesize(); |
e1c57ab8 PB |
1781 | new_block->host = host; |
1782 | if (host) { | |
7bd4f430 | 1783 | new_block->flags |= RAM_PREALLOC; |
e1c57ab8 | 1784 | } |
62be4e3a MT |
1785 | if (resizeable) { |
1786 | new_block->flags |= RAM_RESIZEABLE; | |
1787 | } | |
528f46af | 1788 | ram_block_add(new_block, &local_err); |
ef701d7b HT |
1789 | if (local_err) { |
1790 | g_free(new_block); | |
1791 | error_propagate(errp, local_err); | |
528f46af | 1792 | return NULL; |
ef701d7b | 1793 | } |
528f46af | 1794 | return new_block; |
e1c57ab8 PB |
1795 | } |
1796 | ||
528f46af | 1797 | RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, |
62be4e3a MT |
1798 | MemoryRegion *mr, Error **errp) |
1799 | { | |
1800 | return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp); | |
1801 | } | |
1802 | ||
528f46af | 1803 | RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp) |
6977dfe6 | 1804 | { |
62be4e3a MT |
1805 | return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp); |
1806 | } | |
1807 | ||
528f46af | 1808 | RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz, |
62be4e3a MT |
1809 | void (*resized)(const char*, |
1810 | uint64_t length, | |
1811 | void *host), | |
1812 | MemoryRegion *mr, Error **errp) | |
1813 | { | |
1814 | return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp); | |
6977dfe6 YT |
1815 | } |
1816 | ||
43771539 PB |
1817 | static void reclaim_ramblock(RAMBlock *block) |
1818 | { | |
1819 | if (block->flags & RAM_PREALLOC) { | |
1820 | ; | |
1821 | } else if (xen_enabled()) { | |
1822 | xen_invalidate_map_cache_entry(block->host); | |
1823 | #ifndef _WIN32 | |
1824 | } else if (block->fd >= 0) { | |
2f3a2bb1 | 1825 | qemu_ram_munmap(block->host, block->max_length); |
43771539 PB |
1826 | close(block->fd); |
1827 | #endif | |
1828 | } else { | |
1829 | qemu_anon_ram_free(block->host, block->max_length); | |
1830 | } | |
1831 | g_free(block); | |
1832 | } | |
1833 | ||
f1060c55 | 1834 | void qemu_ram_free(RAMBlock *block) |
e9a1ab19 | 1835 | { |
85bc2a15 MAL |
1836 | if (!block) { |
1837 | return; | |
1838 | } | |
1839 | ||
0987d735 PB |
1840 | if (block->host) { |
1841 | ram_block_notify_remove(block->host, block->max_length); | |
1842 | } | |
1843 | ||
b2a8658e | 1844 | qemu_mutex_lock_ramlist(); |
f1060c55 FZ |
1845 | QLIST_REMOVE_RCU(block, next); |
1846 | ram_list.mru_block = NULL; | |
1847 | /* Write list before version */ | |
1848 | smp_wmb(); | |
1849 | ram_list.version++; | |
1850 | call_rcu(block, reclaim_ramblock, rcu); | |
b2a8658e | 1851 | qemu_mutex_unlock_ramlist(); |
e9a1ab19 FB |
1852 | } |
1853 | ||
cd19cfa2 HY |
1854 | #ifndef _WIN32 |
1855 | void qemu_ram_remap(ram_addr_t addr, ram_addr_t length) | |
1856 | { | |
1857 | RAMBlock *block; | |
1858 | ram_addr_t offset; | |
1859 | int flags; | |
1860 | void *area, *vaddr; | |
1861 | ||
0dc3f44a | 1862 | QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { |
cd19cfa2 | 1863 | offset = addr - block->offset; |
9b8424d5 | 1864 | if (offset < block->max_length) { |
1240be24 | 1865 | vaddr = ramblock_ptr(block, offset); |
7bd4f430 | 1866 | if (block->flags & RAM_PREALLOC) { |
cd19cfa2 | 1867 | ; |
dfeaf2ab MA |
1868 | } else if (xen_enabled()) { |
1869 | abort(); | |
cd19cfa2 HY |
1870 | } else { |
1871 | flags = MAP_FIXED; | |
3435f395 | 1872 | if (block->fd >= 0) { |
dbcb8981 PB |
1873 | flags |= (block->flags & RAM_SHARED ? |
1874 | MAP_SHARED : MAP_PRIVATE); | |
3435f395 MA |
1875 | area = mmap(vaddr, length, PROT_READ | PROT_WRITE, |
1876 | flags, block->fd, offset); | |
cd19cfa2 | 1877 | } else { |
2eb9fbaa MA |
1878 | /* |
1879 | * Remap needs to match alloc. Accelerators that | |
1880 | * set phys_mem_alloc never remap. If they did, | |
1881 | * we'd need a remap hook here. | |
1882 | */ | |
1883 | assert(phys_mem_alloc == qemu_anon_ram_alloc); | |
1884 | ||
cd19cfa2 HY |
1885 | flags |= MAP_PRIVATE | MAP_ANONYMOUS; |
1886 | area = mmap(vaddr, length, PROT_READ | PROT_WRITE, | |
1887 | flags, -1, 0); | |
cd19cfa2 HY |
1888 | } |
1889 | if (area != vaddr) { | |
f15fbc4b AP |
1890 | fprintf(stderr, "Could not remap addr: " |
1891 | RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n", | |
cd19cfa2 HY |
1892 | length, addr); |
1893 | exit(1); | |
1894 | } | |
8490fc78 | 1895 | memory_try_enable_merging(vaddr, length); |
ddb97f1d | 1896 | qemu_ram_setup_dump(vaddr, length); |
cd19cfa2 | 1897 | } |
cd19cfa2 HY |
1898 | } |
1899 | } | |
1900 | } | |
1901 | #endif /* !_WIN32 */ | |
1902 | ||
1b5ec234 | 1903 | /* Return a host pointer to ram allocated with qemu_ram_alloc. |
ae3a7047 MD |
1904 | * This should not be used for general purpose DMA. Use address_space_map |
1905 | * or address_space_rw instead. For local memory (e.g. video ram) that the | |
1906 | * device owns, use memory_region_get_ram_ptr. | |
0dc3f44a | 1907 | * |
49b24afc | 1908 | * Called within RCU critical section. |
1b5ec234 | 1909 | */ |
0878d0e1 | 1910 | void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr) |
1b5ec234 | 1911 | { |
3655cb9c GA |
1912 | RAMBlock *block = ram_block; |
1913 | ||
1914 | if (block == NULL) { | |
1915 | block = qemu_get_ram_block(addr); | |
0878d0e1 | 1916 | addr -= block->offset; |
3655cb9c | 1917 | } |
ae3a7047 MD |
1918 | |
1919 | if (xen_enabled() && block->host == NULL) { | |
0d6d3c87 PB |
1920 | /* We need to check if the requested address is in the RAM |
1921 | * because we don't want to map the entire memory in QEMU. | |
1922 | * In that case just map until the end of the page. | |
1923 | */ | |
1924 | if (block->offset == 0) { | |
49b24afc | 1925 | return xen_map_cache(addr, 0, 0); |
0d6d3c87 | 1926 | } |
ae3a7047 MD |
1927 | |
1928 | block->host = xen_map_cache(block->offset, block->max_length, 1); | |
0d6d3c87 | 1929 | } |
0878d0e1 | 1930 | return ramblock_ptr(block, addr); |
dc828ca1 PB |
1931 | } |
1932 | ||
0878d0e1 | 1933 | /* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr |
ae3a7047 | 1934 | * but takes a size argument. |
0dc3f44a | 1935 | * |
e81bcda5 | 1936 | * Called within RCU critical section. |
ae3a7047 | 1937 | */ |
3655cb9c GA |
1938 | static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr, |
1939 | hwaddr *size) | |
38bee5dc | 1940 | { |
3655cb9c | 1941 | RAMBlock *block = ram_block; |
8ab934f9 SS |
1942 | if (*size == 0) { |
1943 | return NULL; | |
1944 | } | |
e81bcda5 | 1945 | |
3655cb9c GA |
1946 | if (block == NULL) { |
1947 | block = qemu_get_ram_block(addr); | |
0878d0e1 | 1948 | addr -= block->offset; |
3655cb9c | 1949 | } |
0878d0e1 | 1950 | *size = MIN(*size, block->max_length - addr); |
e81bcda5 PB |
1951 | |
1952 | if (xen_enabled() && block->host == NULL) { | |
1953 | /* We need to check if the requested address is in the RAM | |
1954 | * because we don't want to map the entire memory in QEMU. | |
1955 | * In that case just map the requested area. | |
1956 | */ | |
1957 | if (block->offset == 0) { | |
1958 | return xen_map_cache(addr, *size, 1); | |
38bee5dc SS |
1959 | } |
1960 | ||
e81bcda5 | 1961 | block->host = xen_map_cache(block->offset, block->max_length, 1); |
38bee5dc | 1962 | } |
e81bcda5 | 1963 | |
0878d0e1 | 1964 | return ramblock_ptr(block, addr); |
38bee5dc SS |
1965 | } |
1966 | ||
422148d3 DDAG |
1967 | /* |
1968 | * Translates a host ptr back to a RAMBlock and an offset in that
1969 | * RAMBlock.
1970 | *
1971 | * ptr: Host pointer to look up
1972 | * round_offset: If true round the result offset down to a page boundary
1973 | * *offset: set to result offset within the RAMBlock
1975 | * | |
1976 | * Returns: RAMBlock (or NULL if not found) | |
ae3a7047 MD |
1977 | * |
1978 | * By the time this function returns, the returned pointer is not protected | |
1979 | * by RCU anymore. If the caller is not within an RCU critical section and | |
1980 | * does not hold the iothread lock, it must have other means of protecting the | |
1981 | * pointer, such as a reference to the region that includes the incoming | |
1982 | * ram_addr_t. | |
1983 | */ | |
422148d3 | 1984 | RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset, |
422148d3 | 1985 | ram_addr_t *offset) |
5579c7f3 | 1986 | { |
94a6b54f PB |
1987 | RAMBlock *block; |
1988 | uint8_t *host = ptr; | |
1989 | ||
868bb33f | 1990 | if (xen_enabled()) { |
f615f396 | 1991 | ram_addr_t ram_addr; |
0dc3f44a | 1992 | rcu_read_lock(); |
f615f396 PB |
1993 | ram_addr = xen_ram_addr_from_mapcache(ptr); |
1994 | block = qemu_get_ram_block(ram_addr); | |
422148d3 | 1995 | if (block) { |
d6b6aec4 | 1996 | *offset = ram_addr - block->offset; |
422148d3 | 1997 | } |
0dc3f44a | 1998 | rcu_read_unlock(); |
422148d3 | 1999 | return block; |
712c2b41 SS |
2000 | } |
2001 | ||
0dc3f44a MD |
2002 | rcu_read_lock(); |
2003 | block = atomic_rcu_read(&ram_list.mru_block); | |
9b8424d5 | 2004 | if (block && block->host && host - block->host < block->max_length) { |
23887b79 PB |
2005 | goto found; |
2006 | } | |
2007 | ||
0dc3f44a | 2008 | QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { |
432d268c JN |
2009 | /* This case happens when the block is not mapped. */
2010 | if (block->host == NULL) { | |
2011 | continue; | |
2012 | } | |
9b8424d5 | 2013 | if (host - block->host < block->max_length) { |
23887b79 | 2014 | goto found; |
f471a17e | 2015 | } |
94a6b54f | 2016 | } |
432d268c | 2017 | |
0dc3f44a | 2018 | rcu_read_unlock(); |
1b5ec234 | 2019 | return NULL; |
23887b79 PB |
2020 | |
2021 | found: | |
422148d3 DDAG |
2022 | *offset = (host - block->host); |
2023 | if (round_offset) { | |
2024 | *offset &= TARGET_PAGE_MASK; | |
2025 | } | |
0dc3f44a | 2026 | rcu_read_unlock(); |
422148d3 DDAG |
2027 | return block; |
2028 | } | |
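/* A hedged usage sketch (host_ptr is an illustrative name): map a host
 * pointer back to its RAMBlock and page-rounded offset, then use the
 * accessors defined earlier in this file.
 */
#if 0
ram_addr_t offset;
RAMBlock *rb = qemu_ram_block_from_host(host_ptr, true, &offset);

if (rb) {
    printf("%s: offset 0x%" PRIx64 ", page size %zu\n",
           qemu_ram_get_idstr(rb), (uint64_t)offset, qemu_ram_pagesize(rb));
}
#endif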
2029 | ||
e3dd7493 DDAG |
2030 | /* |
2031 | * Finds the named RAMBlock | |
2032 | * | |
2033 | * name: The name of the RAMBlock to find
2034 | * | |
2035 | * Returns: RAMBlock (or NULL if not found) | |
2036 | */ | |
2037 | RAMBlock *qemu_ram_block_by_name(const char *name) | |
2038 | { | |
2039 | RAMBlock *block; | |
2040 | ||
2041 | QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { | |
2042 | if (!strcmp(name, block->idstr)) { | |
2043 | return block; | |
2044 | } | |
2045 | } | |
2046 | ||
2047 | return NULL; | |
2048 | } | |
2049 | ||
422148d3 DDAG |
2050 | /* Some of the softmmu routines need to translate from a host pointer |
2051 | (typically a TLB entry) back to a ram offset. */ | |
07bdaa41 | 2052 | ram_addr_t qemu_ram_addr_from_host(void *ptr) |
422148d3 DDAG |
2053 | { |
2054 | RAMBlock *block; | |
f615f396 | 2055 | ram_addr_t offset; |
422148d3 | 2056 | |
f615f396 | 2057 | block = qemu_ram_block_from_host(ptr, false, &offset); |
422148d3 | 2058 | if (!block) { |
07bdaa41 | 2059 | return RAM_ADDR_INVALID; |
422148d3 DDAG |
2060 | } |
2061 | ||
07bdaa41 | 2062 | return block->offset + offset; |
e890261f | 2063 | } |
f471a17e | 2064 | |
49b24afc | 2065 | /* Called within RCU critical section. */ |
a8170e5e | 2066 | static void notdirty_mem_write(void *opaque, hwaddr ram_addr, |
0e0df1e2 | 2067 | uint64_t val, unsigned size) |
9fa3e853 | 2068 | { |
ba051fb5 AB |
2069 | bool locked = false; |
2070 | ||
52159192 | 2071 | if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { |
ba051fb5 AB |
2072 | locked = true; |
2073 | tb_lock(); | |
0e0df1e2 | 2074 | tb_invalidate_phys_page_fast(ram_addr, size); |
3a7d929e | 2075 | } |
0e0df1e2 AK |
2076 | switch (size) { |
2077 | case 1: | |
0878d0e1 | 2078 | stb_p(qemu_map_ram_ptr(NULL, ram_addr), val); |
0e0df1e2 AK |
2079 | break; |
2080 | case 2: | |
0878d0e1 | 2081 | stw_p(qemu_map_ram_ptr(NULL, ram_addr), val); |
0e0df1e2 AK |
2082 | break; |
2083 | case 4: | |
0878d0e1 | 2084 | stl_p(qemu_map_ram_ptr(NULL, ram_addr), val); |
0e0df1e2 AK |
2085 | break; |
2086 | default: | |
2087 | abort(); | |
3a7d929e | 2088 | } |
ba051fb5 AB |
2089 | |
2090 | if (locked) { | |
2091 | tb_unlock(); | |
2092 | } | |
2093 | ||
58d2707e PB |
2094 | /* Set both VGA and migration bits for simplicity and to remove |
2095 | * the notdirty callback faster. | |
2096 | */ | |
2097 | cpu_physical_memory_set_dirty_range(ram_addr, size, | |
2098 | DIRTY_CLIENTS_NOCODE); | |
f23db169 FB |
2099 | /* we remove the notdirty callback only if the code has been |
2100 | flushed */ | |
a2cd8c85 | 2101 | if (!cpu_physical_memory_is_clean(ram_addr)) { |
bcae01e4 | 2102 | tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr); |
4917cf44 | 2103 | } |
9fa3e853 FB |
2104 | } |
2105 | ||
b018ddf6 PB |
2106 | static bool notdirty_mem_accepts(void *opaque, hwaddr addr, |
2107 | unsigned size, bool is_write) | |
2108 | { | |
2109 | return is_write; | |
2110 | } | |
2111 | ||
0e0df1e2 | 2112 | static const MemoryRegionOps notdirty_mem_ops = { |
0e0df1e2 | 2113 | .write = notdirty_mem_write, |
b018ddf6 | 2114 | .valid.accepts = notdirty_mem_accepts, |
0e0df1e2 | 2115 | .endianness = DEVICE_NATIVE_ENDIAN, |
1ccde1cb FB |
2116 | }; |
2117 | ||
0f459d16 | 2118 | /* Generate a debug exception if a watchpoint has been hit. */ |
66b9b43c | 2119 | static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) |
0f459d16 | 2120 | { |
93afeade | 2121 | CPUState *cpu = current_cpu; |
568496c0 | 2122 | CPUClass *cc = CPU_GET_CLASS(cpu); |
93afeade | 2123 | CPUArchState *env = cpu->env_ptr; |
06d55cc1 | 2124 | target_ulong pc, cs_base; |
0f459d16 | 2125 | target_ulong vaddr; |
a1d1bb31 | 2126 | CPUWatchpoint *wp; |
89fee74a | 2127 | uint32_t cpu_flags; |
0f459d16 | 2128 | |
ff4700b0 | 2129 | if (cpu->watchpoint_hit) { |
06d55cc1 AL |
2130 | /* We re-entered the check after replacing the TB. Now raise |
2131 | * the debug interrupt so that it will trigger after the
2132 | * current instruction. */ | |
93afeade | 2133 | cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG); |
06d55cc1 AL |
2134 | return; |
2135 | } | |
93afeade | 2136 | vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset; |
40612000 | 2137 | vaddr = cc->adjust_watchpoint_address(cpu, vaddr, len); |
ff4700b0 | 2138 | QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { |
05068c0d PM |
2139 | if (cpu_watchpoint_address_matches(wp, vaddr, len) |
2140 | && (wp->flags & flags)) { | |
08225676 PM |
2141 | if (flags == BP_MEM_READ) { |
2142 | wp->flags |= BP_WATCHPOINT_HIT_READ; | |
2143 | } else { | |
2144 | wp->flags |= BP_WATCHPOINT_HIT_WRITE; | |
2145 | } | |
2146 | wp->hitaddr = vaddr; | |
66b9b43c | 2147 | wp->hitattrs = attrs; |
ff4700b0 | 2148 | if (!cpu->watchpoint_hit) { |
568496c0 SF |
2149 | if (wp->flags & BP_CPU && |
2150 | !cc->debug_check_watchpoint(cpu, wp)) { | |
2151 | wp->flags &= ~BP_WATCHPOINT_HIT; | |
2152 | continue; | |
2153 | } | |
ff4700b0 | 2154 | cpu->watchpoint_hit = wp; |
a5e99826 | 2155 | |
8d04fb55 JK |
2156 | /* Both tb_lock and iothread_mutex will be reset when |
2157 | * cpu_loop_exit or cpu_loop_exit_noexc longjmp | |
2158 | * back into the cpu_exec main loop. | |
a5e99826 FK |
2159 | */ |
2160 | tb_lock(); | |
239c51a5 | 2161 | tb_check_watchpoint(cpu); |
6e140f28 | 2162 | if (wp->flags & BP_STOP_BEFORE_ACCESS) { |
27103424 | 2163 | cpu->exception_index = EXCP_DEBUG; |
5638d180 | 2164 | cpu_loop_exit(cpu); |
6e140f28 AL |
2165 | } else { |
2166 | cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags); | |
648f034c | 2167 | tb_gen_code(cpu, pc, cs_base, cpu_flags, 1); |
6886b980 | 2168 | cpu_loop_exit_noexc(cpu); |
6e140f28 | 2169 | } |
06d55cc1 | 2170 | } |
6e140f28 AL |
2171 | } else { |
2172 | wp->flags &= ~BP_WATCHPOINT_HIT; | |
0f459d16 PB |
2173 | } |
2174 | } | |
2175 | } | |
2176 | ||
6658ffb8 PB |
2177 | /* Watchpoint access routines. Watchpoints are inserted using TLB tricks, |
2178 | so these check for a hit then pass through to the normal out-of-line | |
2179 | phys routines. */ | |
66b9b43c PM |
2180 | static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata, |
2181 | unsigned size, MemTxAttrs attrs) | |
6658ffb8 | 2182 | { |
66b9b43c PM |
2183 | MemTxResult res; |
2184 | uint64_t data; | |
79ed0416 PM |
2185 | int asidx = cpu_asidx_from_attrs(current_cpu, attrs); |
2186 | AddressSpace *as = current_cpu->cpu_ases[asidx].as; | |
66b9b43c PM |
2187 | |
2188 | check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ); | |
1ec9b909 | 2189 | switch (size) { |
66b9b43c | 2190 | case 1: |
79ed0416 | 2191 | data = address_space_ldub(as, addr, attrs, &res); |
66b9b43c PM |
2192 | break; |
2193 | case 2: | |
79ed0416 | 2194 | data = address_space_lduw(as, addr, attrs, &res); |
66b9b43c PM |
2195 | break; |
2196 | case 4: | |
79ed0416 | 2197 | data = address_space_ldl(as, addr, attrs, &res); |
66b9b43c | 2198 | break; |
1ec9b909 AK |
2199 | default: abort(); |
2200 | } | |
66b9b43c PM |
2201 | *pdata = data; |
2202 | return res; | |
6658ffb8 PB |
2203 | } |
2204 | ||
66b9b43c PM |
2205 | static MemTxResult watch_mem_write(void *opaque, hwaddr addr, |
2206 | uint64_t val, unsigned size, | |
2207 | MemTxAttrs attrs) | |
6658ffb8 | 2208 | { |
66b9b43c | 2209 | MemTxResult res; |
79ed0416 PM |
2210 | int asidx = cpu_asidx_from_attrs(current_cpu, attrs); |
2211 | AddressSpace *as = current_cpu->cpu_ases[asidx].as; | |
66b9b43c PM |
2212 | |
2213 | check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE); | |
1ec9b909 | 2214 | switch (size) { |
67364150 | 2215 | case 1: |
79ed0416 | 2216 | address_space_stb(as, addr, val, attrs, &res); |
67364150 MF |
2217 | break; |
2218 | case 2: | |
79ed0416 | 2219 | address_space_stw(as, addr, val, attrs, &res); |
67364150 MF |
2220 | break; |
2221 | case 4: | |
79ed0416 | 2222 | address_space_stl(as, addr, val, attrs, &res); |
67364150 | 2223 | break; |
1ec9b909 AK |
2224 | default: abort(); |
2225 | } | |
66b9b43c | 2226 | return res; |
6658ffb8 PB |
2227 | } |
2228 | ||
1ec9b909 | 2229 | static const MemoryRegionOps watch_mem_ops = { |
66b9b43c PM |
2230 | .read_with_attrs = watch_mem_read, |
2231 | .write_with_attrs = watch_mem_write, | |
1ec9b909 | 2232 | .endianness = DEVICE_NATIVE_ENDIAN, |
6658ffb8 | 2233 | }; |
6658ffb8 | 2234 | |
f25a49e0 PM |
2235 | static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data, |
2236 | unsigned len, MemTxAttrs attrs) | |
db7b5426 | 2237 | { |
acc9d80b | 2238 | subpage_t *subpage = opaque; |
ff6cff75 | 2239 | uint8_t buf[8]; |
5c9eb028 | 2240 | MemTxResult res; |
791af8c8 | 2241 | |
db7b5426 | 2242 | #if defined(DEBUG_SUBPAGE) |
016e9d62 | 2243 | printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__, |
acc9d80b | 2244 | subpage, len, addr); |
db7b5426 | 2245 | #endif |
5c9eb028 PM |
2246 | res = address_space_read(subpage->as, addr + subpage->base, |
2247 | attrs, buf, len); | |
2248 | if (res) { | |
2249 | return res; | |
f25a49e0 | 2250 | } |
acc9d80b JK |
2251 | switch (len) { |
2252 | case 1: | |
f25a49e0 PM |
2253 | *data = ldub_p(buf); |
2254 | return MEMTX_OK; | |
acc9d80b | 2255 | case 2: |
f25a49e0 PM |
2256 | *data = lduw_p(buf); |
2257 | return MEMTX_OK; | |
acc9d80b | 2258 | case 4: |
f25a49e0 PM |
2259 | *data = ldl_p(buf); |
2260 | return MEMTX_OK; | |
ff6cff75 | 2261 | case 8: |
f25a49e0 PM |
2262 | *data = ldq_p(buf); |
2263 | return MEMTX_OK; | |
acc9d80b JK |
2264 | default: |
2265 | abort(); | |
2266 | } | |
db7b5426 BS |
2267 | } |
2268 | ||
f25a49e0 PM |
2269 | static MemTxResult subpage_write(void *opaque, hwaddr addr, |
2270 | uint64_t value, unsigned len, MemTxAttrs attrs) | |
db7b5426 | 2271 | { |
acc9d80b | 2272 | subpage_t *subpage = opaque; |
ff6cff75 | 2273 | uint8_t buf[8]; |
acc9d80b | 2274 | |
db7b5426 | 2275 | #if defined(DEBUG_SUBPAGE) |
016e9d62 | 2276 | printf("%s: subpage %p len %u addr " TARGET_FMT_plx |
acc9d80b JK |
2277 | " value %"PRIx64"\n", |
2278 | __func__, subpage, len, addr, value); | |
db7b5426 | 2279 | #endif |
acc9d80b JK |
2280 | switch (len) { |
2281 | case 1: | |
2282 | stb_p(buf, value); | |
2283 | break; | |
2284 | case 2: | |
2285 | stw_p(buf, value); | |
2286 | break; | |
2287 | case 4: | |
2288 | stl_p(buf, value); | |
2289 | break; | |
ff6cff75 PB |
2290 | case 8: |
2291 | stq_p(buf, value); | |
2292 | break; | |
acc9d80b JK |
2293 | default: |
2294 | abort(); | |
2295 | } | |
5c9eb028 PM |
2296 | return address_space_write(subpage->as, addr + subpage->base, |
2297 | attrs, buf, len); | |
db7b5426 BS |
2298 | } |
2299 | ||
c353e4cc | 2300 | static bool subpage_accepts(void *opaque, hwaddr addr, |
016e9d62 | 2301 | unsigned len, bool is_write) |
c353e4cc | 2302 | { |
acc9d80b | 2303 | subpage_t *subpage = opaque; |
c353e4cc | 2304 | #if defined(DEBUG_SUBPAGE) |
016e9d62 | 2305 | printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n", |
acc9d80b | 2306 | __func__, subpage, is_write ? 'w' : 'r', len, addr); |
c353e4cc PB |
2307 | #endif |
2308 | ||
acc9d80b | 2309 | return address_space_access_valid(subpage->as, addr + subpage->base, |
016e9d62 | 2310 | len, is_write); |
c353e4cc PB |
2311 | } |
2312 | ||
70c68e44 | 2313 | static const MemoryRegionOps subpage_ops = { |
f25a49e0 PM |
2314 | .read_with_attrs = subpage_read, |
2315 | .write_with_attrs = subpage_write, | |
ff6cff75 PB |
2316 | .impl.min_access_size = 1, |
2317 | .impl.max_access_size = 8, | |
2318 | .valid.min_access_size = 1, | |
2319 | .valid.max_access_size = 8, | |
c353e4cc | 2320 | .valid.accepts = subpage_accepts, |
70c68e44 | 2321 | .endianness = DEVICE_NATIVE_ENDIAN, |
db7b5426 BS |
2322 | }; |
2323 | ||
c227f099 | 2324 | static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, |
5312bd8b | 2325 | uint16_t section) |
db7b5426 BS |
2326 | { |
2327 | int idx, eidx; | |
2328 | ||
2329 | if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) | |
2330 | return -1; | |
2331 | idx = SUBPAGE_IDX(start); | |
2332 | eidx = SUBPAGE_IDX(end); | |
2333 | #if defined(DEBUG_SUBPAGE) | |
016e9d62 AK |
2334 | printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", |
2335 | __func__, mmio, start, end, idx, eidx, section); | |
db7b5426 | 2336 | #endif |
db7b5426 | 2337 | for (; idx <= eidx; idx++) { |
5312bd8b | 2338 | mmio->sub_section[idx] = section; |
db7b5426 BS |
2339 | } |
2340 | ||
2341 | return 0; | |
2342 | } | |
2343 | ||
acc9d80b | 2344 | static subpage_t *subpage_init(AddressSpace *as, hwaddr base) |
db7b5426 | 2345 | { |
c227f099 | 2346 | subpage_t *mmio; |
db7b5426 | 2347 | |
2615fabd | 2348 | mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t)); |
acc9d80b | 2349 | mmio->as = as; |
1eec614b | 2350 | mmio->base = base; |
2c9b15ca | 2351 | memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio, |
b4fefef9 | 2352 | NULL, TARGET_PAGE_SIZE); |
b3b00c78 | 2353 | mmio->iomem.subpage = true; |
db7b5426 | 2354 | #if defined(DEBUG_SUBPAGE) |
016e9d62 AK |
2355 | printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__, |
2356 | mmio, base, TARGET_PAGE_SIZE); | |
db7b5426 | 2357 | #endif |
b41aac4f | 2358 | subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED); |
db7b5426 BS |
2359 | |
2360 | return mmio; | |
2361 | } | |
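/* A hedged sketch of how these two helpers combine (section ids are
 * illustrative): a subpage splits one target page into byte-granular
 * regions, so two devices smaller than a page can share it.
 */
#if 0
subpage_t *sp = subpage_init(as, base);

subpage_register(sp, 0x000, 0x0ff, uart_section);  /* first 256 bytes */
subpage_register(sp, 0x100, 0x1ff, rtc_section);   /* next 256 bytes  */
/* the rest of the page stays PHYS_SECTION_UNASSIGNED from subpage_init() */
#endif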
2362 | ||
a656e22f PC |
2363 | static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as, |
2364 | MemoryRegion *mr) | |
5312bd8b | 2365 | { |
a656e22f | 2366 | assert(as); |
5312bd8b | 2367 | MemoryRegionSection section = { |
a656e22f | 2368 | .address_space = as, |
5312bd8b AK |
2369 | .mr = mr, |
2370 | .offset_within_address_space = 0, | |
2371 | .offset_within_region = 0, | |
052e87b0 | 2372 | .size = int128_2_64(), |
5312bd8b AK |
2373 | }; |
2374 | ||
53cb28cb | 2375 | return phys_section_add(map, §ion); |
5312bd8b AK |
2376 | } |
2377 | ||
a54c87b6 | 2378 | MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs) |
aa102231 | 2379 | { |
a54c87b6 PM |
2380 | int asidx = cpu_asidx_from_attrs(cpu, attrs); |
2381 | CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; | |
32857f4d | 2382 | AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch); |
79e2b9ae | 2383 | MemoryRegionSection *sections = d->map.sections; |
9d82b5a7 PB |
2384 | |
2385 | return sections[index & ~TARGET_PAGE_MASK].mr; | |
aa102231 AK |
2386 | } |
2387 | ||
e9179ce1 AK |
2388 | static void io_mem_init(void) |
2389 | { | |
1f6245e5 | 2390 | memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX); |
2c9b15ca | 2391 | memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL, |
1f6245e5 | 2392 | NULL, UINT64_MAX); |
8d04fb55 JK |
2393 | |
2394 | /* io_mem_notdirty calls tb_invalidate_phys_page_fast, | |
2395 | * which can be called without the iothread mutex. | |
2396 | */ | |
2c9b15ca | 2397 | memory_region_init_io(&io_mem_notdirty, NULL, ¬dirty_mem_ops, NULL, |
1f6245e5 | 2398 | NULL, UINT64_MAX); |
8d04fb55 JK |
2399 | memory_region_clear_global_locking(&io_mem_notdirty); |
2400 | ||
2c9b15ca | 2401 | memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL, |
1f6245e5 | 2402 | NULL, UINT64_MAX); |
e9179ce1 AK |
2403 | } |
2404 | ||
ac1970fb | 2405 | static void mem_begin(MemoryListener *listener) |
00752703 PB |
2406 | { |
2407 | AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener); | |
53cb28cb MA |
2408 | AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1); |
2409 | uint16_t n; | |
2410 | ||
a656e22f | 2411 | n = dummy_section(&d->map, as, &io_mem_unassigned); |
53cb28cb | 2412 | assert(n == PHYS_SECTION_UNASSIGNED); |
a656e22f | 2413 | n = dummy_section(&d->map, as, &io_mem_notdirty); |
53cb28cb | 2414 | assert(n == PHYS_SECTION_NOTDIRTY); |
a656e22f | 2415 | n = dummy_section(&d->map, as, &io_mem_rom); |
53cb28cb | 2416 | assert(n == PHYS_SECTION_ROM); |
a656e22f | 2417 | n = dummy_section(&d->map, as, &io_mem_watch); |
53cb28cb | 2418 | assert(n == PHYS_SECTION_WATCH); |
00752703 | 2419 | |
9736e55b | 2420 | d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; |
00752703 PB |
2421 | d->as = as; |
2422 | as->next_dispatch = d; | |
2423 | } | |
2424 | ||
79e2b9ae PB |
2425 | static void address_space_dispatch_free(AddressSpaceDispatch *d) |
2426 | { | |
2427 | phys_sections_free(&d->map); | |
2428 | g_free(d); | |
2429 | } | |
2430 | ||
00752703 | 2431 | static void mem_commit(MemoryListener *listener) |
ac1970fb | 2432 | { |
89ae337a | 2433 | AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener); |
0475d94f PB |
2434 | AddressSpaceDispatch *cur = as->dispatch; |
2435 | AddressSpaceDispatch *next = as->next_dispatch; | |
2436 | ||
53cb28cb | 2437 | phys_page_compact_all(next, next->map.nodes_nb); |
b35ba30f | 2438 | |
79e2b9ae | 2439 | atomic_rcu_set(&as->dispatch, next); |
53cb28cb | 2440 | if (cur) { |
79e2b9ae | 2441 | call_rcu(cur, address_space_dispatch_free, rcu); |
53cb28cb | 2442 | } |
9affd6fc PB |
2443 | } |
2444 | ||
1d71148e | 2445 | static void tcg_commit(MemoryListener *listener) |
50c1e149 | 2446 | { |
32857f4d PM |
2447 | CPUAddressSpace *cpuas; |
2448 | AddressSpaceDispatch *d; | |
117712c3 AK |
2449 | |
2450 | /* since each CPU stores ram addresses in its TLB cache, we must | |
2451 | reset the modified entries */ | |
32857f4d PM |
2452 | cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); |
2453 | cpu_reloading_memory_map(); | |
2454 | /* The CPU and TLB are protected by the iothread lock. | |
2455 | * We reload the dispatch pointer now because cpu_reloading_memory_map() | |
2456 | * may have split the RCU critical section. | |
2457 | */ | |
2458 | d = atomic_rcu_read(&cpuas->as->dispatch); | |
f35e44e7 | 2459 | atomic_rcu_set(&cpuas->memory_dispatch, d); |
d10eb08f | 2460 | tlb_flush(cpuas->cpu); |
50c1e149 AK |
2461 | } |
2462 | ||
ac1970fb AK |
2463 | void address_space_init_dispatch(AddressSpace *as) |
2464 | { | |
00752703 | 2465 | as->dispatch = NULL; |
89ae337a | 2466 | as->dispatch_listener = (MemoryListener) { |
ac1970fb | 2467 | .begin = mem_begin, |
00752703 | 2468 | .commit = mem_commit, |
ac1970fb AK |
2469 | .region_add = mem_add, |
2470 | .region_nop = mem_add, | |
2471 | .priority = 0, | |
2472 | }; | |
89ae337a | 2473 | memory_listener_register(&as->dispatch_listener, as); |
ac1970fb AK |
2474 | } |
2475 | ||
6e48e8f9 PB |
2476 | void address_space_unregister(AddressSpace *as) |
2477 | { | |
2478 | memory_listener_unregister(&as->dispatch_listener); | |
2479 | } | |
2480 | ||
83f3c251 AK |
2481 | void address_space_destroy_dispatch(AddressSpace *as) |
2482 | { | |
2483 | AddressSpaceDispatch *d = as->dispatch; | |
2484 | ||
79e2b9ae PB |
2485 | atomic_rcu_set(&as->dispatch, NULL); |
2486 | if (d) { | |
2487 | call_rcu(d, address_space_dispatch_free, rcu); | |
2488 | } | |
83f3c251 AK |
2489 | } |
2490 | ||
62152b8a AK |
2491 | static void memory_map_init(void) |
2492 | { | |
7267c094 | 2493 | system_memory = g_malloc(sizeof(*system_memory)); |
03f49957 | 2494 | |
57271d63 | 2495 | memory_region_init(system_memory, NULL, "system", UINT64_MAX); |
7dca8043 | 2496 | address_space_init(&address_space_memory, system_memory, "memory"); |
309cb471 | 2497 | |
7267c094 | 2498 | system_io = g_malloc(sizeof(*system_io)); |
3bb28b72 JK |
2499 | memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io", |
2500 | 65536); | |
7dca8043 | 2501 | address_space_init(&address_space_io, system_io, "I/O"); |
62152b8a AK |
2502 | } |
2503 | ||
2504 | MemoryRegion *get_system_memory(void) | |
2505 | { | |
2506 | return system_memory; | |
2507 | } | |
2508 | ||
309cb471 AK |
2509 | MemoryRegion *get_system_io(void) |
2510 | { | |
2511 | return system_io; | |
2512 | } | |
2513 | ||
e2eef170 PB |
2514 | #endif /* !defined(CONFIG_USER_ONLY) */ |
2515 | ||
13eb76e0 FB |
2516 | /* physical memory access (slow version, mainly for debug) */ |
2517 | #if defined(CONFIG_USER_ONLY) | |
f17ec444 | 2518 | int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, |
a68fe89c | 2519 | uint8_t *buf, int len, int is_write) |
13eb76e0 FB |
2520 | { |
2521 | int l, flags; | |
2522 | target_ulong page; | |
53a5960a | 2523 | void * p; |
13eb76e0 FB |
2524 | |
2525 | while (len > 0) { | |
2526 | page = addr & TARGET_PAGE_MASK; | |
2527 | l = (page + TARGET_PAGE_SIZE) - addr; | |
2528 | if (l > len) | |
2529 | l = len; | |
2530 | flags = page_get_flags(page); | |
2531 | if (!(flags & PAGE_VALID)) | |
a68fe89c | 2532 | return -1; |
13eb76e0 FB |
2533 | if (is_write) { |
2534 | if (!(flags & PAGE_WRITE)) | |
a68fe89c | 2535 | return -1; |
579a97f7 | 2536 | /* XXX: this code should not depend on lock_user */ |
72fb7daa | 2537 | if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) |
a68fe89c | 2538 | return -1; |
72fb7daa AJ |
2539 | memcpy(p, buf, l); |
2540 | unlock_user(p, addr, l); | |
13eb76e0 FB |
2541 | } else { |
2542 | if (!(flags & PAGE_READ)) | |
a68fe89c | 2543 | return -1; |
579a97f7 | 2544 | /* XXX: this code should not depend on lock_user */ |
72fb7daa | 2545 | if (!(p = lock_user(VERIFY_READ, addr, l, 1))) |
a68fe89c | 2546 | return -1; |
72fb7daa | 2547 | memcpy(buf, p, l); |
5b257578 | 2548 | unlock_user(p, addr, 0); |
13eb76e0 FB |
2549 | } |
2550 | len -= l; | |
2551 | buf += l; | |
2552 | addr += l; | |
2553 | } | |
a68fe89c | 2554 | return 0; |
13eb76e0 | 2555 | } |
8df1cd07 | 2556 | |
13eb76e0 | 2557 | #else |
51d7a9eb | 2558 | |
845b6214 | 2559 | static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr, |
a8170e5e | 2560 | hwaddr length) |
51d7a9eb | 2561 | { |
e87f7778 | 2562 | uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr); |
0878d0e1 PB |
2563 | addr += memory_region_get_ram_addr(mr); |
2564 | ||
e87f7778 PB |
2565 | /* No early return if dirty_log_mask is or becomes 0, because |
2566 | * cpu_physical_memory_set_dirty_range will still call | |
2567 | * xen_modified_memory. | |
2568 | */ | |
2569 | if (dirty_log_mask) { | |
2570 | dirty_log_mask = | |
2571 | cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask); | |
2572 | } | |
2573 | if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) { | |
ba051fb5 | 2574 | tb_lock(); |
e87f7778 | 2575 | tb_invalidate_phys_range(addr, addr + length); |
ba051fb5 | 2576 | tb_unlock(); |
e87f7778 | 2577 | dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE); |
51d7a9eb | 2578 | } |
e87f7778 | 2579 | cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask); |
51d7a9eb AP |
2580 | } |
2581 | ||
23326164 | 2582 | static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) |
82f2563f | 2583 | { |
e1622f4b | 2584 | unsigned access_size_max = mr->ops->valid.max_access_size; |
23326164 RH |
2585 | |
2586 | /* Regions are assumed to support 1-4 byte accesses unless | |
2587 | otherwise specified. */ | |
23326164 RH |
2588 | if (access_size_max == 0) { |
2589 | access_size_max = 4; | |
2590 | } | |
2591 | ||
2592 | /* Bound the maximum access by the alignment of the address. */ | |
2593 | if (!mr->ops->impl.unaligned) { | |
2594 | unsigned align_size_max = addr & -addr; | |
2595 | if (align_size_max != 0 && align_size_max < access_size_max) { | |
2596 | access_size_max = align_size_max; | |
2597 | } | |
82f2563f | 2598 | } |
23326164 RH |
2599 | |
2600 | /* Don't attempt accesses larger than the maximum. */ | |
2601 | if (l > access_size_max) { | |
2602 | l = access_size_max; | |
82f2563f | 2603 | } |
6554f5c0 | 2604 | l = pow2floor(l); |
23326164 RH |
2605 | |
2606 | return l; | |
82f2563f PB |
2607 | } |
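/* A standalone worked example of the two clamps above (illustrative; it
 * mirrors the logic rather than calling it). `addr & -addr` isolates the
 * lowest set bit of the address, i.e. the largest power-of-two access that
 * cannot straddle a stricter alignment boundary:
 *
 *   addr = 0x1006  ->  0x1006 & -0x1006 = 0x2  ->  at most 2 bytes
 *   addr = 0x1008  ->  0x8                     ->  at most 8 bytes
 */
#include <stdio.h>
#include <stdint.h>

static unsigned clamp_access(uint64_t addr, unsigned l, unsigned max)
{
    uint64_t align = addr & -addr;      /* lowest set bit of addr */

    if (align != 0 && align < max) {
        max = align;                    /* bound by address alignment */
    }
    if (l > max) {
        l = max;                        /* bound by the region maximum */
    }
    while (l & (l - 1)) {
        l &= l - 1;                     /* pow2floor: keep the top bit */
    }
    return l;
}

int main(void)
{
    printf("%u\n", clamp_access(0x1006, 8, 8));   /* -> 2 */
    printf("%u\n", clamp_access(0x1008, 8, 8));   /* -> 8 */
    return 0;
}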
2608 | ||
4840f10e | 2609 | static bool prepare_mmio_access(MemoryRegion *mr) |
125b3806 | 2610 | { |
4840f10e JK |
2611 | bool unlocked = !qemu_mutex_iothread_locked(); |
2612 | bool release_lock = false; | |
2613 | ||
2614 | if (unlocked && mr->global_locking) { | |
2615 | qemu_mutex_lock_iothread(); | |
2616 | unlocked = false; | |
2617 | release_lock = true; | |
2618 | } | |
125b3806 | 2619 | if (mr->flush_coalesced_mmio) { |
4840f10e JK |
2620 | if (unlocked) { |
2621 | qemu_mutex_lock_iothread(); | |
2622 | } | |
125b3806 | 2623 | qemu_flush_coalesced_mmio_buffer(); |
4840f10e JK |
2624 | if (unlocked) { |
2625 | qemu_mutex_unlock_iothread(); | |
2626 | } | |
125b3806 | 2627 | } |
4840f10e JK |
2628 | |
2629 | return release_lock; | |
125b3806 PB |
2630 | } |
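/* A hedged sketch of the calling pattern used by the dispatch loops below:
 * take the BQL only if the region needs it, dispatch, then release exactly
 * once.
 */
#if 0
bool release_lock = prepare_mmio_access(mr);

result |= memory_region_dispatch_write(mr, addr1, val, l, attrs);

if (release_lock) {
    qemu_mutex_unlock_iothread();
}
#endif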
2631 | ||
a203ac70 PB |
2632 | /* Called within RCU critical section. */ |
2633 | static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr, | |
2634 | MemTxAttrs attrs, | |
2635 | const uint8_t *buf, | |
2636 | int len, hwaddr addr1, | |
2637 | hwaddr l, MemoryRegion *mr) | |
13eb76e0 | 2638 | { |
13eb76e0 | 2639 | uint8_t *ptr; |
791af8c8 | 2640 | uint64_t val; |
3b643495 | 2641 | MemTxResult result = MEMTX_OK; |
4840f10e | 2642 | bool release_lock = false; |
3b46e624 | 2643 | |
a203ac70 | 2644 | for (;;) { |
eb7eeb88 PB |
2645 | if (!memory_access_is_direct(mr, true)) { |
2646 | release_lock |= prepare_mmio_access(mr); | |
2647 | l = memory_access_size(mr, l, addr1); | |
2648 | /* XXX: could force current_cpu to NULL to avoid | |
2649 | potential bugs */ | |
2650 | switch (l) { | |
2651 | case 8: | |
2652 | /* 64 bit write access */ | |
2653 | val = ldq_p(buf); | |
2654 | result |= memory_region_dispatch_write(mr, addr1, val, 8, | |
2655 | attrs); | |
2656 | break; | |
2657 | case 4: | |
2658 | /* 32 bit write access */ | |
6da67de6 | 2659 | val = (uint32_t)ldl_p(buf); |
eb7eeb88 PB |
2660 | result |= memory_region_dispatch_write(mr, addr1, val, 4, |
2661 | attrs); | |
2662 | break; | |
2663 | case 2: | |
2664 | /* 16 bit write access */ | |
2665 | val = lduw_p(buf); | |
2666 | result |= memory_region_dispatch_write(mr, addr1, val, 2, | |
2667 | attrs); | |
2668 | break; | |
2669 | case 1: | |
2670 | /* 8 bit write access */ | |
2671 | val = ldub_p(buf); | |
2672 | result |= memory_region_dispatch_write(mr, addr1, val, 1, | |
2673 | attrs); | |
2674 | break; | |
2675 | default: | |
2676 | abort(); | |
13eb76e0 FB |
2677 | } |
2678 | } else { | |
eb7eeb88 | 2679 | /* RAM case */ |
0878d0e1 | 2680 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); |
eb7eeb88 PB |
2681 | memcpy(ptr, buf, l); |
2682 | invalidate_and_set_dirty(mr, addr1, l); | |
13eb76e0 | 2683 | } |
4840f10e JK |
2684 | |
2685 | if (release_lock) { | |
2686 | qemu_mutex_unlock_iothread(); | |
2687 | release_lock = false; | |
2688 | } | |
2689 | ||
13eb76e0 FB |
2690 | len -= l; |
2691 | buf += l; | |
2692 | addr += l; | |
a203ac70 PB |
2693 | |
2694 | if (!len) { | |
2695 | break; | |
2696 | } | |
2697 | ||
2698 | l = len; | |
2699 | mr = address_space_translate(as, addr, &addr1, &l, true); | |
13eb76e0 | 2700 | } |
fd8aaa76 | 2701 | |
3b643495 | 2702 | return result; |
13eb76e0 | 2703 | } |
8df1cd07 | 2704 | |
a203ac70 PB |
2705 | MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, |
2706 | const uint8_t *buf, int len) | |
ac1970fb | 2707 | { |
eb7eeb88 | 2708 | hwaddr l; |
eb7eeb88 PB |
2709 | hwaddr addr1; |
2710 | MemoryRegion *mr; | |
2711 | MemTxResult result = MEMTX_OK; | |
eb7eeb88 | 2712 | |
a203ac70 PB |
2713 | if (len > 0) { |
2714 | rcu_read_lock(); | |
eb7eeb88 | 2715 | l = len; |
a203ac70 PB |
2716 | mr = address_space_translate(as, addr, &addr1, &l, true); |
2717 | result = address_space_write_continue(as, addr, attrs, buf, len, | |
2718 | addr1, l, mr); | |
2719 | rcu_read_unlock(); | |
2720 | } | |
2721 | ||
2722 | return result; | |
2723 | } | |
2724 | ||
2725 | /* Called within RCU critical section. */ | |
2726 | MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr, | |
2727 | MemTxAttrs attrs, uint8_t *buf, | |
2728 | int len, hwaddr addr1, hwaddr l, | |
2729 | MemoryRegion *mr) | |
2730 | { | |
2731 | uint8_t *ptr; | |
2732 | uint64_t val; | |
2733 | MemTxResult result = MEMTX_OK; | |
2734 | bool release_lock = false; | |
eb7eeb88 | 2735 | |
a203ac70 | 2736 | for (;;) { |
eb7eeb88 PB |
2737 | if (!memory_access_is_direct(mr, false)) { |
2738 | /* I/O case */ | |
2739 | release_lock |= prepare_mmio_access(mr); | |
2740 | l = memory_access_size(mr, l, addr1); | |
2741 | switch (l) { | |
2742 | case 8: | |
2743 | /* 64 bit read access */ | |
2744 | result |= memory_region_dispatch_read(mr, addr1, &val, 8, | |
2745 | attrs); | |
2746 | stq_p(buf, val); | |
2747 | break; | |
2748 | case 4: | |
2749 | /* 32 bit read access */ | |
2750 | result |= memory_region_dispatch_read(mr, addr1, &val, 4, | |
2751 | attrs); | |
2752 | stl_p(buf, val); | |
2753 | break; | |
2754 | case 2: | |
2755 | /* 16 bit read access */ | |
2756 | result |= memory_region_dispatch_read(mr, addr1, &val, 2, | |
2757 | attrs); | |
2758 | stw_p(buf, val); | |
2759 | break; | |
2760 | case 1: | |
2761 | /* 8 bit read access */ | |
2762 | result |= memory_region_dispatch_read(mr, addr1, &val, 1, | |
2763 | attrs); | |
2764 | stb_p(buf, val); | |
2765 | break; | |
2766 | default: | |
2767 | abort(); | |
2768 | } | |
2769 | } else { | |
2770 | /* RAM case */ | |
0878d0e1 | 2771 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); |
eb7eeb88 PB |
2772 | memcpy(buf, ptr, l); |
2773 | } | |
2774 | ||
2775 | if (release_lock) { | |
2776 | qemu_mutex_unlock_iothread(); | |
2777 | release_lock = false; | |
2778 | } | |
2779 | ||
2780 | len -= l; | |
2781 | buf += l; | |
2782 | addr += l; | |
a203ac70 PB |
2783 | |
2784 | if (!len) { | |
2785 | break; | |
2786 | } | |
2787 | ||
2788 | l = len; | |
2789 | mr = address_space_translate(as, addr, &addr1, &l, false); | |
2790 | } | |
2791 | ||
2792 | return result; | |
2793 | } | |
2794 | ||
3cc8f884 PB |
2795 | MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr, |
2796 | MemTxAttrs attrs, uint8_t *buf, int len) | |
a203ac70 PB |
2797 | { |
2798 | hwaddr l; | |
2799 | hwaddr addr1; | |
2800 | MemoryRegion *mr; | |
2801 | MemTxResult result = MEMTX_OK; | |
2802 | ||
2803 | if (len > 0) { | |
2804 | rcu_read_lock(); | |
2805 | l = len; | |
2806 | mr = address_space_translate(as, addr, &addr1, &l, false); | |
2807 | result = address_space_read_continue(as, addr, attrs, buf, len, | |
2808 | addr1, l, mr); | |
2809 | rcu_read_unlock(); | |
eb7eeb88 | 2810 | } |
eb7eeb88 PB |
2811 | |
2812 | return result; | |
ac1970fb AK |
2813 | } |
2814 | ||
eb7eeb88 PB |
2815 | MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, |
2816 | uint8_t *buf, int len, bool is_write) | |
2817 | { | |
2818 | if (is_write) { | |
2819 | return address_space_write(as, addr, attrs, (uint8_t *)buf, len); | |
2820 | } else { | |
2821 | return address_space_read(as, addr, attrs, (uint8_t *)buf, len); | |
2822 | } | |
2823 | } | |
ac1970fb | 2824 | |
a8170e5e | 2825 | void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf, |
ac1970fb AK |
2826 | int len, int is_write) |
2827 | { | |
5c9eb028 PM |
2828 | address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED, |
2829 | buf, len, is_write); | |
ac1970fb AK |
2830 | } |
2831 | ||
582b55a9 AG |
2832 | enum write_rom_type { |
2833 | WRITE_DATA, | |
2834 | FLUSH_CACHE, | |
2835 | }; | |
2836 | ||
2a221651 | 2837 | static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as, |
582b55a9 | 2838 | hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type) |
d0ecd2aa | 2839 | { |
149f54b5 | 2840 | hwaddr l; |
d0ecd2aa | 2841 | uint8_t *ptr; |
149f54b5 | 2842 | hwaddr addr1; |
5c8a00ce | 2843 | MemoryRegion *mr; |
3b46e624 | 2844 | |
41063e1e | 2845 | rcu_read_lock(); |
d0ecd2aa | 2846 | while (len > 0) { |
149f54b5 | 2847 | l = len; |
2a221651 | 2848 | mr = address_space_translate(as, addr, &addr1, &l, true); |
3b46e624 | 2849 | |
5c8a00ce PB |
2850 | if (!(memory_region_is_ram(mr) || |
2851 | memory_region_is_romd(mr))) { | |
b242e0e0 | 2852 | l = memory_access_size(mr, l, addr1); |
d0ecd2aa | 2853 | } else { |
d0ecd2aa | 2854 | /* ROM/RAM case */ |
0878d0e1 | 2855 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); |
582b55a9 AG |
2856 | switch (type) { |
2857 | case WRITE_DATA: | |
2858 | memcpy(ptr, buf, l); | |
845b6214 | 2859 | invalidate_and_set_dirty(mr, addr1, l); |
582b55a9 AG |
2860 | break; |
2861 | case FLUSH_CACHE: | |
2862 | flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l); | |
2863 | break; | |
2864 | } | |
d0ecd2aa FB |
2865 | } |
2866 | len -= l; | |
2867 | buf += l; | |
2868 | addr += l; | |
2869 | } | |
41063e1e | 2870 | rcu_read_unlock(); |
d0ecd2aa FB |
2871 | } |
2872 | ||
582b55a9 | 2873 | /* used for ROM loading : can write in RAM and ROM */ |
2a221651 | 2874 | void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr, |
582b55a9 AG |
2875 | const uint8_t *buf, int len) |
2876 | { | |
2a221651 | 2877 | cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA); |
582b55a9 AG |
2878 | } |
2879 | ||
2880 | void cpu_flush_icache_range(hwaddr start, int len) | |
2881 | { | |
2882 | /* | |
2883 | * This function should do the same thing as an icache flush that was | |
2884 | * triggered from within the guest. For TCG we are always cache coherent, | |
2885 | * so there is no need to flush anything. For KVM / Xen we need to flush | |
2886 | * the host's instruction cache at least. | |
2887 | */ | |
2888 | if (tcg_enabled()) { | |
2889 | return; | |
2890 | } | |
2891 | ||
2a221651 EI |
2892 | cpu_physical_memory_write_rom_internal(&address_space_memory, |
2893 | start, NULL, len, FLUSH_CACHE); | |
582b55a9 AG |
2894 | } |
2895 | ||
6d16c2f8 | 2896 | typedef struct { |
d3e71559 | 2897 | MemoryRegion *mr; |
6d16c2f8 | 2898 | void *buffer; |
a8170e5e AK |
2899 | hwaddr addr; |
2900 | hwaddr len; | |
c2cba0ff | 2901 | bool in_use; |
6d16c2f8 AL |
2902 | } BounceBuffer; |
2903 | ||
2904 | static BounceBuffer bounce; | |
2905 | ||
ba223c29 | 2906 | typedef struct MapClient { |
e95205e1 | 2907 | QEMUBH *bh; |
72cf2d4f | 2908 | QLIST_ENTRY(MapClient) link; |
ba223c29 AL |
2909 | } MapClient; |
2910 | ||
38e047b5 | 2911 | QemuMutex map_client_list_lock; |
72cf2d4f BS |
2912 | static QLIST_HEAD(map_client_list, MapClient) map_client_list |
2913 | = QLIST_HEAD_INITIALIZER(map_client_list); | |
ba223c29 | 2914 | |
e95205e1 FZ |
2915 | static void cpu_unregister_map_client_do(MapClient *client) |
2916 | { | |
2917 | QLIST_REMOVE(client, link); | |
2918 | g_free(client); | |
2919 | } | |
2920 | ||
33b6c2ed FZ |
2921 | static void cpu_notify_map_clients_locked(void) |
2922 | { | |
2923 | MapClient *client; | |
2924 | ||
2925 | while (!QLIST_EMPTY(&map_client_list)) { | |
2926 | client = QLIST_FIRST(&map_client_list); | |
e95205e1 FZ |
2927 | qemu_bh_schedule(client->bh); |
2928 | cpu_unregister_map_client_do(client); | |
33b6c2ed FZ |
2929 | } |
2930 | } | |
2931 | ||
e95205e1 | 2932 | void cpu_register_map_client(QEMUBH *bh) |
ba223c29 | 2933 | { |
7267c094 | 2934 | MapClient *client = g_malloc(sizeof(*client)); |
ba223c29 | 2935 | |
38e047b5 | 2936 | qemu_mutex_lock(&map_client_list_lock); |
e95205e1 | 2937 | client->bh = bh; |
72cf2d4f | 2938 | QLIST_INSERT_HEAD(&map_client_list, client, link); |
33b6c2ed FZ |
2939 | if (!atomic_read(&bounce.in_use)) { |
2940 | cpu_notify_map_clients_locked(); | |
2941 | } | |
38e047b5 | 2942 | qemu_mutex_unlock(&map_client_list_lock); |
ba223c29 AL |
2943 | } |
2944 | ||
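
/* Illustrative sketch, not part of the original source: how a caller
 * might react to address_space_map() returning NULL because the single
 * bounce buffer is busy. example_start_read and its retry callback are
 * hypothetical names; the opaque pointer carries the caller's state.
 */
static void example_start_read(AddressSpace *as, hwaddr addr, hwaddr len,
                               QEMUBHFunc *retry_cb, void *opaque)
{
    hwaddr plen = len;
    void *ptr = address_space_map(as, addr, &plen, false);

    if (!ptr) {
        /* Bounce buffer in use: run retry_cb once it is released. */
        cpu_register_map_client(qemu_bh_new(retry_cb, opaque));
        return;
    }
    /* ... consume up to plen bytes at ptr ... */
    address_space_unmap(as, ptr, plen, false, plen);
}
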
38e047b5 | 2945 | void cpu_exec_init_all(void) |
ba223c29 | 2946 | { |
38e047b5 | 2947 | qemu_mutex_init(&ram_list.mutex); |
20bccb82 PM |
2948 | /* The data structures we set up here depend on knowing the page size, |
2949 | * so no more changes can be made after this point. | |
2950 | * In an ideal world, nothing we did before we had finished the | |
2951 | * machine setup would care about the target page size, and we could | |
2952 | * do this much later, rather than requiring board models to state | |
2953 | * up front what their requirements are. | |
2954 | */ | |
2955 | finalize_target_page_bits(); | |
38e047b5 | 2956 | io_mem_init(); |
680a4783 | 2957 | memory_map_init(); |
38e047b5 | 2958 | qemu_mutex_init(&map_client_list_lock); |
ba223c29 AL |
2959 | } |
2960 | ||
e95205e1 | 2961 | void cpu_unregister_map_client(QEMUBH *bh) |
ba223c29 AL |
2962 | { |
2963 | MapClient *client; | |
2964 | ||
e95205e1 FZ |
2965 | qemu_mutex_lock(&map_client_list_lock); |
2966 | QLIST_FOREACH(client, &map_client_list, link) { | |
2967 | if (client->bh == bh) { | |
2968 | cpu_unregister_map_client_do(client); | |
2969 | break; | |
2970 | } | |
ba223c29 | 2971 | } |
e95205e1 | 2972 | qemu_mutex_unlock(&map_client_list_lock); |
ba223c29 AL |
2973 | } |
2974 | ||
2975 | static void cpu_notify_map_clients(void) | |
2976 | { | |
38e047b5 | 2977 | qemu_mutex_lock(&map_client_list_lock); |
33b6c2ed | 2978 | cpu_notify_map_clients_locked(); |
38e047b5 | 2979 | qemu_mutex_unlock(&map_client_list_lock); |
ba223c29 AL |
2980 | } |
2981 | ||
51644ab7 PB |
2982 | bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write) |
2983 | { | |
5c8a00ce | 2984 | MemoryRegion *mr; |
51644ab7 PB |
2985 | hwaddr l, xlat; |
2986 | ||
41063e1e | 2987 | rcu_read_lock(); |
51644ab7 PB |
2988 | while (len > 0) { |
2989 | l = len; | |
5c8a00ce PB |
2990 | mr = address_space_translate(as, addr, &xlat, &l, is_write); |
2991 | if (!memory_access_is_direct(mr, is_write)) { | |
2992 | l = memory_access_size(mr, l, addr); | |
2993 | if (!memory_region_access_valid(mr, xlat, l, is_write)) { | |
5ad4a2b7 | 2994 | rcu_read_unlock(); |
51644ab7 PB |
2995 | return false; |
2996 | } | |
2997 | } | |
2998 | ||
2999 | len -= l; | |
3000 | addr += l; | |
3001 | } | |
41063e1e | 3002 | rcu_read_unlock(); |
51644ab7 PB |
3003 | return true; |
3004 | } | |
3005 | ||
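
/* Illustrative sketch, not part of the original source: validating a
 * guest-supplied address before touching it; the 4-byte length is an
 * arbitrary example value.
 */
static bool example_u32_is_readable(AddressSpace *as, hwaddr addr)
{
    return address_space_access_valid(as, addr, 4, false);
}
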
715c31ec PB |
3006 | static hwaddr |
3007 | address_space_extend_translation(AddressSpace *as, hwaddr addr, hwaddr target_len, | |
3008 | MemoryRegion *mr, hwaddr base, hwaddr len, | |
3009 | bool is_write) | |
3010 | { | |
3011 | hwaddr done = 0; | |
3012 | hwaddr xlat; | |
3013 | MemoryRegion *this_mr; | |
3014 | ||
3015 | for (;;) { | |
3016 | target_len -= len; | |
3017 | addr += len; | |
3018 | done += len; | |
3019 | if (target_len == 0) { | |
3020 | return done; | |
3021 | } | |
3022 | ||
3023 | len = target_len; | |
3024 | this_mr = address_space_translate(as, addr, &xlat, &len, is_write); | |
3025 | if (this_mr != mr || xlat != base + done) { | |
3026 | return done; | |
3027 | } | |
3028 | } | |
3029 | } | |
3030 | ||
6d16c2f8 AL |
3031 | /* Map a physical memory region into a host virtual address. |
3032 | * May map a subset of the requested range, given by and returned in *plen. | |
3033 | * May return NULL if resources needed to perform the mapping are exhausted. | |
3034 | * Use only for reads OR writes - not for read-modify-write operations. | |
ba223c29 AL |
3035 | * Use cpu_register_map_client() to know when retrying the map operation is |
3036 | * likely to succeed. | |
6d16c2f8 | 3037 | */ |
ac1970fb | 3038 | void *address_space_map(AddressSpace *as, |
a8170e5e AK |
3039 | hwaddr addr, |
3040 | hwaddr *plen, | |
ac1970fb | 3041 | bool is_write) |
6d16c2f8 | 3042 | { |
a8170e5e | 3043 | hwaddr len = *plen; |
715c31ec PB |
3044 | hwaddr l, xlat; |
3045 | MemoryRegion *mr; | |
e81bcda5 | 3046 | void *ptr; |
6d16c2f8 | 3047 | |
e3127ae0 PB |
3048 | if (len == 0) { |
3049 | return NULL; | |
3050 | } | |
38bee5dc | 3051 | |
e3127ae0 | 3052 | l = len; |
41063e1e | 3053 | rcu_read_lock(); |
e3127ae0 | 3054 | mr = address_space_translate(as, addr, &xlat, &l, is_write); |
41063e1e | 3055 | |
e3127ae0 | 3056 | if (!memory_access_is_direct(mr, is_write)) { |
c2cba0ff | 3057 | if (atomic_xchg(&bounce.in_use, true)) { |
41063e1e | 3058 | rcu_read_unlock(); |
e3127ae0 | 3059 | return NULL; |
6d16c2f8 | 3060 | } |
e85d9db5 KW |
3061 | /* Avoid unbounded allocations */ |
3062 | l = MIN(l, TARGET_PAGE_SIZE); | |
3063 | bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l); | |
e3127ae0 PB |
3064 | bounce.addr = addr; |
3065 | bounce.len = l; | |
d3e71559 PB |
3066 | |
3067 | memory_region_ref(mr); | |
3068 | bounce.mr = mr; | |
e3127ae0 | 3069 | if (!is_write) { |
5c9eb028 PM |
3070 | address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED, |
3071 | bounce.buffer, l); | |
8ab934f9 | 3072 | } |
6d16c2f8 | 3073 | |
41063e1e | 3074 | rcu_read_unlock(); |
e3127ae0 PB |
3075 | *plen = l; |
3076 | return bounce.buffer; | |
3077 | } | |
3078 | ||
e3127ae0 | 3079 | |
d3e71559 | 3080 | memory_region_ref(mr); |
715c31ec PB |
3081 | *plen = address_space_extend_translation(as, addr, len, mr, xlat, l, is_write); |
3082 | ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen); | |
e81bcda5 PB |
3083 | rcu_read_unlock(); |
3084 | ||
3085 | return ptr; | |
6d16c2f8 AL |
3086 | } |
3087 | ||
ac1970fb | 3088 | /* Unmaps a memory region previously mapped by address_space_map(). |
6d16c2f8 AL |
3089 | * Will also mark the memory as dirty if is_write == 1. access_len gives |
3090 | * the amount of memory that was actually read or written by the caller. | |
3091 | */ | |
a8170e5e AK |
3092 | void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, |
3093 | int is_write, hwaddr access_len) | |
6d16c2f8 AL |
3094 | { |
3095 | if (buffer != bounce.buffer) { | |
d3e71559 PB |
3096 | MemoryRegion *mr; |
3097 | ram_addr_t addr1; | |
3098 | ||
07bdaa41 | 3099 | mr = memory_region_from_host(buffer, &addr1); |
d3e71559 | 3100 | assert(mr != NULL); |
6d16c2f8 | 3101 | if (is_write) { |
845b6214 | 3102 | invalidate_and_set_dirty(mr, addr1, access_len); |
6d16c2f8 | 3103 | } |
868bb33f | 3104 | if (xen_enabled()) { |
e41d7c69 | 3105 | xen_invalidate_map_cache_entry(buffer); |
050a0ddf | 3106 | } |
d3e71559 | 3107 | memory_region_unref(mr); |
6d16c2f8 AL |
3108 | return; |
3109 | } | |
3110 | if (is_write) { | |
5c9eb028 PM |
3111 | address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED, |
3112 | bounce.buffer, access_len); | |
6d16c2f8 | 3113 | } |
f8a83245 | 3114 | qemu_vfree(bounce.buffer); |
6d16c2f8 | 3115 | bounce.buffer = NULL; |
d3e71559 | 3116 | memory_region_unref(bounce.mr); |
c2cba0ff | 3117 | atomic_mb_set(&bounce.in_use, false); |
ba223c29 | 3118 | cpu_notify_map_clients(); |
6d16c2f8 | 3119 | } |
d0ecd2aa | 3120 | |
a8170e5e AK |
3121 | void *cpu_physical_memory_map(hwaddr addr, |
3122 | hwaddr *plen, | |
ac1970fb AK |
3123 | int is_write) |
3124 | { | |
3125 | return address_space_map(&address_space_memory, addr, plen, is_write); | |
3126 | } | |
3127 | ||
a8170e5e AK |
3128 | void cpu_physical_memory_unmap(void *buffer, hwaddr len, |
3129 | int is_write, hwaddr access_len) | |
ac1970fb AK |
3130 | { |
3131 | return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len); | |
3132 | } | |
3133 | ||
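
/* Illustrative sketch, not part of the original source: the canonical
 * map/use/unmap pattern around the wrappers above. The mapping may be
 * shorter than requested, hence the loop; on a NULL return (bounce
 * buffer busy) this sketch simply gives up, where a real caller would
 * register a map client and retry.
 */
static void example_copy_from_guest(hwaddr addr, uint8_t *dst, hwaddr len)
{
    while (len > 0) {
        hwaddr plen = len;
        void *ptr = cpu_physical_memory_map(addr, &plen, 0);

        if (!ptr) {
            break;
        }
        memcpy(dst, ptr, plen);
        cpu_physical_memory_unmap(ptr, plen, 0, plen);
        addr += plen;
        dst += plen;
        len -= plen;
    }
}
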
0ce265ff PB |
3134 | #define ARG1_DECL AddressSpace *as |
3135 | #define ARG1 as | |
3136 | #define SUFFIX | |
3137 | #define TRANSLATE(...) address_space_translate(as, __VA_ARGS__) | |
3138 | #define IS_DIRECT(mr, is_write) memory_access_is_direct(mr, is_write) | |
3139 | #define MAP_RAM(mr, ofs) qemu_map_ram_ptr((mr)->ram_block, ofs) | |
3140 | #define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len) | |
3141 | #define RCU_READ_LOCK(...) rcu_read_lock() | |
3142 | #define RCU_READ_UNLOCK(...) rcu_read_unlock() | |
3143 | #include "memory_ldst.inc.c" | |
1e78bcc1 | 3144 | |
1f4e496e PB |
3145 | int64_t address_space_cache_init(MemoryRegionCache *cache, |
3146 | AddressSpace *as, | |
3147 | hwaddr addr, | |
3148 | hwaddr len, | |
3149 | bool is_write) | |
3150 | { | |
3151 | hwaddr l, xlat; | |
3152 | MemoryRegion *mr; | |
3153 | void *ptr; | |
3154 | ||
3155 | assert(len > 0); | |
3156 | ||
3157 | l = len; | |
3158 | mr = address_space_translate(as, addr, &xlat, &l, is_write); | |
3159 | if (!memory_access_is_direct(mr, is_write)) { | |
3160 | return -EINVAL; | |
3161 | } | |
3162 | ||
3163 | l = address_space_extend_translation(as, addr, len, mr, xlat, l, is_write); | |
3164 | ptr = qemu_ram_ptr_length(mr->ram_block, xlat, &l); | |
3165 | ||
3166 | cache->xlat = xlat; | |
3167 | cache->is_write = is_write; | |
3168 | cache->mr = mr; | |
3169 | cache->ptr = ptr; | |
3170 | cache->len = l; | |
3171 | memory_region_ref(cache->mr); | |
3172 | ||
3173 | return l; | |
3174 | } | |
3175 | ||
3176 | void address_space_cache_invalidate(MemoryRegionCache *cache, | |
3177 | hwaddr addr, | |
3178 | hwaddr access_len) | |
3179 | { | |
3180 | assert(cache->is_write); | |
3181 | invalidate_and_set_dirty(cache->mr, addr + cache->xlat, access_len); | |
3182 | } | |
3183 | ||
3184 | void address_space_cache_destroy(MemoryRegionCache *cache) | |
3185 | { | |
3186 | if (!cache->mr) { | |
3187 | return; | |
3188 | } | |
3189 | ||
3190 | if (xen_enabled()) { | |
3191 | xen_invalidate_map_cache_entry(cache->ptr); | |
3192 | } | |
3193 | memory_region_unref(cache->mr); | |
91047df3 | 3194 | cache->mr = NULL; |
1f4e496e PB |
3195 | } |
3196 | ||
3197 | /* Called from RCU critical section. This function has the same | |
3198 | * semantics as address_space_translate, but it only works on a | |
3199 | * predefined range of a MemoryRegion that was mapped with | |
3200 | * address_space_cache_init. | |
3201 | */ | |
3202 | static inline MemoryRegion *address_space_translate_cached( | |
3203 | MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat, | |
3204 | hwaddr *plen, bool is_write) | |
3205 | { | |
3206 | assert(addr < cache->len && *plen <= cache->len - addr); | |
3207 | *xlat = addr + cache->xlat; | |
3208 | return cache->mr; | |
3209 | } | |
3210 | ||
3211 | #define ARG1_DECL MemoryRegionCache *cache | |
3212 | #define ARG1 cache | |
3213 | #define SUFFIX _cached | |
3214 | #define TRANSLATE(...) address_space_translate_cached(cache, __VA_ARGS__) | |
3215 | #define IS_DIRECT(mr, is_write) true | |
3216 | #define MAP_RAM(mr, ofs) (cache->ptr + (ofs - cache->xlat)) | |
3217 | #define INVALIDATE(mr, ofs, len) ((void)0) | |
3218 | #define RCU_READ_LOCK() ((void)0) | |
3219 | #define RCU_READ_UNLOCK() ((void)0) | |
3220 | #include "memory_ldst.inc.c" | |
3221 | ||
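
/* Illustrative sketch, not part of the original source: reading a
 * 32-bit little-endian field through a MemoryRegionCache, roughly as a
 * device might poll a ring index. address_space_ldl_le_cached is the
 * accessor the SUFFIX above should generate; treat the exact name and
 * the fallback value of 0 as assumptions of this sketch.
 */
static uint32_t example_poll_u32(AddressSpace *as, hwaddr field_pa)
{
    MemoryRegionCache cache;
    int64_t mapped = address_space_cache_init(&cache, as, field_pa, 4, false);
    uint32_t val;

    if (mapped < 0) {
        return 0; /* not direct RAM: a real caller would use a slow path */
    }
    if (mapped < 4) {
        address_space_cache_destroy(&cache);
        return 0;
    }
    val = address_space_ldl_le_cached(&cache, 0, MEMTXATTRS_UNSPECIFIED,
                                      NULL);
    address_space_cache_destroy(&cache);
    return val;
}
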
5e2972fd | 3222 | /* virtual memory access for debug (includes writing to ROM) */ |
f17ec444 | 3223 | int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, |
b448f2f3 | 3224 | uint8_t *buf, int len, int is_write) |
13eb76e0 FB |
3225 | { |
3226 | int l; | |
a8170e5e | 3227 | hwaddr phys_addr; |
9b3c35e0 | 3228 | target_ulong page; |
13eb76e0 FB |
3229 | |
3230 | while (len > 0) { | |
5232e4c7 PM |
3231 | int asidx; |
3232 | MemTxAttrs attrs; | |
3233 | ||
13eb76e0 | 3234 | page = addr & TARGET_PAGE_MASK; |
5232e4c7 PM |
3235 | phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs); |
3236 | asidx = cpu_asidx_from_attrs(cpu, attrs); | |
13eb76e0 FB |
3237 | /* if no physical page mapped, return an error */ |
3238 | if (phys_addr == -1) { | |
3239 | return -1; | |
3240 | } | |
3241 | l = (page + TARGET_PAGE_SIZE) - addr; | |
3242 | l = MIN(l, len); | |
5e2972fd | 3243 | phys_addr += (addr & ~TARGET_PAGE_MASK); |
2e38847b | 3244 | if (is_write) { |
5232e4c7 PM |
3245 | cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as, |
3246 | phys_addr, buf, l); | |
2e38847b | 3247 | } else { |
5232e4c7 PM |
3248 | address_space_rw(cpu->cpu_ases[asidx].as, phys_addr, |
3249 | MEMTXATTRS_UNSPECIFIED, | |
5c9eb028 | 3250 | buf, l, 0); |
2e38847b | 3251 | } |
13eb76e0 FB |
3252 | len -= l; |
3253 | buf += l; | |
3254 | addr += l; | |
3255 | } | |
3256 | return 0; | |
3257 | } | |
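
/* Illustrative sketch, not part of the original source: a debug-stub
 * style read of one guest-virtual word using the helper above, treating
 * any partial failure as an error.
 */
static bool example_debug_read_u32(CPUState *cpu, target_ulong vaddr,
                                   uint32_t *val)
{
    return cpu_memory_rw_debug(cpu, vaddr, (uint8_t *)val,
                               sizeof(*val), 0) == 0;
}
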
038629a6 DDAG |
3258 | |
3259 | /* | |
3260 | * Allows code that needs to deal with migration bitmaps etc. to still be | |
3261 | * built target-independent. | |
3262 | */ | |
3263 | size_t qemu_target_page_bits(void) | |
3264 | { | |
3265 | return TARGET_PAGE_BITS; | |
3266 | } | |
3267 | ||
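
/* Illustrative sketch, not part of the original source: how
 * target-independent code can derive the page size from the exported
 * bit count.
 */
static size_t example_target_page_size(void)
{
    return (size_t)1 << qemu_target_page_bits();
}
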
a68fe89c | 3268 | #endif |
13eb76e0 | 3269 | |
8e4a424b BS |
3270 | /* |
3271 | * A helper function for the _utterly broken_ virtio device model to find out if | |
3272 | * it's running on a big endian machine. Don't do this at home kids! | |
3273 | */ | |
98ed8ecf GK |
3274 | bool target_words_bigendian(void); |
3275 | bool target_words_bigendian(void) | |
8e4a424b BS |
3276 | { |
3277 | #if defined(TARGET_WORDS_BIGENDIAN) | |
3278 | return true; | |
3279 | #else | |
3280 | return false; | |
3281 | #endif | |
3282 | } | |
3283 | ||
76f35538 | 3284 | #ifndef CONFIG_USER_ONLY |
a8170e5e | 3285 | bool cpu_physical_memory_is_io(hwaddr phys_addr) |
76f35538 | 3286 | { |
5c8a00ce | 3287 | MemoryRegion *mr; |
149f54b5 | 3288 | hwaddr l = 1; |
41063e1e | 3289 | bool res; |
76f35538 | 3290 | |
41063e1e | 3291 | rcu_read_lock(); |
5c8a00ce PB |
3292 | mr = address_space_translate(&address_space_memory, |
3293 | phys_addr, &phys_addr, &l, false); | |
76f35538 | 3294 | |
41063e1e PB |
3295 | res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr)); |
3296 | rcu_read_unlock(); | |
3297 | return res; | |
76f35538 | 3298 | } |
bd2fa51f | 3299 | |
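
/* Illustrative sketch, not part of the original source: a physical
 * memory scanner might use the predicate above to skip device (MMIO)
 * pages, which cannot be read as plain RAM.
 */
static bool example_page_is_plain_ram(hwaddr pa)
{
    return !cpu_physical_memory_is_io(pa);
}
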
e3807054 | 3300 | int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque) |
bd2fa51f MH |
3301 | { |
3302 | RAMBlock *block; | |
e3807054 | 3303 | int ret = 0; |
bd2fa51f | 3304 | |
0dc3f44a MD |
3305 | rcu_read_lock(); |
3306 | QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { | |
e3807054 DDAG |
3307 | ret = func(block->idstr, block->host, block->offset, |
3308 | block->used_length, opaque); | |
3309 | if (ret) { | |
3310 | break; | |
3311 | } | |
bd2fa51f | 3312 | } |
0dc3f44a | 3313 | rcu_read_unlock(); |
e3807054 | 3314 | return ret; |
bd2fa51f | 3315 | } |
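
/* Illustrative sketch, not part of the original source: a callback
 * matching the RAMBlockIterFunc parameters used above, which logs every
 * block; returning non-zero stops the walk early. It would be invoked
 * as qemu_ram_foreach_block(example_dump_block, NULL).
 */
static int example_dump_block(const char *idstr, void *host_addr,
                              ram_addr_t offset, ram_addr_t length,
                              void *opaque)
{
    printf("%s: host %p offset " RAM_ADDR_FMT " length " RAM_ADDR_FMT "\n",
           idstr, host_addr, offset, length);
    return 0;
}
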
d3a5038c DDAG |
3316 | |
3317 | /* | |
3318 | * Unmap pages of memory from start to start+length such that | |
3319 | * they a) read as 0, b) trigger whatever fault mechanism | |
3320 | * the OS provides for postcopy. | |
3321 | * The pages must be unmapped by the end of the function. | |
3322 | * Returns: 0 on success, non-0 on failure | |
3323 | * | |
3324 | */ | |
3325 | int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) | |
3326 | { | |
3327 | int ret = -1; | |
3328 | ||
3329 | uint8_t *host_startaddr = rb->host + start; | |
3330 | ||
3331 | if ((uintptr_t)host_startaddr & (rb->page_size - 1)) { | |
3332 | error_report("ram_block_discard_range: Unaligned start address: %p", | |
3333 | host_startaddr); | |
3334 | goto err; | |
3335 | } | |
3336 | ||
3337 | if ((start + length) <= rb->used_length) { | |
3338 | uint8_t *host_endaddr = host_startaddr + length; | |
3339 | if ((uintptr_t)host_endaddr & (rb->page_size - 1)) { | |
3340 | error_report("ram_block_discard_range: Unaligned end address: %p", | |
3341 | host_endaddr); | |
3342 | goto err; | |
3343 | } | |
3344 | ||
3345 | errno = ENOTSUP; /* in case neither madvise nor fallocate is compiled in */ | |
3346 | ||
e2fa71f5 | 3347 | if (rb->page_size == qemu_host_page_size) { |
d3a5038c | 3348 | #if defined(CONFIG_MADVISE) |
e2fa71f5 DDAG |
3349 | /* Note: We need the madvise MADV_DONTNEED behaviour of definitely |
3350 | * freeing the page. | |
3351 | */ | |
3352 | ret = madvise(host_startaddr, length, MADV_DONTNEED); | |
d3a5038c | 3353 | #endif |
e2fa71f5 DDAG |
3354 | } else { |
3355 | /* Huge page case: madvise MADV_DONTNEED does not work on huge | |
3356 | * pages, but punching a hole in the backing file with | |
3357 | * FALLOC_FL_PUNCH_HOLE achieves the equivalent. | |
3358 | */ | |
3359 | #ifdef CONFIG_FALLOCATE_PUNCH_HOLE | |
3360 | ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, | |
3361 | start, length); | |
3362 | #endif | |
3363 | } | |
d3a5038c DDAG |
3364 | if (ret) { |
3365 | ret = -errno; | |
3366 | error_report("ram_block_discard_range: Failed to discard range " | |
3367 | "%s:%" PRIx64 " +%zx (%d)", | |
3368 | rb->idstr, start, length, ret); | |
3369 | } | |
3370 | } else { | |
3371 | error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64 | |
3372 | "/%zx/" RAM_ADDR_FMT")", | |
3373 | rb->idstr, start, length, rb->used_length); | |
3374 | } | |
3375 | ||
3376 | err: | |
3377 | return ret; | |
3378 | } | |
3379 | ||
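
/* Illustrative sketch, not part of the original source: discarding one
 * page of a block, as postcopy-style code might. It assumes
 * qemu_ram_pagesize() returns rb->page_size, so that the offsets
 * satisfy the alignment checks above.
 */
static int example_discard_one_page(RAMBlock *rb, uint64_t page_index)
{
    size_t psize = qemu_ram_pagesize(rb);

    return ram_block_discard_range(rb, page_index * (uint64_t)psize, psize);
}
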
ec3f8c99 | 3380 | #endif |