1/*
2 * Common CPU TLB handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include "qemu/osdep.h"
21#include "qemu/main-loop.h"
22#include "cpu.h"
23#include "exec/exec-all.h"
24#include "exec/memory.h"
25#include "exec/address-spaces.h"
26#include "exec/cpu_ldst.h"
27#include "exec/cputlb.h"
28#include "exec/memory-internal.h"
29#include "exec/ram_addr.h"
30#include "tcg/tcg.h"
31#include "qemu/error-report.h"
32#include "exec/log.h"
33#include "exec/helper-proto.h"
34#include "qemu/atomic.h"
35#include "qemu/atomic128.h"
36
37/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
38/* #define DEBUG_TLB */
39/* #define DEBUG_TLB_LOG */
40
41#ifdef DEBUG_TLB
42# define DEBUG_TLB_GATE 1
43# ifdef DEBUG_TLB_LOG
44# define DEBUG_TLB_LOG_GATE 1
45# else
46# define DEBUG_TLB_LOG_GATE 0
47# endif
48#else
49# define DEBUG_TLB_GATE 0
50# define DEBUG_TLB_LOG_GATE 0
51#endif
52
53#define tlb_debug(fmt, ...) do { \
54 if (DEBUG_TLB_LOG_GATE) { \
55 qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
56 ## __VA_ARGS__); \
57 } else if (DEBUG_TLB_GATE) { \
58 fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
59 } \
60} while (0)
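/*
 * Editor's note (illustrative, not in the original file): the macro is
 * used later in this file along the lines of
 *     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
 * With both gates at 0 the call is optimized away while its format
 * arguments are still type-checked.
 */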
61
62#define assert_cpu_is_self(cpu) do { \
63 if (DEBUG_TLB_GATE) { \
64 g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \
65 } \
66 } while (0)
67
68/* run_on_cpu_data.target_ptr should always be big enough for a
69 * target_ulong even on 32 bit builds */
70QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
71
72/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
73 */
74QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
75#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
76
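/*
 * Editor's note (illustrative, not in the original file): the idxmap /
 * asked arguments used below are bitmasks over MMU indexes, so flushing
 * MMU indexes 0 and 2 would be requested as
 *     tlb_flush_by_mmuidx(cpu, (1 << 0) | (1 << 2));
 */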
77static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
78{
79 return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
80}
81
82static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
83 size_t max_entries)
84{
85 desc->window_begin_ns = ns;
86 desc->window_max_entries = max_entries;
87}
88
89static void tlb_dyn_init(CPUArchState *env)
90{
91 int i;
92
93 for (i = 0; i < NB_MMU_MODES; i++) {
94 CPUTLBDesc *desc = &env_tlb(env)->d[i];
95 size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
96
97 tlb_window_reset(desc, get_clock_realtime(), 0);
98 desc->n_used_entries = 0;
99 env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
100 env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
101 env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
102 }
103}
104
105/**
106 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
107 * @env: CPU that owns the TLB
108 * @mmu_idx: MMU index of the TLB
109 *
110 * Called with tlb_c.lock held.
111 *
112 * We have two main constraints when resizing a TLB: (1) we only resize it
113 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
114 * the array or unnecessarily flushing it), which means we do not control how
115 * frequently the resizing can occur; (2) we don't have access to the guest's
116 * future scheduling decisions, and therefore have to decide the magnitude of
117 * the resize based on past observations.
118 *
119 * In general, a memory-hungry process can benefit greatly from an appropriately
120 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
121 * we just have to make the TLB as large as possible; while an oversized TLB
122 * results in minimal TLB miss rates, it also takes longer to be flushed
123 * (flushes can be _very_ frequent), and the reduced locality can also hurt
124 * performance.
125 *
126 * To achieve near-optimal performance for all kinds of workloads, we:
127 *
128 * 1. Aggressively increase the size of the TLB when the use rate of the
129 * TLB being flushed is high, since it is likely that in the near future this
130 * memory-hungry process will execute again, and its memory hungriness will
131 * probably be similar.
132 *
133 * 2. Slowly reduce the size of the TLB as the use rate declines over a
134 * reasonably large time window. The rationale is that if in such a time window
135 * we have not observed a high TLB use rate, it is likely that we won't observe
136 * it in the near future. In that case, once a time window expires we downsize
137 * the TLB to match the maximum use rate observed in the window.
138 *
139 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
140 * since in that range performance is likely near-optimal. Recall that the TLB
141 * is direct mapped, so we want the use rate to be low (or at least not too
142 * high), since otherwise we are likely to have a significant amount of
143 * conflict misses.
144 */
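/*
 * Editor's worked example (assuming the thresholds used below): with a
 * 1024-entry TLB whose window_max_entries reaches 800, the use rate is
 * 800 * 100 / 1024 = 78%, so the TLB doubles to 2048 entries (capped at
 * 1 << CPU_TLB_DYN_MAX_BITS). If instead the maximum use seen in an
 * expired 100 ms window is 150 entries (rate 14%), pow2ceil(150) = 256
 * gives an expected use rate of 58%, so the TLB shrinks to 256 entries,
 * never going below 1 << CPU_TLB_DYN_MIN_BITS.
 */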
145static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
146{
147 CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
148 size_t old_size = tlb_n_entries(env, mmu_idx);
149 size_t rate;
150 size_t new_size = old_size;
151 int64_t now = get_clock_realtime();
152 int64_t window_len_ms = 100;
153 int64_t window_len_ns = window_len_ms * 1000 * 1000;
154 bool window_expired = now > desc->window_begin_ns + window_len_ns;
155
156 if (desc->n_used_entries > desc->window_max_entries) {
157 desc->window_max_entries = desc->n_used_entries;
158 }
159 rate = desc->window_max_entries * 100 / old_size;
160
161 if (rate > 70) {
162 new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
163 } else if (rate < 30 && window_expired) {
164 size_t ceil = pow2ceil(desc->window_max_entries);
165 size_t expected_rate = desc->window_max_entries * 100 / ceil;
166
167 /*
168 * Avoid undersizing when the max number of entries seen is just below
169 * a pow2. For instance, if max_entries == 1025, the expected use rate
170 * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
171 * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
172 * later. Thus, make sure that the expected use rate remains below 70%.
173 * (and since we double the size, that means the lowest rate we'd
174 * expect to get is 35%, which is still in the 30-70% range where
175 * we consider that the size is appropriate.)
176 */
177 if (expected_rate > 70) {
178 ceil *= 2;
179 }
180 new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
181 }
182
183 if (new_size == old_size) {
184 if (window_expired) {
185 tlb_window_reset(desc, now, desc->n_used_entries);
186 }
187 return;
188 }
189
190 g_free(env_tlb(env)->f[mmu_idx].table);
191 g_free(env_tlb(env)->d[mmu_idx].iotlb);
192
193 tlb_window_reset(desc, now, 0);
194 /* desc->n_used_entries is cleared by the caller */
195 env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
196 env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
197 env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
198 /*
199 * If the allocations fail, try smaller sizes. We just freed some
200 * memory, so going back to half of new_size has a good chance of working.
201 * Increased memory pressure elsewhere in the system might cause the
202 * allocations to fail though, so we progressively reduce the allocation
203 * size, aborting if we cannot even allocate the smallest TLB we support.
204 */
205 while (env_tlb(env)->f[mmu_idx].table == NULL ||
206 env_tlb(env)->d[mmu_idx].iotlb == NULL) {
207 if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
208 error_report("%s: %s", __func__, strerror(errno));
209 abort();
210 }
211 new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
212 env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
213
214 g_free(env_tlb(env)->f[mmu_idx].table);
215 g_free(env_tlb(env)->d[mmu_idx].iotlb);
216 env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
217 env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
218 }
219}
220
221static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
222{
223 tlb_mmu_resize_locked(env, mmu_idx);
224 memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
225 env_tlb(env)->d[mmu_idx].n_used_entries = 0;
226}
227
228static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
229{
230 env_tlb(env)->d[mmu_idx].n_used_entries++;
231}
232
233static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
234{
235 env_tlb(env)->d[mmu_idx].n_used_entries--;
236}
237
238void tlb_init(CPUState *cpu)
239{
240 CPUArchState *env = cpu->env_ptr;
241
242 qemu_spin_init(&env_tlb(env)->c.lock);
243
244 /* Ensure that cpu_reset performs a full flush. */
245 env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;
246
247 tlb_dyn_init(env);
248}
249
250/* flush_all_helper: run fn across all cpus
251 *
252 * The helper is queued on every vCPU other than the source as
253 * asynchronous work; the caller then either runs fn on the source vCPU
254 * directly, or (in the _synced variants) queues it as "safe" work so
255 * that all queued work finishes before execution starts again.
256 */
257static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
258 run_on_cpu_data d)
259{
260 CPUState *cpu;
261
262 CPU_FOREACH(cpu) {
263 if (cpu != src) {
264 async_run_on_cpu(cpu, fn, d);
265 }
266 }
267}
268
269void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
270{
271 CPUState *cpu;
272 size_t full = 0, part = 0, elide = 0;
273
274 CPU_FOREACH(cpu) {
275 CPUArchState *env = cpu->env_ptr;
276
277 full += atomic_read(&env_tlb(env)->c.full_flush_count);
278 part += atomic_read(&env_tlb(env)->c.part_flush_count);
279 elide += atomic_read(&env_tlb(env)->c.elide_flush_count);
280 }
281 *pfull = full;
282 *ppart = part;
283 *pelide = elide;
284}
285
286static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
287{
288 tlb_table_flush_by_mmuidx(env, mmu_idx);
289 env_tlb(env)->d[mmu_idx].large_page_addr = -1;
290 env_tlb(env)->d[mmu_idx].large_page_mask = -1;
291 env_tlb(env)->d[mmu_idx].vindex = 0;
292 memset(env_tlb(env)->d[mmu_idx].vtable, -1,
293 sizeof(env_tlb(env)->d[0].vtable));
294}
295
296static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
297{
298 CPUArchState *env = cpu->env_ptr;
299 uint16_t asked = data.host_int;
300 uint16_t all_dirty, work, to_clean;
301
302 assert_cpu_is_self(cpu);
303
304 tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
305
306 qemu_spin_lock(&env_tlb(env)->c.lock);
307
308 all_dirty = env_tlb(env)->c.dirty;
309 to_clean = asked & all_dirty;
310 all_dirty &= ~to_clean;
311 env_tlb(env)->c.dirty = all_dirty;
312
313 for (work = to_clean; work != 0; work &= work - 1) {
314 int mmu_idx = ctz32(work);
315 tlb_flush_one_mmuidx_locked(env, mmu_idx);
316 }
317
318 qemu_spin_unlock(&env_tlb(env)->c.lock);
319
320 cpu_tb_jmp_cache_clear(cpu);
321
322 if (to_clean == ALL_MMUIDX_BITS) {
323 atomic_set(&env_tlb(env)->c.full_flush_count,
324 env_tlb(env)->c.full_flush_count + 1);
325 } else {
326 atomic_set(&env_tlb(env)->c.part_flush_count,
327 env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
328 if (to_clean != asked) {
329 atomic_set(&env_tlb(env)->c.elide_flush_count,
330 env_tlb(env)->c.elide_flush_count +
331 ctpop16(asked & ~to_clean));
332 }
333 }
334}
335
336void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
337{
338 tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
339
340 if (cpu->created && !qemu_cpu_is_self(cpu)) {
341 async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
342 RUN_ON_CPU_HOST_INT(idxmap));
343 } else {
344 tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
345 }
346}
347
348void tlb_flush(CPUState *cpu)
349{
350 tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
351}
352
353void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
354{
355 const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
356
357 tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
358
359 flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
360 fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
361}
362
363void tlb_flush_all_cpus(CPUState *src_cpu)
364{
365 tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
366}
367
368void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
369{
370 const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
371
372 tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
373
374 flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
375 async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
376}
377
378void tlb_flush_all_cpus_synced(CPUState *src_cpu)
379{
380 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
381}
382
383static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
384 target_ulong page)
385{
386 return tlb_hit_page(tlb_entry->addr_read, page) ||
387 tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
388 tlb_hit_page(tlb_entry->addr_code, page);
389}
390
391/**
392 * tlb_entry_is_empty - return true if the entry is not in use
393 * @te: pointer to CPUTLBEntry
394 */
395static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
396{
397 return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
398}
399
400/* Called with tlb_c.lock held */
401static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
402 target_ulong page)
403{
404 if (tlb_hit_page_anyprot(tlb_entry, page)) {
405 memset(tlb_entry, -1, sizeof(*tlb_entry));
406 return true;
407 }
408 return false;
409}
410
411/* Called with tlb_c.lock held */
412static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
413 target_ulong page)
414{
415 CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
416 int k;
417
418 assert_cpu_is_self(env_cpu(env));
419 for (k = 0; k < CPU_VTLB_SIZE; k++) {
420 if (tlb_flush_entry_locked(&d->vtable[k], page)) {
421 tlb_n_used_entries_dec(env, mmu_idx);
422 }
423 }
424}
425
426static void tlb_flush_page_locked(CPUArchState *env, int midx,
427 target_ulong page)
428{
429 target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
430 target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;
431
432 /* Check if we need to flush due to large pages. */
433 if ((page & lp_mask) == lp_addr) {
434 tlb_debug("forcing full flush midx %d ("
435 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
436 midx, lp_addr, lp_mask);
437 tlb_flush_one_mmuidx_locked(env, midx);
438 } else {
439 if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
440 tlb_n_used_entries_dec(env, midx);
441 }
442 tlb_flush_vtlb_page_locked(env, midx, page);
443 }
444}
445
446/* As we are going to hijack the bottom bits of the page address for a
447 * mmuidx bit mask we need to fail to build if we can't do that
448 */
449QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
450
451static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
452 run_on_cpu_data data)
453{
454 CPUArchState *env = cpu->env_ptr;
455 target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
456 target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
457 unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
458 int mmu_idx;
459
460 assert_cpu_is_self(cpu);
461
462 tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
463 addr, mmu_idx_bitmap);
464
465 qemu_spin_lock(&env_tlb(env)->c.lock);
466 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
467 if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
468 tlb_flush_page_locked(env, mmu_idx, addr);
469 }
470 }
471 qemu_spin_unlock(&env_tlb(env)->c.lock);
472
473 tb_flush_jmp_cache(cpu, addr);
474}
475
476void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
477{
478 target_ulong addr_and_mmu_idx;
479
480 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
481
482 /* This should already be page aligned */
483 addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
484 addr_and_mmu_idx |= idxmap;
485
486 if (!qemu_cpu_is_self(cpu)) {
487 async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
488 RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
489 } else {
490 tlb_flush_page_by_mmuidx_async_work(
491 cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
492 }
493}
494
495void tlb_flush_page(CPUState *cpu, target_ulong addr)
496{
497 tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
498}
499
500void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
501 uint16_t idxmap)
502{
503 const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
504 target_ulong addr_and_mmu_idx;
505
506 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
507
508 /* This should already be page aligned */
509 addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
510 addr_and_mmu_idx |= idxmap;
511
512 flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
513 fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
514}
515
516void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
517{
518 tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
519}
520
521void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
522 target_ulong addr,
523 uint16_t idxmap)
524{
525 const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
526 target_ulong addr_and_mmu_idx;
527
528 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
529
530 /* This should already be page aligned */
531 addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
532 addr_and_mmu_idx |= idxmap;
533
534 flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
535 async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
536}
537
538void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
539{
540 tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
541}
542
543/* update the TLBs so that writes to code in the virtual page 'addr'
544 can be detected */
545void tlb_protect_code(ram_addr_t ram_addr)
546{
547 cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
548 DIRTY_MEMORY_CODE);
549}
550
551/* update the TLB so that writes in physical page 'phys_addr' are no longer
552 tested for self modifying code */
553void tlb_unprotect_code(ram_addr_t ram_addr)
554{
555 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
556}
557
558
559/*
560 * Dirty write flag handling
561 *
562 * When the TCG code writes to a location it looks up the address in
563 * the TLB and uses that data to compute the final address. If any of
564 * the lower bits of the address are set then the slow path is forced.
565 * There are a number of reasons to do this but for normal RAM the
566 * most usual is detecting writes to code regions which may invalidate
567 * generated code.
568 *
569 * Other vCPUs might be reading their TLBs during guest execution, so we update
570 * te->addr_write with atomic_set. We don't need to worry about this for
571 * oversized guests as MTTCG is disabled for them.
572 *
573 * Called with tlb_c.lock held.
574 */
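/*
 * Editor's note (assumption for illustration): the "lower bits" above are
 * the TLB_* flag bits kept below TARGET_PAGE_BITS in addr_write; setting
 * TLB_NOTDIRTY makes the fast-path address comparison fail, so the store
 * takes the slow path where the notdirty/IO handling can run.
 */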
575static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
576 uintptr_t start, uintptr_t length)
577{
578 uintptr_t addr = tlb_entry->addr_write;
579
580 if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
581 addr &= TARGET_PAGE_MASK;
582 addr += tlb_entry->addend;
583 if ((addr - start) < length) {
584#if TCG_OVERSIZED_GUEST
585 tlb_entry->addr_write |= TLB_NOTDIRTY;
586#else
587 atomic_set(&tlb_entry->addr_write,
588 tlb_entry->addr_write | TLB_NOTDIRTY);
589#endif
590 }
591 }
592}
593
594/*
595 * Called with tlb_c.lock held.
596 * Called only from the vCPU context, i.e. the TLB's owner thread.
597 */
598static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
599{
600 *d = *s;
601}
602
603/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
604 * the target vCPU).
605 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
606 * thing actually updated is the target TLB entry ->addr_write flags.
607 */
608void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
609{
610 CPUArchState *env;
611
612 int mmu_idx;
613
614 env = cpu->env_ptr;
615 qemu_spin_lock(&env_tlb(env)->c.lock);
616 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
617 unsigned int i;
618 unsigned int n = tlb_n_entries(env, mmu_idx);
619
620 for (i = 0; i < n; i++) {
621 tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
622 start1, length);
623 }
624
625 for (i = 0; i < CPU_VTLB_SIZE; i++) {
626 tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
627 start1, length);
628 }
629 }
630 qemu_spin_unlock(&env_tlb(env)->c.lock);
631}
632
633/* Called with tlb_c.lock held */
634static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
635 target_ulong vaddr)
636{
637 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
638 tlb_entry->addr_write = vaddr;
639 }
640}
641
642/* update the TLB corresponding to virtual page vaddr
643 so that it is no longer dirty */
644void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
645{
646 CPUArchState *env = cpu->env_ptr;
647 int mmu_idx;
648
649 assert_cpu_is_self(cpu);
650
651 vaddr &= TARGET_PAGE_MASK;
652 qemu_spin_lock(&env_tlb(env)->c.lock);
653 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
654 tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
655 }
656
657 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
658 int k;
659 for (k = 0; k < CPU_VTLB_SIZE; k++) {
660 tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
661 }
662 }
663 qemu_spin_unlock(&env_tlb(env)->c.lock);
664}
665
666/* Our TLB does not support large pages, so remember the area covered by
667 large pages and trigger a full TLB flush if these are invalidated. */
668static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
669 target_ulong vaddr, target_ulong size)
670{
671 target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
672 target_ulong lp_mask = ~(size - 1);
673
674 if (lp_addr == (target_ulong)-1) {
675 /* No previous large page. */
676 lp_addr = vaddr;
677 } else {
678 /* Extend the existing region to include the new page.
679 This is a compromise between unnecessary flushes and
680 the cost of maintaining a full variable size TLB. */
681 lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
682 while (((lp_addr ^ vaddr) & lp_mask) != 0) {
683 lp_mask <<= 1;
684 }
685 }
686 env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
687 env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
688}
689
690/* Add a new TLB entry. At most one entry for a given virtual address
691 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
692 * supplied size is only used by tlb_flush_page.
693 *
694 * Called from TCG-generated code, which is under an RCU read-side
695 * critical section.
696 */
697void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
698 hwaddr paddr, MemTxAttrs attrs, int prot,
699 int mmu_idx, target_ulong size)
700{
701 CPUArchState *env = cpu->env_ptr;
702 CPUTLB *tlb = env_tlb(env);
703 CPUTLBDesc *desc = &tlb->d[mmu_idx];
704 MemoryRegionSection *section;
705 unsigned int index;
706 target_ulong address;
707 target_ulong code_address;
708 uintptr_t addend;
709 CPUTLBEntry *te, tn;
710 hwaddr iotlb, xlat, sz, paddr_page;
711 target_ulong vaddr_page;
712 int asidx = cpu_asidx_from_attrs(cpu, attrs);
713
714 assert_cpu_is_self(cpu);
715
716 if (size <= TARGET_PAGE_SIZE) {
717 sz = TARGET_PAGE_SIZE;
718 } else {
719 tlb_add_large_page(env, mmu_idx, vaddr, size);
720 sz = size;
721 }
722 vaddr_page = vaddr & TARGET_PAGE_MASK;
723 paddr_page = paddr & TARGET_PAGE_MASK;
724
725 section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
726 &xlat, &sz, attrs, &prot);
727 assert(sz >= TARGET_PAGE_SIZE);
728
729 tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
730 " prot=%x idx=%d\n",
731 vaddr, paddr, prot, mmu_idx);
732
733 address = vaddr_page;
734 if (size < TARGET_PAGE_SIZE) {
735 /*
736 * Slow-path the TLB entries; we will repeat the MMU check and TLB
737 * fill on every access.
738 */
739 address |= TLB_RECHECK;
740 }
741 if (!memory_region_is_ram(section->mr) &&
742 !memory_region_is_romd(section->mr)) {
743 /* IO memory case */
744 address |= TLB_MMIO;
745 addend = 0;
746 } else {
747 /* TLB_MMIO for rom/romd handled below */
748 addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
749 }
750
751 code_address = address;
752 iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
753 paddr_page, xlat, prot, &address);
754
755 index = tlb_index(env, mmu_idx, vaddr_page);
756 te = tlb_entry(env, mmu_idx, vaddr_page);
757
758 /*
759 * Hold the TLB lock for the rest of the function. We could acquire/release
760 * the lock several times in the function, but it is faster to amortize the
761 * acquisition cost by acquiring it just once. Note that this leads to
762 * a longer critical section, but this is not a concern since the TLB lock
763 * is unlikely to be contended.
764 */
765 qemu_spin_lock(&tlb->c.lock);
766
767 /* Note that the tlb is no longer clean. */
768 tlb->c.dirty |= 1 << mmu_idx;
769
770 /* Make sure there's no cached translation for the new page. */
771 tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);
772
773 /*
774 * Only evict the old entry to the victim tlb if it's for a
775 * different page; otherwise just overwrite the stale data.
776 */
777 if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
778 unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
779 CPUTLBEntry *tv = &desc->vtable[vidx];
780
781 /* Evict the old entry into the victim tlb. */
782 copy_tlb_helper_locked(tv, te);
783 desc->viotlb[vidx] = desc->iotlb[index];
784 tlb_n_used_entries_dec(env, mmu_idx);
785 }
786
787 /* refill the tlb */
788 /*
789 * At this point iotlb contains a physical section number in the lower
790 * TARGET_PAGE_BITS, and either
791 * + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
792 * + the offset within section->mr of the page base (otherwise)
793 * We subtract the vaddr_page (which is page aligned and thus won't
794 * disturb the low bits) to give an offset which can be added to the
795 * (non-page-aligned) vaddr of the eventual memory access to get
796 * the MemoryRegion offset for the access. Note that the vaddr we
797 * subtract here is that of the page base, and not the same as the
798 * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
799 */
800 desc->iotlb[index].addr = iotlb - vaddr_page;
801 desc->iotlb[index].attrs = attrs;
802
803 /* Now calculate the new entry */
804 tn.addend = addend - vaddr_page;
805 if (prot & PAGE_READ) {
806 tn.addr_read = address;
807 } else {
808 tn.addr_read = -1;
809 }
810
811 if (prot & PAGE_EXEC) {
812 tn.addr_code = code_address;
813 } else {
814 tn.addr_code = -1;
815 }
816
817 tn.addr_write = -1;
818 if (prot & PAGE_WRITE) {
819 if ((memory_region_is_ram(section->mr) && section->readonly)
820 || memory_region_is_romd(section->mr)) {
821 /* Write access calls the I/O callback. */
822 tn.addr_write = address | TLB_MMIO;
823 } else if (memory_region_is_ram(section->mr)
824 && cpu_physical_memory_is_clean(
825 memory_region_get_ram_addr(section->mr) + xlat)) {
826 tn.addr_write = address | TLB_NOTDIRTY;
827 } else {
828 tn.addr_write = address;
829 }
830 if (prot & PAGE_WRITE_INV) {
831 tn.addr_write |= TLB_INVALID_MASK;
832 }
833 }
834
835 copy_tlb_helper_locked(te, &tn);
836 tlb_n_used_entries_inc(env, mmu_idx);
837 qemu_spin_unlock(&tlb->c.lock);
838}
839
840/* Add a new TLB entry, but without specifying the memory
841 * transaction attributes to be used.
842 */
843void tlb_set_page(CPUState *cpu, target_ulong vaddr,
844 hwaddr paddr, int prot,
845 int mmu_idx, target_ulong size)
846{
847 tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
848 prot, mmu_idx, size);
849}
850
851static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
852{
853 ram_addr_t ram_addr;
854
855 ram_addr = qemu_ram_addr_from_host(ptr);
856 if (ram_addr == RAM_ADDR_INVALID) {
857 error_report("Bad ram pointer %p", ptr);
858 abort();
859 }
860 return ram_addr;
861}
862
863/*
864 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
865 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
866 * be discarded and looked up again (e.g. via tlb_entry()).
867 */
868static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
869 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
870{
871 CPUClass *cc = CPU_GET_CLASS(cpu);
872 bool ok;
873
874 /*
875 * This is not a probe, so only valid return is success; failure
876 * should result in exception + longjmp to the cpu loop.
877 */
878 ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
879 assert(ok);
880}
881
882static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
883 int mmu_idx, target_ulong addr, uintptr_t retaddr,
884 MMUAccessType access_type, MemOp op)
885{
886 CPUState *cpu = env_cpu(env);
887 hwaddr mr_offset;
888 MemoryRegionSection *section;
889 MemoryRegion *mr;
890 uint64_t val;
891 bool locked = false;
892 MemTxResult r;
893
894 section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
895 mr = section->mr;
896 mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
897 cpu->mem_io_pc = retaddr;
898 if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
899 cpu_io_recompile(cpu, retaddr);
900 }
901
902 cpu->mem_io_vaddr = addr;
903 cpu->mem_io_access_type = access_type;
904
905 if (mr->global_locking && !qemu_mutex_iothread_locked()) {
906 qemu_mutex_lock_iothread();
907 locked = true;
908 }
909 r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
910 if (r != MEMTX_OK) {
911 hwaddr physaddr = mr_offset +
912 section->offset_within_address_space -
913 section->offset_within_region;
914
915 cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
916 mmu_idx, iotlbentry->attrs, r, retaddr);
917 }
918 if (locked) {
919 qemu_mutex_unlock_iothread();
920 }
921
922 return val;
923}
924
925static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
926 int mmu_idx, uint64_t val, target_ulong addr,
927 uintptr_t retaddr, MemOp op)
928{
929 CPUState *cpu = env_cpu(env);
930 hwaddr mr_offset;
931 MemoryRegionSection *section;
932 MemoryRegion *mr;
933 bool locked = false;
934 MemTxResult r;
935
936 section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
937 mr = section->mr;
938 mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
939 if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
940 cpu_io_recompile(cpu, retaddr);
941 }
942 cpu->mem_io_vaddr = addr;
943 cpu->mem_io_pc = retaddr;
944
945 if (mr->global_locking && !qemu_mutex_iothread_locked()) {
946 qemu_mutex_lock_iothread();
947 locked = true;
948 }
949 r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
950 if (r != MEMTX_OK) {
951 hwaddr physaddr = mr_offset +
952 section->offset_within_address_space -
953 section->offset_within_region;
954
955 cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
956 MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
957 retaddr);
958 }
959 if (locked) {
960 qemu_mutex_unlock_iothread();
961 }
962}
963
964static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
965{
966#if TCG_OVERSIZED_GUEST
967 return *(target_ulong *)((uintptr_t)entry + ofs);
968#else
969 /* ofs might correspond to .addr_write, so use atomic_read */
970 return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
971#endif
972}
973
974/* Return true if ADDR is present in the victim tlb, and has been copied
975 back to the main tlb. */
976static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
977 size_t elt_ofs, target_ulong page)
978{
979 size_t vidx;
980
981 assert_cpu_is_self(env_cpu(env));
982 for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
983 CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
984 target_ulong cmp;
985
986 /* elt_ofs might correspond to .addr_write, so use atomic_read */
987#if TCG_OVERSIZED_GUEST
988 cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
989#else
990 cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
991#endif
992
993 if (cmp == page) {
994 /* Found entry in victim tlb, swap tlb and iotlb. */
995 CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
996
997 qemu_spin_lock(&env_tlb(env)->c.lock);
998 copy_tlb_helper_locked(&tmptlb, tlb);
999 copy_tlb_helper_locked(tlb, vtlb);
1000 copy_tlb_helper_locked(vtlb, &tmptlb);
1001 qemu_spin_unlock(&env_tlb(env)->c.lock);
1002
1003 CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
1004 CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
1005 tmpio = *io; *io = *vio; *vio = tmpio;
1006 return true;
1007 }
1008 }
1009 return false;
1010}
1011
1012/* Macro to call the above, with local variables from the use context. */
1013#define VICTIM_TLB_HIT(TY, ADDR) \
1014 victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
1015 (ADDR) & TARGET_PAGE_MASK)
1016
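/*
 * Editor's note: typical use, as seen in the helpers below:
 *     if (!tlb_hit(tlb_addr_write(entry), addr) &&
 *         !VICTIM_TLB_HIT(addr_write, addr)) {
 *         tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
 *                  mmu_idx, retaddr);
 *     }
 */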
1017/* NOTE: this function can trigger an exception */
1018/* NOTE2: the returned address is not exactly the physical address: it
1019 * is actually a ram_addr_t (in system mode; the user mode emulation
1020 * version of this function returns a guest virtual address).
1021 */
1022tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
1023{
1024 uintptr_t mmu_idx = cpu_mmu_index(env, true);
1025 uintptr_t index = tlb_index(env, mmu_idx, addr);
1026 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1027 void *p;
1028
1029 if (unlikely(!tlb_hit(entry->addr_code, addr))) {
1030 if (!VICTIM_TLB_HIT(addr_code, addr)) {
1031 tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
1032 index = tlb_index(env, mmu_idx, addr);
1033 entry = tlb_entry(env, mmu_idx, addr);
1034 }
1035 assert(tlb_hit(entry->addr_code, addr));
1036 }
1037
1038 if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
1039 /*
1040 * Return -1 if we can't translate and execute from an entire
1041 * page of RAM here, which will cause us to execute by loading
1042 * and translating one insn at a time, without caching:
1043 * - TLB_RECHECK: means the MMU protection covers a smaller range
1044 * than a target page, so we must redo the MMU check every insn
1045 * - TLB_MMIO: region is not backed by RAM
1046 */
1047 return -1;
1048 }
1049
1050 p = (void *)((uintptr_t)addr + entry->addend);
1051 return qemu_ram_addr_from_host_nofail(p);
1052}
1053
1054/* Probe for whether the specified guest write access is permitted.
1055 * If it is not permitted then an exception will be taken in the same
1056 * way as if this were a real write access (and we will not return).
1057 * Otherwise the function will return, and there will be a valid
1058 * entry in the TLB for this access.
1059 */
1060void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
1061 uintptr_t retaddr)
1062{
1063 uintptr_t index = tlb_index(env, mmu_idx, addr);
1064 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1065
1066 if (!tlb_hit(tlb_addr_write(entry), addr)) {
1067 /* TLB entry is for a different page */
1068 if (!VICTIM_TLB_HIT(addr_write, addr)) {
1069 tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
1070 mmu_idx, retaddr);
1071 }
1072 }
1073}
1074
1075void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1076 MMUAccessType access_type, int mmu_idx)
1077{
1078 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1079 uintptr_t tlb_addr, page;
1080 size_t elt_ofs;
1081
1082 switch (access_type) {
1083 case MMU_DATA_LOAD:
1084 elt_ofs = offsetof(CPUTLBEntry, addr_read);
1085 break;
1086 case MMU_DATA_STORE:
1087 elt_ofs = offsetof(CPUTLBEntry, addr_write);
1088 break;
1089 case MMU_INST_FETCH:
1090 elt_ofs = offsetof(CPUTLBEntry, addr_code);
1091 break;
1092 default:
1093 g_assert_not_reached();
1094 }
1095
1096 page = addr & TARGET_PAGE_MASK;
1097 tlb_addr = tlb_read_ofs(entry, elt_ofs);
1098
1099 if (!tlb_hit_page(tlb_addr, page)) {
1100 uintptr_t index = tlb_index(env, mmu_idx, addr);
1101
1102 if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
1103 CPUState *cs = env_cpu(env);
1104 CPUClass *cc = CPU_GET_CLASS(cs);
1105
1106 if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
1107 /* Non-faulting page table read failed. */
1108 return NULL;
1109 }
1110
1111 /* TLB resize via tlb_fill may have moved the entry. */
1112 entry = tlb_entry(env, mmu_idx, addr);
1113 }
1114 tlb_addr = tlb_read_ofs(entry, elt_ofs);
1115 }
1116
1117 if (tlb_addr & ~TARGET_PAGE_MASK) {
1118 /* IO access */
1119 return NULL;
1120 }
1121
1122 return (void *)((uintptr_t)addr + entry->addend);
1123}
1124
1125/* Probe for a read-modify-write atomic operation. Do not allow unaligned
1126 * operations, or io operations to proceed. Return the host address. */
1127static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
1128 TCGMemOpIdx oi, uintptr_t retaddr,
1129 NotDirtyInfo *ndi)
1130{
1131 size_t mmu_idx = get_mmuidx(oi);
1132 uintptr_t index = tlb_index(env, mmu_idx, addr);
1133 CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1134 target_ulong tlb_addr = tlb_addr_write(tlbe);
1135 MemOp mop = get_memop(oi);
1136 int a_bits = get_alignment_bits(mop);
1137 int s_bits = mop & MO_SIZE;
1138 void *hostaddr;
1139
1140 /* Adjust the given return address. */
1141 retaddr -= GETPC_ADJ;
1142
1143 /* Enforce guest required alignment. */
1144 if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1145 /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1146 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1147 mmu_idx, retaddr);
1148 }
1149
1150 /* Enforce qemu required alignment. */
1151 if (unlikely(addr & ((1 << s_bits) - 1))) {
1152 /* We get here if guest alignment was not requested,
1153 or was not enforced by cpu_unaligned_access above.
1154 We might widen the access and emulate, but for now
1155 mark an exception and exit the cpu loop. */
1156 goto stop_the_world;
1157 }
1158
1159 /* Check TLB entry and enforce page permissions. */
1160 if (!tlb_hit(tlb_addr, addr)) {
1161 if (!VICTIM_TLB_HIT(addr_write, addr)) {
1162 tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
1163 mmu_idx, retaddr);
1164 index = tlb_index(env, mmu_idx, addr);
1165 tlbe = tlb_entry(env, mmu_idx, addr);
1166 }
1167 tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1168 }
1169
1170 /* Notice an IO access or a needs-MMU-lookup access */
1171 if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
1172 /* There's really nothing that can be done to
1173 support this apart from stop-the-world. */
1174 goto stop_the_world;
1175 }
1176
1177 /* Let the guest notice RMW on a write-only page. */
1178 if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
1179 tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
1180 mmu_idx, retaddr);
1181 /* Since we don't support reads and writes to different addresses,
1182 and we do have the proper page loaded for write, this shouldn't
1183 ever return. But just in case, handle via stop-the-world. */
1184 goto stop_the_world;
1185 }
1186
1187 hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1188
1189 ndi->active = false;
1190 if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
1191 ndi->active = true;
1192 memory_notdirty_write_prepare(ndi, env_cpu(env), addr,
1193 qemu_ram_addr_from_host_nofail(hostaddr),
1194 1 << s_bits);
1195 }
1196
1197 return hostaddr;
1198
1199 stop_the_world:
1200 cpu_loop_exit_atomic(env_cpu(env), retaddr);
1201}
1202
1203/*
1204 * Load Helpers
1205 *
1206 * We support two different access types. SOFTMMU_CODE_ACCESS is
1207 * specifically for reading instructions from system memory. It is
1208 * called by the translation loop and in some helpers where the code
1209 * is disassembled. It shouldn't be called directly by guest code.
1210 */
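/*
 * Editor's illustration of the slow unaligned path in load_helper(): for
 * a 4-byte little-endian load whose last two bytes fall on the next page,
 * two full loads are issued at addr1 = addr & ~3 and addr2 = addr1 + 4,
 * then combined with shift = (addr & 3) * 8 as
 *     res = (r1 >> shift) | (r2 << (32 - shift));
 * before the result is masked to 32 bits.
 */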
1211
1212typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
1213 TCGMemOpIdx oi, uintptr_t retaddr);
1214
1215static inline uint64_t __attribute__((always_inline))
1216load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1217 uintptr_t retaddr, MemOp op, bool code_read,
1218 FullLoadHelper *full_load)
1219{
1220 uintptr_t mmu_idx = get_mmuidx(oi);
1221 uintptr_t index = tlb_index(env, mmu_idx, addr);
1222 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1223 target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1224 const size_t tlb_off = code_read ?
1225 offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
1226 const MMUAccessType access_type =
1227 code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
1228 unsigned a_bits = get_alignment_bits(get_memop(oi));
1229 void *haddr;
1230 uint64_t res;
1231 size_t size = memop_size(op);
1232
1233 /* Handle CPU specific unaligned behaviour */
1234 if (addr & ((1 << a_bits) - 1)) {
1235 cpu_unaligned_access(env_cpu(env), addr, access_type,
1236 mmu_idx, retaddr);
1237 }
1238
1239 /* If the TLB entry is for a different page, reload and try again. */
1240 if (!tlb_hit(tlb_addr, addr)) {
1241 if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1242 addr & TARGET_PAGE_MASK)) {
1243 tlb_fill(env_cpu(env), addr, size,
1244 access_type, mmu_idx, retaddr);
1245 index = tlb_index(env, mmu_idx, addr);
1246 entry = tlb_entry(env, mmu_idx, addr);
1247 }
1248 tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1249 }
1250
1251 /* Handle an IO access. */
1252 if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1253 if ((addr & (size - 1)) != 0) {
1254 goto do_unaligned_access;
1255 }
1256
1257 if (tlb_addr & TLB_RECHECK) {
1258 /*
1259 * This is a TLB_RECHECK access, where the MMU protection
1260 * covers a smaller range than a target page, and we must
1261 * repeat the MMU check here. This tlb_fill() call might
1262 * longjump out if this access should cause a guest exception.
1263 */
1264 tlb_fill(env_cpu(env), addr, size,
1265 access_type, mmu_idx, retaddr);
1266 index = tlb_index(env, mmu_idx, addr);
1267 entry = tlb_entry(env, mmu_idx, addr);
1268
1269 tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1270 tlb_addr &= ~TLB_RECHECK;
1271 if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
1272 /* RAM access */
1273 goto do_aligned_access;
1274 }
1275 }
1276
1277 return io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
1278 mmu_idx, addr, retaddr, access_type, op);
1279 }
1280
1281 /* Handle slow unaligned access (it spans two pages or IO). */
1282 if (size > 1
1283 && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1284 >= TARGET_PAGE_SIZE)) {
1285 target_ulong addr1, addr2;
1286 uint64_t r1, r2;
1287 unsigned shift;
1288 do_unaligned_access:
1289 addr1 = addr & ~((target_ulong)size - 1);
1290 addr2 = addr1 + size;
1291 r1 = full_load(env, addr1, oi, retaddr);
1292 r2 = full_load(env, addr2, oi, retaddr);
1293 shift = (addr & (size - 1)) * 8;
1294
1295 if (memop_big_endian(op)) {
1296 /* Big-endian combine. */
1297 res = (r1 << shift) | (r2 >> ((size * 8) - shift));
1298 } else {
1299 /* Little-endian combine. */
1300 res = (r1 >> shift) | (r2 << ((size * 8) - shift));
1301 }
1302 return res & MAKE_64BIT_MASK(0, size * 8);
1303 }
1304
1305 do_aligned_access:
1306 haddr = (void *)((uintptr_t)addr + entry->addend);
1307 switch (op) {
1308 case MO_UB:
1309 res = ldub_p(haddr);
1310 break;
1311 case MO_BEUW:
1312 res = lduw_be_p(haddr);
1313 break;
1314 case MO_LEUW:
1315 res = lduw_le_p(haddr);
1316 break;
1317 case MO_BEUL:
1318 res = (uint32_t)ldl_be_p(haddr);
1319 break;
1320 case MO_LEUL:
1321 res = (uint32_t)ldl_le_p(haddr);
1322 break;
1323 case MO_BEQ:
1324 res = ldq_be_p(haddr);
1325 break;
1326 case MO_LEQ:
1327 res = ldq_le_p(haddr);
1328 break;
1329 default:
1330 g_assert_not_reached();
1331 }
1332
1333 return res;
1334}
1335
1336/*
1337 * For the benefit of TCG generated code, we want to avoid the
1338 * complication of ABI-specific return type promotion and always
1339 * return a value extended to the register size of the host. This is
1340 * tcg_target_long, except in the case of a 32-bit host and 64-bit
1341 * data, and for that we always have uint64_t.
1342 *
1343 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
1344 */
1345
1346static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
1347 TCGMemOpIdx oi, uintptr_t retaddr)
1348{
1349 return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
1350}
1351
1352tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
1353 TCGMemOpIdx oi, uintptr_t retaddr)
1354{
1355 return full_ldub_mmu(env, addr, oi, retaddr);
1356}
1357
1358static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1359 TCGMemOpIdx oi, uintptr_t retaddr)
1360{
1361 return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
1362 full_le_lduw_mmu);
1363}
1364
1365tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1366 TCGMemOpIdx oi, uintptr_t retaddr)
1367{
1368 return full_le_lduw_mmu(env, addr, oi, retaddr);
1369}
1370
1371static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1372 TCGMemOpIdx oi, uintptr_t retaddr)
1373{
1374 return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
1375 full_be_lduw_mmu);
1376}
1377
1378tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1379 TCGMemOpIdx oi, uintptr_t retaddr)
1380{
1381 return full_be_lduw_mmu(env, addr, oi, retaddr);
1382}
1383
1384static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1385 TCGMemOpIdx oi, uintptr_t retaddr)
1386{
1387 return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
1388 full_le_ldul_mmu);
1389}
1390
1391tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1392 TCGMemOpIdx oi, uintptr_t retaddr)
1393{
1394 return full_le_ldul_mmu(env, addr, oi, retaddr);
1395}
1396
1397static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1398 TCGMemOpIdx oi, uintptr_t retaddr)
1399{
1400 return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
1401 full_be_ldul_mmu);
1402}
1403
1404tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1405 TCGMemOpIdx oi, uintptr_t retaddr)
1406{
1407 return full_be_ldul_mmu(env, addr, oi, retaddr);
1408}
1409
1410uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
1411 TCGMemOpIdx oi, uintptr_t retaddr)
1412{
1413 return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
1414 helper_le_ldq_mmu);
1415}
1416
1417uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
1418 TCGMemOpIdx oi, uintptr_t retaddr)
1419{
1420 return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
1421 helper_be_ldq_mmu);
1422}
1423
1424/*
1425 * Provide signed versions of the load routines as well. We can of course
1426 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
1427 */
1428
1429
1430tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
1431 TCGMemOpIdx oi, uintptr_t retaddr)
1432{
1433 return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
1434}
1435
1436tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
1437 TCGMemOpIdx oi, uintptr_t retaddr)
1438{
1439 return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
1440}
1441
1442tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
1443 TCGMemOpIdx oi, uintptr_t retaddr)
1444{
1445 return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
1446}
1447
1448tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
1449 TCGMemOpIdx oi, uintptr_t retaddr)
1450{
1451 return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
1452}
1453
1454tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
1455 TCGMemOpIdx oi, uintptr_t retaddr)
1456{
1457 return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
1458}
1459
1460/*
1461 * Store Helpers
1462 */
1463
1464static inline void __attribute__((always_inline))
1465store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
1466 TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
1467{
1468 uintptr_t mmu_idx = get_mmuidx(oi);
1469 uintptr_t index = tlb_index(env, mmu_idx, addr);
1470 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1471 target_ulong tlb_addr = tlb_addr_write(entry);
1472 const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
1473 unsigned a_bits = get_alignment_bits(get_memop(oi));
1474 void *haddr;
1475 size_t size = memop_size(op);
1476
1477 /* Handle CPU specific unaligned behaviour */
1478 if (addr & ((1 << a_bits) - 1)) {
1479 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1480 mmu_idx, retaddr);
1481 }
1482
1483 /* If the TLB entry is for a different page, reload and try again. */
1484 if (!tlb_hit(tlb_addr, addr)) {
1485 if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1486 addr & TARGET_PAGE_MASK)) {
1487 tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
1488 mmu_idx, retaddr);
1489 index = tlb_index(env, mmu_idx, addr);
1490 entry = tlb_entry(env, mmu_idx, addr);
1491 }
1492 tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
1493 }
1494
1495 /* Handle an IO access. */
1496 if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1497 if ((addr & (size - 1)) != 0) {
1498 goto do_unaligned_access;
1499 }
1500
1501 if (tlb_addr & TLB_RECHECK) {
1502 /*
1503 * This is a TLB_RECHECK access, where the MMU protection
1504 * covers a smaller range than a target page, and we must
1505 * repeat the MMU check here. This tlb_fill() call might
1506 * longjump out if this access should cause a guest exception.
1507 */
1508 tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
1509 mmu_idx, retaddr);
1510 index = tlb_index(env, mmu_idx, addr);
1511 entry = tlb_entry(env, mmu_idx, addr);
1512
1513 tlb_addr = tlb_addr_write(entry);
1514 tlb_addr &= ~TLB_RECHECK;
1515 if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
1516 /* RAM access */
1517 goto do_aligned_access;
1518 }
1519 }
1520
1521 io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx,
1522 val, addr, retaddr, op);
1523 return;
1524 }
1525
1526 /* Handle slow unaligned access (it spans two pages or IO). */
1527 if (size > 1
1528 && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1529 >= TARGET_PAGE_SIZE)) {
1530 int i;
1531 uintptr_t index2;
1532 CPUTLBEntry *entry2;
1533 target_ulong page2, tlb_addr2;
1534 do_unaligned_access:
1535 /*
1536 * Ensure the second page is in the TLB. Note that the first page
1537 * is already guaranteed to be filled, and that the second page
1538 * cannot evict the first.
1539 */
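        /*
         * For illustration, assuming 4KiB target pages: a 4-byte store to
         * addr == 0x1ffe reaches this block because
         * (addr & ~TARGET_PAGE_MASK) + size - 1 == 0xffe + 3 >= 0x1000,
         * and page2 below evaluates to (0x1ffe + 4) & TARGET_PAGE_MASK
         * == 0x2000, i.e. the page holding the final bytes of the store.
         */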
        page2 = (addr + size) & TARGET_PAGE_MASK;
        index2 = tlb_index(env, mmu_idx, page2);
        entry2 = tlb_entry(env, mmu_idx, page2);
        tlb_addr2 = tlb_addr_write(entry2);
        if (!tlb_hit_page(tlb_addr2, page2)
            && !victim_tlb_hit(env, mmu_idx, index2, tlb_off,
                               page2 & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), page2, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /*
         * XXX: not efficient, but simple.
         * This loop must go in the forward direction to avoid issues
         * with self-modifying code in Windows 64-bit.
         */
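        /*
         * For illustration, a 4-byte big-endian store of val == 0x11223344
         * extracts bytes with shifts of 24, 16, 8 and 0, storing
         * 0x11 0x22 0x33 0x44 at addr, addr + 1, addr + 2 and addr + 3;
         * the little-endian case shifts by 8 * i and stores the bytes in
         * the opposite order.
         */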
        for (i = 0; i < size; ++i) {
            uint8_t val8;
            if (memop_big_endian(op)) {
                /* Big-endian extract. */
                val8 = val >> (((size - 1) * 8) - (i * 8));
            } else {
                /* Little-endian extract. */
                val8 = val >> (i * 8);
            }
            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
        return;
    }

 do_aligned_access:
    haddr = (void *)((uintptr_t)addr + entry->addend);
    switch (op) {
    case MO_UB:
        stb_p(haddr, val);
        break;
    case MO_BEUW:
        stw_be_p(haddr, val);
        break;
    case MO_LEUW:
        stw_le_p(haddr, val);
        break;
    case MO_BEUL:
        stl_be_p(haddr, val);
        break;
    case MO_LEUL:
        stl_le_p(haddr, val);
        break;
    case MO_BEQ:
        stq_be_p(haddr, val);
        break;
    case MO_LEQ:
        stq_le_p(haddr, val);
        break;
    default:
        g_assert_not_reached();
        break;
    }
}

void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_UB);
}

void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}

void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}

void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEQ);
}

void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEQ);
}

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers. */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
#define ATOMIC_MMU_CLEANUP                              \
    do {                                                \
        if (unlikely(ndi.active)) {                     \
            memory_notdirty_write_complete(&ndi);       \
        }                                               \
    } while (0)
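
/*
 * Illustrative expansion, assuming the SUFFIX/END values defined by
 * atomic_template.h (e.g. SUFFIX == l and END == _le for the 4-byte
 * little-endian variants):
 *
 *     ATOMIC_NAME(cmpxchg)
 *         -> HELPER(atomic_cmpxchgl_le_mmu)
 *         -> helper_atomic_cmpxchgl_le_mmu
 *
 * so each inclusion of atomic_template.h below emits the helper family
 * for the corresponding DATA_SIZE, taking oi and retaddr explicitly.
 */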

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS , TCGMemOpIdx oi
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions. */
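/*
 * These mirror the data-side load helpers above, but pass code_read == true
 * to load_helper() (the trailing "true" argument below), so the TLB lookup
 * is performed for an instruction fetch rather than a data read.
 */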

static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu);
}

uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_ldub_cmmu(env, addr, oi, retaddr);
}

static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEUW, true,
                       full_le_lduw_cmmu);
}

uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_lduw_cmmu(env, addr, oi, retaddr);
}

static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEUW, true,
                       full_be_lduw_cmmu);
}

uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_lduw_cmmu(env, addr, oi, retaddr);
}

static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEUL, true,
                       full_le_ldul_cmmu);
}

uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_ldul_cmmu(env, addr, oi, retaddr);
}

static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEUL, true,
                       full_be_ldul_cmmu);
}

uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_ldul_cmmu(env, addr, oi, retaddr);
}

uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEQ, true,
                       helper_le_ldq_cmmu);
}

uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEQ, true,
                       helper_be_ldq_cmmu);
}