/* memory.c -- Memory accessor functions for the AArch64 simulator

   Copyright (C) 2015-2017 Free Software Foundation, Inc.

   Contributed by Red Hat.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "libiberty.h"

#include "memory.h"
#include "simulator.h"

#include "sim-core.h"

static inline void
mem_error (sim_cpu *cpu, const char *message, uint64_t addr)
{
  TRACE_MEMORY (cpu, "ERROR: %s: %" PRIx64, message, addr);
}

/* FIXME: AArch64 requires aligned memory access if SCTLR_ELx.A is set,
   but we are not implementing that here.  */
#define FETCH_FUNC64(RETURN_TYPE, ACCESS_TYPE, NAME, N)			\
  RETURN_TYPE								\
  aarch64_get_mem_##NAME (sim_cpu *cpu, uint64_t address)		\
  {									\
    RETURN_TYPE val = (RETURN_TYPE) (ACCESS_TYPE)			\
      sim_core_read_unaligned_##N (cpu, 0, read_map, address);		\
    TRACE_MEMORY (cpu, "read of %" PRIx64 " (%d bytes) from %" PRIx64,	\
		  val, N, address);					\
									\
    return val;								\
  }

FETCH_FUNC64 (uint64_t, uint64_t, u64, 8)
FETCH_FUNC64 (int64_t, int64_t, s64, 8)

#define FETCH_FUNC32(RETURN_TYPE, ACCESS_TYPE, NAME, N)			\
  RETURN_TYPE								\
  aarch64_get_mem_##NAME (sim_cpu *cpu, uint64_t address)		\
  {									\
    RETURN_TYPE val = (RETURN_TYPE) (ACCESS_TYPE)			\
      sim_core_read_unaligned_##N (cpu, 0, read_map, address);		\
    TRACE_MEMORY (cpu, "read of %8x (%d bytes) from %" PRIx64,		\
		  val, N, address);					\
									\
    return val;								\
  }

FETCH_FUNC32 (uint32_t, uint32_t, u32, 4)
FETCH_FUNC32 (int32_t, int32_t, s32, 4)
FETCH_FUNC32 (uint32_t, uint16_t, u16, 2)
FETCH_FUNC32 (int32_t, int16_t, s16, 2)
FETCH_FUNC32 (uint32_t, uint8_t, u8, 1)
FETCH_FUNC32 (int32_t, int8_t, s8, 1)
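
/* For illustration, FETCH_FUNC64 (uint64_t, uint64_t, u64, 8) above
   expands to roughly:

     uint64_t
     aarch64_get_mem_u64 (sim_cpu *cpu, uint64_t address)
     {
       uint64_t val = (uint64_t) (uint64_t)
	 sim_core_read_unaligned_8 (cpu, 0, read_map, address);
       TRACE_MEMORY (cpu, "read of %" PRIx64 " (%d bytes) from %" PRIx64,
		     val, 8, address);
       return val;
     }

   The FETCH_FUNC32 accessors follow the same pattern but trace with a
   32-bit format.  The sub-word variants read through ACCESS_TYPE and
   then widen to the 32-bit RETURN_TYPE, so aarch64_get_mem_u16
   zero-extends its result while aarch64_get_mem_s8 sign-extends.  */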

void
aarch64_get_mem_long_double (sim_cpu *cpu, uint64_t address, FRegister *a)
{
  a->v[0] = sim_core_read_unaligned_8 (cpu, 0, read_map, address);
  a->v[1] = sim_core_read_unaligned_8 (cpu, 0, read_map, address + 8);
}

/* FIXME: AArch64 requires aligned memory access if SCTLR_ELx.A is set,
   but we are not implementing that here.  */
#define STORE_FUNC(TYPE, NAME, N)					\
  void									\
  aarch64_set_mem_##NAME (sim_cpu *cpu, uint64_t address, TYPE value)	\
  {									\
    TRACE_MEMORY (cpu,							\
		  "write of %" PRIx64 " (%d bytes) to %" PRIx64,	\
		  (uint64_t) value, N, address);			\
									\
    sim_core_write_unaligned_##N (cpu, 0, write_map, address, value);	\
  }

STORE_FUNC (uint64_t, u64, 8)
STORE_FUNC (int64_t, s64, 8)
STORE_FUNC (uint32_t, u32, 4)
STORE_FUNC (int32_t, s32, 4)
STORE_FUNC (uint16_t, u16, 2)
STORE_FUNC (int16_t, s16, 2)
STORE_FUNC (uint8_t, u8, 1)
STORE_FUNC (int8_t, s8, 1)
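
/* For illustration, STORE_FUNC (uint32_t, u32, 4) above defines a store
   accessor that can be called as, roughly:

     aarch64_set_mem_u32 (cpu, address, value);

   which traces the store and then writes VALUE (4 bytes) via
   sim_core_write_unaligned_4.  */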

void
aarch64_set_mem_long_double (sim_cpu *cpu, uint64_t address, FRegister a)
{
  TRACE_MEMORY (cpu,
		"write of long double %" PRIx64 " %" PRIx64 " to %" PRIx64,
		a.v[0], a.v[1], address);

  sim_core_write_unaligned_8 (cpu, 0, write_map, address, a.v[0]);
  sim_core_write_unaligned_8 (cpu, 0, write_map, address + 8, a.v[1]);
}

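/* Read LENGTH bytes at ADDRESS into BUFFER.  If the read cannot be
   satisfied, BUFFER is zeroed, the failure is traced, and the simulator
   halts with SIGBUS.  */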
void
aarch64_get_mem_blk (sim_cpu *  cpu,
		     uint64_t   address,
		     char *     buffer,
		     unsigned   length)
{
  unsigned len;

  len = sim_core_read_buffer (CPU_STATE (cpu), cpu, read_map,
			      buffer, address, length);
  if (len == length)
    return;

  memset (buffer, 0, length);
  if (cpu)
    mem_error (cpu, "read of non-existent mem block at", address);

  sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),
		   sim_stopped, SIM_SIGBUS);
}

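/* Translate ADDRESS into a host pointer into the simulated memory,
   halting with SIGBUS if the address is not mapped.  */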
const char *
aarch64_get_mem_ptr (sim_cpu *cpu, uint64_t address)
{
  char *addr = sim_core_trans_addr (CPU_STATE (cpu), cpu, read_map, address);

  if (addr == NULL)
    {
      mem_error (cpu, "request for non-existent mem addr of", address);
      sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),
		       sim_stopped, SIM_SIGBUS);
    }

  return addr;
}

/* We implement a combined stack and heap.  That way the sbrk()
   function in libgloss/aarch64/syscalls.c has a chance to detect
   an out-of-memory condition by noticing a stack/heap collision.

   The heap starts at the end of loaded memory and carries on up
   to an arbitrary 2GB limit.  */
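
/* For illustration (addresses are hypothetical): if the loaded image's
   "end" symbol were at 0x00413000, the heap would grow upward from
   0x00413000 while the stack grows downward from STACK_TOP, and sbrk()
   would report out-of-memory once the two regions met.  */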

uint64_t
aarch64_get_heap_start (sim_cpu *cpu)
{
  uint64_t heap = trace_sym_value (CPU_STATE (cpu), "end");

  if (heap == 0)
    heap = trace_sym_value (CPU_STATE (cpu), "_end");
  if (heap == 0)
    {
      heap = STACK_TOP - 0x100000;
      sim_io_eprintf (CPU_STATE (cpu),
		      "Unable to find 'end' symbol - using an address "
		      "based upon the stack instead: %" PRIx64 "\n",
		      heap);
    }
  return heap;
}

uint64_t
aarch64_get_stack_start (sim_cpu *cpu)
{
  if (aarch64_get_heap_start (cpu) >= STACK_TOP)
    mem_error (cpu, "executable is too big", aarch64_get_heap_start (cpu));
  return STACK_TOP;
}