/* Parts of target interface that deal with accessing memory and memory-like
   objects.

   Copyright (C) 2006-2018 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "vec.h"
#include "target.h"
#include "memory-map.h"

#include "gdb_sys_time.h"
#include <algorithm>

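/* Comparison function for std::sort: returns true if A_REQ starts at
   a lower address than B_REQ.  */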
static bool
compare_block_starting_address (const memory_write_request &a_req,
                                const memory_write_request &b_req)
{
  return a_req.begin < b_req.begin;
}

/* Adds to RESULT all memory write requests from BLOCKS that are
   in the [BEGIN, END) range.

   If any memory request is only partially in the specified range,
   the part of the memory request that falls within the range will be
   added.  */
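/* For example (illustrative values): claiming [0x10, 0x20) from a
   single request covering [0x00, 0x18) adds one partial request
   covering [0x10, 0x18) to RESULT, with its data pointer advanced by
   0x10 bytes.  */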

static void
claim_memory (const std::vector<memory_write_request> &blocks,
              std::vector<memory_write_request> *result,
              ULONGEST begin,
              ULONGEST end)
{
  ULONGEST claimed_begin;
  ULONGEST claimed_end;

  for (const memory_write_request &r : blocks)
    {
      /* If the request doesn't overlap [BEGIN, END), skip it.  We
         must handle END == 0 meaning the top of memory; we don't yet
         check for R.end == 0, which would also mean the top of
         memory, but there's an assertion in
         target_write_memory_blocks which checks for that.  */

      if (begin >= r.end)
        continue;
      if (end != 0 && end <= r.begin)
        continue;

      claimed_begin = std::max (begin, r.begin);
      if (end == 0)
        claimed_end = r.end;
      else
        claimed_end = std::min (end, r.end);

      if (claimed_begin == r.begin && claimed_end == r.end)
        result->push_back (r);
      else
        {
          struct memory_write_request n = r;

          n.begin = claimed_begin;
          n.end = claimed_end;
          n.data += claimed_begin - r.begin;

          result->push_back (n);
        }
    }
}

/* Given a vector of struct memory_write_request objects in BLOCKS,
   add memory requests for flash memory into FLASH_BLOCKS, and for
   regular memory into REGULAR_BLOCKS.  */
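/* For example (illustrative layout): with RAM at [0x0, 0x8000) and
   flash at [0x8000, 0x10000), a request covering [0x7f00, 0x8100) is
   split into [0x7f00, 0x8000) in REGULAR_BLOCKS and [0x8000, 0x8100)
   in FLASH_BLOCKS.  */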

static void
split_regular_and_flash_blocks (const std::vector<memory_write_request> &blocks,
                                std::vector<memory_write_request> *regular_blocks,
                                std::vector<memory_write_request> *flash_blocks)
{
  struct mem_region *region;
  CORE_ADDR cur_address;

  /* This implementation runs in O(length(regions)*length(blocks)) time.
     However, in most cases the number of blocks will be small, so this does
     not matter.

     Note also that it's extremely unlikely that a memory write request
     will span more than one memory region, however for safety we handle
     such situations.  */

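  /* Walk the target's memory map from address 0 upward, claiming the
     blocks that fall within each region as we go; a region with
     region->hi == 0 extends to the top of the address space and
     terminates the walk.  */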
  cur_address = 0;
  while (1)
    {
      std::vector<memory_write_request> *r;

      region = lookup_mem_region (cur_address);
      r = region->attrib.mode == MEM_FLASH ? flash_blocks : regular_blocks;
      cur_address = region->hi;
      claim_memory (blocks, r, region->lo, region->hi);

      if (cur_address == 0)
        break;
    }
}

/* Given an ADDRESS, if BEGIN is non-NULL this function sets *BEGIN
   to the start of the flash block containing the address.  Similarly,
   if END is non-NULL *END will be set to the address one past the end
   of the block containing the address.  */
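/* For example (illustrative values): for a flash region starting at
   0x1000 with a 0x100-byte blocksize, ADDRESS 0x1234 yields *BEGIN ==
   0x1200 and *END == 0x1300.  */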

static void
block_boundaries (CORE_ADDR address, CORE_ADDR *begin, CORE_ADDR *end)
{
  struct mem_region *region;
  unsigned blocksize;
  CORE_ADDR offset_in_region;

  region = lookup_mem_region (address);
  gdb_assert (region->attrib.mode == MEM_FLASH);
  blocksize = region->attrib.blocksize;

  offset_in_region = address - region->lo;

  if (begin)
    *begin = region->lo + offset_in_region / blocksize * blocksize;
  if (end)
    /* Adding BLOCKSIZE before rounding down gives one past the end of
       the containing block even when ADDRESS sits exactly on a block
       boundary; adding BLOCKSIZE - 1 would wrongly return the block's
       own start in that case.  */
    *end = region->lo + (offset_in_region + blocksize) / blocksize * blocksize;
}

/* Given the list of memory requests to be WRITTEN, this function
   returns write requests covering each group of flash blocks which must
   be erased.  */
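/* For example (illustrative values): with a 0x100-byte blocksize,
   write requests [0x1010, 0x1080) and [0x10f0, 0x1210) touch
   overlapping blocks, so a single erase request [0x1000, 0x1300) is
   returned.  Because the caller sorts WRITTEN by start address,
   overlapping erase ranges always merge with the last result
   element.  */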

static std::vector<memory_write_request>
blocks_to_erase (const std::vector<memory_write_request> &written)
{
  std::vector<memory_write_request> result;

  for (const memory_write_request &request : written)
    {
      CORE_ADDR begin, end;

      block_boundaries (request.begin, &begin, 0);
      block_boundaries (request.end - 1, 0, &end);

      if (!result.empty () && result.back ().end >= begin)
        result.back ().end = end;
      else
        result.emplace_back (begin, end);
    }

  return result;
}

/* Given ERASED_BLOCKS, a list of blocks that will be erased with
   flash erase commands, and WRITTEN_BLOCKS, the list of memory
   addresses that will be written, compute the set of memory addresses
   that will be erased but not rewritten (e.g. padding within a block
   which is only partially filled by "load").  */
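/* For example (illustrative values): with ERASED_BLOCKS =
   {[0x1000, 0x1100)} and WRITTEN_BLOCKS = {[0x1020, 0x1040),
   [0x1080, 0x1100)}, the result is {[0x1000, 0x1020),
   [0x1040, 0x1080)}.  */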

static std::vector<memory_write_request>
compute_garbled_blocks (const std::vector<memory_write_request> &erased_blocks,
                        const std::vector<memory_write_request> &written_blocks)
{
  std::vector<memory_write_request> result;

  unsigned j;
  unsigned je = written_blocks.size ();

  /* Look at each erased memory_write_request in turn, and
     see what part of it is subsequently written to.

     This implementation is O(length(erased) * length(written)).  If
     the lists are sorted at this point it could be rewritten more
     efficiently, but the complexity is not generally worthwhile.  */

  for (const memory_write_request &erased_iter : erased_blocks)
    {
      /* Make a deep copy -- it will be modified inside the loop, but
         we don't want to modify the original vector.  */
      struct memory_write_request erased = erased_iter;

      for (j = 0; j != je;)
        {
          const memory_write_request *written = &written_blocks[j];

          /* Now try various cases.  */

          /* If WRITTEN is fully to the left of ERASED, check the next
             written memory_write_request.  */
          if (written->end <= erased.begin)
            {
              ++j;
              continue;
            }

          /* If WRITTEN is fully to the right of ERASED, then ERASED
             is not written at all.  WRITTEN might affect other
             blocks.  */
          if (written->begin >= erased.end)
            {
              result.push_back (erased);
              goto next_erased;
            }

          /* If all of ERASED is completely written, we can move on to
             the next erased region.  */
          if (written->begin <= erased.begin
              && written->end >= erased.end)
            {
              goto next_erased;
            }

          /* If there is an unwritten part at the beginning of ERASED,
             then we should record that part and try this inner loop
             again for the remainder.  */
          if (written->begin > erased.begin)
            {
              result.emplace_back (erased.begin, written->begin);
              erased.begin = written->begin;
              continue;
            }

          /* If there is an unwritten part at the end of ERASED, we
             forget about the part that was written to and wait to see
             if the next write request writes more of ERASED.  We can't
             push it yet.  */
          if (written->end < erased.end)
            {
              erased.begin = written->end;
              ++j;
              continue;
            }
        }

      /* If we ran out of write requests without doing anything about
         ERASED, then that means it's really erased.  */
      result.push_back (erased);

    next_erased:
      ;
    }

  return result;
}

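/* Write the memory blocks in REQUESTS to the target.  Blocks that
   fall in flash regions are erased first; PRESERVE_FLASH_P says
   whether flash memory that is erased but not rewritten should be
   read out beforehand and written back, or simply discarded.
   PROGRESS_CB is passed through to target_write_with_progress for
   each block.  Returns 0 on success, nonzero on failure.  */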
int
target_write_memory_blocks (const std::vector<memory_write_request> &requests,
                            enum flash_preserve_mode preserve_flash_p,
                            void (*progress_cb) (ULONGEST, void *))
{
  std::vector<memory_write_request> blocks = requests;
  std::vector<memory_write_request> regular;
  std::vector<memory_write_request> flash;
  std::vector<memory_write_request> erased, garbled;

  /* END == 0 would represent wraparound: a write to the very last
     byte of the address space.  This file was not written with that
     possibility in mind.  This is fixable, but a lot of work for a
     rare problem; so for now, fail noisily here instead of obscurely
     later.  */
  for (const memory_write_request &iter : requests)
    gdb_assert (iter.end != 0);

  /* Sort the blocks by their start address.  */
  std::sort (blocks.begin (), blocks.end (), compare_block_starting_address);

  /* Split blocks into list of regular memory blocks,
     and list of flash memory blocks.  */
  split_regular_and_flash_blocks (blocks, &regular, &flash);

  /* If a variable is added to forbid flash write, even during "load",
     it should be checked here.  Similarly, if this function is used
     for other situations besides "load" in which writing to flash
     is undesirable, that should be checked here.  */

  /* Find flash blocks to erase.  */
  erased = blocks_to_erase (flash);

  /* Find what flash regions will be erased, and not overwritten; then
     either preserve or discard the old contents.  */
  garbled = compute_garbled_blocks (erased, flash);

  std::vector<gdb::unique_xmalloc_ptr<gdb_byte>> mem_holders;
  if (!garbled.empty ())
    {
      if (preserve_flash_p == flash_preserve)
        {
          /* Read in regions that must be preserved and add them to
             the list of blocks we read.  */
          for (memory_write_request &iter : garbled)
            {
              gdb_assert (iter.data == NULL);
              gdb::unique_xmalloc_ptr<gdb_byte> holder
                ((gdb_byte *) xmalloc (iter.end - iter.begin));
              iter.data = holder.get ();
              mem_holders.push_back (std::move (holder));
              int err = target_read_memory (iter.begin, iter.data,
                                            iter.end - iter.begin);
              if (err != 0)
                return err;

              flash.push_back (iter);
            }

          std::sort (flash.begin (), flash.end (),
                     compare_block_starting_address);
        }
    }

  /* We could coalesce adjacent memory blocks here, to reduce the
     number of write requests for small sections.  However, we would
     have to reallocate and copy the data pointers, which could be
     large; large sections are more common in loadable objects than
     large numbers of small sections (although the reverse can be true
     in object files).  So, we issue at least one write request per
     passed struct memory_write_request.  The remote stub will still
     have the opportunity to batch flash requests.  */

  /* Write regular blocks.  */
  for (const memory_write_request &iter : regular)
    {
      LONGEST len;

      len = target_write_with_progress (current_top_target (),
                                        TARGET_OBJECT_MEMORY, NULL,
                                        iter.data, iter.begin,
                                        iter.end - iter.begin,
                                        progress_cb, iter.baton);
      if (len < (LONGEST) (iter.end - iter.begin))
        {
          /* Call error?  */
          return -1;
        }
    }

  if (!erased.empty ())
    {
      /* Erase all pages.  */
      for (const memory_write_request &iter : erased)
        target_flash_erase (iter.begin, iter.end - iter.begin);

      /* Write flash data.  */
      for (const memory_write_request &iter : flash)
        {
          LONGEST len;

          len = target_write_with_progress (current_top_target (),
                                            TARGET_OBJECT_FLASH, NULL,
                                            iter.data, iter.begin,
                                            iter.end - iter.begin,
                                            progress_cb, iter.baton);
          if (len < (LONGEST) (iter.end - iter.begin))
            error (_("Error writing data to flash"));
        }

      target_flash_done ();
    }

  return 0;
}
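
/* Usage sketch (hypothetical caller shown for illustration; GDB's
   "load" command does something similar, building one request per
   downloadable section):

     std::vector<memory_write_request> requests;
     requests.emplace_back (lma, lma + size, data);
     if (target_write_memory_blocks (requests, flash_preserve, NULL) != 0)
       error (_("Write failed"));

   LMA, SIZE and DATA are placeholders for the section's load address,
   its length, and a buffer holding its contents.  */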