]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/target-memory.c
Copyright updates for 2007.
[thirdparty/binutils-gdb.git] / gdb / target-memory.c
CommitLineData
a76d924d
DJ
1/* Parts of target interface that deal with accessing memory and memory-like
2 objects.
3
6aba47ca 4 Copyright (C) 2006, 2007 Free Software Foundation, Inc.
a76d924d
DJ
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor,
21 Boston, MA 02110-1301, USA. */
22
23#include "defs.h"
24#include "vec.h"
25#include "target.h"
26#include "memory-map.h"
27
28#include "gdb_assert.h"
29
30#include <stdio.h>
31#include <sys/time.h>
32
33static int
34compare_block_starting_address (const void *a, const void *b)
35{
36 const struct memory_write_request *a_req = a;
37 const struct memory_write_request *b_req = b;
38
39 if (a_req->begin < b_req->begin)
40 return -1;
41 else if (a_req->begin == b_req->begin)
42 return 0;
43 else
44 return 1;
45}
46
47/* Adds to RESULT all memory write requests from BLOCK that are
48 in [BEGIN, END) range.
49
50 If any memory request is only partially in the specified range,
51 that part of the memory request will be added. */
52
53static void
54claim_memory (VEC(memory_write_request_s) *blocks,
55 VEC(memory_write_request_s) **result,
56 ULONGEST begin,
57 ULONGEST end)
58{
59 int i;
60 ULONGEST claimed_begin;
61 ULONGEST claimed_end;
62 struct memory_write_request *r;
63
64 for (i = 0; VEC_iterate (memory_write_request_s, blocks, i, r); ++i)
65 {
66 /* If the request doesn't overlap [BEGIN, END), skip it. We
67 must handle END == 0 meaning the top of memory; we don't yet
68 check for R->end == 0, which would also mean the top of
69 memory, but there's an assertion in
70 target_write_memory_blocks which checks for that. */
71
72 if (begin >= r->end)
73 continue;
74 if (end != 0 && end <= r->begin)
75 continue;
76
77 claimed_begin = max (begin, r->begin);
78 if (end == 0)
79 claimed_end = r->end;
80 else
81 claimed_end = min (end, r->end);
82
83 if (claimed_begin == r->begin && claimed_end == r->end)
84 VEC_safe_push (memory_write_request_s, *result, r);
85 else
86 {
87 struct memory_write_request *n =
88 VEC_safe_push (memory_write_request_s, *result, NULL);
89 memset (n, 0, sizeof (struct memory_write_request));
90 n->begin = claimed_begin;
91 n->end = claimed_end;
92 n->data = r->data + (claimed_begin - r->begin);
93 }
94 }
95}
96
97/* Given a vector of struct memory_write_request objects in BLOCKS,
98 add memory requests for flash memory into FLASH_BLOCKS, and for
99 regular memory to REGULAR_BLOCKS. */
100
101static void
102split_regular_and_flash_blocks (VEC(memory_write_request_s) *blocks,
103 VEC(memory_write_request_s) **regular_blocks,
104 VEC(memory_write_request_s) **flash_blocks)
105{
106 struct mem_region *region;
107 CORE_ADDR cur_address;
108
109 /* This implementation runs in O(length(regions)*length(blocks)) time.
110 However, in most cases the number of blocks will be small, so this does
111 not matter.
112
113 Note also that it's extremely unlikely that a memory write request
114 will span more than one memory region, however for safety we handle
115 such situations. */
116
117 cur_address = 0;
118 while (1)
119 {
120 VEC(memory_write_request_s) **r;
121 region = lookup_mem_region (cur_address);
122
123 r = region->attrib.mode == MEM_FLASH ? flash_blocks : regular_blocks;
124 cur_address = region->hi;
125 claim_memory (blocks, r, region->lo, region->hi);
126
127 if (cur_address == 0)
128 break;
129 }
130}
131
/* Given an ADDRESS, if BEGIN is non-NULL this function sets *BEGIN
   to the start of the flash block containing the address.  Similarly,
   if END is non-NULL *END will be set to the address one past the end
   of the block containing the address.  */

static void
block_boundaries (CORE_ADDR address, CORE_ADDR *begin, CORE_ADDR *end)
{
  struct mem_region *region;
  unsigned blocksize;

  /* Only flash regions carry a meaningful blocksize; calling this for
     any other kind of region is a caller bug.  */
  region = lookup_mem_region (address);
  gdb_assert (region->attrib.mode == MEM_FLASH);
  blocksize = region->attrib.blocksize;
  if (begin)
    /* Round ADDRESS down to the containing block boundary.  */
    *begin = address / blocksize * blocksize;
  if (end)
    /* Round ADDRESS up to a block boundary.  Note that when ADDRESS
       is already block-aligned this yields ADDRESS itself, not
       ADDRESS + BLOCKSIZE; callers pass an exclusive end address
       here (see blocks_to_erase), for which that is the intended
       result.  NOTE(review): both computations assume the flash
       region itself starts at a multiple of BLOCKSIZE -- verify for
       targets whose region->lo is not block-aligned.  */
    *end = (address + blocksize - 1) / blocksize * blocksize;
}
151
152/* Given the list of memory requests to be WRITTEN, this function
153 returns write requests covering each group of flash blocks which must
154 be erased. */
155
156static VEC(memory_write_request_s) *
157blocks_to_erase (VEC(memory_write_request_s) *written)
158{
159 unsigned i;
160 struct memory_write_request *ptr;
161
162 VEC(memory_write_request_s) *result = NULL;
163
164 for (i = 0; VEC_iterate (memory_write_request_s, written, i, ptr); ++i)
165 {
166 CORE_ADDR begin, end;
167
168 block_boundaries (ptr->begin, &begin, 0);
169 block_boundaries (ptr->end, 0, &end);
170
171 if (!VEC_empty (memory_write_request_s, result)
172 && VEC_last (memory_write_request_s, result)->end >= begin)
173 {
174 VEC_last (memory_write_request_s, result)->end = end;
175 }
176 else
177 {
178 struct memory_write_request *n =
179 VEC_safe_push (memory_write_request_s, result, NULL);
180 memset (n, 0, sizeof (struct memory_write_request));
181 n->begin = begin;
182 n->end = end;
183 }
184 }
185
186 return result;
187}
188
/* Given ERASED_BLOCKS, a list of blocks that will be erased with
   flash erase commands, and WRITTEN_BLOCKS, the list of memory
   addresses that will be written, compute the set of memory addresses
   that will be erased but not rewritten (e.g. padding within a block
   which is only partially filled by "load").

   Returns a freshly allocated vector; the DATA fields of its entries
   are not meaningful.  The inner loop stops scanning WRITTEN_BLOCKS
   as soon as one lies entirely to the right of the current erased
   range, so WRITTEN_BLOCKS must be sorted by increasing start
   address (the caller sorts it before calling here).  */

static VEC(memory_write_request_s) *
compute_garbled_blocks (VEC(memory_write_request_s) *erased_blocks,
			VEC(memory_write_request_s) *written_blocks)
{
  VEC(memory_write_request_s) *result = NULL;

  unsigned i, j;
  unsigned je = VEC_length (memory_write_request_s, written_blocks);
  struct memory_write_request *erased_p;

  /* Look at each erased memory_write_request in turn, and
     see what part of it is subsequently written to.

     This implementation is O(length(erased) * length(written)).  If
     the lists are sorted at this point it could be rewritten more
     efficiently, but the complexity is not generally worthwhile.  */

  for (i = 0;
       VEC_iterate (memory_write_request_s, erased_blocks, i, erased_p);
       ++i)
    {
      /* Make a deep copy -- it will be modified inside the loop, but
	 we don't want to modify original vector.  */
      struct memory_write_request erased = *erased_p;

      /* J is not advanced by every case below: when ERASED is split,
	 the same WRITTEN entry is re-examined against the remainder.  */
      for (j = 0; j != je;)
	{
	  struct memory_write_request *written
	    = VEC_index (memory_write_request_s,
			 written_blocks, j);

	  /* Now try various cases.  */

	  /* If WRITTEN is fully to the left of ERASED, check the next
	     written memory_write_request.  */
	  if (written->end <= erased.begin)
	    {
	      ++j;
	      continue;
	    }

	  /* If WRITTEN is fully to the right of ERASED, then ERASED
	     is not written at all.  WRITTEN might affect other
	     blocks.  */
	  if (written->begin >= erased.end)
	    {
	      VEC_safe_push (memory_write_request_s, result, &erased);
	      goto next_erased;
	    }

	  /* If all of ERASED is completely written, we can move on to
	     the next erased region.  */
	  if (written->begin <= erased.begin
	      && written->end >= erased.end)
	    {
	      goto next_erased;
	    }

	  /* If there is an unwritten part at the beginning of ERASED,
	     then we should record that part and try this inner loop
	     again for the remainder.  */
	  if (written->begin > erased.begin)
	    {
	      struct memory_write_request *n =
		VEC_safe_push (memory_write_request_s, result, NULL);
	      memset (n, 0, sizeof (struct memory_write_request));
	      n->begin = erased.begin;
	      n->end = written->begin;
	      erased.begin = written->begin;
	      continue;
	    }

	  /* If there is an unwritten part at the end of ERASED, we
	     forget about the part that was written to and wait to see
	     if the next write request writes more of ERASED.  We can't
	     push it yet.  */
	  if (written->end < erased.end)
	    {
	      erased.begin = written->end;
	      ++j;
	      continue;
	    }
	}

      /* If we ran out of write requests without doing anything about
	 ERASED, then that means it's really erased.  */
      VEC_safe_push (memory_write_request_s, result, &erased);

    next_erased:
      ;
    }

  return result;
}
289
290static void
291cleanup_request_data (void *p)
292{
293 VEC(memory_write_request_s) **v = p;
294 struct memory_write_request *r;
295 int i;
296
297 for (i = 0; VEC_iterate (memory_write_request_s, *v, i, r); ++i)
298 xfree (r->data);
299}
300
301static void
302cleanup_write_requests_vector (void *p)
303{
304 VEC(memory_write_request_s) **v = p;
305 VEC_free (memory_write_request_s, *v);
306}
307
/* Write the memory blocks in REQUESTS to the target, routing each
   request to regular memory or flash according to the target's
   memory map.  Flash blocks are erased before being written; when
   PRESERVE_FLASH_P is flash_preserve, parts of erased blocks that no
   request covers are read back first and rewritten afterwards so
   their contents survive.  PROGRESS_CB is passed through to
   target_write_with_progress together with each request's BATON.

   Returns 0 on success; otherwise the error code returned by
   target_read_memory, or -1 when a regular-memory write comes up
   short.  Note the asymmetry: a short *flash* write calls error ()
   (throwing) instead of returning.  */

int
target_write_memory_blocks (VEC(memory_write_request_s) *requests,
			    enum flash_preserve_mode preserve_flash_p,
			    void (*progress_cb) (ULONGEST, void *))
{
  /* Anchor for all cleanups registered below; run at "out".  */
  struct cleanup *back_to = make_cleanup (null_cleanup, NULL);
  /* Copy the vector so it can be sorted without disturbing the
     caller's REQUESTS; the DATA pointers are shared, not duplicated.  */
  VEC(memory_write_request_s) *blocks = VEC_copy (memory_write_request_s,
						  requests);
  unsigned i;
  int err = 0;
  struct memory_write_request *r;
  VEC(memory_write_request_s) *regular = NULL;
  VEC(memory_write_request_s) *flash = NULL;
  VEC(memory_write_request_s) *erased, *garbled;

  /* END == 0 would represent wraparound: a write to the very last
     byte of the address space.  This file was not written with that
     possibility in mind.  This is fixable, but a lot of work for a
     rare problem; so for now, fail noisily here instead of obscurely
     later.  */
  for (i = 0; VEC_iterate (memory_write_request_s, requests, i, r); ++i)
    gdb_assert (r->end != 0);

  make_cleanup (cleanup_write_requests_vector, &blocks);

  /* Sort the blocks by their start address.  */
  qsort (VEC_address (memory_write_request_s, blocks),
	 VEC_length (memory_write_request_s, blocks),
	 sizeof (struct memory_write_request), compare_block_starting_address);

  /* Split blocks into list of regular memory blocks,
     and list of flash memory blocks.  */
  make_cleanup (cleanup_write_requests_vector, &regular);
  make_cleanup (cleanup_write_requests_vector, &flash);
  split_regular_and_flash_blocks (blocks, &regular, &flash);

  /* If a variable is added to forbid flash write, even during "load",
     it should be checked here.  Similarly, if this function is used
     for other situations besides "load" in which writing to flash
     is undesirable, that should be checked here.  */

  /* Find flash blocks to erase.  */
  erased = blocks_to_erase (flash);
  make_cleanup (cleanup_write_requests_vector, &erased);

  /* Find what flash regions will be erased, and not overwritten; then
     either preserve or discard the old contents.  GARBLED owns any
     DATA buffers allocated for it below: cleanup_request_data frees
     the buffers, then cleanup_write_requests_vector frees the
     vector.  */
  garbled = compute_garbled_blocks (erased, flash);
  make_cleanup (cleanup_request_data, &garbled);
  make_cleanup (cleanup_write_requests_vector, &garbled);

  if (!VEC_empty (memory_write_request_s, garbled))
    {
      if (preserve_flash_p == flash_preserve)
	{
	  struct memory_write_request *r;

	  /* Read in regions that must be preserved and add them to
	     the list of blocks we read.  */
	  for (i = 0; VEC_iterate (memory_write_request_s, garbled, i, r); ++i)
	    {
	      gdb_assert (r->data == NULL);
	      r->data = xmalloc (r->end - r->begin);
	      err = target_read_memory (r->begin, r->data, r->end - r->begin);
	      if (err != 0)
		goto out;

	      /* Pushing R copies the struct into FLASH; the DATA
		 pointer is shared with GARBLED, which remains the
		 owner (FLASH's cleanup frees only the vector).  */
	      VEC_safe_push (memory_write_request_s, flash, r);
	    }

	  /* Restore the start-address ordering disturbed by the
	     entries just appended.  */
	  qsort (VEC_address (memory_write_request_s, flash),
		 VEC_length (memory_write_request_s, flash),
		 sizeof (struct memory_write_request), compare_block_starting_address);
	}
    }

  /* We could coalesce adjacent memory blocks here, to reduce the
     number of write requests for small sections.  However, we would
     have to reallocate and copy the data pointers, which could be
     large; large sections are more common in loadable objects than
     large numbers of small sections (although the reverse can be true
     in object files).  So, we issue at least one write request per
     passed struct memory_write_request.  The remote stub will still
     have the opportunity to batch flash requests.  */

  /* Write regular blocks.  */
  for (i = 0; VEC_iterate (memory_write_request_s, regular, i, r); ++i)
    {
      LONGEST len;

      len = target_write_with_progress (&current_target,
					TARGET_OBJECT_MEMORY, NULL,
					r->data, r->begin, r->end - r->begin,
					progress_cb, r->baton);
      if (len < (LONGEST) (r->end - r->begin))
	{
	  /* Call error?  */
	  err = -1;
	  goto out;
	}
    }

  if (!VEC_empty (memory_write_request_s, erased))
    {
      /* Erase all pages.  */
      for (i = 0; VEC_iterate (memory_write_request_s, erased, i, r); ++i)
	target_flash_erase (r->begin, r->end - r->begin);

      /* Write flash data.  */
      for (i = 0; VEC_iterate (memory_write_request_s, flash, i, r); ++i)
	{
	  LONGEST len;

	  len = target_write_with_progress (&current_target,
					    TARGET_OBJECT_FLASH, NULL,
					    r->data, r->begin, r->end - r->begin,
					    progress_cb, r->baton);
	  if (len < (LONGEST) (r->end - r->begin))
	    error (_("Error writing data to flash"));
	}

      target_flash_done ();
    }

 out:
  do_cleanups (back_to);

  return err;
}