/* frv memory model.
   Copyright (C) 1999, 2000, 2001, 2003, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "sim-main.h"
#include "cgen-mem.h"
#include "bfd.h"

/* Check for alignment and access restrictions.  Return the corrected
   address.  */
static SI
fr400_check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  /* Check access restrictions for double word loads only.  */
  if (align_mask == 7)
    {
      if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
        frv_queue_data_access_error_interrupt (current_cpu, address);
    }
  return address;
}

static SI
fr500_check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
      address &= ~align_mask;
    }

  if (((USI)address >= 0xfeff0600 && (USI)address <= 0xfeff7fff)
      || ((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff))
    frv_queue_data_access_error_interrupt (current_cpu, address);

  return address;
}

static SI
fr550_check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff)
      || (align_mask > 0x3
          && ((USI)address >= 0xfeff0000 && (USI)address <= 0xfeffffff)))
    frv_queue_data_access_error_interrupt (current_cpu, address);

  return address;
}

static SI
check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      address = fr400_check_data_read_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_frvtomcat:
    case bfd_mach_fr500:
    case bfd_mach_frv:
      address = fr500_check_data_read_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_fr550:
      address = fr550_check_data_read_address (current_cpu, address,
                                               align_mask);
      break;
    default:
      break;
    }

  return address;
}
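
/* Callers encode the access size in ALIGN_MASK: 0 for byte, 1 for
   halfword, 3 for word, 7 for doubleword and 0xf for quad accesses (see
   the frvbf_read_mem_* and frvbf_mem_set_* routines below).  */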

static SI
fr400_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      /* Make sure that this exception is not masked.  */
      USI isr = GET_ISR ();
      if (! GET_ISR_EMAM (isr))
        {
          /* Bad alignment causes a data_access_error on fr400.  */
          frv_queue_data_access_error_interrupt (current_cpu, address);
        }
      address &= ~align_mask;
    }
  /* No address-range restrictions to check on the fr400.  */
  return address;
}

static SI
fr500_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (((USI)address >= 0xfe000000 && (USI)address <= 0xfe003fff)
      || ((USI)address >= 0xfe004000 && (USI)address <= 0xfe3fffff)
      || ((USI)address >= 0xfe400000 && (USI)address <= 0xfe403fff)
      || ((USI)address >= 0xfe404000 && (USI)address <= 0xfe7fffff))
    frv_queue_data_access_exception_interrupt (current_cpu);

  return address;
}

static SI
fr550_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  /* No alignment restrictions on the fr550.  */

  if (((USI)address >= 0xfe000000 && (USI)address <= 0xfe3fffff)
      || ((USI)address >= 0xfe408000 && (USI)address <= 0xfe7fffff))
    frv_queue_data_access_exception_interrupt (current_cpu);
  else
    {
      USI hsr0 = GET_HSR0 ();
      if (! GET_HSR0_RME (hsr0)
          && (USI)address >= 0xfe400000 && (USI)address <= 0xfe407fff)
        frv_queue_data_access_exception_interrupt (current_cpu);
    }

  return address;
}

static SI
check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      address = fr400_check_readwrite_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_frvtomcat:
    case bfd_mach_fr500:
    case bfd_mach_frv:
      address = fr500_check_readwrite_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_fr550:
      address = fr550_check_readwrite_address (current_cpu, address,
                                               align_mask);
      break;
    default:
      break;
    }

  return address;
}

static PCADDR
fr400_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
                               int align_mask)
{
  if (address & align_mask)
    {
      frv_queue_instruction_access_error_interrupt (current_cpu);
      address &= ~align_mask;
    }
  else if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
    frv_queue_instruction_access_error_interrupt (current_cpu);

  return address;
}

static PCADDR
fr500_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
                               int align_mask)
{
  if (address & align_mask)
    {
      frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
      address &= ~align_mask;
    }

  if (((USI)address >= 0xfeff0600 && (USI)address <= 0xfeff7fff)
      || ((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff))
    frv_queue_instruction_access_error_interrupt (current_cpu);
  else if (((USI)address >= 0xfe004000 && (USI)address <= 0xfe3fffff)
           || ((USI)address >= 0xfe400000 && (USI)address <= 0xfe403fff)
           || ((USI)address >= 0xfe404000 && (USI)address <= 0xfe7fffff))
    frv_queue_instruction_access_exception_interrupt (current_cpu);
  else
    {
      USI hsr0 = GET_HSR0 ();
      if (! GET_HSR0_RME (hsr0)
          && (USI)address >= 0xfe000000 && (USI)address <= 0xfe003fff)
        frv_queue_instruction_access_exception_interrupt (current_cpu);
    }

  return address;
}

static PCADDR
fr550_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
                               int align_mask)
{
  address &= ~align_mask;

  if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
    frv_queue_instruction_access_error_interrupt (current_cpu);
  else if ((USI)address >= 0xfe008000 && (USI)address <= 0xfe7fffff)
    frv_queue_instruction_access_exception_interrupt (current_cpu);
  else
    {
      USI hsr0 = GET_HSR0 ();
      if (! GET_HSR0_RME (hsr0)
          && (USI)address >= 0xfe000000 && (USI)address <= 0xfe007fff)
        frv_queue_instruction_access_exception_interrupt (current_cpu);
    }

  return address;
}

static PCADDR
check_insn_read_address (SIM_CPU *current_cpu, PCADDR address, int align_mask)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      address = fr400_check_insn_read_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_frvtomcat:
    case bfd_mach_fr500:
    case bfd_mach_frv:
      address = fr500_check_insn_read_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_fr550:
      address = fr550_check_insn_read_address (current_cpu, address,
                                               align_mask);
      break;
    default:
      break;
    }

  return address;
}
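
/* Note that unlike the fr400 and fr500 checks above, the fr550 variant
   silently masks the address down to the requested alignment; no
   alignment interrupt is queued.  */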

/* Memory reads.  */
QI
frvbf_read_mem_QI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 0);
  address = check_readwrite_address (current_cpu, address, 0);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 1;
      CPU_LOAD_SIGNED (current_cpu) = 1;
      return 0xb7; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, QI, 1);
    }

  return GETMEMQI (current_cpu, pc, address);
}

UQI
frvbf_read_mem_UQI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 0);
  address = check_readwrite_address (current_cpu, address, 0);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 1;
      CPU_LOAD_SIGNED (current_cpu) = 0;
      return 0xb7; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, UQI, 1);
    }

  return GETMEMUQI (current_cpu, pc, address);
}

/* Read a HI which spans two cache lines.  */
static HI
read_mem_unaligned_HI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  HI value = frvbf_read_mem_QI (current_cpu, pc, address);
  value <<= 8;
  value |= frvbf_read_mem_UQI (current_cpu, pc, address + 1);
  return T2H_2 (value);
}
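
/* Note: this path is only taken on the fr550 (see frvbf_read_mem_HI
   below), which allows loads to cross a cache line boundary; the two
   bytes are fetched with separate byte reads, one from each line.  */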

HI
frvbf_read_mem_HI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 1);
  address = check_readwrite_address (current_cpu, address, 1);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 2;
      CPU_LOAD_SIGNED (current_cpu) = 1;
      return 0xb711; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
            return read_mem_unaligned_HI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, HI, 2);
    }

  return GETMEMHI (current_cpu, pc, address);
}

UHI
frvbf_read_mem_UHI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 1);
  address = check_readwrite_address (current_cpu, address, 1);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 2;
      CPU_LOAD_SIGNED (current_cpu) = 0;
      return 0xb711; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
            return read_mem_unaligned_HI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, UHI, 2);
    }

  return GETMEMUHI (current_cpu, pc, address);
}

/* Read a SI which spans two cache lines.  */
static SI
read_mem_unaligned_SI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
  char valarray[4];
  SI SIvalue;
  HI HIvalue;

  switch (hi_len)
    {
    case 1:
      valarray[0] = frvbf_read_mem_QI (current_cpu, pc, address);
      SIvalue = frvbf_read_mem_SI (current_cpu, pc, address + 1);
      SIvalue = H2T_4 (SIvalue);
      memcpy (valarray + 1, (char*)&SIvalue, 3);
      break;
    case 2:
      HIvalue = frvbf_read_mem_HI (current_cpu, pc, address);
      HIvalue = H2T_2 (HIvalue);
      memcpy (valarray, (char*)&HIvalue, 2);
      HIvalue = frvbf_read_mem_HI (current_cpu, pc, address + 2);
      HIvalue = H2T_2 (HIvalue);
      memcpy (valarray + 2, (char*)&HIvalue, 2);
      break;
    case 3:
      SIvalue = frvbf_read_mem_SI (current_cpu, pc, address - 1);
      SIvalue = H2T_4 (SIvalue);
      /* Skip the leading byte at ADDRESS - 1; only the last three bytes
         of that word belong to this load.  */
      memcpy (valarray, (char*)&SIvalue + 1, 3);
      valarray[3] = frvbf_read_mem_QI (current_cpu, pc, address + 3);
      break;
    default:
      abort (); /* can't happen */
    }
  return T2H_4 (*(SI*)valarray);
}
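
/* Illustrative note: HI_LEN above is the number of bytes of the word that
   land in the first cache line.  With a 32-byte line, for example, a word
   load at an address ending in 0x1e has HI_LEN == 2 and is split into two
   halfword reads.  The actual line size comes from CACHE->line_size.  */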

SI
frvbf_read_mem_SI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  FRV_CACHE *cache;
  USI hsr0;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 3);
  address = check_readwrite_address (current_cpu, address, 3);

  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 4;
      return 0x37111319; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 4))
            return read_mem_unaligned_SI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, SI, 4);
    }

  return GETMEMSI (current_cpu, pc, address);
}

SI
frvbf_read_mem_WI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  return frvbf_read_mem_SI (current_cpu, pc, address);
}

/* Read a DI which spans two cache lines.  */
static DI
read_mem_unaligned_DI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
  DI value, value1;

  switch (hi_len)
    {
    case 1:
      value = frvbf_read_mem_QI (current_cpu, pc, address);
      value <<= 56;
      value1 = frvbf_read_mem_DI (current_cpu, pc, address + 1);
      value1 = H2T_8 (value1);
      value |= value1 & ((DI)0x00ffffff << 32);
      value |= value1 & 0xffffffffu;
      break;
    case 2:
      value = frvbf_read_mem_HI (current_cpu, pc, address);
      value = H2T_2 (value);
      value <<= 48;
      value1 = frvbf_read_mem_DI (current_cpu, pc, address + 2);
      value1 = H2T_8 (value1);
      value |= value1 & ((DI)0x0000ffff << 32);
      value |= value1 & 0xffffffffu;
      break;
    case 3:
      value = frvbf_read_mem_SI (current_cpu, pc, address - 1);
      value = H2T_4 (value);
      value <<= 40;
      value1 = frvbf_read_mem_DI (current_cpu, pc, address + 3);
      value1 = H2T_8 (value1);
      value |= value1 & ((DI)0x000000ff << 32);
      value |= value1 & 0xffffffffu;
      break;
    case 4:
      value = frvbf_read_mem_SI (current_cpu, pc, address);
      value = H2T_4 (value);
      value <<= 32;
      value1 = frvbf_read_mem_SI (current_cpu, pc, address + 4);
      value1 = H2T_4 (value1);
      value |= value1 & 0xffffffffu;
      break;
    case 5:
      value = frvbf_read_mem_DI (current_cpu, pc, address - 3);
      value = H2T_8 (value);
      value <<= 24;
      value1 = frvbf_read_mem_SI (current_cpu, pc, address + 5);
      value1 = H2T_4 (value1);
      value |= value1 & 0x00ffffff;
      break;
    case 6:
      value = frvbf_read_mem_DI (current_cpu, pc, address - 2);
      value = H2T_8 (value);
      value <<= 16;
      value1 = frvbf_read_mem_HI (current_cpu, pc, address + 6);
      value1 = H2T_2 (value1);
      value |= value1 & 0x0000ffff;
      break;
    case 7:
      value = frvbf_read_mem_DI (current_cpu, pc, address - 1);
      value = H2T_8 (value);
      value <<= 8;
      value1 = frvbf_read_mem_QI (current_cpu, pc, address + 7);
      value |= value1 & 0x000000ff;
      break;
    default:
      abort (); /* can't happen */
    }
  return T2H_8 (value);
}
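
/* Illustrative note: as with the SI case, HI_LEN is the number of bytes of
   the doubleword that land in the first cache line.  Each case assembles a
   leading piece of HI_LEN bytes with a trailing piece of 8 - HI_LEN bytes
   from the next line; the underlying reads may over-fetch neighbouring
   bytes, which the masks then discard.  */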

DI
frvbf_read_mem_DI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 8;
      return 0x37111319; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
            return read_mem_unaligned_DI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, DI, 8);
    }

  return GETMEMDI (current_cpu, pc, address);
}

DF
frvbf_read_mem_DF (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 8;
      return 0x37111319; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
            return read_mem_unaligned_DI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, DF, 8);
    }

  return GETMEMDF (current_cpu, pc, address);
}

USI
frvbf_read_imem_USI (SIM_CPU *current_cpu, PCADDR vpc)
{
  USI hsr0;
  vpc = check_insn_read_address (current_cpu, vpc, 3);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      FRV_CACHE *cache;
      USI value;

      /* We don't want this to show up in the cache statistics.  That read
         is done in frvbf_simulate_insn_prefetch.  So read the cache or
         memory passively here.  */
      cache = CPU_INSN_CACHE (current_cpu);
      if (frv_cache_read_passive_SI (cache, vpc, &value))
        return value;
    }
  return sim_core_read_unaligned_4 (current_cpu, vpc, read_map, vpc);
}

static SI
fr400_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (align_mask == 7
      && (USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
    frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);

  return address;
}

static SI
fr500_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      struct frv_interrupt_queue_element *item =
        frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
      /* Record the correct vliw slot with the interrupt.  */
      if (item != NULL)
        item->slot = frv_interrupt_state.slot;
      address &= ~align_mask;
    }
  if (((USI)address >= 0xfeff0600 && (USI)address <= 0xfeff7fff)
      || ((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff))
    frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);

  return address;
}

static SI
fr550_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff)
      || (align_mask > 0x3
          && ((USI)address >= 0xfeff0000 && (USI)address <= 0xfeffffff)))
    frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);

  return address;
}

static SI
check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      address = fr400_check_write_address (current_cpu, address, align_mask);
      break;
    case bfd_mach_frvtomcat:
    case bfd_mach_fr500:
    case bfd_mach_frv:
      address = fr500_check_write_address (current_cpu, address, align_mask);
      break;
    case bfd_mach_fr550:
      address = fr550_check_write_address (current_cpu, address, align_mask);
      break;
    default:
      break;
    }
  return address;
}
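
/* The frvbf_write_mem_* routines below do not write memory directly.  They
   queue the store in the CPU's write queue and tag it with the VLIW slot
   that issued it (frv_set_write_queue_slot, at the end of this file).
   When the data cache is enabled, the queued entry calls back into the
   corresponding frvbf_mem_set_* routine to perform the actual write.  */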

void
frvbf_write_mem_QI (SIM_CPU *current_cpu, IADDR pc, SI address, QI value)
{
  USI hsr0;
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_qi_write (current_cpu, frvbf_mem_set_QI, address, value);
  else
    sim_queue_mem_qi_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}

void
frvbf_write_mem_UQI (SIM_CPU *current_cpu, IADDR pc, SI address, UQI value)
{
  frvbf_write_mem_QI (current_cpu, pc, address, value);
}

void
frvbf_write_mem_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
{
  USI hsr0;
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_hi_write (current_cpu, frvbf_mem_set_HI, address, value);
  else
    sim_queue_mem_hi_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}

void
frvbf_write_mem_UHI (SIM_CPU *current_cpu, IADDR pc, SI address, UHI value)
{
  frvbf_write_mem_HI (current_cpu, pc, address, value);
}

void
frvbf_write_mem_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
{
  USI hsr0;
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_si_write (current_cpu, frvbf_mem_set_SI, address, value);
  else
    sim_queue_mem_si_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}

void
frvbf_write_mem_WI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
{
  frvbf_write_mem_SI (current_cpu, pc, address, value);
}

void
frvbf_write_mem_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
{
  USI hsr0;
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_di_write (current_cpu, frvbf_mem_set_DI, address, value);
  else
    sim_queue_mem_di_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}

void
frvbf_write_mem_DF (SIM_CPU *current_cpu, IADDR pc, SI address, DF value)
{
  USI hsr0;
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_df_write (current_cpu, frvbf_mem_set_DF, address, value);
  else
    sim_queue_mem_df_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}

/* Memory writes.  These do the actual writing through the cache.  */
void
frvbf_mem_set_QI (SIM_CPU *current_cpu, IADDR pc, SI address, QI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 0);
  address = check_readwrite_address (current_cpu, address, 0);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot, (char *)&value,
                               sizeof (value));
    }
  else
    frv_cache_write (cache, address, (char *)&value, sizeof (value));
}

/* Write a HI which spans two cache lines.  */
static void
mem_set_unaligned_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  /* value is already in target byte order.  */
  frv_cache_write (cache, address, (char *)&value, 1);
  frv_cache_write (cache, address + 1, ((char *)&value + 1), 1);
}

void
frvbf_mem_set_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 1);
  address = check_readwrite_address (current_cpu, address, 1);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  value = H2T_2 (value);
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
                               (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
            {
              mem_set_unaligned_HI (current_cpu, pc, address, value);
              return;
            }
        }
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}

/* Write a SI which spans two cache lines.  */
static void
mem_set_unaligned_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
  /* value is already in target byte order.  */
  frv_cache_write (cache, address, (char *)&value, hi_len);
  frv_cache_write (cache, address + hi_len, (char *)&value + hi_len,
                   4 - hi_len);
}

void
frvbf_mem_set_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 3);
  address = check_readwrite_address (current_cpu, address, 3);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  cache = CPU_DATA_CACHE (current_cpu);
  value = H2T_4 (value);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
                               (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 4))
            {
              mem_set_unaligned_SI (current_cpu, pc, address, value);
              return;
            }
        }
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}

/* Write a DI which spans two cache lines.  */
static void
mem_set_unaligned_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
  /* value is already in target byte order.  */
  frv_cache_write (cache, address, (char *)&value, hi_len);
  frv_cache_write (cache, address + hi_len, (char *)&value + hi_len,
                   8 - hi_len);
}

void
frvbf_mem_set_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  value = H2T_8 (value);
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
                               (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
            {
              mem_set_unaligned_DI (current_cpu, pc, address, value);
              return;
            }
        }
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}

void
frvbf_mem_set_DF (SIM_CPU *current_cpu, IADDR pc, SI address, DF value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  value = H2T_8 (value);
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
                               (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
            {
              mem_set_unaligned_DI (current_cpu, pc, address, value);
              return;
            }
        }
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}

void
frvbf_mem_set_XI (SIM_CPU *current_cpu, IADDR pc, SI address, SI *value)
{
  int i;
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 0xf);
  address = check_readwrite_address (current_cpu, address, 0xf);

  /* TODO -- reverse word order as well?  */
  for (i = 0; i < 4; ++i)
    value[i] = H2T_4 (value[i]);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot, (char*)value, 16);
    }
  else
    frv_cache_write (cache, address, (char*)value, 16);
}

/* Record the current VLIW slot on the element at the top of the write
   queue.  */
void
frv_set_write_queue_slot (SIM_CPU *current_cpu)
{
  FRV_VLIW *vliw = CPU_VLIW (current_cpu);
  int slot = vliw->next_slot - 1;
  CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
  int ix = CGEN_WRITE_QUEUE_INDEX (q) - 1;
  CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, ix);
  CGEN_WRITE_QUEUE_ELEMENT_PIPE (item) = (*vliw->current_vliw)[slot];
}