/* frv memory model.
   Copyright (C) 1999-2013 Free Software Foundation, Inc.
   Contributed by Red Hat

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "sim-main.h"
#include "cgen-mem.h"
#include "bfd.h"

/* Check for alignment and access restrictions.  Return the corrected
   address.  */
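/* The 0xfexxxxxx ranges tested below appear to correspond to the FRV
   memory-mapped resource/control register space; which subranges fault,
   and with which interrupt, differs between family members.  */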
static SI
fr400_check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  /* Check access restrictions for double word loads only.  */
  if (align_mask == 7)
    {
      if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
        frv_queue_data_access_error_interrupt (current_cpu, address);
    }
  return address;
}

static SI
fr500_check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
      address &= ~align_mask;
    }

  if (((USI)address >= 0xfeff0600 && (USI)address <= 0xfeff7fff)
      || ((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff))
    frv_queue_data_access_error_interrupt (current_cpu, address);

  return address;
}

static SI
fr550_check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff)
      || (align_mask > 0x3
          && ((USI)address >= 0xfeff0000 && (USI)address <= 0xfeffffff)))
    frv_queue_data_access_error_interrupt (current_cpu, address);

  return address;
}

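/* Dispatch to the data read check for the machine being simulated.
   Machines without a case below perform no checking.  */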
static SI
check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      address = fr400_check_data_read_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_frvtomcat:
    case bfd_mach_fr500:
    case bfd_mach_frv:
      address = fr500_check_data_read_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_fr550:
      address = fr550_check_data_read_address (current_cpu, address,
                                               align_mask);
      break;
    default:
      break;
    }

  return address;
}

static SI
fr400_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      /* Make sure that this exception is not masked.  */
      USI isr = GET_ISR ();
      if (! GET_ISR_EMAM (isr))
        {
          /* Bad alignment causes a data_access_error on fr400.  */
          frv_queue_data_access_error_interrupt (current_cpu, address);
        }
      address &= ~align_mask;
    }
  /* Nothing else to check.  */
  return address;
}

static SI
fr500_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (((USI)address >= 0xfe000000 && (USI)address <= 0xfe003fff)
      || ((USI)address >= 0xfe004000 && (USI)address <= 0xfe3fffff)
      || ((USI)address >= 0xfe400000 && (USI)address <= 0xfe403fff)
      || ((USI)address >= 0xfe404000 && (USI)address <= 0xfe7fffff))
    frv_queue_data_access_exception_interrupt (current_cpu);

  return address;
}

static SI
fr550_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  /* No alignment restrictions on fr550.  */

  if (((USI)address >= 0xfe000000 && (USI)address <= 0xfe3fffff)
      || ((USI)address >= 0xfe408000 && (USI)address <= 0xfe7fffff))
    frv_queue_data_access_exception_interrupt (current_cpu);
  else
    {
      USI hsr0 = GET_HSR0 ();
      if (! GET_HSR0_RME (hsr0)
          && (USI)address >= 0xfe400000 && (USI)address <= 0xfe407fff)
        frv_queue_data_access_exception_interrupt (current_cpu);
    }

  return address;
}

static SI
check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      address = fr400_check_readwrite_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_frvtomcat:
    case bfd_mach_fr500:
    case bfd_mach_frv:
      address = fr500_check_readwrite_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_fr550:
      address = fr550_check_readwrite_address (current_cpu, address,
                                               align_mask);
      break;
    default:
      break;
    }

  return address;
}

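/* Instruction fetch address checks.  These mirror the data checks above,
   but queue instruction access interrupts instead.  */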
static PCADDR
fr400_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
                               int align_mask)
{
  if (address & align_mask)
    {
      frv_queue_instruction_access_error_interrupt (current_cpu);
      address &= ~align_mask;
    }
  else if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
    frv_queue_instruction_access_error_interrupt (current_cpu);

  return address;
}

static PCADDR
fr500_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
                               int align_mask)
{
  if (address & align_mask)
    {
      frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
      address &= ~align_mask;
    }

  if (((USI)address >= 0xfeff0600 && (USI)address <= 0xfeff7fff)
      || ((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff))
    frv_queue_instruction_access_error_interrupt (current_cpu);
  else if (((USI)address >= 0xfe004000 && (USI)address <= 0xfe3fffff)
           || ((USI)address >= 0xfe400000 && (USI)address <= 0xfe403fff)
           || ((USI)address >= 0xfe404000 && (USI)address <= 0xfe7fffff))
    frv_queue_instruction_access_exception_interrupt (current_cpu);
  else
    {
      USI hsr0 = GET_HSR0 ();
      if (! GET_HSR0_RME (hsr0)
          && (USI)address >= 0xfe000000 && (USI)address <= 0xfe003fff)
        frv_queue_instruction_access_exception_interrupt (current_cpu);
    }

  return address;
}

static PCADDR
fr550_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
                               int align_mask)
{
  address &= ~align_mask;

  if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
    frv_queue_instruction_access_error_interrupt (current_cpu);
  else if ((USI)address >= 0xfe008000 && (USI)address <= 0xfe7fffff)
    frv_queue_instruction_access_exception_interrupt (current_cpu);
  else
    {
      USI hsr0 = GET_HSR0 ();
      if (! GET_HSR0_RME (hsr0)
          && (USI)address >= 0xfe000000 && (USI)address <= 0xfe007fff)
        frv_queue_instruction_access_exception_interrupt (current_cpu);
    }

  return address;
}

static PCADDR
check_insn_read_address (SIM_CPU *current_cpu, PCADDR address, int align_mask)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      address = fr400_check_insn_read_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_frvtomcat:
    case bfd_mach_fr500:
    case bfd_mach_frv:
      address = fr500_check_insn_read_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_fr550:
      address = fr550_check_insn_read_address (current_cpu, address,
                                               align_mask);
      break;
    default:
      break;
    }

  return address;
}

/* Memory reads.  */
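/* Each reader below follows the same pattern: validate the address, then,
   when performance modeling is active (model_insn), just record the load so
   that the model profiling functions initiate the cache operation, otherwise
   read through the data cache when it is enabled (HSR0.DCE) or fall back to
   a direct memory read.  */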
QI
frvbf_read_mem_QI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 0);
  address = check_readwrite_address (current_cpu, address, 0);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 1;
      CPU_LOAD_SIGNED (current_cpu) = 1;
      return 0xb7; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, QI, 1);
    }

  return GETMEMQI (current_cpu, pc, address);
}

UQI
frvbf_read_mem_UQI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 0);
  address = check_readwrite_address (current_cpu, address, 0);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 1;
      CPU_LOAD_SIGNED (current_cpu) = 0;
      return 0xb7; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, UQI, 1);
    }

  return GETMEMUQI (current_cpu, pc, address);
}

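/* The following helpers are used on the fr550, where an access may straddle
   a cache line; they split the access into pieces that each fall within a
   single line.  */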
/* Read a HI which spans two cache lines.  */
static HI
read_mem_unaligned_HI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  HI value = frvbf_read_mem_QI (current_cpu, pc, address);
  value <<= 8;
  value |= frvbf_read_mem_UQI (current_cpu, pc, address + 1);
  return T2H_2 (value);
}

HI
frvbf_read_mem_HI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 1);
  address = check_readwrite_address (current_cpu, address, 1);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 2;
      CPU_LOAD_SIGNED (current_cpu) = 1;
      return 0xb711; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
            return read_mem_unaligned_HI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, HI, 2);
    }

  return GETMEMHI (current_cpu, pc, address);
}

UHI
frvbf_read_mem_UHI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 1);
  address = check_readwrite_address (current_cpu, address, 1);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 2;
      CPU_LOAD_SIGNED (current_cpu) = 0;
      return 0xb711; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
            return read_mem_unaligned_HI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, UHI, 2);
    }

  return GETMEMUHI (current_cpu, pc, address);
}

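/* In the unaligned SI/DI helpers below, hi_len is the number of bytes of
   the access that land in the first cache line: the line size minus the
   address's offset within that line.  */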
/* Read a SI which spans two cache lines.  */
static SI
read_mem_unaligned_SI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
  char valarray[4];
  SI SIvalue;
  HI HIvalue;

  switch (hi_len)
    {
    case 1:
      valarray[0] = frvbf_read_mem_QI (current_cpu, pc, address);
      SIvalue = frvbf_read_mem_SI (current_cpu, pc, address + 1);
      SIvalue = H2T_4 (SIvalue);
      memcpy (valarray + 1, (char*)&SIvalue, 3);
      break;
    case 2:
      HIvalue = frvbf_read_mem_HI (current_cpu, pc, address);
      HIvalue = H2T_2 (HIvalue);
      memcpy (valarray, (char*)&HIvalue, 2);
      HIvalue = frvbf_read_mem_HI (current_cpu, pc, address + 2);
      HIvalue = H2T_2 (HIvalue);
      memcpy (valarray + 2, (char*)&HIvalue, 2);
      break;
    case 3:
      SIvalue = frvbf_read_mem_SI (current_cpu, pc, address - 1);
      SIvalue = H2T_4 (SIvalue);
      memcpy (valarray, (char*)&SIvalue, 3);
      valarray[3] = frvbf_read_mem_QI (current_cpu, pc, address + 3);
      break;
    default:
      abort (); /* can't happen */
    }
  return T2H_4 (*(SI*)valarray);
}

SI
frvbf_read_mem_SI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  FRV_CACHE *cache;
  USI hsr0;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 3);
  address = check_readwrite_address (current_cpu, address, 3);

  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 4;
      return 0x37111319; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 4))
            return read_mem_unaligned_SI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, SI, 4);
    }

  return GETMEMSI (current_cpu, pc, address);
}

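/* A WI (32-bit word) has the same representation as an SI, so the WI reader
   simply forwards to the SI reader.  */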
SI
frvbf_read_mem_WI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  return frvbf_read_mem_SI (current_cpu, pc, address);
}

/* Read a DI which spans two cache lines.  */
static DI
read_mem_unaligned_DI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
  DI value, value1;

  switch (hi_len)
    {
    case 1:
      value = frvbf_read_mem_QI (current_cpu, pc, address);
      value <<= 56;
      value1 = frvbf_read_mem_DI (current_cpu, pc, address + 1);
      value1 = H2T_8 (value1);
      value |= value1 & ((DI)0x00ffffff << 32);
      value |= value1 & 0xffffffffu;
      break;
    case 2:
      value = frvbf_read_mem_HI (current_cpu, pc, address);
      value = H2T_2 (value);
      value <<= 48;
      value1 = frvbf_read_mem_DI (current_cpu, pc, address + 2);
      value1 = H2T_8 (value1);
      value |= value1 & ((DI)0x0000ffff << 32);
      value |= value1 & 0xffffffffu;
      break;
    case 3:
      value = frvbf_read_mem_SI (current_cpu, pc, address - 1);
      value = H2T_4 (value);
      value <<= 40;
      value1 = frvbf_read_mem_DI (current_cpu, pc, address + 3);
      value1 = H2T_8 (value1);
      value |= value1 & ((DI)0x000000ff << 32);
      value |= value1 & 0xffffffffu;
      break;
    case 4:
      value = frvbf_read_mem_SI (current_cpu, pc, address);
      value = H2T_4 (value);
      value <<= 32;
      value1 = frvbf_read_mem_SI (current_cpu, pc, address + 4);
      value1 = H2T_4 (value1);
      value |= value1 & 0xffffffffu;
      break;
    case 5:
      value = frvbf_read_mem_DI (current_cpu, pc, address - 3);
      value = H2T_8 (value);
      value <<= 24;
      value1 = frvbf_read_mem_SI (current_cpu, pc, address + 5);
      value1 = H2T_4 (value1);
      value |= value1 & 0x00ffffff;
      break;
    case 6:
      value = frvbf_read_mem_DI (current_cpu, pc, address - 2);
      value = H2T_8 (value);
      value <<= 16;
      value1 = frvbf_read_mem_HI (current_cpu, pc, address + 6);
      value1 = H2T_2 (value1);
      value |= value1 & 0x0000ffff;
      break;
    case 7:
      value = frvbf_read_mem_DI (current_cpu, pc, address - 1);
      value = H2T_8 (value);
      value <<= 8;
      value1 = frvbf_read_mem_QI (current_cpu, pc, address + 7);
      value |= value1 & 0x000000ff;
      break;
    default:
      abort (); /* can't happen */
    }
  return T2H_8 (value);
}

DI
frvbf_read_mem_DI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 8;
      return 0x37111319; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
            return read_mem_unaligned_DI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, DI, 8);
    }

  return GETMEMDI (current_cpu, pc, address);
}

DF
frvbf_read_mem_DF (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 8;
      return 0x37111319; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
            return read_mem_unaligned_DI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, DF, 8);
    }

  return GETMEMDF (current_cpu, pc, address);
}

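/* Instruction reads.  */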
USI
frvbf_read_imem_USI (SIM_CPU *current_cpu, PCADDR vpc)
{
  USI hsr0;
  vpc = check_insn_read_address (current_cpu, vpc, 3);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      FRV_CACHE *cache;
      USI value;

      /* We don't want this to show up in the cache statistics.  That read
         is done in frvbf_simulate_insn_prefetch.  So read the cache or
         memory passively here.  */
      cache = CPU_INSN_CACHE (current_cpu);
      if (frv_cache_read_passive_SI (cache, vpc, &value))
        return value;
    }
  return sim_core_read_unaligned_4 (current_cpu, vpc, read_map, vpc);
}

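/* Store address checks.  These mirror the read checks above, but queue a
   data store error (FRV_DATA_STORE_ERROR) program interrupt instead.  */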
static SI
fr400_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (align_mask == 7
      && address >= 0xfe800000 && address <= 0xfeffffff)
    frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);

  return address;
}

static SI
fr500_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      struct frv_interrupt_queue_element *item =
        frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
      /* Record the correct vliw slot with the interrupt.  */
      if (item != NULL)
        item->slot = frv_interrupt_state.slot;
      address &= ~align_mask;
    }
  if ((address >= 0xfeff0600 && address <= 0xfeff7fff)
      || (address >= 0xfe800000 && address <= 0xfefeffff))
    frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);

  return address;
}

static SI
fr550_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff)
      || (align_mask > 0x3
          && ((USI)address >= 0xfeff0000 && (USI)address <= 0xfeffffff)))
    frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);

  return address;
}

static SI
check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      address = fr400_check_write_address (current_cpu, address, align_mask);
      break;
    case bfd_mach_frvtomcat:
    case bfd_mach_fr500:
    case bfd_mach_frv:
      address = fr500_check_write_address (current_cpu, address, align_mask);
      break;
    case bfd_mach_fr550:
      address = fr550_check_write_address (current_cpu, address, align_mask);
      break;
    default:
      break;
    }
  return address;
}

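/* Stores are not performed immediately: the frvbf_write_mem_* functions
   below queue the write so that it takes effect at the end of the VLIW
   instruction, and frv_set_write_queue_slot (at the end of this file) tags
   the queued element with the issuing VLIW slot.  */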
void
frvbf_write_mem_QI (SIM_CPU *current_cpu, IADDR pc, SI address, QI value)
{
  USI hsr0;
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_qi_write (current_cpu, frvbf_mem_set_QI, address, value);
  else
    sim_queue_mem_qi_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}

void
frvbf_write_mem_UQI (SIM_CPU *current_cpu, IADDR pc, SI address, UQI value)
{
  frvbf_write_mem_QI (current_cpu, pc, address, value);
}

void
frvbf_write_mem_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
{
  USI hsr0;
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_hi_write (current_cpu, frvbf_mem_set_HI, address, value);
  else
    sim_queue_mem_hi_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}

void
frvbf_write_mem_UHI (SIM_CPU *current_cpu, IADDR pc, SI address, UHI value)
{
  frvbf_write_mem_HI (current_cpu, pc, address, value);
}

void
frvbf_write_mem_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
{
  USI hsr0;
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_si_write (current_cpu, frvbf_mem_set_SI, address, value);
  else
    sim_queue_mem_si_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}

void
frvbf_write_mem_WI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
{
  frvbf_write_mem_SI (current_cpu, pc, address, value);
}

void
frvbf_write_mem_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
{
  USI hsr0;
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_di_write (current_cpu, frvbf_mem_set_DI, address, value);
  else
    sim_queue_mem_di_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}

void
frvbf_write_mem_DF (SIM_CPU *current_cpu, IADDR pc, SI address, DF value)
{
  USI hsr0;
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_df_write (current_cpu, frvbf_mem_set_DF, address, value);
  else
    sim_queue_mem_df_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}

/* Memory writes.  These do the actual writing through the cache.  */
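/* When cycle counting is active, each function submits the store to the
   data cache request queue so the cache can prioritize it; otherwise the
   store is written through the cache immediately.  */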
void
frvbf_mem_set_QI (SIM_CPU *current_cpu, IADDR pc, SI address, QI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 0);
  address = check_readwrite_address (current_cpu, address, 0);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot, (char *)&value,
                               sizeof (value));
    }
  else
    frv_cache_write (cache, address, (char *)&value, sizeof (value));
}

/* Write a HI which spans two cache lines.  */
static void
mem_set_unaligned_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  /* value is already in target byte order.  */
  frv_cache_write (cache, address, (char *)&value, 1);
  frv_cache_write (cache, address + 1, ((char *)&value + 1), 1);
}

void
frvbf_mem_set_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 1);
  address = check_readwrite_address (current_cpu, address, 1);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  value = H2T_2 (value);
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
                               (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
            {
              mem_set_unaligned_HI (current_cpu, pc, address, value);
              return;
            }
        }
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}

/* Write a SI which spans two cache lines.  */
static void
mem_set_unaligned_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
  /* value is already in target byte order.  */
  frv_cache_write (cache, address, (char *)&value, hi_len);
  frv_cache_write (cache, address + hi_len, (char *)&value + hi_len, 4 - hi_len);
}

void
frvbf_mem_set_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 3);
  address = check_readwrite_address (current_cpu, address, 3);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  cache = CPU_DATA_CACHE (current_cpu);
  value = H2T_4 (value);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
                               (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 4))
            {
              mem_set_unaligned_SI (current_cpu, pc, address, value);
              return;
            }
        }
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}

/* Write a DI which spans two cache lines.  */
static void
mem_set_unaligned_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
  /* value is already in target byte order.  */
  frv_cache_write (cache, address, (char *)&value, hi_len);
  frv_cache_write (cache, address + hi_len, (char *)&value + hi_len, 8 - hi_len);
}

void
frvbf_mem_set_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  value = H2T_8 (value);
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
                               (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
            {
              mem_set_unaligned_DI (current_cpu, pc, address, value);
              return;
            }
        }
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}

void
frvbf_mem_set_DF (SIM_CPU *current_cpu, IADDR pc, SI address, DF value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  value = H2T_8 (value);
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
                               (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
            {
              /* A DF is carried as a 64-bit bit pattern in this simulator,
                 so the DI helper applies.  */
              mem_set_unaligned_DI (current_cpu, pc, address, value);
              return;
            }
        }
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}

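/* An XI is a 128-bit quantity, passed as an array of four SI words.  */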
void
frvbf_mem_set_XI (SIM_CPU *current_cpu, IADDR pc, SI address, SI *value)
{
  int i;
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 0xf);
  address = check_readwrite_address (current_cpu, address, 0xf);

  /* TODO -- reverse word order as well?  */
  for (i = 0; i < 4; ++i)
    value[i] = H2T_4 (value[i]);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot, (char*)value, 16);
    }
  else
    frv_cache_write (cache, address, (char*)value, 16);
}

/* Record the current VLIW slot on the element at the top of the write
   queue.  */
void
frv_set_write_queue_slot (SIM_CPU *current_cpu)
{
  FRV_VLIW *vliw = CPU_VLIW (current_cpu);
  int slot = vliw->next_slot - 1;
  CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
  int ix = CGEN_WRITE_QUEUE_INDEX (q) - 1;
  CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, ix);
  CGEN_WRITE_QUEUE_ELEMENT_PIPE (item) = (*vliw->current_vliw)[slot];
}