1 /* frv simulator support code
2 Copyright (C) 1998-2021 Free Software Foundation, Inc.
3 Contributed by Red Hat.
5 This file is part of the GNU simulators.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 /* This must come before any other includes. */
24 #define WANT_CPU_FRVBF
29 #include "cgen-engine.h"
32 #include "gdb/sim-frv.h"
/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by the JMPL, JMPIL, and CALL
   insns.  */
int frvbf_write_next_vliw_addr_to_LR;
41 /* The contents of BUF are in target byte order. */
43 frvbf_fetch_register (SIM_CPU
*current_cpu
, int rn
, unsigned char *buf
, int len
)
45 if (SIM_FRV_GR0_REGNUM
<= rn
&& rn
<= SIM_FRV_GR63_REGNUM
)
47 int hi_available
, lo_available
;
48 int grn
= rn
- SIM_FRV_GR0_REGNUM
;
50 frv_gr_registers_available (current_cpu
, &hi_available
, &lo_available
);
52 if ((grn
< 32 && !lo_available
) || (grn
>= 32 && !hi_available
))
55 SETTSI (buf
, GET_H_GR (grn
));
57 else if (SIM_FRV_FR0_REGNUM
<= rn
&& rn
<= SIM_FRV_FR63_REGNUM
)
59 int hi_available
, lo_available
;
60 int frn
= rn
- SIM_FRV_FR0_REGNUM
;
62 frv_fr_registers_available (current_cpu
, &hi_available
, &lo_available
);
64 if ((frn
< 32 && !lo_available
) || (frn
>= 32 && !hi_available
))
67 SETTSI (buf
, GET_H_FR (frn
));
69 else if (rn
== SIM_FRV_PC_REGNUM
)
70 SETTSI (buf
, GET_H_PC ());
71 else if (SIM_FRV_SPR0_REGNUM
<= rn
&& rn
<= SIM_FRV_SPR4095_REGNUM
)
73 /* Make sure the register is implemented. */
74 FRV_REGISTER_CONTROL
*control
= CPU_REGISTER_CONTROL (current_cpu
);
75 int spr
= rn
- SIM_FRV_SPR0_REGNUM
;
76 if (! control
->spr
[spr
].implemented
)
78 SETTSI (buf
, GET_H_SPR (spr
));
82 SETTSI (buf
, 0xdeadbeef);
89 /* The contents of BUF are in target byte order. */
92 frvbf_store_register (SIM_CPU
*current_cpu
, int rn
, unsigned char *buf
, int len
)
94 if (SIM_FRV_GR0_REGNUM
<= rn
&& rn
<= SIM_FRV_GR63_REGNUM
)
96 int hi_available
, lo_available
;
97 int grn
= rn
- SIM_FRV_GR0_REGNUM
;
99 frv_gr_registers_available (current_cpu
, &hi_available
, &lo_available
);
101 if ((grn
< 32 && !lo_available
) || (grn
>= 32 && !hi_available
))
104 SET_H_GR (grn
, GETTSI (buf
));
106 else if (SIM_FRV_FR0_REGNUM
<= rn
&& rn
<= SIM_FRV_FR63_REGNUM
)
108 int hi_available
, lo_available
;
109 int frn
= rn
- SIM_FRV_FR0_REGNUM
;
111 frv_fr_registers_available (current_cpu
, &hi_available
, &lo_available
);
113 if ((frn
< 32 && !lo_available
) || (frn
>= 32 && !hi_available
))
116 SET_H_FR (frn
, GETTSI (buf
));
118 else if (rn
== SIM_FRV_PC_REGNUM
)
119 SET_H_PC (GETTSI (buf
));
120 else if (SIM_FRV_SPR0_REGNUM
<= rn
&& rn
<= SIM_FRV_SPR4095_REGNUM
)
122 /* Make sure the register is implemented. */
123 FRV_REGISTER_CONTROL
*control
= CPU_REGISTER_CONTROL (current_cpu
);
124 int spr
= rn
- SIM_FRV_SPR0_REGNUM
;
125 if (! control
->spr
[spr
].implemented
)
127 SET_H_SPR (spr
, GETTSI (buf
));
135 /* Cover fns to access the general registers. */
137 frvbf_h_gr_get_handler (SIM_CPU
*current_cpu
, UINT gr
)
139 frv_check_gr_access (current_cpu
, gr
);
140 return CPU (h_gr
[gr
]);
144 frvbf_h_gr_set_handler (SIM_CPU
*current_cpu
, UINT gr
, USI newval
)
146 frv_check_gr_access (current_cpu
, gr
);
149 return; /* Storing into gr0 has no effect. */
151 CPU (h_gr
[gr
]) = newval
;
154 /* Cover fns to access the floating point registers. */
156 frvbf_h_fr_get_handler (SIM_CPU
*current_cpu
, UINT fr
)
158 frv_check_fr_access (current_cpu
, fr
);
159 return CPU (h_fr
[fr
]);
163 frvbf_h_fr_set_handler (SIM_CPU
*current_cpu
, UINT fr
, SF newval
)
165 frv_check_fr_access (current_cpu
, fr
);
166 CPU (h_fr
[fr
]) = newval
;
169 /* Cover fns to access the general registers as double words. */
171 check_register_alignment (SIM_CPU
*current_cpu
, UINT reg
, int align_mask
)
173 if (reg
& align_mask
)
175 SIM_DESC sd
= CPU_STATE (current_cpu
);
176 switch (STATE_ARCHITECTURE (sd
)->mach
)
178 /* Note: there is a discrepancy between V2.2 of the FR400
179 instruction manual and the various FR4xx LSI specs.
180 The former claims that unaligned registers cause a
181 register_exception while the latter say it's an
182 illegal_instruction. The LSI specs appear to be
183 correct; in fact, the FR4xx series is not documented
184 as having a register_exception. */
188 frv_queue_program_interrupt (current_cpu
, FRV_ILLEGAL_INSTRUCTION
);
190 case bfd_mach_frvtomcat
:
193 frv_queue_register_exception_interrupt (current_cpu
,
207 check_fr_register_alignment (SIM_CPU
*current_cpu
, UINT reg
, int align_mask
)
209 if (reg
& align_mask
)
211 SIM_DESC sd
= CPU_STATE (current_cpu
);
212 switch (STATE_ARCHITECTURE (sd
)->mach
)
214 /* See comment in check_register_alignment(). */
218 frv_queue_program_interrupt (current_cpu
, FRV_ILLEGAL_INSTRUCTION
);
220 case bfd_mach_frvtomcat
:
224 struct frv_fp_exception_info fp_info
= {
225 FSR_NO_EXCEPTION
, FTT_INVALID_FR
227 frv_queue_fp_exception_interrupt (current_cpu
, & fp_info
);
241 check_memory_alignment (SIM_CPU
*current_cpu
, SI address
, int align_mask
)
243 if (address
& align_mask
)
245 SIM_DESC sd
= CPU_STATE (current_cpu
);
246 switch (STATE_ARCHITECTURE (sd
)->mach
)
248 /* See comment in check_register_alignment(). */
251 frv_queue_data_access_error_interrupt (current_cpu
, address
);
253 case bfd_mach_frvtomcat
:
256 frv_queue_mem_address_not_aligned_interrupt (current_cpu
, address
);
262 address
&= ~align_mask
;
269 frvbf_h_gr_double_get_handler (SIM_CPU
*current_cpu
, UINT gr
)
274 return 0; /* gr0 is always 0. */
276 /* Check the register alignment. */
277 gr
= check_register_alignment (current_cpu
, gr
, 1);
279 value
= GET_H_GR (gr
);
281 value
|= (USI
) GET_H_GR (gr
+ 1);
286 frvbf_h_gr_double_set_handler (SIM_CPU
*current_cpu
, UINT gr
, DI newval
)
289 return; /* Storing into gr0 has no effect. */
291 /* Check the register alignment. */
292 gr
= check_register_alignment (current_cpu
, gr
, 1);
294 SET_H_GR (gr
, (newval
>> 32) & 0xffffffff);
295 SET_H_GR (gr
+ 1, (newval
) & 0xffffffff);
298 /* Cover fns to access the floating point register as double words. */
300 frvbf_h_fr_double_get_handler (SIM_CPU
*current_cpu
, UINT fr
)
307 /* Check the register alignment. */
308 fr
= check_fr_register_alignment (current_cpu
, fr
, 1);
310 if (HOST_BYTE_ORDER
== BFD_ENDIAN_LITTLE
)
312 value
.as_sf
[1] = GET_H_FR (fr
);
313 value
.as_sf
[0] = GET_H_FR (fr
+ 1);
317 value
.as_sf
[0] = GET_H_FR (fr
);
318 value
.as_sf
[1] = GET_H_FR (fr
+ 1);
325 frvbf_h_fr_double_set_handler (SIM_CPU
*current_cpu
, UINT fr
, DF newval
)
332 /* Check the register alignment. */
333 fr
= check_fr_register_alignment (current_cpu
, fr
, 1);
335 value
.as_df
= newval
;
336 if (HOST_BYTE_ORDER
== BFD_ENDIAN_LITTLE
)
338 SET_H_FR (fr
, value
.as_sf
[1]);
339 SET_H_FR (fr
+ 1, value
.as_sf
[0]);
343 SET_H_FR (fr
, value
.as_sf
[0]);
344 SET_H_FR (fr
+ 1, value
.as_sf
[1]);
348 /* Cover fns to access the floating point register as integer words. */
350 frvbf_h_fr_int_get_handler (SIM_CPU
*current_cpu
, UINT fr
)
357 value
.as_sf
= GET_H_FR (fr
);
362 frvbf_h_fr_int_set_handler (SIM_CPU
*current_cpu
, UINT fr
, USI newval
)
369 value
.as_usi
= newval
;
370 SET_H_FR (fr
, value
.as_sf
);
373 /* Cover fns to access the coprocessor registers as double words. */
375 frvbf_h_cpr_double_get_handler (SIM_CPU
*current_cpu
, UINT cpr
)
379 /* Check the register alignment. */
380 cpr
= check_register_alignment (current_cpu
, cpr
, 1);
382 value
= GET_H_CPR (cpr
);
384 value
|= (USI
) GET_H_CPR (cpr
+ 1);
389 frvbf_h_cpr_double_set_handler (SIM_CPU
*current_cpu
, UINT cpr
, DI newval
)
391 /* Check the register alignment. */
392 cpr
= check_register_alignment (current_cpu
, cpr
, 1);
394 SET_H_CPR (cpr
, (newval
>> 32) & 0xffffffff);
395 SET_H_CPR (cpr
+ 1, (newval
) & 0xffffffff);
398 /* Cover fns to write registers as quad words. */
400 frvbf_h_gr_quad_set_handler (SIM_CPU
*current_cpu
, UINT gr
, SI
*newval
)
403 return; /* Storing into gr0 has no effect. */
405 /* Check the register alignment. */
406 gr
= check_register_alignment (current_cpu
, gr
, 3);
408 SET_H_GR (gr
, newval
[0]);
409 SET_H_GR (gr
+ 1, newval
[1]);
410 SET_H_GR (gr
+ 2, newval
[2]);
411 SET_H_GR (gr
+ 3, newval
[3]);
415 frvbf_h_fr_quad_set_handler (SIM_CPU
*current_cpu
, UINT fr
, SI
*newval
)
417 /* Check the register alignment. */
418 fr
= check_fr_register_alignment (current_cpu
, fr
, 3);
420 SET_H_FR (fr
, newval
[0]);
421 SET_H_FR (fr
+ 1, newval
[1]);
422 SET_H_FR (fr
+ 2, newval
[2]);
423 SET_H_FR (fr
+ 3, newval
[3]);
427 frvbf_h_cpr_quad_set_handler (SIM_CPU
*current_cpu
, UINT cpr
, SI
*newval
)
429 /* Check the register alignment. */
430 cpr
= check_register_alignment (current_cpu
, cpr
, 3);
432 SET_H_CPR (cpr
, newval
[0]);
433 SET_H_CPR (cpr
+ 1, newval
[1]);
434 SET_H_CPR (cpr
+ 2, newval
[2]);
435 SET_H_CPR (cpr
+ 3, newval
[3]);
438 /* Cover fns to access the special purpose registers. */
440 frvbf_h_spr_get_handler (SIM_CPU
*current_cpu
, UINT spr
)
442 /* Check access restrictions. */
443 frv_check_spr_read_access (current_cpu
, spr
);
448 return spr_psr_get_handler (current_cpu
);
450 return spr_tbr_get_handler (current_cpu
);
452 return spr_bpsr_get_handler (current_cpu
);
454 return spr_ccr_get_handler (current_cpu
);
456 return spr_cccr_get_handler (current_cpu
);
461 return spr_sr_get_handler (current_cpu
, spr
);
464 return CPU (h_spr
[spr
]);
470 frvbf_h_spr_set_handler (SIM_CPU
*current_cpu
, UINT spr
, USI newval
)
472 FRV_REGISTER_CONTROL
*control
;
476 /* Check access restrictions. */
477 frv_check_spr_write_access (current_cpu
, spr
);
479 /* Only set those fields which are writeable. */
480 control
= CPU_REGISTER_CONTROL (current_cpu
);
481 mask
= control
->spr
[spr
].read_only_mask
;
482 oldval
= GET_H_SPR (spr
);
484 newval
= (newval
& ~mask
) | (oldval
& mask
);
486 /* Some registers are represented by individual components which are
487 referenced more often than the register itself. */
491 spr_psr_set_handler (current_cpu
, newval
);
494 spr_tbr_set_handler (current_cpu
, newval
);
497 spr_bpsr_set_handler (current_cpu
, newval
);
500 spr_ccr_set_handler (current_cpu
, newval
);
503 spr_cccr_set_handler (current_cpu
, newval
);
509 spr_sr_set_handler (current_cpu
, spr
, newval
);
512 frv_cache_reconfigure (current_cpu
, CPU_INSN_CACHE (current_cpu
));
515 CPU (h_spr
[spr
]) = newval
;
520 /* Cover fns to access the gr_hi and gr_lo registers. */
522 frvbf_h_gr_hi_get_handler (SIM_CPU
*current_cpu
, UINT gr
)
524 return (GET_H_GR(gr
) >> 16) & 0xffff;
528 frvbf_h_gr_hi_set_handler (SIM_CPU
*current_cpu
, UINT gr
, UHI newval
)
530 USI value
= (GET_H_GR (gr
) & 0xffff) | (newval
<< 16);
531 SET_H_GR (gr
, value
);
535 frvbf_h_gr_lo_get_handler (SIM_CPU
*current_cpu
, UINT gr
)
537 return GET_H_GR(gr
) & 0xffff;
541 frvbf_h_gr_lo_set_handler (SIM_CPU
*current_cpu
, UINT gr
, UHI newval
)
543 USI value
= (GET_H_GR (gr
) & 0xffff0000) | (newval
& 0xffff);
544 SET_H_GR (gr
, value
);
547 /* Cover fns to access the tbr bits. */
549 spr_tbr_get_handler (SIM_CPU
*current_cpu
)
551 int tbr
= ((GET_H_TBR_TBA () & 0xfffff) << 12) |
552 ((GET_H_TBR_TT () & 0xff) << 4);
558 spr_tbr_set_handler (SIM_CPU
*current_cpu
, USI newval
)
562 SET_H_TBR_TBA ((tbr
>> 12) & 0xfffff) ;
563 SET_H_TBR_TT ((tbr
>> 4) & 0xff) ;
566 /* Cover fns to access the bpsr bits. */
568 spr_bpsr_get_handler (SIM_CPU
*current_cpu
)
570 int bpsr
= ((GET_H_BPSR_BS () & 0x1) << 12) |
571 ((GET_H_BPSR_BET () & 0x1) );
577 spr_bpsr_set_handler (SIM_CPU
*current_cpu
, USI newval
)
581 SET_H_BPSR_BS ((bpsr
>> 12) & 1);
582 SET_H_BPSR_BET ((bpsr
) & 1);
585 /* Cover fns to access the psr bits. */
587 spr_psr_get_handler (SIM_CPU
*current_cpu
)
589 int psr
= ((GET_H_PSR_IMPLE () & 0xf) << 28) |
590 ((GET_H_PSR_VER () & 0xf) << 24) |
591 ((GET_H_PSR_ICE () & 0x1) << 16) |
592 ((GET_H_PSR_NEM () & 0x1) << 14) |
593 ((GET_H_PSR_CM () & 0x1) << 13) |
594 ((GET_H_PSR_BE () & 0x1) << 12) |
595 ((GET_H_PSR_ESR () & 0x1) << 11) |
596 ((GET_H_PSR_EF () & 0x1) << 8) |
597 ((GET_H_PSR_EM () & 0x1) << 7) |
598 ((GET_H_PSR_PIL () & 0xf) << 3) |
599 ((GET_H_PSR_S () & 0x1) << 2) |
600 ((GET_H_PSR_PS () & 0x1) << 1) |
601 ((GET_H_PSR_ET () & 0x1) );
607 spr_psr_set_handler (SIM_CPU
*current_cpu
, USI newval
)
609 /* The handler for PSR.S references the value of PSR.ESR, so set PSR.S
611 SET_H_PSR_S ((newval
>> 2) & 1);
613 SET_H_PSR_IMPLE ((newval
>> 28) & 0xf);
614 SET_H_PSR_VER ((newval
>> 24) & 0xf);
615 SET_H_PSR_ICE ((newval
>> 16) & 1);
616 SET_H_PSR_NEM ((newval
>> 14) & 1);
617 SET_H_PSR_CM ((newval
>> 13) & 1);
618 SET_H_PSR_BE ((newval
>> 12) & 1);
619 SET_H_PSR_ESR ((newval
>> 11) & 1);
620 SET_H_PSR_EF ((newval
>> 8) & 1);
621 SET_H_PSR_EM ((newval
>> 7) & 1);
622 SET_H_PSR_PIL ((newval
>> 3) & 0xf);
623 SET_H_PSR_PS ((newval
>> 1) & 1);
624 SET_H_PSR_ET ((newval
) & 1);
628 frvbf_h_psr_s_set_handler (SIM_CPU
*current_cpu
, BI newval
)
630 /* If switching from user to supervisor mode, or vice-versa, then switch
631 the supervisor/user context. */
632 int psr_s
= GET_H_PSR_S ();
633 if (psr_s
!= (newval
& 1))
635 frvbf_switch_supervisor_user_context (current_cpu
);
636 CPU (h_psr_s
) = newval
& 1;
640 /* Cover fns to access the ccr bits. */
642 spr_ccr_get_handler (SIM_CPU
*current_cpu
)
644 int ccr
= ((GET_H_ICCR (H_ICCR_ICC3
) & 0xf) << 28) |
645 ((GET_H_ICCR (H_ICCR_ICC2
) & 0xf) << 24) |
646 ((GET_H_ICCR (H_ICCR_ICC1
) & 0xf) << 20) |
647 ((GET_H_ICCR (H_ICCR_ICC0
) & 0xf) << 16) |
648 ((GET_H_FCCR (H_FCCR_FCC3
) & 0xf) << 12) |
649 ((GET_H_FCCR (H_FCCR_FCC2
) & 0xf) << 8) |
650 ((GET_H_FCCR (H_FCCR_FCC1
) & 0xf) << 4) |
651 ((GET_H_FCCR (H_FCCR_FCC0
) & 0xf) );
657 spr_ccr_set_handler (SIM_CPU
*current_cpu
, USI newval
)
661 SET_H_ICCR (H_ICCR_ICC3
, (newval
>> 28) & 0xf);
662 SET_H_ICCR (H_ICCR_ICC2
, (newval
>> 24) & 0xf);
663 SET_H_ICCR (H_ICCR_ICC1
, (newval
>> 20) & 0xf);
664 SET_H_ICCR (H_ICCR_ICC0
, (newval
>> 16) & 0xf);
665 SET_H_FCCR (H_FCCR_FCC3
, (newval
>> 12) & 0xf);
666 SET_H_FCCR (H_FCCR_FCC2
, (newval
>> 8) & 0xf);
667 SET_H_FCCR (H_FCCR_FCC1
, (newval
>> 4) & 0xf);
668 SET_H_FCCR (H_FCCR_FCC0
, (newval
) & 0xf);
672 frvbf_set_icc_for_shift_right (
673 SIM_CPU
*current_cpu
, SI value
, SI shift
, QI icc
676 /* Set the C flag of the given icc to the logical OR of the bits shifted
678 int mask
= (1 << shift
) - 1;
679 if ((value
& mask
) != 0)
686 frvbf_set_icc_for_shift_left (
687 SIM_CPU
*current_cpu
, SI value
, SI shift
, QI icc
690 /* Set the V flag of the given icc to the logical OR of the bits shifted
692 int mask
= ((1 << shift
) - 1) << (32 - shift
);
693 if ((value
& mask
) != 0)
699 /* Cover fns to access the cccr bits. */
701 spr_cccr_get_handler (SIM_CPU
*current_cpu
)
703 int cccr
= ((GET_H_CCCR (H_CCCR_CC7
) & 0x3) << 14) |
704 ((GET_H_CCCR (H_CCCR_CC6
) & 0x3) << 12) |
705 ((GET_H_CCCR (H_CCCR_CC5
) & 0x3) << 10) |
706 ((GET_H_CCCR (H_CCCR_CC4
) & 0x3) << 8) |
707 ((GET_H_CCCR (H_CCCR_CC3
) & 0x3) << 6) |
708 ((GET_H_CCCR (H_CCCR_CC2
) & 0x3) << 4) |
709 ((GET_H_CCCR (H_CCCR_CC1
) & 0x3) << 2) |
710 ((GET_H_CCCR (H_CCCR_CC0
) & 0x3) );
716 spr_cccr_set_handler (SIM_CPU
*current_cpu
, USI newval
)
720 SET_H_CCCR (H_CCCR_CC7
, (newval
>> 14) & 0x3);
721 SET_H_CCCR (H_CCCR_CC6
, (newval
>> 12) & 0x3);
722 SET_H_CCCR (H_CCCR_CC5
, (newval
>> 10) & 0x3);
723 SET_H_CCCR (H_CCCR_CC4
, (newval
>> 8) & 0x3);
724 SET_H_CCCR (H_CCCR_CC3
, (newval
>> 6) & 0x3);
725 SET_H_CCCR (H_CCCR_CC2
, (newval
>> 4) & 0x3);
726 SET_H_CCCR (H_CCCR_CC1
, (newval
>> 2) & 0x3);
727 SET_H_CCCR (H_CCCR_CC0
, (newval
) & 0x3);
730 /* Cover fns to access the sr bits. */
732 spr_sr_get_handler (SIM_CPU
*current_cpu
, UINT spr
)
734 /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
735 otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3. */
736 int psr_esr
= GET_H_PSR_ESR ();
738 return GET_H_GR (4 + (spr
- H_SPR_SR0
));
740 return CPU (h_spr
[spr
]);
744 spr_sr_set_handler (SIM_CPU
*current_cpu
, UINT spr
, USI newval
)
746 /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
747 otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3. */
748 int psr_esr
= GET_H_PSR_ESR ();
750 SET_H_GR (4 + (spr
- H_SPR_SR0
), newval
);
752 CPU (h_spr
[spr
]) = newval
;
755 /* Switch SR0-SR4 with GR4-GR7 if PSR.ESR is set. */
757 frvbf_switch_supervisor_user_context (SIM_CPU
*current_cpu
)
759 if (GET_H_PSR_ESR ())
761 /* We need to be in supervisor mode to swap the registers. Access the
762 PSR.S directly in order to avoid recursive context switches. */
764 int save_psr_s
= CPU (h_psr_s
);
766 for (i
= 0; i
< 4; ++i
)
769 int spr
= i
+ H_SPR_SR0
;
770 SI tmp
= GET_H_SPR (spr
);
771 SET_H_SPR (spr
, GET_H_GR (gr
));
774 CPU (h_psr_s
) = save_psr_s
;
778 /* Handle load/store of quad registers. */
780 frvbf_load_quad_GR (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI targ_ix
)
785 /* Check memory alignment */
786 address
= check_memory_alignment (current_cpu
, address
, 0xf);
788 /* If we need to count cycles, then the cache operation will be
789 initiated from the model profiling functions.
790 See frvbf_model_.... */
793 CPU_LOAD_ADDRESS (current_cpu
) = address
;
794 CPU_LOAD_LENGTH (current_cpu
) = 16;
798 for (i
= 0; i
< 4; ++i
)
800 value
[i
] = frvbf_read_mem_SI (current_cpu
, pc
, address
);
803 sim_queue_fn_xi_write (current_cpu
, frvbf_h_gr_quad_set_handler
, targ_ix
,
809 frvbf_store_quad_GR (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI src_ix
)
815 /* Check register and memory alignment. */
816 src_ix
= check_register_alignment (current_cpu
, src_ix
, 3);
817 address
= check_memory_alignment (current_cpu
, address
, 0xf);
819 for (i
= 0; i
< 4; ++i
)
821 /* GR0 is always 0. */
825 value
[i
] = GET_H_GR (src_ix
+ i
);
828 if (GET_HSR0_DCE (hsr0
))
829 sim_queue_fn_mem_xi_write (current_cpu
, frvbf_mem_set_XI
, address
, value
);
831 sim_queue_mem_xi_write (current_cpu
, address
, value
);
835 frvbf_load_quad_FRint (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI targ_ix
)
840 /* Check memory alignment */
841 address
= check_memory_alignment (current_cpu
, address
, 0xf);
843 /* If we need to count cycles, then the cache operation will be
844 initiated from the model profiling functions.
845 See frvbf_model_.... */
848 CPU_LOAD_ADDRESS (current_cpu
) = address
;
849 CPU_LOAD_LENGTH (current_cpu
) = 16;
853 for (i
= 0; i
< 4; ++i
)
855 value
[i
] = frvbf_read_mem_SI (current_cpu
, pc
, address
);
858 sim_queue_fn_xi_write (current_cpu
, frvbf_h_fr_quad_set_handler
, targ_ix
,
864 frvbf_store_quad_FRint (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI src_ix
)
870 /* Check register and memory alignment. */
871 src_ix
= check_fr_register_alignment (current_cpu
, src_ix
, 3);
872 address
= check_memory_alignment (current_cpu
, address
, 0xf);
874 for (i
= 0; i
< 4; ++i
)
875 value
[i
] = GET_H_FR (src_ix
+ i
);
878 if (GET_HSR0_DCE (hsr0
))
879 sim_queue_fn_mem_xi_write (current_cpu
, frvbf_mem_set_XI
, address
, value
);
881 sim_queue_mem_xi_write (current_cpu
, address
, value
);
885 frvbf_load_quad_CPR (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI targ_ix
)
890 /* Check memory alignment */
891 address
= check_memory_alignment (current_cpu
, address
, 0xf);
893 /* If we need to count cycles, then the cache operation will be
894 initiated from the model profiling functions.
895 See frvbf_model_.... */
898 CPU_LOAD_ADDRESS (current_cpu
) = address
;
899 CPU_LOAD_LENGTH (current_cpu
) = 16;
903 for (i
= 0; i
< 4; ++i
)
905 value
[i
] = frvbf_read_mem_SI (current_cpu
, pc
, address
);
908 sim_queue_fn_xi_write (current_cpu
, frvbf_h_cpr_quad_set_handler
, targ_ix
,
914 frvbf_store_quad_CPR (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI src_ix
)
920 /* Check register and memory alignment. */
921 src_ix
= check_register_alignment (current_cpu
, src_ix
, 3);
922 address
= check_memory_alignment (current_cpu
, address
, 0xf);
924 for (i
= 0; i
< 4; ++i
)
925 value
[i
] = GET_H_CPR (src_ix
+ i
);
928 if (GET_HSR0_DCE (hsr0
))
929 sim_queue_fn_mem_xi_write (current_cpu
, frvbf_mem_set_XI
, address
, value
);
931 sim_queue_mem_xi_write (current_cpu
, address
, value
);
935 frvbf_signed_integer_divide (
936 SIM_CPU
*current_cpu
, SI arg1
, SI arg2
, int target_index
, int non_excepting
939 enum frv_dtt dtt
= FRV_DTT_NO_EXCEPTION
;
940 if (arg1
== 0x80000000 && arg2
== -1)
942 /* 0x80000000/(-1) must result in 0x7fffffff when ISR.EDE is set
943 otherwise it may result in 0x7fffffff (sparc compatibility) or
944 0x80000000 (C language compatibility). */
946 dtt
= FRV_DTT_OVERFLOW
;
949 if (GET_ISR_EDE (isr
))
950 sim_queue_fn_si_write (current_cpu
, frvbf_h_gr_set
, target_index
,
953 sim_queue_fn_si_write (current_cpu
, frvbf_h_gr_set
, target_index
,
955 frvbf_force_update (current_cpu
); /* Force update of target register. */
958 dtt
= FRV_DTT_DIVISION_BY_ZERO
;
960 sim_queue_fn_si_write (current_cpu
, frvbf_h_gr_set
, target_index
,
963 /* Check for exceptions. */
964 if (dtt
!= FRV_DTT_NO_EXCEPTION
)
965 dtt
= frvbf_division_exception (current_cpu
, dtt
, target_index
,
967 if (non_excepting
&& dtt
== FRV_DTT_NO_EXCEPTION
)
969 /* Non excepting instruction. Clear the NE flag for the target
972 GET_NE_FLAGS (NE_flags
, H_SPR_GNER0
);
973 CLEAR_NE_FLAG (NE_flags
, target_index
);
974 SET_NE_FLAGS (H_SPR_GNER0
, NE_flags
);
979 frvbf_unsigned_integer_divide (
980 SIM_CPU
*current_cpu
, USI arg1
, USI arg2
, int target_index
, int non_excepting
984 frvbf_division_exception (current_cpu
, FRV_DTT_DIVISION_BY_ZERO
,
985 target_index
, non_excepting
);
988 sim_queue_fn_si_write (current_cpu
, frvbf_h_gr_set
, target_index
,
992 /* Non excepting instruction. Clear the NE flag for the target
995 GET_NE_FLAGS (NE_flags
, H_SPR_GNER0
);
996 CLEAR_NE_FLAG (NE_flags
, target_index
);
997 SET_NE_FLAGS (H_SPR_GNER0
, NE_flags
);
1002 /* Clear accumulators. */
1004 frvbf_clear_accumulators (SIM_CPU
*current_cpu
, SI acc_ix
, int A
)
1006 SIM_DESC sd
= CPU_STATE (current_cpu
);
1008 (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr500
) ? 7 :
1009 (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr550
) ? 7 :
1010 (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr450
) ? 11 :
1011 (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr400
) ? 3 :
1013 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (current_cpu
);
1015 ps
->mclracc_acc
= acc_ix
;
1017 if (A
== 0 || acc_ix
!= 0) /* Clear 1 accumuator? */
1019 /* This instruction is a nop if the referenced accumulator is not
1021 if ((acc_ix
& acc_mask
) == acc_ix
)
1022 sim_queue_fn_di_write (current_cpu
, frvbf_h_acc40S_set
, acc_ix
, 0);
1026 /* Clear all implemented accumulators. */
1028 for (i
= 0; i
<= acc_mask
; ++i
)
1029 if ((i
& acc_mask
) == i
)
1030 sim_queue_fn_di_write (current_cpu
, frvbf_h_acc40S_set
, i
, 0);
1034 /* Functions to aid insn semantics. */
1036 /* Compute the result of the SCAN and SCANI insns after the shift and xor. */
1038 frvbf_scan_result (SIM_CPU
*current_cpu
, SI value
)
1046 /* Find the position of the first non-zero bit.
1047 The loop will terminate since there is guaranteed to be at least one
1049 mask
= 1 << (sizeof (mask
) * 8 - 1);
1050 for (i
= 0; (value
& mask
) == 0; ++i
)
1056 /* Compute the result of the cut insns. */
1058 frvbf_cut (SIM_CPU
*current_cpu
, SI reg1
, SI reg2
, SI cut_point
)
1064 result
= reg1
<< cut_point
;
1065 result
|= (reg2
>> (32 - cut_point
)) & ((1 << cut_point
) - 1);
1068 result
= reg2
<< (cut_point
- 32);
1073 /* Compute the result of the cut insns. */
1075 frvbf_media_cut (SIM_CPU
*current_cpu
, DI acc
, SI cut_point
)
1077 /* The cut point is the lower 6 bits (signed) of what we are passed. */
1078 cut_point
= cut_point
<< 26 >> 26;
1080 /* The cut_point is relative to bit 40 of 64 bits. */
1082 return (acc
<< (cut_point
+ 24)) >> 32;
1084 /* Extend the sign bit (bit 40) for negative cuts. */
1085 if (cut_point
== -32)
1086 return (acc
<< 24) >> 63; /* Special case for full shiftout. */
1088 return (acc
<< 24) >> (32 + -cut_point
);
1091 /* Compute the result of the cut insns. */
1093 frvbf_media_cut_ss (SIM_CPU
*current_cpu
, DI acc
, SI cut_point
)
1095 /* The cut point is the lower 6 bits (signed) of what we are passed. */
1096 cut_point
= cut_point
<< 26 >> 26;
1100 /* The cut_point is relative to bit 40 of 64 bits. */
1101 DI shifted
= acc
<< (cut_point
+ 24);
1102 DI unshifted
= shifted
>> (cut_point
+ 24);
1104 /* The result will be saturated if significant bits are shifted out. */
1105 if (unshifted
!= acc
)
1113 /* The result will not be saturated, so use the code for the normal cut. */
1114 return frvbf_media_cut (current_cpu
, acc
, cut_point
);
1117 /* Compute the result of int accumulator cut (SCUTSS). */
1119 frvbf_iacc_cut (SIM_CPU
*current_cpu
, DI acc
, SI cut_point
)
1123 /* The cut point is the lower 7 bits (signed) of what we are passed. */
1124 cut_point
= cut_point
<< 25 >> 25;
1126 /* Conceptually, the operation is on a 128-bit sign-extension of ACC.
1127 The top bit of the return value corresponds to bit (63 - CUT_POINT)
1128 of this 128-bit value.
1130 Since we can't deal with 128-bit values very easily, convert the
1131 operation into an equivalent 64-bit one. */
1134 /* Avoid an undefined shift operation. */
1135 if (cut_point
== -64)
1142 /* Get the shifted but unsaturated result. Set LOWER to the lowest
1143 32 bits of the result and UPPER to the result >> 31. */
1146 /* The cut loses the (32 - CUT_POINT) least significant bits.
1147 Round the result up if the most significant of these lost bits
1149 lower
= acc
>> (32 - cut_point
);
1150 if (lower
< 0x7fffffff)
1151 if (acc
& LSBIT64 (32 - cut_point
- 1))
1153 upper
= lower
>> 31;
1157 lower
= acc
<< (cut_point
- 32);
1158 upper
= acc
>> (63 - cut_point
);
1161 /* Saturate the result. */
1170 /* Compute the result of shift-left-arithmetic-with-saturation (SLASS). */
1172 frvbf_shift_left_arith_saturate (SIM_CPU
*current_cpu
, SI arg1
, SI arg2
)
1176 /* FIXME: what to do with negative shift amt? */
1183 /* Signed shift by 31 or greater saturates by definition. */
1186 return (SI
) 0x7fffffff;
1188 return (SI
) 0x80000000;
1190 /* OK, arg2 is between 1 and 31. */
1191 neg_arg1
= (arg1
< 0);
1194 /* Check for sign bit change (saturation). */
1195 if (neg_arg1
&& (arg1
>= 0))
1196 return (SI
) 0x80000000;
1197 else if (!neg_arg1
&& (arg1
< 0))
1198 return (SI
) 0x7fffffff;
1199 } while (--arg2
> 0);
1204 /* Simulate the media custom insns. */
1206 frvbf_media_cop (SIM_CPU
*current_cpu
, int cop_num
)
1208 /* The semantics of the insn are a nop, since it is implementation defined.
1209 We do need to check whether it's implemented and set up for MTRAP
1211 USI msr0
= GET_MSR (0);
1212 if (GET_MSR_EMCI (msr0
) == 0)
1214 /* no interrupt queued at this time. */
1215 frv_set_mp_exception_registers (current_cpu
, MTT_UNIMPLEMENTED_MPOP
, 0);
1219 /* Simulate the media average (MAVEH) insn. */
1221 do_media_average (SIM_CPU
*current_cpu
, HI arg1
, HI arg2
)
1223 SIM_DESC sd
= CPU_STATE (current_cpu
);
1224 SI sum
= (arg1
+ arg2
);
1225 HI result
= sum
>> 1;
1228 /* On fr4xx and fr550, check the rounding mode. On other machines
1229 rounding is always toward negative infinity and the result is
1230 already correctly rounded. */
1231 switch (STATE_ARCHITECTURE (sd
)->mach
)
1233 /* Need to check rounding mode. */
1234 case bfd_mach_fr400
:
1235 case bfd_mach_fr450
:
1236 case bfd_mach_fr550
:
1237 /* Check whether rounding will be required. Rounding will be required
1238 if the sum is an odd number. */
1239 rounding_value
= sum
& 1;
1242 USI msr0
= GET_MSR (0);
1243 /* Check MSR0.SRDAV to determine which bits control the rounding. */
1244 if (GET_MSR_SRDAV (msr0
))
1246 /* MSR0.RD controls rounding. */
1247 switch (GET_MSR_RD (msr0
))
1250 /* Round to nearest. */
1255 /* Round toward 0. */
1260 /* Round toward positive infinity. */
1264 /* Round toward negative infinity. The result is already
1265 correctly rounded. */
1274 /* MSR0.RDAV controls rounding. If set, round toward positive
1275 infinity. Otherwise the result is already rounded correctly
1276 toward negative infinity. */
1277 if (GET_MSR_RDAV (msr0
))
1290 frvbf_media_average (SIM_CPU
*current_cpu
, SI reg1
, SI reg2
)
1293 result
= do_media_average (current_cpu
, reg1
& 0xffff, reg2
& 0xffff);
1295 result
|= do_media_average (current_cpu
, (reg1
>> 16) & 0xffff,
1296 (reg2
>> 16) & 0xffff) << 16;
1300 /* Maintain a flag in order to know when to write the address of the next
1301 VLIW instruction into the LR register. Used by JMPL. JMPIL, and CALL. */
1303 frvbf_set_write_next_vliw_addr_to_LR (SIM_CPU
*current_cpu
, int value
)
1305 frvbf_write_next_vliw_addr_to_LR
= value
;
1309 frvbf_set_ne_index (SIM_CPU
*current_cpu
, int index
)
1313 /* Save the target register so interrupt processing can set its NE flag
1314 in the event of an exception. */
1315 frv_interrupt_state
.ne_index
= index
;
1317 /* Clear the NE flag of the target register. It will be reset if necessary
1318 in the event of an exception. */
1319 GET_NE_FLAGS (NE_flags
, H_SPR_FNER0
);
1320 CLEAR_NE_FLAG (NE_flags
, index
);
1321 SET_NE_FLAGS (H_SPR_FNER0
, NE_flags
);
1325 frvbf_force_update (SIM_CPU
*current_cpu
)
1327 CGEN_WRITE_QUEUE
*q
= CPU_WRITE_QUEUE (current_cpu
);
1328 int ix
= CGEN_WRITE_QUEUE_INDEX (q
);
1331 CGEN_WRITE_QUEUE_ELEMENT
*item
= CGEN_WRITE_QUEUE_ELEMENT (q
, ix
- 1);
1332 item
->flags
|= FRV_WRITE_QUEUE_FORCE_WRITE
;
/* Condition code logic.  */

/* The eight CR logic operations plus a count sentinel.  */
enum cr_ops {
  andcr, orcr, xorcr, nandcr, norcr, andncr, orncr, nandncr, norncr,
  num_cr_ops
};

/* The four possible states of a CR condition bit.  */
enum cr_result {cr_undefined, cr_undefined1, cr_false, cr_true};

/* Truth tables indexed by [operation][arg1][arg2].  */
static enum cr_result
cr_logic[num_cr_ops][4][4] = {
  /* andcr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_false,     cr_true}
  },
  /* orcr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true},
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true},
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_true},
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_true}
  },
  /* xorcr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true},
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false}
  },
  /* nandcr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_true,      cr_false}
  },
  /* norcr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false},
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false},
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_false},
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_false}
  },
  /* andncr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true},
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* orncr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true},
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true},
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_true},
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_true}
  },
  /* nandncr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_true,      cr_false},
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* norncr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false},
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false},
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_false},
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false}
  }
};
1421 frvbf_cr_logic (SIM_CPU
*current_cpu
, SI operation
, UQI arg1
, UQI arg2
)
1423 return cr_logic
[operation
][arg1
][arg2
];
1426 /* Cache Manipulation. */
1428 frvbf_insn_cache_preload (SIM_CPU
*current_cpu
, SI address
, USI length
, int lock
)
1430 /* If we need to count cycles, then the cache operation will be
1431 initiated from the model profiling functions.
1432 See frvbf_model_.... */
1433 int hsr0
= GET_HSR0 ();
1434 if (GET_HSR0_ICE (hsr0
))
1438 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1439 CPU_LOAD_LENGTH (current_cpu
) = length
;
1440 CPU_LOAD_LOCK (current_cpu
) = lock
;
1444 FRV_CACHE
*cache
= CPU_INSN_CACHE (current_cpu
);
1445 frv_cache_preload (cache
, address
, length
, lock
);
1451 frvbf_data_cache_preload (SIM_CPU
*current_cpu
, SI address
, USI length
, int lock
)
1453 /* If we need to count cycles, then the cache operation will be
1454 initiated from the model profiling functions.
1455 See frvbf_model_.... */
1456 int hsr0
= GET_HSR0 ();
1457 if (GET_HSR0_DCE (hsr0
))
1461 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1462 CPU_LOAD_LENGTH (current_cpu
) = length
;
1463 CPU_LOAD_LOCK (current_cpu
) = lock
;
1467 FRV_CACHE
*cache
= CPU_DATA_CACHE (current_cpu
);
1468 frv_cache_preload (cache
, address
, length
, lock
);
1474 frvbf_insn_cache_unlock (SIM_CPU
*current_cpu
, SI address
)
1476 /* If we need to count cycles, then the cache operation will be
1477 initiated from the model profiling functions.
1478 See frvbf_model_.... */
1479 int hsr0
= GET_HSR0 ();
1480 if (GET_HSR0_ICE (hsr0
))
1483 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1486 FRV_CACHE
*cache
= CPU_INSN_CACHE (current_cpu
);
1487 frv_cache_unlock (cache
, address
);
1493 frvbf_data_cache_unlock (SIM_CPU
*current_cpu
, SI address
)
1495 /* If we need to count cycles, then the cache operation will be
1496 initiated from the model profiling functions.
1497 See frvbf_model_.... */
1498 int hsr0
= GET_HSR0 ();
1499 if (GET_HSR0_DCE (hsr0
))
1502 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1505 FRV_CACHE
*cache
= CPU_DATA_CACHE (current_cpu
);
1506 frv_cache_unlock (cache
, address
);
1512 frvbf_insn_cache_invalidate (SIM_CPU
*current_cpu
, SI address
, int all
)
1514 /* Make sure the insn was specified properly. -1 will be passed for ALL
1515 for a icei with A=0. */
1518 frv_queue_program_interrupt (current_cpu
, FRV_ILLEGAL_INSTRUCTION
);
1522 /* If we need to count cycles, then the cache operation will be
1523 initiated from the model profiling functions.
1524 See frvbf_model_.... */
1527 /* Record the all-entries flag for use in profiling. */
1528 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (current_cpu
);
1529 ps
->all_cache_entries
= all
;
1530 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1534 FRV_CACHE
*cache
= CPU_INSN_CACHE (current_cpu
);
1536 frv_cache_invalidate_all (cache
, 0/* flush? */);
1538 frv_cache_invalidate (cache
, address
, 0/* flush? */);
1543 frvbf_data_cache_invalidate (SIM_CPU
*current_cpu
, SI address
, int all
)
1545 /* Make sure the insn was specified properly. -1 will be passed for ALL
1546 for a dcei with A=0. */
1549 frv_queue_program_interrupt (current_cpu
, FRV_ILLEGAL_INSTRUCTION
);
1553 /* If we need to count cycles, then the cache operation will be
1554 initiated from the model profiling functions.
1555 See frvbf_model_.... */
1558 /* Record the all-entries flag for use in profiling. */
1559 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (current_cpu
);
1560 ps
->all_cache_entries
= all
;
1561 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1565 FRV_CACHE
*cache
= CPU_DATA_CACHE (current_cpu
);
1567 frv_cache_invalidate_all (cache
, 0/* flush? */);
1569 frv_cache_invalidate (cache
, address
, 0/* flush? */);
1574 frvbf_data_cache_flush (SIM_CPU
*current_cpu
, SI address
, int all
)
1576 /* Make sure the insn was specified properly. -1 will be passed for ALL
1577 for a dcef with A=0. */
1580 frv_queue_program_interrupt (current_cpu
, FRV_ILLEGAL_INSTRUCTION
);
1584 /* If we need to count cycles, then the cache operation will be
1585 initiated from the model profiling functions.
1586 See frvbf_model_.... */
1589 /* Record the all-entries flag for use in profiling. */
1590 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (current_cpu
);
1591 ps
->all_cache_entries
= all
;
1592 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1596 FRV_CACHE
*cache
= CPU_DATA_CACHE (current_cpu
);
1598 frv_cache_invalidate_all (cache
, 1/* flush? */);
1600 frv_cache_invalidate (cache
, address
, 1/* flush? */);